diff --git a/.gitignore b/.gitignore index a0677f0..3078c01 100644 --- a/.gitignore +++ b/.gitignore @@ -79,8 +79,7 @@ __pycache__ /client/tp_rdp /server/tp_core/protocol/rdp /client/tools/tprdp -/client/tp_assist_win_it_doctor -/dist/client/windows/assist-it-doctor +/client/build-tp-player-* # for MacOS. .DS_Store @@ -104,4 +103,5 @@ profile /server/tp_core/testssh/Debug /server/tp_core/testssh/Release -/client/build-tp-player-* +/external/zlib +/client/tools/qt-redist diff --git a/.idea/encodings.xml b/.idea/encodings.xml index a363e60..8f8d6ca 100644 --- a/.idea/encodings.xml +++ b/.idea/encodings.xml @@ -1,6 +1,6 @@ - + @@ -21,6 +21,7 @@ + @@ -29,17 +30,12 @@ - - - - - diff --git a/CMakeCfg.txt b/CMakeCfg.txt index b4bbbc5..49102d2 100644 --- a/CMakeCfg.txt +++ b/CMakeCfg.txt @@ -20,6 +20,8 @@ elseif ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") set(OS_POSIX 1) MESSAGE(STATUS "build on Linux...") # add_subdirectory(server/tp_web/src) + # set(CMAKE_CXX_STANDARD 11) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(TP_EXTERNAL_RELEASE_DIR "${PROJECT_SOURCE_DIR}/external/linux/release") elseif ("${CMAKE_SYSTEM_NAME}" STREQUAL "Windows") # MESSAGE(FATAL_ERROR "unsupported platform: Windows") diff --git a/build/build-py-static.sh b/build/build-py-static.sh index 296612b..b3492af 100755 --- a/build/build-py-static.sh +++ b/build/build-py-static.sh @@ -3,10 +3,10 @@ ################################################################ # Basic settings. ################################################################ -VER_PYTHON="3.7.4" +VER_PYTHON="3.7.5" VER_PYTHON_SHORT="3.7" -VER_OPENSSL="1.0.2s" -VER_SQLITE="3290000" +VER_OPENSSL="1.1.1d" +VER_SQLITE="3300100" VER_ZLIB="1.2.11" VER_PYTHON_LIB="${VER_PYTHON_SHORT}m" @@ -38,15 +38,6 @@ function on_error() exit 1 } -function setp_build_git() -{ - # su -s - # yum install zlib-devel expat-devel libcurl-devel - # make prefix=/usr/local - # make prefix=/usr/local install - echo 'skip build git now.' -} - function dlfile() { echo -n "Downloading $1 ..." @@ -121,6 +112,16 @@ function step_prepare_source() on_error "Can not prepare source code for build sqlite3 module for Python." fi + if [ ! -f "${PATH_FIX}/Python-${VER_PYTHON}/Modules/Setup.dist" ]; then + on_error "Can not fix source for build Python." + fi + if [ ! -f "${PATH_FIX}/Python-${VER_PYTHON}/Modules/_sqlite/cache.h" ]; then + on_error "Can not fix source for build sqlite3 module for Python." + fi + if [ ! -f "${PATH_FIX}/Python-${VER_PYTHON}/Modules/_sqlite/prepare_protocol.h" ]; then + on_error "Can not fix source for build sqlite3 module for Python." 
+ fi + cp "${PATH_FIX}/Python-${VER_PYTHON}/Modules/Setup.dist" "${PY_PATH_SRC}/Modules/Setup.dist" cp "${PATH_FIX}/Python-${VER_PYTHON}/Modules/Setup.dist" "${PY_PATH_SRC}/Modules/Setup" cp "${PATH_FIX}/Python-${VER_PYTHON}/Modules/_sqlite/cache.h" "${PY_PATH_SRC}/Modules/_sqlite/cache.h" diff --git a/build/build.py b/build/build.py index 8ed47a6..9916afa 100644 --- a/build/build.py +++ b/build/build.py @@ -64,6 +64,9 @@ def main(): elif x == 'a': clean_everything() continue + elif x == 'e': + clean_external() + continue try: x = int(x) @@ -117,6 +120,27 @@ def clean_everything(): utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libuv.a')) +def clean_external(): + #utils.remove(os.path.join(env.root_path, 'out')) + utils.remove(os.path.join(env.root_path, 'external', 'jsoncpp')) + utils.remove(os.path.join(env.root_path, 'external', 'libuv')) + utils.remove(os.path.join(env.root_path, 'external', 'mbedtls')) + utils.remove(os.path.join(env.root_path, 'external', 'mongoose')) + #utils.remove(os.path.join(env.root_path, 'external', 'openssl')) + #utils.remove(os.path.join(env.root_path, 'external', 'python')) + #utils.remove(os.path.join(env.root_path, 'external', 'libssh-win-static', 'lib')) + #utils.remove(os.path.join(env.root_path, 'external', 'libssh-win-static', 'src')) + #utils.remove(os.path.join(env.root_path, 'external', 'linux', 'tmp')) + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libmbedcrypto.a')) + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libmbedtls.a')) + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libmbedx509.a')) + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libsqlite3.a')) + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libssh.a')) + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libssh_threads.a')) + + utils.remove(os.path.join(env.root_path, 'external', 'linux', 'release', 'lib', 'libuv.a')) + + def do_opt(opt): arg = '' @@ -249,6 +273,7 @@ def show_menu(): cc.o((cc.CR_NORMAL, ' ['), (cc.CR_INFO, '%2d' % options[o]['id']), (cc.CR_NORMAL, '] ', options[o]['disp'])) cc.v(' -------------------------------------------------------') + cc.o((cc.CR_NORMAL, ' ['), (cc.CR_INFO, ' E'), (cc.CR_NORMAL, '] clean external temp. 
files.')) cc.o((cc.CR_NORMAL, ' ['), (cc.CR_INFO, ' C'), (cc.CR_NORMAL, '] clean build and dist.')) cc.o((cc.CR_NORMAL, ' ['), (cc.CR_INFO, ' A'), (cc.CR_NORMAL, '] clean everything.')) diff --git a/build/builder/build-assist.py b/build/builder/build-assist.py index da84c60..06cad85 100644 --- a/build/builder/build-assist.py +++ b/build/builder/build-assist.py @@ -13,21 +13,24 @@ class BuilderBase: def __init__(self): self.out_dir = '' - def build_exe(self): - pass + def build_assist(self): + cc.e("this is a pure-virtual function.") + + def build_player(self): + cc.e("this is a pure-virtual function.") def build_rdp(self): - pass + cc.e("this is a pure-virtual function.") def build_installer(self): - pass + cc.e("this is a pure-virtual function.") class BuilderWin(BuilderBase): def __init__(self): super().__init__() - def build_exe(self): + def build_assist(self): cc.i('build tp_assist...') sln_file = os.path.join(env.root_path, 'client', 'tp_assist_win', 'tp_assist.vs2017.sln') out_file = os.path.join(env.root_path, 'out', 'client', ctx.bits_path, ctx.target_path, 'tp_assist.exe') @@ -36,6 +39,15 @@ class BuilderWin(BuilderBase): utils.msvc_build(sln_file, 'tp_assist', ctx.target_path, ctx.bits_path, False) utils.ensure_file_exists(out_file) + def build_player(self): + cc.i('build tp-player...') + prj_path = os.path.join(env.root_path, 'client', 'tp-player') + out_file = os.path.join(env.root_path, 'out', 'client', ctx.bits_path, ctx.target_path, 'tp-player.exe') + if os.path.exists(out_file): + utils.remove(out_file) + utils.qt_build_win(prj_path, 'tp-player', ctx.bits_path, ctx.target_path) + utils.ensure_file_exists(out_file) + # def build_rdp(self): # cc.n('build tp_rdp...') # sln_file = os.path.join(ROOT_PATH, 'client', 'tp_rdp', 'tp_rdp.2015.sln') @@ -74,12 +86,13 @@ class BuilderWin(BuilderBase): utils.makedirs(tmp_cfg_path) utils.copy_file(os.path.join(env.root_path, 'out', 'client', ctx.bits_path, ctx.target_path), tmp_app_path, 'tp_assist.exe') - utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, ('tp-assist.windows.json', 'tp-assist.json')) + utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, ('tp-assist.windows.json', 'tp-assist.json')) utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, 'cacert.cer') utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, 'localhost.key') utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, 'localhost.pem') + # assist configuration web page utils.copy_ex(os.path.join(env.root_path, 'client', 'tp_assist_win'), tmp_app_path, 'site') utils.makedirs(os.path.join(tmp_app_path, 'tools', 'putty')) @@ -91,13 +104,35 @@ class BuilderWin(BuilderBase): utils.makedirs(os.path.join(tmp_app_path, 'tools', 'tprdp')) utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'tprdp-client.exe') - utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'tprdp-replay.exe') + # utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'tprdp-replay.exe') utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'libeay32.dll') utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'ssleay32.dll') utils.copy_file(os.path.join(env.root_path, 
'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'msvcr120.dll') utils.copy_file(os.path.join(env.root_path, 'client', 'tools'), os.path.join(tmp_app_path, 'tools'), 'securecrt-telnet.vbs') + # tp-player + utils.copy_file(os.path.join(env.root_path, 'out', 'client', ctx.bits_path, ctx.target_path), tmp_app_path, 'tp-player.exe') + + # qt-redist + qt_redist_path = os.path.join(env.root_path, 'client', 'tools', 'qt-redist') + utils.copy_file(qt_redist_path, tmp_app_path, 'Qt5Core.dll') + utils.copy_file(qt_redist_path, tmp_app_path, 'Qt5Gui.dll') + utils.copy_file(qt_redist_path, tmp_app_path, 'Qt5Network.dll') + utils.copy_file(qt_redist_path, tmp_app_path, 'Qt5Widgets.dll') + utils.copy_ex(os.path.join(qt_redist_path, 'platforms'), os.path.join(tmp_app_path, 'platforms')) + utils.copy_ex(os.path.join(qt_redist_path, 'styles'), os.path.join(tmp_app_path, 'styles')) + utils.copy_ex(os.path.join(qt_redist_path, 'translations'), os.path.join(tmp_app_path, 'translations')) + + # zlib + suffix = 'd' if ctx.target_path == 'debug' else '' + utils.copy_file(os.path.join(env.root_path, 'external', 'zlib', 'build', ctx.target_path), tmp_app_path, 'zlib{}.dll'.format(suffix)) + + # openssl + utils.copy_file(os.path.join(env.root_path, 'external', 'openssl', 'bin'), tmp_app_path, 'libcrypto-1_1.dll') + utils.copy_file(os.path.join(env.root_path, 'external', 'openssl', 'bin'), tmp_app_path, 'libssl-1_1.dll') + + # final build utils.nsis_build(os.path.join(env.root_path, 'dist', 'client', 'windows', 'assist', 'installer.nsi')) @@ -105,7 +140,7 @@ class BuilderMacOS(BuilderBase): def __init__(self): super().__init__() - def build_exe(self): + def build_assist(self): cc.i('build tp_assist...') configuration = ctx.target_path.capitalize() @@ -169,7 +204,7 @@ class BuilderLinux(BuilderBase): def __init__(self): super().__init__() - def build_exe(self): + def build_assist(self): cc.e('not support linux.') # def build_rdp(self): @@ -215,7 +250,8 @@ def main(): builder = gen_builder(ctx.host_os) if 'exe' in argv: - builder.build_exe() + builder.build_assist() + builder.build_player() # elif 'rdp' in argv: # builder.build_rdp() elif 'installer' in argv: diff --git a/build/builder/build-external.py b/build/builder/build-external.py index e3f9c7a..d80faf0 100644 --- a/build/builder/build-external.py +++ b/build/builder/build-external.py @@ -27,19 +27,21 @@ class BuilderBase: def build_jsoncpp(self): file_name = 'jsoncpp-{}.zip'.format(env.ver_jsoncpp) - if not utils.download_file('jsoncpp source tarball', 'https://github.com/open-source-parsers/jsoncpp/archive/{}.zip'.format(env.ver_jsoncpp), PATH_DOWNLOAD, file_name): - return self._build_jsoncpp(file_name) + def _download_jsoncpp(self, file_name): + return utils.download_file('jsoncpp source tarball', 'https://github.com/open-source-parsers/jsoncpp/archive/{}.zip'.format(env.ver_jsoncpp), PATH_DOWNLOAD, file_name) + def _build_jsoncpp(self, file_name): cc.e("this is a pure-virtual function.") def build_mongoose(self): file_name = 'mongoose-{}.zip'.format(env.ver_mongoose) - if not utils.download_file('mongoose source tarball', 'https://github.com/cesanta/mongoose/archive/{}.zip'.format(env.ver_mongoose), PATH_DOWNLOAD, file_name): - return self._build_mongoose(file_name) + def _download_mongoose(self, file_name): + return utils.download_file('mongoose source tarball', 'https://github.com/cesanta/mongoose/archive/{}.zip'.format(env.ver_mongoose), PATH_DOWNLOAD, file_name) + def _build_mongoose(self, file_name): cc.e("this is a 
pure-virtual function.") @@ -47,38 +49,56 @@ class BuilderBase: file_name = 'openssl-{}.zip'.format(env.ver_ossl) self._build_openssl(file_name) - def _build_openssl(self, file_name): + def _download_openssl(self, file_name): _alt_ver = '_'.join(env.ver_ossl.split('.')) - if not utils.download_file('openssl source tarball', 'https://github.com/openssl/openssl/archive/OpenSSL_{}.zip'.format(_alt_ver), PATH_DOWNLOAD, file_name): - cc.e("can not download openssl source tarball.") - return False - else: - return True + return utils.download_file('openssl source tarball', 'https://github.com/openssl/openssl/archive/OpenSSL_{}.zip'.format(_alt_ver), PATH_DOWNLOAD, file_name) + + def _build_openssl(self, file_name): + cc.e("this is a pure-virtual function.") + # _alt_ver = '_'.join(env.ver_ossl.split('.')) + # if not utils.download_file('openssl source tarball', 'https://github.com/openssl/openssl/archive/OpenSSL_{}.zip'.format(_alt_ver), PATH_DOWNLOAD, file_name): + # cc.e("can not download openssl source tarball.") + # return False + # else: + # return True def build_libuv(self): file_name = 'libuv-{}.zip'.format(env.ver_libuv) - if not utils.download_file('libuv source tarball', 'https://github.com/libuv/libuv/archive/v{}.zip'.format(env.ver_libuv), PATH_DOWNLOAD, file_name): - return self._build_libuv(file_name) + def _download_libuv(self, file_name): + return utils.download_file('libuv source tarball', 'https://github.com/libuv/libuv/archive/v{}.zip'.format(env.ver_libuv), PATH_DOWNLOAD, file_name) + def _build_libuv(self, file_name): cc.e("this is a pure-virtual function.") def build_mbedtls(self): file_name = 'mbedtls-mbedtls-{}.zip'.format(env.ver_mbedtls) - if not utils.download_file('mbedtls source tarball', 'https://github.com/ARMmbed/mbedtls/archive/mbedtls-{}.zip'.format(env.ver_mbedtls), PATH_DOWNLOAD, file_name): - return self._build_mbedtls(file_name) + def _download_mbedtls(self, file_name): + return utils.download_file('mbedtls source tarball', 'https://github.com/ARMmbed/mbedtls/archive/mbedtls-{}.zip'.format(env.ver_mbedtls), PATH_DOWNLOAD, file_name) + def _build_mbedtls(self, file_name): cc.e("this is a pure-virtual function.") + def build_zlib(self): + file_name = 'zlilb{}.zip'.format(env.ver_zlib_number) + self._build_zlib(file_name) + + def _download_zlib(self, file_name): + return utils.download_file('mbedtls source tarball', 'https://www.zlib.net/zlib{}.zip'.format(env.ver_zlib_number), PATH_DOWNLOAD, file_name) + + def _build_zlib(self, file_name): + cc.e("this is a pure-virtual function.") + def build_libssh(self): file_name = 'libssh-{}.zip'.format(env.ver_libssh) - if not utils.download_file('libssh source tarball', 'https://git.libssh.org/projects/libssh.git/snapshot/libssh-{}.zip'.format(env.ver_libssh), PATH_DOWNLOAD, file_name): - return self._build_libssh(file_name) + def _download_libssh(self, file_name): + return utils.download_file('libssh source tarball', 'https://git.libssh.org/projects/libssh.git/snapshot/libssh-{}.zip'.format(env.ver_libssh), PATH_DOWNLOAD, file_name) + def _build_libssh(self, file_name): cc.e("this is a pure-virtual function.") @@ -103,9 +123,10 @@ class BuilderWin(BuilderBase): self.MBEDTLS_PATH_SRC = os.path.join(PATH_EXTERNAL, 'mbedtls') self.LIBUV_PATH_SRC = os.path.join(PATH_EXTERNAL, 'libuv') self.LIBSSH_PATH_SRC = os.path.join(PATH_EXTERNAL, 'libssh') + self.ZLIB_PATH_SRC = os.path.join(PATH_EXTERNAL, 'zlib') def _prepare_python(self): - cc.n('prepare python header files ...', end='') + cc.n('prepare python header files ... 

         if os.path.exists(os.path.join(PATH_EXTERNAL, 'python', 'include', 'Python.h')):
             cc.w('already exists, skip.')
@@ -125,68 +146,97 @@ class BuilderWin(BuilderBase):
         utils.copy_ex(_header_path, os.path.join(PATH_EXTERNAL, 'python', 'include'))

     def _build_openssl(self, file_name):
-        cc.n('build openssl static library from source code... ')
-
-        if not super()._build_openssl(file_name):
-            return
-
-        _chk_output = [
-            os.path.join(self.OPENSSL_PATH_SRC, 'out32', 'libeay32.lib'),
-            os.path.join(self.OPENSSL_PATH_SRC, 'out32', 'ssleay32.lib'),
-            os.path.join(self.OPENSSL_PATH_SRC, 'inc32', 'openssl', 'opensslconf.h'),
-        ]
-
-        need_build = False
-        for f in _chk_output:
-            if not os.path.exists(f):
-                need_build = True
-                break
-
-        if not need_build:
-            cc.n('build openssl static library from source code... ', end='')
+        cc.n('prepare OpenSSL pre-built package ... ', end='')
+        if os.path.exists(self.OPENSSL_PATH_SRC):
             cc.w('already exists, skip.')
             return
         cc.v('')

-        cc.n('prepare openssl source code...')
         _alt_ver = '_'.join(env.ver_ossl.split('.'))
-        if not os.path.exists(self.OPENSSL_PATH_SRC):
-            utils.unzip(os.path.join(PATH_DOWNLOAD, file_name), PATH_EXTERNAL)
-            os.rename(os.path.join(PATH_EXTERNAL, 'openssl-OpenSSL_{}'.format(_alt_ver)), self.OPENSSL_PATH_SRC)
-            if not os.path.exists(self.OPENSSL_PATH_SRC):
-                raise RuntimeError('can not prepare openssl source code.')
-        else:
-            cc.w('already exists, skip.')

-        os.chdir(self.OPENSSL_PATH_SRC)
-        os.system('""{}" Configure VC-WIN32"'.format(env.perl))
-        os.system(r'ms\do_nasm')
-        # for vs2015
-        # utils.sys_exec(r'"{}\VC\bin\vcvars32.bat" && nmake -f ms\nt.mak'.format(env.visual_studio_path), direct_output=True)
-        # for vs2017 community
-        utils.sys_exec(r'"{}VC\Auxiliary\Build\vcvars32.bat" && nmake -f ms\nt.mak'.format(env.visual_studio_path), direct_output=True)
+        file_name = 'Win32OpenSSL-{}.msi'.format(_alt_ver)
+        installer = os.path.join(PATH_DOWNLOAD, file_name)

-        for f in _chk_output:
-            if not os.path.exists(f):
-                raise RuntimeError('build openssl static library from source code failed.')
+        if not os.path.exists(installer):
+            if not utils.download_file('openssl installer', 'http://slproweb.com/download/{}'.format(file_name), PATH_DOWNLOAD, file_name):
+                cc.e('can not download pre-built installer of OpenSSL.')
+                return
+
+        utils.ensure_file_exists(installer)
+
+        cc.w('On Windows, we use the pre-built package of OpenSSL.')
+        cc.w('The installer has been downloaded at "{}".'.format(installer))
+        cc.w('Please install OpenSSL into "{}".'.format(self.OPENSSL_PATH_SRC))
+        cc.w('\nOnce OpenSSL is installed, press Enter to continue or Q to quit...', end='')
+        try:
+            x = env.input()
+        except EOFError:
+            x = 'q'
+        if x == 'q':
+            return
+
+        # cc.n('build openssl static library from source code... ')
+        # if not super()._build_openssl(file_name):
+        #     return
+        # _chk_output = [
+        #     os.path.join(self.OPENSSL_PATH_SRC, 'out32', 'libeay32.lib'),
+        #     os.path.join(self.OPENSSL_PATH_SRC, 'out32', 'ssleay32.lib'),
+        #     os.path.join(self.OPENSSL_PATH_SRC, 'inc32', 'openssl', 'opensslconf.h'),
+        # ]
+        # need_build = False
+        # for f in _chk_output:
+        #     if not os.path.exists(f):
+        #         need_build = True
+        #         break
+        # if not need_build:
+        #     cc.n('build openssl static library from source code... ', end='')
+        #     cc.w('already exists, skip.')
+        #     return
+        # cc.v('')
+        # cc.n('prepare openssl source code...')
+        # _alt_ver = '_'.join(env.ver_ossl.split('.'))
+        # if not os.path.exists(self.OPENSSL_PATH_SRC):
+        #     utils.unzip(os.path.join(PATH_DOWNLOAD, file_name), PATH_EXTERNAL)
+        #     os.rename(os.path.join(PATH_EXTERNAL, 'openssl-OpenSSL_{}'.format(_alt_ver)), self.OPENSSL_PATH_SRC)
+        #     if not os.path.exists(self.OPENSSL_PATH_SRC):
+        #         raise RuntimeError('can not prepare openssl source code.')
+        # else:
+        #     cc.w('already exists, skip.')
+        # os.chdir(self.OPENSSL_PATH_SRC)
+        # os.system('""{}" Configure VC-WIN32"'.format(env.perl))
+        # os.system(r'ms\do_nasm')
+        # # for vs2015
+        # # utils.sys_exec(r'"{}\VC\bin\vcvars32.bat" && nmake -f ms\nt.mak'.format(env.visual_studio_path), direct_output=True)
+        # # for vs2017 community
+        # utils.sys_exec(r'"{}VC\Auxiliary\Build\vcvars32.bat" && nmake -f ms\nt.mak'.format(env.visual_studio_path), direct_output=True)
+        # for f in _chk_output:
+        #     if not os.path.exists(f):
+        #         raise RuntimeError('build openssl static library from source code failed.')

     def _build_libssh(self, file_name):
-        cc.n('build libssh static library from source code... ', end='')
+        if not self._download_libssh(file_name):
+            return
+        cc.n('build libssh library from source code... ', end='')

         if not os.path.exists(self.LIBSSH_PATH_SRC):
             cc.v('')
             utils.unzip(os.path.join(PATH_DOWNLOAD, file_name), PATH_EXTERNAL)
             os.rename(os.path.join(PATH_EXTERNAL, 'libssh-{}'.format(env.ver_libssh)), self.LIBSSH_PATH_SRC)

-        # cc.n('fix libssh source code... ', end='')
-        # utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', 'src', 'sftp.c'))
-        # utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'sftp.c')
         cc.n('fix libssh source code... ', end='')
         s_name = 'libssh-{}'.format(env.ver_libssh)
-        # utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'session.c'))
+        utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'session.c'))
         # utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'libcrypto.c'))
         utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'libcrypto-compat.c'))
-        # utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'session.c')
+        utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'session.c')
         # utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'libcrypto.c')
         utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'libcrypto-compat.c')
@@ -210,7 +260,7 @@ class BuilderWin(BuilderBase):
         cc.i('build libssh...')
         sln_file = os.path.join(self.LIBSSH_PATH_SRC, 'build', 'libssh.sln')

-        utils.msvc_build(sln_file, 'ssh_shared', ctx.target_path, 'win32', False)
+        utils.msvc_build(sln_file, 'ssh', ctx.target_path, 'win32', False)
         utils.ensure_file_exists(os.path.join(self.LIBSSH_PATH_SRC, 'build', 'src', ctx.target_path, 'ssh.lib'))
         utils.ensure_file_exists(os.path.join(self.LIBSSH_PATH_SRC, 'build', 'src', ctx.target_path, 'ssh.dll'))
         utils.copy_file(os.path.join(self.LIBSSH_PATH_SRC, 'build', 'src', ctx.target_path), os.path.join(self.LIBSSH_PATH_SRC, 'lib', ctx.target_path), 'ssh.lib')
@@ -218,7 +268,53 @@ class BuilderWin(BuilderBase):
         utils.ensure_file_exists(out_file_lib)
         utils.ensure_file_exists(out_file_dll)

+    def _build_zlib(self, file_name):
+        if not self._download_zlib(file_name):
+            return
+        cc.n('build zlib library from source code... ', end='')
+
+        if not os.path.exists(self.ZLIB_PATH_SRC):
+            cc.v('')
+            utils.unzip(os.path.join(PATH_DOWNLOAD, file_name), PATH_EXTERNAL)
+            os.rename(os.path.join(PATH_EXTERNAL, 'zlib-{}'.format(env.ver_zlib)), self.ZLIB_PATH_SRC)
+
+        if ctx.target_path == 'debug':
+            olib = 'zlibd.lib'
+            odll = 'zlibd.dll'
+        else:
+            olib = 'zlib.lib'
+            odll = 'zlib.dll'
+        out_file_lib = os.path.join(self.ZLIB_PATH_SRC, 'build', ctx.target_path, olib)
+        out_file_dll = os.path.join(self.ZLIB_PATH_SRC, 'build', ctx.target_path, odll)
+
+        if os.path.exists(out_file_lib) and os.path.exists(out_file_dll):
+            cc.w('already exists, skip.')
+            return
+        cc.v('')
+
+        cc.w('On Windows, you need to use cmake-gui.exe to generate the zlib solution file')
+        cc.w('for Visual Studio 2017. 
Visit https://docs.tp4a.com for more details.') + cc.w('\nOnce the zlib.sln generated, press Enter to continue or Q to quit...', end='') + try: + x = env.input() + except EOFError: + x = 'q' + if x == 'q': + return + + cc.i('build zlib...') + sln_file = os.path.join(self.ZLIB_PATH_SRC, 'build', 'zlib.sln') + utils.msvc_build(sln_file, 'zlib', ctx.target_path, 'win32', False) + # utils.ensure_file_exists(os.path.join(self.ZLIB_PATH_SRC, 'build', ctx.target_path, 'zlib.lib')) + # utils.ensure_file_exists(os.path.join(self.ZLIB_PATH_SRC, 'build', ctx.target_path, 'zlib.dll')) + # utils.copy_file(os.path.join(self.ZLIB_PATH_SRC, 'build', ctx.target_path), os.path.join(self.ZLIB_PATH_SRC, 'lib', ctx.target_path), 'zlib.lib') + # utils.copy_file(os.path.join(self.ZLIB_PATH_SRC, 'build', ctx.target_path), os.path.join(self.ZLIB_PATH_SRC, 'lib', ctx.target_path), 'zlib.dll') + utils.ensure_file_exists(out_file_lib) + utils.ensure_file_exists(out_file_dll) + def _build_jsoncpp(self, file_name): + if not self._download_jsoncpp(file_name): + return cc.n('prepare jsoncpp source code... ', end='') if not os.path.exists(self.JSONCPP_PATH_SRC): cc.v('') @@ -228,6 +324,8 @@ class BuilderWin(BuilderBase): cc.w('already exists, skip.') def _build_mongoose(self, file_name): + if not self._download_mongoose(file_name): + return cc.n('prepare mongoose source code... ', end='') if not os.path.exists(self.MONGOOSE_PATH_SRC): cc.v('') @@ -237,6 +335,8 @@ class BuilderWin(BuilderBase): cc.w('already exists, skip.') def _build_mbedtls(self, file_name): + if not self._download_mbedtls(file_name): + return cc.n('prepare mbedtls source code... ', end='') if not os.path.exists(self.MBEDTLS_PATH_SRC): cc.v('') @@ -254,6 +354,8 @@ class BuilderWin(BuilderBase): # utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'mbedtls', 'library'), os.path.join(self.MBEDTLS_PATH_SRC, 'library'), 'rsa.c') def _build_libuv(self, file_name): + if not self._download_libuv(file_name): + return cc.n('prepare libuv source code... 
', end='') if not os.path.exists(self.LIBUV_PATH_SRC): cc.v('') @@ -277,6 +379,7 @@ class BuilderLinux(BuilderBase): self.LIBUV_PATH_SRC = os.path.join(self.PATH_TMP, 'libuv-{}'.format(env.ver_libuv)) self.MBEDTLS_PATH_SRC = os.path.join(self.PATH_TMP, 'mbedtls-mbedtls-{}'.format(env.ver_mbedtls)) self.LIBSSH_PATH_SRC = os.path.join(self.PATH_TMP, 'libssh-{}'.format(env.ver_libssh)) + self.ZLIB_PATH_SRC = os.path.join(self.PATH_TMP, 'zlib-{}'.format(env.ver_zlib)) self.JSONCPP_PATH_SRC = os.path.join(PATH_EXTERNAL, 'jsoncpp') self.MONGOOSE_PATH_SRC = os.path.join(PATH_EXTERNAL, 'mongoose') @@ -288,7 +391,7 @@ class BuilderLinux(BuilderBase): cc.n('prepare python header and lib files ...') if os.path.exists(os.path.join(self.PATH_RELEASE, 'include', 'python', 'Python.h')): - cc.w(' - header file already exists, skip.') + cc.w('python header file already exists, skip.') else: utils.ensure_file_exists(os.path.join(self.PATH_RELEASE, 'include', 'python{}m'.format(ctx.py_dot_ver), 'Python.h')) utils.sys_exec('ln -s "{}" "{}"'.format( @@ -300,6 +403,8 @@ class BuilderLinux(BuilderBase): utils.ensure_file_exists(os.path.join(self.PATH_RELEASE, 'lib', lib_file)) def _build_jsoncpp(self, file_name): + if not self._download_jsoncpp(file_name): + return cc.n('prepare jsoncpp source code...', end='') if not os.path.exists(self.JSONCPP_PATH_SRC): cc.v('') @@ -309,6 +414,8 @@ class BuilderLinux(BuilderBase): cc.w('already exists, skip.') def _build_mongoose(self, file_name): + if not self._download_mongoose(file_name): + return cc.n('prepare mongoose source code...', end='') if not os.path.exists(self.MONGOOSE_PATH_SRC): cc.v('') @@ -319,9 +426,11 @@ class BuilderLinux(BuilderBase): def _build_openssl(self, file_name): # we do not need build openssl anymore, because first time run build.sh we built Python with openssl included. - pass + cc.w('skip build openssl again.') def _build_libuv(self, file_name): + if not self._download_libuv(file_name): + return if not os.path.exists(self.LIBUV_PATH_SRC): os.system('unzip "{}/{}" -d "{}"'.format(PATH_DOWNLOAD, file_name, self.PATH_TMP)) @@ -351,6 +460,8 @@ class BuilderLinux(BuilderBase): utils.ensure_file_exists(os.path.join(self.PATH_RELEASE, 'lib', 'libuv.a')) def _build_mbedtls(self, file_name): + if not self._download_mbedtls(file_name): + return if not os.path.exists(self.MBEDTLS_PATH_SRC): os.system('unzip "{}/{}" -d "{}"'.format(PATH_DOWNLOAD, file_name, self.PATH_TMP)) @@ -393,21 +504,24 @@ class BuilderLinux(BuilderBase): os.chdir(old_p) def _build_libssh(self, file_name): + if not self._download_libssh(file_name): + return if not os.path.exists(self.LIBSSH_PATH_SRC): os.system('unzip "{}/{}" -d "{}"'.format(PATH_DOWNLOAD, file_name, self.PATH_TMP)) cc.n('build libssh...', end='') - if os.path.exists(os.path.join(self.PATH_RELEASE, 'lib', 'libssh.a')): + out_file = os.path.join(self.PATH_RELEASE, 'lib64', 'libssh.a') + if os.path.exists(out_file): cc.w('already exists, skip.') return cc.v('') cc.n('fix libssh source code... 
', end='') s_name = 'libssh-{}'.format(env.ver_libssh) - # utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'session.c')) + utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'session.c')) # utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'libcrypto.c')) utils.ensure_file_exists(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src', 'libcrypto-compat.c')) - # utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'session.c') + utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'session.c') # utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'libcrypto.c') utils.copy_file(os.path.join(PATH_EXTERNAL, 'fix-external', 'libssh', s_name, 'src'), os.path.join(self.LIBSSH_PATH_SRC, 'src'), 'libcrypto-compat.c') @@ -418,16 +532,54 @@ class BuilderLinux(BuilderBase): ' -DOPENSSL_LIBRARIES={path_release}/lib' \ ' -DWITH_SFTP=ON' \ ' -DWITH_SERVER=ON' \ - ' -DWITH_STATIC_LIB=ON' \ ' -DWITH_GSSAPI=OFF' \ - ' -DWITH_ZLIB=OFF' \ + ' -DWITH_ZLIB=ON' \ ' -DWITH_PCAP=OFF' \ + ' -DBUILD_SHARED_LIBS=OFF' \ ' -DUNIT_TESTING=OFF' \ ' -DWITH_EXAMPLES=OFF' \ ' -DWITH_BENCHMARKS=OFF' \ ' -DWITH_NACL=OFF' \ ' ..'.format(path_release=self.PATH_RELEASE) + # ' -DWITH_STATIC_LIB=ON' + + + old_p = os.getcwd() + try: + utils.cmake(build_path, 'Release', False, cmake_define=cmake_define, cmake_pre_define='CFLAGS="-fPIC"') + os.chdir(build_path) + utils.sys_exec('make install') + except: + pass + os.chdir(old_p) + + utils.ensure_file_exists(out_file) + # files = os.listdir(os.path.join(self.PATH_RELEASE, 'lib')) + # for i in files: + # if i.startswith('libssh.so'): + # # use os.unlink() because some file should be a link. + # os.unlink(os.path.join(self.PATH_RELEASE, 'lib', i)) + + def _build_zlib(self, file_name): + # cc.w('skip build zlib again.') + if not self._download_zlib(file_name): + return + if not os.path.exists(self.ZLIB_PATH_SRC): + os.system('unzip "{}/{}" -d "{}"'.format(PATH_DOWNLOAD, file_name, self.PATH_TMP)) + + cc.n('build zlib...', end='') + out_file = os.path.join(self.PATH_RELEASE, 'lib', 'libz.a') + if os.path.exists(out_file): + cc.w('already exists, skip.') + return + cc.v('') + + build_path = os.path.join(self.ZLIB_PATH_SRC, 'build') + + cmake_define = ' -DCMAKE_INSTALL_PREFIX={path_release}' \ + ' ..'.format(path_release=self.PATH_RELEASE) + old_p = os.getcwd() try: utils.cmake(build_path, 'Release', False, cmake_define=cmake_define, cmake_pre_define='CFLAGS="-fPIC"') @@ -437,10 +589,10 @@ class BuilderLinux(BuilderBase): pass os.chdir(old_p) - utils.ensure_file_exists(os.path.join(self.PATH_RELEASE, 'lib', 'libssh.a')) + utils.ensure_file_exists(out_file) files = os.listdir(os.path.join(self.PATH_RELEASE, 'lib')) for i in files: - if i.startswith('libssh.so'): + if i.startswith('libz.so'): # use os.unlink() because some file should be a link. 
os.unlink(os.path.join(self.PATH_RELEASE, 'lib', i)) @@ -674,6 +826,7 @@ def main(): builder.build_openssl() builder.build_libuv() builder.build_mbedtls() + builder.build_zlib() builder.build_libssh() builder.fix_output() diff --git a/build/builder/build-pysrt.py b/build/builder/build-pysrt.py index 629c737..637763c 100644 --- a/build/builder/build-pysrt.py +++ b/build/builder/build-pysrt.py @@ -13,11 +13,11 @@ ctx = BuildContext() MODULES_WIN = ['_asyncio', '_bz2', '_ctypes', '_hashlib', '_lzma', '_overlapped', '_socket', '_sqlite3', '_ssl', 'select', 'sqlite3', 'libcrypto-1_1', 'libssl-1_1', 'unicodedata'] -PY_LIB_REMOVE_WIN = ['ctypes/test', 'curses', 'dbm', 'distutils', 'email/test', 'ensurepip', 'idlelib', 'lib2to3', +PY_LIB_REMOVE_WIN = ['ctypes/test', 'curses', 'dbm', 'distutils/test', 'email/tests', 'ensurepip', 'idlelib', 'lib2to3', 'lib-dynload', 'pydoc_data', 'site-packages', 'sqlite3/test', 'test', 'tkinter', 'turtledemo', 'unittest', 'venv', 'wsgiref', 'doctest.py', 'pdb.py', 'py_compile.py', 'pydoc.py', 'this.py', 'wave.py', 'webbrowser.py', 'zipapp.py'] -PY_LIB_REMOVE_LINUX = ['ctypes/test', 'curses', 'dbm', 'distutils', 'ensurepip', 'idlelib', 'lib2to3', +PY_LIB_REMOVE_LINUX = ['ctypes/test', 'curses', 'dbm', 'distutils/tests', 'ensurepip', 'idlelib', 'lib2to3', 'lib-dynload', 'pydoc_data', 'site-packages', 'sqlite3/test', 'test', 'tkinter', 'turtledemo', 'unittest', 'venv', 'wsgiref', 'doctest.py', 'pdb.py', 'py_compile.py', 'pydoc.py', 'this.py', 'wave.py', 'webbrowser.py', 'zipapp.py'] PY_MODULE_REMOVE_LINUX = ['_ctypes_test', '_testbuffer', '_testcapi', '_testimportmultiple', '_testmultiphase', '_xxtestfuzz'] @@ -46,7 +46,7 @@ class PYSBase: utils.sys_exec('{} -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple pip --upgrade'.format(env.py_exec)) pip = self._get_pip() - pypi_modules = ['ldap3', 'mako', 'Pillow', 'psutil', 'pymysql', 'qrcode', 'tornado', 'wheezy.captcha'] + pypi_modules = ['cffi', 'cryptography', 'ldap3', 'mako', 'Pillow', 'psutil', 'pyasn1', 'pymysql', 'qrcode', 'tornado', 'wheezy.captcha'] for p in pypi_modules: cc.n('install {} ...'.format(p)) utils.sys_exec('{} install -i https://pypi.tuna.tsinghua.edu.cn/simple {}'.format(pip, p), direct_output=True) @@ -111,7 +111,7 @@ class PYSBase: utils.ensure_file_exists(out_file) cc.v('remove temp folder...') - utils.remove(_tmp_) + # utils.remove(_tmp_) def _make_py_ver_file(self): pass @@ -250,7 +250,7 @@ class PYSLinux(PYSBase): def _get_pip(self): _exec_path = os.path.dirname(env.py_exec) - return os.path.join(_exec_path, 'pip') + return os.path.join(_exec_path, 'pip3.7') def _make_py_ver_file(self): # do nothing. 
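
The `_get_pip()` change above pins the builder to the version-suffixed pip that sits next to the statically built interpreter, instead of whatever `pip` is first on PATH. A minimal sketch of that lookup (the function name and the `py_dot_ver` parameter are illustrative, not the builder's actual API):

    import os

    def get_pip(py_exec, py_dot_ver='3.7'):
        # pip is assumed to live next to the interpreter, e.g. .../bin/pip3.7
        exec_path = os.path.dirname(py_exec)
        pip = os.path.join(exec_path, 'pip{}'.format(py_dot_ver))
        if not os.path.exists(pip):
            raise RuntimeError('pip not found at {}'.format(pip))
        return pip
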
diff --git a/build/builder/core/env.py b/build/builder/core/env.py
index 82a907e..4a89e73 100644
--- a/build/builder/core/env.py
+++ b/build/builder/core/env.py
@@ -148,6 +148,11 @@ class Env(object):
             if warn_miss_tool:
                 cc.w(' - can not locate `nsis`, so I can not make installer.')

+            if 'qt' in _tmp:
+                self.qt = _tmp['qt']
+            else:
+                self.qt = None
+
         elif self.is_linux or self.is_macos:
             if 'cmake' in _tmp:
                 self.cmake = _tmp['cmake']
@@ -178,6 +183,10 @@ class Env(object):
             self.ver_ossl = _v_openssl[0].strip()
             self.ver_ossl_number = _v_openssl[1].strip()

+            _v_zlib = _tmp['zlib'].split(',')
+            self.ver_zlib = _v_zlib[0].strip()
+            self.ver_zlib_number = _v_zlib[1].strip()
+
             self.ver_libuv = _tmp['libuv']
             self.ver_mbedtls = _tmp['mbedtls']
             # self.ver_sqlite = _tmp['sqlite']
diff --git a/build/builder/core/utils.py b/build/builder/core/utils.py
index c6aa97f..3aacf62 100644
--- a/build/builder/core/utils.py
+++ b/build/builder/core/utils.py
@@ -320,6 +320,21 @@ def msvc_build(sln_file, proj_name, target, platform, force_rebuild):
         raise RuntimeError('build MSVC project `{}` failed.'.format(proj_name))

+def qt_build_win(prj_path, prj_name, bit_path, target_path):
+    cc.n(env.visual_studio_path)
+    if env.qt is None:
+        raise RuntimeError('where is `qt`?')
+
+    if env.is_win:
+        tmp_path = os.path.join(env.root_path, 'out', '_tmp_', prj_name, bit_path)
+        # C:\Windows\System32\cmd.exe /A /Q /K C:\Qt\Qt5.12.0\5.12.0\msvc2017\bin\qtenv2.bat
+        cmd = 'C:\\Windows\\System32\\cmd.exe /A /Q /C ""{}\qt-helper.bat" "{}\\bin\\qtenv2.bat" "{}VC\\Auxiliary\\Build\\vcvarsall.bat" {} "{}" "{}" {}"'.format(env.build_path, env.qt, env.visual_studio_path, bit_path, tmp_path, prj_path, target_path)
+        ret, _ = sys_exec(cmd, direct_output=True)
+        if ret != 0:
+            raise RuntimeError('build Qt project `{}` failed.'.format(prj_name))
+
+
 def xcode_build(proj_file, proj_name, target, force_rebuild):
     if force_rebuild:
         cmd = 'xcodebuild -project "{}" -target {} -configuration {} clean'.format(proj_file, proj_name, target)
diff --git a/build/builder/core/ver.py b/build/builder/core/ver.py
index 31996dd..30abc5a 100644
--- a/build/builder/core/ver.py
+++ b/build/builder/core/ver.py
@@ -1,3 +1,3 @@
 # -*- coding: utf8 -*-
-VER_TP_SERVER = "3.3.1"
-VER_TP_ASSIST = "3.3.1"
+VER_TP_SERVER = "3.5.1"
+VER_TP_ASSIST = "3.5.1"
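
The new `zlib` entry in env.py follows the same two-part convention as the existing `openssl` pin: a dotted version used for source-tree names plus a compact number used in download file names. Assuming a config line such as `zlib = 1.2.11, 1211` (the exact ver.ini layout is an assumption here), the parsing amounts to:

    def parse_ver_pair(raw):
        parts = raw.split(',')
        ver = parts[0].strip()      # '1.2.11' -> zlib-1.2.11 source directory
        number = parts[1].strip()   # '1211'   -> zlib1211.zip download name
        return ver, number

    assert parse_ver_pair('1.2.11, 1211') == ('1.2.11', '1211')

This matches how `_download_zlib()` composes the `zlib{}.zip` URL from `ver_zlib_number` while `_build_zlib()` renames the unpacked `zlib-{}` directory using `ver_zlib`.
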
"
-//                             + parser.helpText()
-//                             + "\n\n"
-//                             + "RESOURCE could be:\n"
-//                             + "    teleport record file (.tpr).\n"
-//                             + "    a directory contains .tpr file.\n"
-//                             + "    an URL for download teleport record file."
-//                             + "
"); return 2; } @@ -104,10 +91,6 @@ int main(int argc, char *argv[]) qDebug() << resource; -// QTextCodec::setCodecForTr(QTextCodec::codecForName("GB2312")); -// QTextCodec::setCodecForLocale(QTextCodec::codecForName("GBK")); -// QTextCodec::setCodecForCStrings(QTextCodec::codecForName("GB2312")); - MainWindow w; w.set_resource(resource); w.show(); diff --git a/client/tp-player/mainwindow.cpp b/client/tp-player/mainwindow.cpp index c9d9adf..2e2e458 100644 --- a/client/tp-player/mainwindow.cpp +++ b/client/tp-player/mainwindow.cpp @@ -33,6 +33,8 @@ MainWindow::MainWindow(QWidget *parent) : m_play_state = PLAY_STATE_UNKNOWN; m_thr_data = nullptr; + m_disable_draw = false; + ui->setupUi(this); ui->centralWidget->setMouseTracking(true); @@ -99,7 +101,7 @@ void MainWindow::set_resource(const QString &res) { void MainWindow::_do_first_run() { m_thr_data = new ThrData(this, m_res); connect(m_thr_data, SIGNAL(signal_update_data(UpdateData*)), this, SLOT(_do_update_data(UpdateData*))); - m_thr_data->start(); + m_thr_data->start(QThread::TimeCriticalPriority); m_thr_play = new ThrPlay(this); connect(m_thr_play, SIGNAL(signal_update_data(UpdateData*)), this, SLOT(_do_update_data(UpdateData*))); @@ -134,20 +136,6 @@ void MainWindow::paintEvent(QPaintEvent *e) painter.drawPixmap(m_pt.x-m_pt_normal.width()/2, m_pt.y-m_pt_normal.height()/2, m_pt_normal); } -// { -// QRect rc_draw = e->rect(); -// QRect rc(m_rc_message); -// //rc.moveTo(m_rc.left()+rc.left(), m_rc.top() + rc.top()); - -// int from_x = max(rc_draw.left(), rc.left()) - rc.left(); -// int from_y = max(rc_draw.top(), rc.top()) - rc.top(); -// int w = min(rc.right(), rc_draw.right()) - rc.left() - from_x + 1; -// int h = min(rc.bottom(), rc_draw.bottom()) - rc.top() - from_y + 1; -// int to_x = rc.left() + from_x; -// int to_y = rc.top() + from_y; -// painter.drawPixmap(to_x, to_y, m_img_message, from_x, from_y, w, h); -// } - // 绘制浮动控制窗 if(m_bar_fading) { painter.setOpacity(m_bar_opacity); @@ -161,15 +149,16 @@ void MainWindow::paintEvent(QPaintEvent *e) if(m_show_message) { QRect rc_draw = e->rect(); QRect rc(m_rc_message); - //rc.moveTo(m_rc.left()+rc.left(), m_rc.top() + rc.top()); - int from_x = max(rc_draw.left(), rc.left()) - rc.left(); - int from_y = max(rc_draw.top(), rc.top()) - rc.top(); - int w = min(rc.right(), rc_draw.right()) - rc.left() - from_x + 1; - int h = min(rc.bottom(), rc_draw.bottom()) - rc.top() - from_y + 1; - int to_x = rc.left() + from_x; - int to_y = rc.top() + from_y; - painter.drawPixmap(to_x, to_y, m_img_message, from_x, from_y, w, h); + if(e->rect().intersects(rc)) { + int from_x = max(rc_draw.left(), rc.left()) - rc.left(); + int from_y = max(rc_draw.top(), rc.top()) - rc.top(); + int w = min(rc.right(), rc_draw.right()) - rc.left() - from_x + 1; + int h = min(rc.bottom(), rc_draw.bottom()) - rc.top() - from_y + 1; + int to_x = rc.left() + from_x; + int to_y = rc.top() + from_y; + painter.drawPixmap(to_x, to_y, m_img_message, from_x, from_y, w, h); + } } } @@ -187,7 +176,6 @@ void MainWindow::resume(bool relocate, uint32_t ms) { m_thr_play->resume(relocate, ms); } else if(m_play_state == PLAY_STATE_STOP) { -// _start_play_thread(); m_thr_data->restart(ms); m_thr_play->resume(true, ms); } @@ -214,11 +202,11 @@ void MainWindow::_do_update_data(UpdateData* dat) { return; } else if(dat->data_type() == TYPE_IMAGE) { - UpdateImages uimgs; - if(!dat->get_images(uimgs)) + const UpdateImages uimgs = dat->get_images(); + if(uimgs.size() == 0) return; - if(uimgs.size() > 1) { + if(uimgs.size() > 1 && !m_disable_draw) { 
diff --git a/client/tp-player/mainwindow.cpp b/client/tp-player/mainwindow.cpp
index c9d9adf..2e2e458 100644
--- a/client/tp-player/mainwindow.cpp
+++ b/client/tp-player/mainwindow.cpp
@@ -33,6 +33,8 @@ MainWindow::MainWindow(QWidget *parent) :
     m_play_state = PLAY_STATE_UNKNOWN;
     m_thr_data = nullptr;

+    m_disable_draw = false;
+
     ui->setupUi(this);

     ui->centralWidget->setMouseTracking(true);
@@ -99,7 +101,7 @@ void MainWindow::set_resource(const QString &res) {
 void MainWindow::_do_first_run() {
     m_thr_data = new ThrData(this, m_res);
     connect(m_thr_data, SIGNAL(signal_update_data(UpdateData*)), this, SLOT(_do_update_data(UpdateData*)));
-    m_thr_data->start();
+    m_thr_data->start(QThread::TimeCriticalPriority);

     m_thr_play = new ThrPlay(this);
     connect(m_thr_play, SIGNAL(signal_update_data(UpdateData*)), this, SLOT(_do_update_data(UpdateData*)));
@@ -134,20 +136,6 @@ void MainWindow::paintEvent(QPaintEvent *e)
         painter.drawPixmap(m_pt.x-m_pt_normal.width()/2, m_pt.y-m_pt_normal.height()/2, m_pt_normal);
     }

-//    {
-//        QRect rc_draw = e->rect();
-//        QRect rc(m_rc_message);
-//        //rc.moveTo(m_rc.left()+rc.left(), m_rc.top() + rc.top());
-
-//        int from_x = max(rc_draw.left(), rc.left()) - rc.left();
-//        int from_y = max(rc_draw.top(), rc.top()) - rc.top();
-//        int w = min(rc.right(), rc_draw.right()) - rc.left() - from_x + 1;
-//        int h = min(rc.bottom(), rc_draw.bottom()) - rc.top() - from_y + 1;
-//        int to_x = rc.left() + from_x;
-//        int to_y = rc.top() + from_y;
-//        painter.drawPixmap(to_x, to_y, m_img_message, from_x, from_y, w, h);
-//    }
-
     // draw the floating control bar
     if(m_bar_fading) {
         painter.setOpacity(m_bar_opacity);
@@ -161,15 +149,16 @@ void MainWindow::paintEvent(QPaintEvent *e)
     if(m_show_message) {
         QRect rc_draw = e->rect();
         QRect rc(m_rc_message);
-        //rc.moveTo(m_rc.left()+rc.left(), m_rc.top() + rc.top());
-        int from_x = max(rc_draw.left(), rc.left()) - rc.left();
-        int from_y = max(rc_draw.top(), rc.top()) - rc.top();
-        int w = min(rc.right(), rc_draw.right()) - rc.left() - from_x + 1;
-        int h = min(rc.bottom(), rc_draw.bottom()) - rc.top() - from_y + 1;
-        int to_x = rc.left() + from_x;
-        int to_y = rc.top() + from_y;
-        painter.drawPixmap(to_x, to_y, m_img_message, from_x, from_y, w, h);
+        if(e->rect().intersects(rc)) {
+            int from_x = max(rc_draw.left(), rc.left()) - rc.left();
+            int from_y = max(rc_draw.top(), rc.top()) - rc.top();
+            int w = min(rc.right(), rc_draw.right()) - rc.left() - from_x + 1;
+            int h = min(rc.bottom(), rc_draw.bottom()) - rc.top() - from_y + 1;
+            int to_x = rc.left() + from_x;
+            int to_y = rc.top() + from_y;
+            painter.drawPixmap(to_x, to_y, m_img_message, from_x, from_y, w, h);
+        }
     }
 }

@@ -187,7 +176,6 @@ void MainWindow::resume(bool relocate, uint32_t ms) {
         m_thr_play->resume(relocate, ms);
     }
     else if(m_play_state == PLAY_STATE_STOP) {
-//        _start_play_thread();
         m_thr_data->restart(ms);
         m_thr_play->resume(true, ms);
     }
@@ -214,11 +202,11 @@ void MainWindow::_do_update_data(UpdateData* dat) {
         return;
     }
     else if(dat->data_type() == TYPE_IMAGE) {
-        UpdateImages uimgs;
-        if(!dat->get_images(uimgs))
+        const UpdateImages uimgs = dat->get_images();
+        if(uimgs.size() == 0)
             return;

-        if(uimgs.size() > 1) {
+        if(uimgs.size() > 1 && !m_disable_draw) {
            // disable UI updates while batching
             setUpdatesEnabled(false);
         }
@@ -227,11 +215,13 @@ void MainWindow::_do_update_data(UpdateData* dat) {
         QPainter pp(&m_canvas);
         for(int i = 0; i < uimgs.size(); ++i) {
             pp.drawImage(uimgs[i].x, uimgs[i].y, *(uimgs[i].img), 0, 0, uimgs[i].w, uimgs[i].h, Qt::AutoColor);
-            update(uimgs[i].x, uimgs[i].y, uimgs[i].w, uimgs[i].h);
+
+            if(!m_disable_draw)
+                update(uimgs[i].x, uimgs[i].y, uimgs[i].w, uimgs[i].h);
         }

-        if(uimgs.size() > 1) {
+        if(uimgs.size() > 1 && !m_disable_draw) {
             // re-enable UI updates
             setUpdatesEnabled(true);
         }
@@ -246,13 +236,16 @@ void MainWindow::_do_update_data(UpdateData* dat) {

     else if(dat->data_type() == TYPE_DISABLE_DRAW) {
         // disable UI updates
+        m_disable_draw = true;
         setUpdatesEnabled(false);
         return;
     }

     else if(dat->data_type() == TYPE_ENABLE_DRAW) {
         // allow UI updates again
+        m_disable_draw = false;
         setUpdatesEnabled(true);
+        update();
         return;
     }

@@ -265,10 +258,6 @@ void MainWindow::_do_update_data(UpdateData* dat) {
         m_show_message = true;
         qDebug("1message, w=%d, h=%d", m_canvas.width(), m_canvas.height());

-//        if(0 == m_canvas.width()) {
-//            QMessageBox::warning(nullptr, QGuiApplication::applicationDisplayName(), dat->message());
-//            return;
-//        }

         QPainter pp(&m_canvas);
         QRect rcWin(0, 0, m_canvas.width(), m_canvas.height());
@@ -318,22 +307,13 @@ void MainWindow::_do_update_data(UpdateData* dat) {

         qDebug() << "resize (" << m_rec_hdr.basic.width << "," << m_rec_hdr.basic.height << ")";

-        //if(m_canvas.width() != m_rec_hdr.basic.width && m_canvas.height() != m_rec_hdr.basic.height) {
-            m_canvas = QPixmap(m_rec_hdr.basic.width, m_rec_hdr.basic.height);
+        m_canvas = QPixmap(m_rec_hdr.basic.width, m_rec_hdr.basic.height);

-            //m_win_board_w = frameGeometry().width() - geometry().width();
-            //m_win_board_h = frameGeometry().height() - geometry().height();
+        QDesktopWidget *desktop = QApplication::desktop();  // qApp->desktop() works too
+        qDebug("desktop w:%d,h:%d, this w:%d,h:%d", desktop->width(), desktop->height(), width(), height());
+        move(10, (desktop->height() - m_rec_hdr.basic.height)/2);

-            QDesktopWidget *desktop = QApplication::desktop();  // qApp->desktop() works too
-            qDebug("desktop w:%d,h:%d, this w:%d,h:%d", desktop->width(), desktop->height(), width(), height());
-            //move((desktop->width() - this->width())/2, (desktop->height() - this->height())/2);
-            move(10, (desktop->height() - m_rec_hdr.basic.height)/2);
-
-            //setFixedSize(m_rec_hdr.basic.width + m_win_board_w, m_rec_hdr.basic.height + m_win_board_h);
-            //resize(m_rec_hdr.basic.width + m_win_board_w, m_rec_hdr.basic.height + m_win_board_h);
-            //resize(m_rec_hdr.basic.width, m_rec_hdr.basic.height);
-            setFixedSize(m_rec_hdr.basic.width, m_rec_hdr.basic.height);
-        //}
+        setFixedSize(m_rec_hdr.basic.width, m_rec_hdr.basic.height);

         m_canvas.fill(QColor(38, 73, 111));

@@ -352,13 +332,11 @@ void MainWindow::_do_update_data(UpdateData* dat) {

         QString title;
         if (m_rec_hdr.basic.conn_port == 3389) {
-//            title = QString(LOCAL8BIT("[%1] %2@%3 [Teleport-RDP录像回放]").arg(m_rec_hdr.basic.acc_username, m_rec_hdr.basic.user_username, m_rec_hdr.basic.conn_ip));
             title = QString(LOCAL8BIT("用户 %1 访问 %2 的 %3 账号").arg(m_rec_hdr.basic.user_username, m_rec_hdr.basic.conn_ip, m_rec_hdr.basic.acc_username));
         }
         else {
             QString _port;
             _port.sprintf("%d", m_rec_hdr.basic.conn_port);
-            //title = QString(LOCAL8BIT("[%1] %2@%3:%4 [Teleport-RDP录像回放]").arg(m_rec_hdr.basic.acc_username, m_rec_hdr.basic.user_username, m_rec_hdr.basic.conn_ip, _port));
             title = QString(LOCAL8BIT("用户 %1 访问 %2:%3 的 %4 账号").arg(m_rec_hdr.basic.user_username, m_rec_hdr.basic.conn_ip, _port, m_rec_hdr.basic.acc_username));
         }
diff --git a/client/tp-player/mainwindow.h b/client/tp-player/mainwindow.h
index 511c864..174487f 100644
--- a/client/tp-player/mainwindow.h
+++ b/client/tp-player/mainwindow.h
@@ -46,8 +46,6 @@ private:
     void mousePressEvent(QMouseEvent *e);
     void mouseReleaseEvent(QMouseEvent *e);

-//    void _start_play_thread();
-
 private slots:
     void _do_first_run();   // once the default UI has loaded, start playback (this may trigger a data download)
     void _do_update_data(UpdateData*);
@@ -86,6 +84,7 @@ private:
     bool m_show_message;
     QPixmap m_img_message;
     QRect m_rc_message;
+    bool m_disable_draw;
 };

 #endif // MAINWINDOW_H
diff --git a/client/tp-player/record_format.h b/client/tp-player/record_format.h
index ebc6041..9fc7ad5 100644
--- a/client/tp-player/record_format.h
+++ b/client/tp-player/record_format.h
@@ -16,6 +16,7 @@
 #define TS_RDP_BTN_PRESSED 1
 #define TS_RDP_IMG_RAW 0    // uncompressed raw data (per bitsPerPixel, several bytes encode one pixel)
 #define TS_RDP_IMG_BMP 1    // compressed BMP data
+#define TS_RDP_IMG_ALT 2    // reference to an already-decoded image (dat_len carries the cache index, see ThrData::_parse)

 #pragma pack(push,1)

@@ -27,7 +28,6 @@ typedef struct TS_RECORD_HEADER_INFO {
     // uint32_t packages;       // total number of packages
     uint32_t time_ms;           // total duration (milliseconds)
     uint32_t dat_file_count;    // number of data files
-    uint8_t _reserve[64-4-2-2-4-4];
 }TS_RECORD_HEADER_INFO;
 #define ts_record_header_info_size sizeof(TS_RECORD_HEADER_INFO)

@@ -49,15 +49,14 @@ typedef struct TS_RECORD_HEADER_BASIC {
     //
     // RDP only
     // uint8_t rdp_security;    // 0 = RDP, 1 = TLS
-
-//    uint8_t _reserve[512 - 2 - 2 - 8 - 2 - 2 - 64 - 64 - 40 - 40 - 2 - 40 - 1 - ts_record_header_info_size];
-    uint8_t _reserve[512 - 2 - 2 - 8 - 2 - 2 - 64 - 64 - 40 - 40 - 2 - 40 - ts_record_header_info_size];
 }TS_RECORD_HEADER_BASIC;
 #define ts_record_header_basic_size sizeof(TS_RECORD_HEADER_BASIC)

 typedef struct TS_RECORD_HEADER {
-    TS_RECORD_HEADER_INFO info;
-    TS_RECORD_HEADER_BASIC basic;
+    TS_RECORD_HEADER_INFO info;
+    uint8_t _reserve1[64 - ts_record_header_info_size];
+    TS_RECORD_HEADER_BASIC basic;
+    uint8_t _reserve2[512 - 64 - ts_record_header_basic_size];
 }TS_RECORD_HEADER;

 // the header part (header-info + header-basic) = 512B
@@ -66,10 +65,9 @@ typedef struct TS_RECORD_HEADER {
 // header of one data package
 typedef struct TS_RECORD_PKG {
     uint8_t type;           // data type of this package
-    uint8_t _reserve[3];    // reserved
     uint32_t size;          // total size of this package (not including this header)
     uint32_t time_ms;       // time offset from the start (milliseconds, which means one session can not last more than 49 days)
-    // uint32_t index;      // sequence number of this package (the last one matches TS_RECORD_HEADER_INFO::packages)
+    uint8_t _reserve[3];    // reserved
 }TS_RECORD_PKG;


@@ -92,6 +90,7 @@ typedef struct TS_RECORD_RDP_IMAGE_INFO {
     uint8_t format;
     uint8_t _reserved;
     uint32_t dat_len;
+    uint32_t zip_len;
 }TS_RECORD_RDP_IMAGE_INFO;

 // keyframe index
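
The reworked TS_RECORD_HEADER makes the padding explicit instead of burying it inside each sub-struct: info is padded to a fixed 64-byte slot and basic to the remaining 448 bytes, so the header stays exactly 512 bytes however the sub-structs evolve. A quick check of that arithmetic (the sizes passed in below are placeholders, not the real struct sizes):

    HEADER_SIZE = 512
    INFO_SLOT = 64

    def reserve_sizes(info_size, basic_size):
        reserve1 = INFO_SLOT - info_size                 # _reserve1[64 - info]
        reserve2 = HEADER_SIZE - INFO_SLOT - basic_size  # _reserve2[512 - 64 - basic]
        assert reserve1 >= 0 and reserve2 >= 0, 'struct outgrew its slot'
        return reserve1, reserve2

    r1, r2 = reserve_sizes(12, 266)   # illustrative sizes only
    assert 12 + r1 + 266 + r2 == HEADER_SIZE
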
qDebug() << "22------------------DECOMPRESS2 failed."; + return nullptr; + } + + // TODO: 这里需要进一步优化,直接操作QImage的buffer。 + out = new QImage(w, h, QImage::Format_RGB16); + for(int y = 0; y < h; y++) { + for(int x = 0; x < w; x++) { + uint16 a = ((uint16*)_dat)[y * w + x]; + uint8 r = ((a & 0xf800) >> 11) * 255 / 31; + uint8 g = ((a & 0x07e0) >> 5) * 255 / 63; + uint8 b = (a & 0x001f) * 255 / 31; + out->setPixelColor(x, y, QColor(r,g,b)); + } + } + free(_dat); + return out; + } + else { + out = new QImage(QImage(dat, w, h, QImage::Format_RGB16).transformed(QMatrix(1.0, 0.0, 0.0, -1.0, 0.0, 0.0))); + } + return out; + + case 24: + case 32: + default: + qDebug() << "--------NOT support UNKNOWN bitsPerPix" << bitsPerPixel; + return nullptr; + } +} + +static QImage* _raw2QImage(int w, int h, const uint8_t* dat, uint32_t len) { + QImage* out; + + // TODO: 这里需要进一步优化,直接操作QImage的buffer。 + out = new QImage(w, h, QImage::Format_RGB16); + for(int y = 0; y < h; y++) { + for(int x = 0; x < w; x++) { + uint16 a = ((uint16*)dat)[y * w + x]; + uint8 r = ((a & 0xf800) >> 11) * 255 / 31; + uint8 g = ((a & 0x07e0) >> 5) * 255 / 63; + uint8 b = (a & 0x001f) * 255 / 31; + out->setPixelColor(x, y, QColor(r,g,b)); + } + } + return out; +} + + + //================================================================= // ThrData //================================================================= @@ -186,9 +261,9 @@ void ThrData::_run() { pkg_count_in_queue = m_data.size(); m_locker.unlock(); - // 少于500个的话,补足到1000个 - if(m_data.size() < 500) - pkg_need_add = 1000 - pkg_count_in_queue; + // 少于1000个的话,补足到2000个 + if(m_data.size() < 1000) + pkg_need_add = 2000 - pkg_count_in_queue; if(pkg_need_add == 0) { msleep(100); @@ -236,7 +311,6 @@ void ThrData::_run() { file_processed = 0; qDebug("Open file tp-rdp-%d.tpd, processed: %" PRId64 ", size: %" PRId64, m_file_idx+1, file_processed, file_size); } -// qDebug("B processed: %" PRId64 ", size: %" PRId64, file_processed, file_size); // 如果指定了起始偏移,则跳过这部分数据 if(m_offset > 0) { @@ -256,8 +330,6 @@ void ThrData::_run() { TS_RECORD_PKG pkg; read_len = fdata->read(reinterpret_cast(&pkg), sizeof(TS_RECORD_PKG)); - // if(read_len == 0) - // break; if(read_len != sizeof(TS_RECORD_PKG)) { qDebug("invaid tp-rdp-%d.tpd file, read_len=%" PRId64 " (1).", m_file_idx+1, read_len); _notify_error(QString("%1\ntp-rdp-%2.tpd").arg(LOCAL8BIT("错误的录像数据文件!"), str_fidx)); @@ -283,13 +355,22 @@ void ThrData::_run() { } file_processed += pkg.size; - UpdateData* dat = new UpdateData(m_hdr.basic.width, m_hdr.basic.height); - if(!dat->parse(pkg, pkg_data)) { + UpdateData* dat = _parse(pkg, pkg_data); + if(dat == nullptr) { qDebug("invaid tp-rdp-%d.tpd file (4).", m_file_idx+1); _notify_error(QString("%1\ntp-rdp-%2.tpd").arg(LOCAL8BIT("错误的录像数据文件!"), str_fidx)); return; } + // 遇到关键帧,需要清除自上一个关键帧以来保存的缓存图像数据 + if(pkg.type == TS_RECORD_TYPE_RDP_KEYFRAME) { + for(size_t ci = 0; ci < m_cache_imgs.size(); ++ci) { + if(m_cache_imgs[ci] != nullptr) + delete m_cache_imgs[ci]; + } + m_cache_imgs.clear(); + } + // 拖动滚动条后,需要显示一次关键帧数据,然后跳过后续关键帧。 if(pkg.type == TS_RECORD_TYPE_RDP_KEYFRAME) { qDebug("----key frame: %ld, processed=%" PRId64 ", pkg.size=%d", pkg.time_ms, file_processed, pkg.size); @@ -312,7 +393,7 @@ void ThrData::_run() { } // 让线程调度器让播放线程有机会执行 - msleep(1); +// msleep(1); // 如果此文件已经处理完毕,则关闭文件,这样下次处理一个新的文件 if(file_processed >= file_size) { @@ -333,6 +414,147 @@ void ThrData::_run() { } } +UpdateData* ThrData::_parse(const TS_RECORD_PKG& pkg, const QByteArray& data) { + if(pkg.type == TS_RECORD_TYPE_RDP_POINTER) { + 
@@ -186,9 +261,9 @@ void ThrData::_run() {
         pkg_count_in_queue = m_data.size();
         m_locker.unlock();

-        // if fewer than 500 packages are queued, refill up to 1000
-        if(m_data.size() < 500)
-            pkg_need_add = 1000 - pkg_count_in_queue;
+        // if fewer than 1000 packages are queued, refill up to 2000
+        if(m_data.size() < 1000)
+            pkg_need_add = 2000 - pkg_count_in_queue;

         if(pkg_need_add == 0) {
             msleep(100);
@@ -236,7 +311,6 @@ void ThrData::_run() {
                 file_processed = 0;
                 qDebug("Open file tp-rdp-%d.tpd, processed: %" PRId64 ", size: %" PRId64, m_file_idx+1, file_processed, file_size);
             }
-//            qDebug("B processed: %" PRId64 ", size: %" PRId64, file_processed, file_size);

             // if a start offset was given, skip that part of the data
             if(m_offset > 0) {
@@ -256,8 +330,6 @@ void ThrData::_run() {

             TS_RECORD_PKG pkg;
             read_len = fdata->read(reinterpret_cast<char*>(&pkg), sizeof(TS_RECORD_PKG));
-            // if(read_len == 0)
-            //     break;
             if(read_len != sizeof(TS_RECORD_PKG)) {
                 qDebug("invalid tp-rdp-%d.tpd file, read_len=%" PRId64 " (1).", m_file_idx+1, read_len);
                 _notify_error(QString("%1\ntp-rdp-%2.tpd").arg(LOCAL8BIT("错误的录像数据文件!"), str_fidx));
@@ -283,13 +355,22 @@ void ThrData::_run() {
             }
             file_processed += pkg.size;

-            UpdateData* dat = new UpdateData(m_hdr.basic.width, m_hdr.basic.height);
-            if(!dat->parse(pkg, pkg_data)) {
+            UpdateData* dat = _parse(pkg, pkg_data);
+            if(dat == nullptr) {
                 qDebug("invalid tp-rdp-%d.tpd file (4).", m_file_idx+1);
                 _notify_error(QString("%1\ntp-rdp-%2.tpd").arg(LOCAL8BIT("错误的录像数据文件!"), str_fidx));
                 return;
             }

+            // on a keyframe, drop the cached images accumulated since the previous keyframe
+            if(pkg.type == TS_RECORD_TYPE_RDP_KEYFRAME) {
+                for(size_t ci = 0; ci < m_cache_imgs.size(); ++ci) {
+                    if(m_cache_imgs[ci] != nullptr)
+                        delete m_cache_imgs[ci];
+                }
+                m_cache_imgs.clear();
+            }
+
             // after the progress bar is dragged, show one keyframe, then skip the following keyframes.
             if(pkg.type == TS_RECORD_TYPE_RDP_KEYFRAME) {
                 qDebug("----key frame: %ld, processed=%" PRId64 ", pkg.size=%d", pkg.time_ms, file_processed, pkg.size);
@@ -312,7 +393,7 @@ void ThrData::_run() {
             }

             // give the scheduler a chance to run the play thread
-            msleep(1);
+//            msleep(1);

             // once this file is fully processed, close it so the next file is opened
             if(file_processed >= file_size) {
@@ -333,6 +414,147 @@ void ThrData::_run() {
     }
 }

+UpdateData* ThrData::_parse(const TS_RECORD_PKG& pkg, const QByteArray& data) {
+    if(pkg.type == TS_RECORD_TYPE_RDP_POINTER) {
+        if(data.size() != sizeof(TS_RECORD_RDP_POINTER))
+            return nullptr;
+
+        UpdateData* ud = new UpdateData();
+        ud->set_pointer(pkg.time_ms, reinterpret_cast<const TS_RECORD_RDP_POINTER*>(data.data()));
+        return ud;
+    }
+    else if(pkg.type == TS_RECORD_TYPE_RDP_IMAGE) {
+        UpdateData* ud = new UpdateData(TYPE_IMAGE, pkg.time_ms);
+
+        if(data.size() < static_cast<int>(sizeof(uint16_t) + sizeof(TS_RECORD_RDP_IMAGE_INFO))) {
+            delete ud;
+            return nullptr;
+        }
+
+        const uint8_t* dat_ptr = reinterpret_cast<const uint8_t*>(data.data());
+
+        uint16_t count = (reinterpret_cast<const uint16_t*>(dat_ptr))[0];
+        uint32_t offset = sizeof(uint16_t);
+
+        UpdateImages& imgs = ud->get_images();
+
+        for(uint16_t i = 0; i < count; ++i) {
+
+            const TS_RECORD_RDP_IMAGE_INFO* info = reinterpret_cast<const TS_RECORD_RDP_IMAGE_INFO*>(dat_ptr+offset);
+            offset += sizeof(TS_RECORD_RDP_IMAGE_INFO);
+
+            if(info->format != TS_RDP_IMG_ALT) {
+                const uint8_t* img_dat = dat_ptr + offset;
+
+                const uint8_t* real_img_dat = nullptr;
+                QByteArray unzip_data;
+                if(info->zip_len > 0) {
+                    // the data is compressed and must be inflated first
+                    unzip_data.resize(static_cast<int>(info->dat_len));
+
+                    uLong u_len = info->dat_len;
+                    int err = uncompress(reinterpret_cast<Bytef*>(unzip_data.data()), &u_len, img_dat, info->zip_len);
+                    if(err != Z_OK || u_len != info->dat_len) {
+                        qDebug("image uncompress failed. err=%d.", err);
+                    }
+                    else {
+                        real_img_dat = reinterpret_cast<const uint8_t*>(unzip_data.data());
+                    }
+
+                    offset += info->zip_len;
+                }
+                else {
+                    real_img_dat = img_dat;
+                    offset += info->dat_len;
+                }
+
+                UPDATE_IMAGE uimg;
+                uimg.x = info->destLeft;
+                uimg.y = info->destTop;
+                uimg.w = info->destRight - info->destLeft + 1;
+                uimg.h = info->destBottom - info->destTop + 1;
+                if(real_img_dat)
+                    uimg.img = _rdpimg2QImage(info->width, info->height, info->bitsPerPixel, (info->format == TS_RDP_IMG_BMP) ? true : false, real_img_dat, info->dat_len);
+                else
+                    uimg.img = nullptr;
+                imgs.push_back(uimg);
+
+                // keep a copy so a later TS_RDP_IMG_ALT record can reference it
+                QImage* cache_img = nullptr;
+                if(uimg.img != nullptr)
+                    cache_img = new QImage(*uimg.img);
+
+                m_cache_imgs.push_back(cache_img);
+            }
+            else {
+                UPDATE_IMAGE uimg;
+                uimg.x = info->destLeft;
+                uimg.y = info->destTop;
+                uimg.w = info->destRight - info->destLeft + 1;
+                uimg.h = info->destBottom - info->destTop + 1;
+
+                // for TS_RDP_IMG_ALT, dat_len is an index into the image cache
+                size_t cache_idx = info->dat_len;
+
+                if(cache_idx >= m_cache_imgs.size() || m_cache_imgs[cache_idx] == nullptr) {
+                    uimg.img = nullptr;
+                }
+                else {
+                    uimg.img = new QImage(*m_cache_imgs[cache_idx]);
+                }
+                imgs.push_back(uimg);
+            }
+        }
+
+        return ud;
+    }
+    else if(pkg.type == TS_RECORD_TYPE_RDP_KEYFRAME) {
+        UpdateData* ud = new UpdateData(TYPE_IMAGE, pkg.time_ms);
+        const TS_RECORD_RDP_KEYFRAME_INFO* info = reinterpret_cast<const TS_RECORD_RDP_KEYFRAME_INFO*>(data.data());
+        const uint8_t* data_buf = reinterpret_cast<const uint8_t*>(data.data() + sizeof(TS_RECORD_RDP_KEYFRAME_INFO));
+        uint32_t data_len = data.size() - sizeof(TS_RECORD_RDP_KEYFRAME_INFO);
+
+        UpdateImages& imgs = ud->get_images();
+
+        UPDATE_IMAGE uimg;
+        uimg.x = 0;
+        uimg.y = 0;
+        uimg.w = m_hdr.basic.width;
+        uimg.h = m_hdr.basic.height;
+
+        const uint8_t* real_img_dat = nullptr;
+        uint32_t real_img_len = m_hdr.basic.width * m_hdr.basic.height * 2;
+
+        QByteArray unzip_data;
+        if(data_len != real_img_len) {
+            // the data is compressed and must be inflated first
+            unzip_data.resize(static_cast<int>(real_img_len));
+
+            uLong u_len = real_img_len;
+            int err = uncompress(reinterpret_cast<Bytef*>(unzip_data.data()), &u_len, data_buf, data_len);
+            if(err != Z_OK || u_len != real_img_len) {
+                qDebug("keyframe uncompress failed. err=%d.", err);
+            }
+            else {
+                real_img_dat = reinterpret_cast<const uint8_t*>(unzip_data.data());
+            }
+        }
+        else {
+            real_img_dat = data_buf;
+        }
+
+        if(real_img_dat != nullptr)
+            uimg.img = _raw2QImage(m_hdr.basic.width, m_hdr.basic.height, real_img_dat, real_img_len);
+        else
+            uimg.img = nullptr;
+        imgs.push_back(uimg);
+
+        return ud;
+    }
+
+    return nullptr;
+}
+
+
 void ThrData::restart(uint32_t start_ms) {
     qDebug("restart at %ld ms", start_ms);
     // let the worker thread finish its current loop, then wait
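
The zip_len/dat_len convention used by _parse is: zip_len > 0 means `zip_len` compressed bytes follow the TS_RECORD_RDP_IMAGE_INFO and inflate to exactly `dat_len` bytes, while zip_len == 0 means `dat_len` raw bytes follow. A Python paraphrase of just that read step (illustrative only):

    import zlib

    def read_image_data(blob, offset, dat_len, zip_len):
        """Return (pixel_bytes, new_offset) for one image record."""
        if zip_len > 0:
            raw = zlib.decompress(bytes(blob[offset:offset + zip_len]))
            assert len(raw) == dat_len, 'corrupt image record'
            return raw, offset + zip_len
        return blob[offset:offset + dat_len], offset + dat_len
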
+            }
+            else {
+                real_img_dat = reinterpret_cast<const uint8_t*>(unzip_data.data());
+            }
+        }
+        else {
+            real_img_dat = data_buf;
+        }
+
+        if(real_img_dat != nullptr)
+            uimg.img = _raw2QImage(m_hdr.basic.width, m_hdr.basic.height, real_img_dat, real_img_len);
+        else
+            uimg.img = nullptr;
+        imgs.push_back(uimg);
+
+        return ud;
+    }
+
+    return nullptr;
+}
+
+
 void ThrData::restart(uint32_t start_ms) {
     qDebug("restart at %ld ms", start_ms);
     // let the data thread finish its current loop iteration, then hold it waiting
 
@@ -441,19 +663,19 @@ bool ThrData::_load_keyframe() {
     }
 
     qint64 fsize = f_kf.size();
-    if(!fsize || fsize % sizeof(KEYFRAME_INFO) != 0) {
+    if(!fsize || fsize % sizeof(TS_RECORD_RDP_KEYFRAME_INFO) != 0) {
         qDebug() << "Can not open " << tpk_fname << " for read.";
         _notify_error(LOCAL8BIT("关键帧信息文件格式错误!"));
         return false;
     }
 
     qint64 read_len = 0;
-    int kf_count = static_cast<int>(fsize / sizeof(KEYFRAME_INFO));
+    int kf_count = static_cast<int>(fsize / sizeof(TS_RECORD_RDP_KEYFRAME_INFO));
     for(int i = 0; i < kf_count; ++i) {
-        KEYFRAME_INFO kf;
-        memset(&kf, 0, sizeof(KEYFRAME_INFO));
-        read_len = f_kf.read(reinterpret_cast<char*>(&kf), sizeof(KEYFRAME_INFO));
-        if(read_len != sizeof(KEYFRAME_INFO)) {
+        TS_RECORD_RDP_KEYFRAME_INFO kf;
+        memset(&kf, 0, sizeof(TS_RECORD_RDP_KEYFRAME_INFO));
+        read_len = f_kf.read(reinterpret_cast<char*>(&kf), sizeof(TS_RECORD_RDP_KEYFRAME_INFO));
+        if(read_len != sizeof(TS_RECORD_RDP_KEYFRAME_INFO)) {
             qDebug() << "invalid .tpk file.";
             _notify_error(LOCAL8BIT("关键帧信息文件格式错误!"));
             return false;
@@ -465,20 +687,11 @@ bool ThrData::_load_keyframe() {
 
     return true;
 }
 
-void ThrData::_prepare() {
-    UpdateData* d = new UpdateData(TYPE_HEADER_INFO);
-
-    m_locker.lock();
-    m_data.enqueue(d);
-    m_locker.unlock();
-}
-
 UpdateData* ThrData::get_data() {
     UpdateData* d = nullptr;
     m_locker.lock();
     if(m_data.size() > 0) {
-//        qDebug("get_data(), left: %d", m_data.size());
         d = m_data.dequeue();
     }
     m_locker.unlock();
diff --git a/client/tp-player/thr_data.h b/client/tp-player/thr_data.h
index a00c5e0..92ad518 100644
--- a/client/tp-player/thr_data.h
+++ b/client/tp-player/thr_data.h
@@ -7,6 +7,7 @@
 #include <QThread>
 #include <QMutex>
 #include <QQueue>
+#include <vector>
 
 #include "update_data.h"
 #include "record_format.h"
 #include "thr_download.h"
@@ -33,13 +34,9 @@
    This way, the next time a given file must be downloaded, and its temporary file still exists, the download can resume from the number of bytes already received.
 */
 
-typedef struct KEYFRAME_INFO {
-    uint32_t time_ms;       // time point of this key frame
-    uint32_t file_index;    // which data file holds this key frame's image data
-    uint32_t offset;        // offset of the key-frame image data inside that data file
-}KEYFRAME_INFO;
+typedef std::vector<TS_RECORD_RDP_KEYFRAME_INFO> KeyFrames;
 
-typedef std::vector<KEYFRAME_INFO> KeyFrames;
+typedef std::vector<QImage*> CachedImages;
 
 class MainWindow;
 
@@ -67,7 +64,9 @@ private:
     bool _load_keyframe();
     void _clear_data();
-    void _prepare();
+//    void _prepare();
+
+    UpdateData* _parse(const TS_RECORD_PKG& pkg, const QByteArray& data);
 
     void _notify_message(const QString& msg);
     void _notify_error(const QString& err_msg);
@@ -102,8 +101,7 @@ private:
     uint32_t m_file_idx;
     uint32_t m_offset;
 
-//    bool m_xxx;
-//    int m_restart_kf_idx;
+    CachedImages m_cache_imgs;
 };
 
 #endif // THR_DATA_H
diff --git a/client/tp-player/thr_play.cpp b/client/tp-player/thr_play.cpp
index b76d017..424f7bd 100644
--- a/client/tp-player/thr_play.cpp
+++ b/client/tp-player/thr_play.cpp
@@ -36,19 +36,9 @@ void ThrPlay::stop() {
     if(!isRunning())
         return;
 
-    // warning: never call stop() inside thread::run() loop.
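
The removed warning above points at a real constraint: stop() raises m_need_stop and then blocks in QThread::wait(), so calling it from inside run() would make the thread wait on its own termination and deadlock. A minimal sketch of this cooperative-stop idiom, with illustrative class and member names rather than the exact ones from this patch:

    #include <QThread>
    #include <atomic>

    // Cooperative stop: run() polls a flag; stop() raises the flag and then
    // blocks in wait() until run() returns. wait() must never be called from
    // run() itself -- a thread cannot join itself.
    class PlayerThread : public QThread {
    public:
        void stop() {
            if (!isRunning())
                return;
            m_need_stop = true;  // ask run() to leave its loop
            wait();              // join: returns once run() has exited
        }

    protected:
        void run() override {
            while (!m_need_stop) {
                // ... dequeue one update and hand it to the UI ...
                msleep(10);
            }
        }

    private:
        std::atomic<bool> m_need_stop{false};
    };

In the hunk that follows, the msleep(500) added before emitting the TYPE_DISABLE_DRAW update presumably gives the UI a moment to drain pending frames; it does not change this shutdown contract.
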
- m_need_stop = true; wait(); qDebug() << "play-thread end."; - -// if(m_thr_data) { -// m_thr_data->stop(); -// qDebug("delete thrData."); -// //m_thr_download->wait(); -// delete m_thr_data; -// m_thr_data = nullptr; -// } } void ThrPlay::_notify_message(const QString& msg) { @@ -144,6 +134,7 @@ void ThrPlay::run() { delete dat; dat = nullptr; UpdateData* _disable = new UpdateData(TYPE_DISABLE_DRAW); + msleep(500); emit signal_update_data(_disable); break; } diff --git a/client/tp-player/tp-player.pro b/client/tp-player/tp-player.pro index 0a8af1d..3b95bae 100644 --- a/client/tp-player/tp-player.pro +++ b/client/tp-player/tp-player.pro @@ -3,8 +3,6 @@ TARGET = tp-player QT += core gui widgets network -#DEFINES += QT_NO_DEBUG_OUTPUT - HEADERS += \ mainwindow.h \ bar.h \ @@ -37,3 +35,24 @@ RC_FILE += \ FORMS += \ mainwindow.ui + + +win32:CONFIG(release, debug|release): { + DEFINES += QT_NO_DEBUG_OUTPUT + LIBS += -L$$PWD/../../external/zlib/build/release/ -lzlib + DESTDIR = $$PWD/../../out/client/x86/Release +} +else:win32:CONFIG(debug, debug|release): { + LIBS += -L$$PWD/../../external/zlib/build/debug/ -lzlibd + DESTDIR = $$PWD/../../out/client/x86/Debug +} + +INCLUDEPATH += $$PWD/../../external/zlib +INCLUDEPATH += $$PWD/../../external/zlib/build +DEPENDPATH += $$PWD/../../external/zlib +DEPENDPATH += $$PWD/../../external/zlib/build + +#win32-g++:CONFIG(release, debug|release): PRE_TARGETDEPS += $$PWD/../../external/zlib/build/release/libzlibstatic.a +#else:win32-g++:CONFIG(debug, debug|release): PRE_TARGETDEPS += $$PWD/../../external/zlib/build/debug/libzlibstaticd.a +#else:win32:!win32-g++:CONFIG(release, debug|release): PRE_TARGETDEPS += $$PWD/../../external/zlib/build/release/zlibstatic.lib +#else:win32:!win32-g++:CONFIG(debug, debug|release): PRE_TARGETDEPS += $$PWD/../../external/zlib/build/debug/zlibstaticd.lib diff --git a/client/tp-player/update_data.cpp b/client/tp-player/update_data.cpp index 15e8165..b9cfdfd 100644 --- a/client/tp-player/update_data.cpp +++ b/client/tp-player/update_data.cpp @@ -1,81 +1,9 @@ #include "update_data.h" -#include "rle.h" #include #include -static QImage* _rdpimg2QImage(int w, int h, int bitsPerPixel, bool isCompressed, const uint8_t* dat, uint32_t len) { - QImage* out; - switch(bitsPerPixel) { - case 15: - if(isCompressed) { - uint8_t* _dat = reinterpret_cast(calloc(1, w*h*2)); - if(!bitmap_decompress1(_dat, w, h, dat, len)) { - free(_dat); - return nullptr; - } - out = new QImage(_dat, w, h, QImage::Format_RGB555); - free(_dat); - } - else { - out = new QImage(QImage(dat, w, h, QImage::Format_RGB555).transformed(QMatrix(1.0, 0.0, 0.0, -1.0, 0.0, 0.0))); - } - return out; - - case 16: - if(isCompressed) { - uint8_t* _dat = reinterpret_cast(calloc(1, w*h*2)); - if(!bitmap_decompress2(_dat, w, h, dat, len)) { - free(_dat); - qDebug() << "22------------------DECOMPRESS2 failed."; - return nullptr; - } - - // TODO: 这里需要进一步优化,直接操作QImage的buffer。 - out = new QImage(w, h, QImage::Format_RGB16); - for(int y = 0; y < h; y++) { - for(int x = 0; x < w; x++) { - uint16 a = ((uint16*)_dat)[y * w + x]; - uint8 r = ((a & 0xf800) >> 11) * 255 / 31; - uint8 g = ((a & 0x07e0) >> 5) * 255 / 63; - uint8 b = (a & 0x001f) * 255 / 31; - out->setPixelColor(x, y, QColor(r,g,b)); - } - } - free(_dat); - return out; - } - else { - out = new QImage(QImage(dat, w, h, QImage::Format_RGB16).transformed(QMatrix(1.0, 0.0, 0.0, -1.0, 0.0, 0.0))); - } - return out; - - case 24: - case 32: - default: - qDebug() << "--------NOT support UNKNOWN bitsPerPix" << bitsPerPixel; - return 
nullptr; - } -} - -static QImage* _raw2QImage(int w, int h, const uint8_t* dat, uint32_t len) { - QImage* out; - - // TODO: 这里需要进一步优化,直接操作QImage的buffer。 - out = new QImage(w, h, QImage::Format_RGB16); - for(int y = 0; y < h; y++) { - for(int x = 0; x < w; x++) { - uint16 a = ((uint16*)dat)[y * w + x]; - uint8 r = ((a & 0xf800) >> 11) * 255 / 31; - uint8 g = ((a & 0x07e0) >> 5) * 255 / 63; - uint8 b = (a & 0x001f) * 255 / 31; - out->setPixelColor(x, y, QColor(r,g,b)); - } - } - return out; -} - UpdateData::UpdateData() : QObject(nullptr) { _init(); @@ -87,6 +15,13 @@ UpdateData::UpdateData(int data_type) : QObject(nullptr) m_data_type = data_type; } +UpdateData::UpdateData(int data_type, uint32_t time_ms) : QObject(nullptr) +{ + _init(); + m_data_type = data_type; + m_time_ms = time_ms; +} + UpdateData::UpdateData(const TS_RECORD_HEADER& hdr) : QObject(nullptr) { _init(); @@ -95,25 +30,14 @@ UpdateData::UpdateData(const TS_RECORD_HEADER& hdr) : QObject(nullptr) memcpy(m_hdr, &hdr, sizeof(TS_RECORD_HEADER)); } -UpdateData::UpdateData(uint16_t screen_w, uint16_t screen_h) { - _init(); - m_screen_w = screen_w; - m_screen_h = screen_h; -} - void UpdateData::_init() { m_data_type = TYPE_UNKNOWN; m_hdr = nullptr; m_pointer = nullptr; -// m_img = nullptr; -// m_img_info = nullptr; m_data_buf = nullptr; m_data_len = 0; m_time_ms = 0; - - m_screen_w = 0; - m_screen_h = 0; } UpdateData::~UpdateData() { @@ -121,10 +45,6 @@ UpdateData::~UpdateData() { delete m_hdr; if(m_pointer) delete m_pointer; -// if(m_img) -// delete m_img; -// if(m_img_info) -// delete m_img_info; for(int i = 0; i < m_images.size(); ++i) { delete m_images[i].img; } @@ -134,87 +54,13 @@ UpdateData::~UpdateData() { delete m_data_buf; } -bool UpdateData::parse(const TS_RECORD_PKG& pkg, const QByteArray& data) { - m_time_ms = pkg.time_ms; - - if(pkg.type == TS_RECORD_TYPE_RDP_POINTER) { - m_data_type = TYPE_POINTER; - if(data.size() != sizeof(TS_RECORD_RDP_POINTER)) - return false; - m_pointer = new TS_RECORD_RDP_POINTER; - memcpy(m_pointer, data.data(), sizeof(TS_RECORD_RDP_POINTER)); - return true; - } - else if(pkg.type == TS_RECORD_TYPE_RDP_IMAGE) { - m_data_type = TYPE_IMAGE; - if(data.size() <= static_cast(sizeof(uint16_t) + sizeof(TS_RECORD_RDP_IMAGE_INFO))) - return false; - - const uint8_t* dat_ptr = reinterpret_cast(data.data()); - - uint16_t count = (reinterpret_cast(dat_ptr))[0]; - uint32_t offset = sizeof(uint16_t); - - for(uint16_t i = 0; i < count; ++i) { - - const TS_RECORD_RDP_IMAGE_INFO* info = reinterpret_cast(dat_ptr+offset); - offset += sizeof(TS_RECORD_RDP_IMAGE_INFO); - //const uint8_t* img_dat = reinterpret_cast(data.data() + sizeof(TS_RECORD_RDP_IMAGE_INFO)); - //uint32_t img_len = data.size() - sizeof(TS_RECORD_RDP_IMAGE_INFO); - const uint8_t* img_dat = dat_ptr + offset; - offset += info->dat_len; - - - QImage* img = _rdpimg2QImage(info->width, info->height, info->bitsPerPixel, (info->format == TS_RDP_IMG_BMP) ? 
true : false, img_dat, info->dat_len); - if(img == nullptr) - return false; - -// m_img = img; -// m_img_x = info->destLeft; -// m_img_y = info->destTop; -// m_img_w = info->destRight - info->destLeft + 1; -// m_img_h = info->destBottom - info->destTop + 1; - UPDATE_IMAGE uimg; - uimg.x = info->destLeft; - uimg.y = info->destTop; - uimg.w = info->destRight - info->destLeft + 1; - uimg.h = info->destBottom - info->destTop + 1; - uimg.img = img; - m_images.push_back(uimg); - } - - return true; - } - else if(pkg.type == TS_RECORD_TYPE_RDP_KEYFRAME) { - m_data_type = TYPE_IMAGE; -// const TS_RECORD_RDP_KEYFRAME_INFO* info = reinterpret_cast(data.data()); - const uint8_t* img_dat = reinterpret_cast(data.data() + sizeof(TS_RECORD_RDP_KEYFRAME_INFO)); - uint32_t img_len = data.size() - sizeof(TS_RECORD_RDP_KEYFRAME_INFO); - - QImage* img = _raw2QImage((int)m_screen_w, (int)m_screen_h, img_dat, img_len); - if(img == nullptr) - return false; - - UPDATE_IMAGE uimg; - uimg.x = 0; - uimg.y = 0; - uimg.w = m_screen_w; - uimg.h = m_screen_h; - uimg.img = img; - m_images.push_back(uimg); - -// m_img = img; -// m_img_x = 0; -// m_img_y = 0; -// m_img_w = m_screen_w; -// m_img_h = m_screen_h; - return true; - } - - return false; +void UpdateData::set_pointer(uint32_t ts, const TS_RECORD_RDP_POINTER* p) { + m_data_type = TYPE_POINTER; + m_time_ms = ts; + m_pointer = new TS_RECORD_RDP_POINTER; + memcpy(m_pointer, p, sizeof(TS_RECORD_RDP_POINTER)); } - void UpdateData::alloc_data(uint32_t len) { if(m_data_buf) delete m_data_buf; diff --git a/client/tp-player/update_data.h b/client/tp-player/update_data.h index d6e1380..f142629 100644 --- a/client/tp-player/update_data.h +++ b/client/tp-player/update_data.h @@ -37,29 +37,16 @@ class UpdateData : public QObject public: explicit UpdateData(); explicit UpdateData(int data_type); + explicit UpdateData(int data_type, uint32_t time_ms); explicit UpdateData(const TS_RECORD_HEADER& hdr); - explicit UpdateData(uint16_t screen_w, uint16_t screen_h); virtual ~UpdateData(); - bool parse(const TS_RECORD_PKG& pkg, const QByteArray& data); + void set_pointer(uint32_t ts, const TS_RECORD_RDP_POINTER* p); + TS_RECORD_HEADER* get_header() {return m_hdr;} TS_RECORD_RDP_POINTER* get_pointer() {return m_pointer;} -// bool get_image(QImage** img, int& x, int& y, int& w, int& h) { -// if(m_img == nullptr) -// return false; -// *img = m_img; -// x = m_img_x; -// y = m_img_y; -// w = m_img_w; -// h = m_img_h; -// return true; -// } - bool get_images(UpdateImages& uimgs) const { - if(m_images.size() == 0) - return false; - uimgs = m_images; - return true; - } + UpdateImages& get_images() {return m_images;} + const UpdateImages& get_images() const {return m_images;} uint32_t get_time() {return m_time_ms;} @@ -98,17 +85,7 @@ private: // for POINTER TS_RECORD_RDP_POINTER* m_pointer; // for IMAGE -// QImage* m_img; -// int m_img_x; -// int m_img_y; -// int m_img_w; -// int m_img_h; UpdateImages m_images; - -// TS_RECORD_RDP_IMAGE_INFO* m_img_info; - - uint16_t m_screen_w; - uint16_t m_screen_h; }; class UpdateDataHelper { diff --git a/client/tp_assist_macos/src/TP-Assist-Info.plist b/client/tp_assist_macos/src/TP-Assist-Info.plist index 3231d75..4b86b89 100644 --- a/client/tp_assist_macos/src/TP-Assist-Info.plist +++ b/client/tp_assist_macos/src/TP-Assist-Info.plist @@ -17,11 +17,11 @@ CFBundlePackageType APPL CFBundleShortVersionString - 3.3.1 + 3.5.1 CFBundleSignature ???? 
CFBundleVersion - 3.3.1 + 3.5.1 LSApplicationCategoryType public.app-category.productivity LSMinimumSystemVersion diff --git a/client/tp_assist_macos/src/csrc/ts_ver.h b/client/tp_assist_macos/src/csrc/ts_ver.h index 6aa10dc..9c55de2 100644 --- a/client/tp_assist_macos/src/csrc/ts_ver.h +++ b/client/tp_assist_macos/src/csrc/ts_ver.h @@ -1,6 +1,6 @@ #ifndef __TS_ASSIST_VER_H__ #define __TS_ASSIST_VER_H__ -#define TP_ASSIST_VER L"3.3.1" +#define TP_ASSIST_VER L"3.5.1" #endif // __TS_ASSIST_VER_H__ diff --git a/client/tp_assist_win/stdafx.cpp b/client/tp_assist_win/stdafx.cpp index fd4f341..6c5f035 100644 --- a/client/tp_assist_win/stdafx.cpp +++ b/client/tp_assist_win/stdafx.cpp @@ -1 +1,14 @@ -#include "stdafx.h" +#include "stdafx.h" + +#include + +// #ifdef EX_DEBUG +// // # pragma comment(lib, "libssl32MTd.lib") +// // # pragma comment(lib, "libcrypto32MTd.lib") +// #else +// # pragma comment(lib, "libssl32MT.lib") +// # pragma comment(lib, "libcrypto32MT.lib") +// #endif + +# pragma comment(lib, "libssl.lib") +# pragma comment(lib, "libcrypto.lib") diff --git a/client/tp_assist_win/tp_assist.rc b/client/tp_assist_win/tp_assist.rc index 8dc73d0..b1c8832 100644 Binary files a/client/tp_assist_win/tp_assist.rc and b/client/tp_assist_win/tp_assist.rc differ diff --git a/client/tp_assist_win/tp_assist.vs2017.vcxproj b/client/tp_assist_win/tp_assist.vs2017.vcxproj index d2ab99a..c5ef904 100644 --- a/client/tp_assist_win/tp_assist.vs2017.vcxproj +++ b/client/tp_assist_win/tp_assist.vs2017.vcxproj @@ -61,13 +61,14 @@ Disabled WIN32;MG_ENABLE_SSL;_DEBUG;_WINDOWS;_WINSOCK_DEPRECATED_NO_WARNINGS;MG_ENABLE_THREADS;MG_DISABLE_HTTP_DIGEST_AUTH;MG_DISABLE_MQTT;MG_DISABLE_SSI;MG_DISABLE_FILESYSTEM;%(PreprocessorDefinitions) true - ..\..\common\teleport;..\..\common\libex\include;..\..\external\jsoncpp\include;..\..\external\openssl\inc32 + ..\..\common\teleport;..\..\common\libex\include;..\..\external\jsoncpp\include;..\..\external\openssl\include MultiThreadedDebug Windows true - ..\..\external\openssl\out32\ssleay32.lib;..\..\external\openssl\out32\libeay32.lib;%(AdditionalDependencies) + %(AdditionalDependencies) + ..\..\external\openssl\lib;%(AdditionalLibraryDirectories) @@ -79,7 +80,7 @@ true WIN32;MG_ENABLE_SSL;NDEBUG;_WINDOWS;_WINSOCK_DEPRECATED_NO_WARNINGS;MG_ENABLE_THREADS;MG_DISABLE_HTTP_DIGEST_AUTH;MG_DISABLE_MQTT;MG_DISABLE_SSI;MG_DISABLE_FILESYSTEM;%(PreprocessorDefinitions) true - ..\..\common\teleport;..\..\common\libex\include;..\..\external\jsoncpp\include;..\..\external\openssl\inc32 + ..\..\common\teleport;..\..\common\libex\include;..\..\external\jsoncpp\include;..\..\external\openssl\include MultiThreaded @@ -87,7 +88,8 @@ true true true - ..\..\external\openssl\out32\ssleay32.lib;..\..\external\openssl\out32\libeay32.lib;%(AdditionalDependencies) + %(AdditionalDependencies) + ..\..\external\openssl\lib;%(AdditionalLibraryDirectories) diff --git a/client/tp_assist_win/ts_cfg.cpp b/client/tp_assist_win/ts_cfg.cpp index 722a7b5..f9214f6 100644 --- a/client/tp_assist_win/ts_cfg.cpp +++ b/client/tp_assist_win/ts_cfg.cpp @@ -1,266 +1,274 @@ -#include "stdafx.h" -#include "ts_cfg.h" -#include "ts_env.h" - - -TsCfg g_cfg; - -TsCfg::TsCfg() -{} - -TsCfg::~TsCfg() -{} - -bool TsCfg::init(void) { - ex_astr file_content; - if (!ex_read_text_file(g_env.m_cfg_file, file_content)) { - EXLOGE("can not load config file.\n"); - return false; - } - - if (!_load(file_content)) - return false; - - return true; -} - -bool TsCfg::save(const ex_astr& new_value) -{ - if (!_load(new_value)) - return 
false; - - Json::StyledWriter jwriter; - ex_astr val = jwriter.write(m_root); - - if (!ex_write_text_file(g_env.m_cfg_file, val)) { - EXLOGE("can not save config file.\n"); - return false; - } - - return true; -} - -bool TsCfg::_load(const ex_astr& str_json) { - Json::Reader jreader; - - if (!jreader.parse(str_json.c_str(), m_root)) { - EXLOGE("can not parse new config data, not in json format? %s\n", jreader.getFormattedErrorMessages().c_str()); - return false; - } - - ex_astr sel_name; - size_t i = 0; - ex_astr tmp; - - //=================================== - // check ssh config - //=================================== - - if (!m_root["ssh"].isObject()) { - EXLOGE("invalid config, error 1.\n"); - return false; - } - - if (!m_root["ssh"]["selected"].isString()) { - EXLOGE("invalid config, error 2.\n"); - return false; - } - - sel_name = m_root["ssh"]["selected"].asCString(); - - if (!m_root["ssh"]["available"].isArray() || m_root["ssh"]["available"].size() == 0) { - EXLOGE("invalid config, error 3.\n"); - return false; - } - - for (i = 0; i < m_root["ssh"]["available"].size(); ++i) { - - if ( - !m_root["ssh"]["available"][i]["name"].isString() - || !m_root["ssh"]["available"][i]["app"].isString() - || !m_root["ssh"]["available"][i]["cmdline"].isString() - ) { - EXLOGE("invalid config, error 4.\n"); - return false; - } - - if (m_root["ssh"]["available"][i]["display"].isNull()) { - m_root["ssh"]["available"][i]["display"] = m_root["ssh"]["available"][i]["name"]; - } - - if (m_root["ssh"]["available"][i]["name"].asCString() != sel_name) - continue; - - tmp = m_root["ssh"]["available"][i]["app"].asCString(); - ex_astr2wstr(tmp, ssh_app, EX_CODEPAGE_UTF8); - tmp = m_root["ssh"]["available"][i]["cmdline"].asCString(); - ex_astr2wstr(tmp, ssh_cmdline, EX_CODEPAGE_UTF8); - - break; - } - - if (ssh_app.length() == 0 || ssh_cmdline.length() == 0) { - EXLOGE("invalid config, error 6.\n"); - return false; - } - - //=================================== - // check sftp config - //=================================== - - if (!m_root["scp"].isObject()) { - EXLOGE("invalid config, error 1.\n"); - return false; - } - - if (!m_root["scp"]["selected"].isString()) { - EXLOGE("invalid config, error 2.\n"); - return false; - } - - sel_name = m_root["scp"]["selected"].asCString(); - - if (!m_root["scp"]["available"].isArray() || m_root["scp"]["available"].size() == 0) { - EXLOGE("invalid config, error 3.\n"); - return false; - } - - for (i = 0; i < m_root["scp"]["available"].size(); ++i) { - - if ( - !m_root["scp"]["available"][i]["name"].isString() - || !m_root["scp"]["available"][i]["app"].isString() - || !m_root["scp"]["available"][i]["cmdline"].isString() - ) { - EXLOGE("invalid config, error 4.\n"); - return false; - } - - if (m_root["scp"]["available"][i]["display"].isNull()) { - m_root["scp"]["available"][i]["display"] = m_root["scp"]["available"][i]["name"]; - } - - if (m_root["scp"]["available"][i]["name"].asCString() != sel_name) - continue; - - tmp = m_root["scp"]["available"][i]["app"].asCString(); - ex_astr2wstr(tmp, scp_app, EX_CODEPAGE_UTF8); - tmp = m_root["scp"]["available"][i]["cmdline"].asCString(); - ex_astr2wstr(tmp, scp_cmdline, EX_CODEPAGE_UTF8); - - break; - } - - if (scp_app.length() == 0 || scp_cmdline.length() == 0) { - EXLOGE("invalid config, error 6.\n"); - return false; - } - - //=================================== - // check telnet config - //=================================== - - if (!m_root["telnet"].isObject()) { - EXLOGE("invalid config, error 1.\n"); - return false; - } - - if 
(!m_root["telnet"]["selected"].isString()) { - EXLOGE("invalid config, error 2.\n"); - return false; - } - - sel_name = m_root["telnet"]["selected"].asCString(); - - if (!m_root["telnet"]["available"].isArray() || m_root["telnet"]["available"].size() == 0) { - EXLOGE("invalid config, error 3.\n"); - return false; - } - - for (i = 0; i < m_root["telnet"]["available"].size(); ++i) { - - if ( - !m_root["telnet"]["available"][i]["name"].isString() - || !m_root["telnet"]["available"][i]["app"].isString() - || !m_root["telnet"]["available"][i]["cmdline"].isString() - ) { - EXLOGE("invalid config, error 4.\n"); - return false; - } - - if (m_root["telnet"]["available"][i]["display"].isNull()) { - m_root["telnet"]["available"][i]["display"] = m_root["telnet"]["available"][i]["name"]; - } - - if (m_root["telnet"]["available"][i]["name"].asCString() != sel_name) - continue; - - tmp = m_root["telnet"]["available"][i]["app"].asCString(); - ex_astr2wstr(tmp, telnet_app, EX_CODEPAGE_UTF8); - tmp = m_root["telnet"]["available"][i]["cmdline"].asCString(); - ex_astr2wstr(tmp, telnet_cmdline, EX_CODEPAGE_UTF8); - - break; - } - - if (telnet_app.length() == 0 || telnet_cmdline.length() == 0) { - EXLOGE("invalid config, error 6.\n"); - return false; - } - - //=================================== - // check rdp config - //=================================== - - if (!m_root["rdp"].isObject()) { - EXLOGE("invalid config, error 1.\n"); - return false; - } - - if (!m_root["rdp"]["selected"].isString()) { - EXLOGE("invalid config, error 2.\n"); - return false; - } - - sel_name = m_root["rdp"]["selected"].asCString(); - - if (!m_root["rdp"]["available"].isArray() || m_root["rdp"]["available"].size() == 0) { - EXLOGE("invalid config, error 3.\n"); - return false; - } - - for (i = 0; i < m_root["rdp"]["available"].size(); ++i) { - - if ( - !m_root["rdp"]["available"][i]["name"].isString() - || !m_root["rdp"]["available"][i]["app"].isString() - || !m_root["rdp"]["available"][i]["cmdline"].isString() - ) { - EXLOGE("invalid config, error 4.\n"); - return false; - } - - if (m_root["rdp"]["available"][i]["display"].isNull()) { - m_root["rdp"]["available"][i]["display"] = m_root["rdp"]["available"][i]["name"]; - } - - if (m_root["rdp"]["available"][i]["name"].asCString() != sel_name) - continue; - - tmp = m_root["rdp"]["available"][i]["app"].asCString(); - ex_astr2wstr(tmp, rdp_app, EX_CODEPAGE_UTF8); - tmp = m_root["rdp"]["available"][i]["cmdline"].asCString(); - ex_astr2wstr(tmp, rdp_cmdline, EX_CODEPAGE_UTF8); - tmp = m_root["rdp"]["available"][i]["name"].asCString(); - ex_astr2wstr(tmp, rdp_name, EX_CODEPAGE_UTF8); - - break; - } - - if (rdp_app.length() == 0 || rdp_cmdline.length() == 0 || rdp_name.length() == 0) { - EXLOGE("invalid config, error 6.\n"); - return false; - } - - return true; -} +#include "stdafx.h" +#include "ts_cfg.h" +#include "ts_env.h" + + +TsCfg g_cfg; + +TsCfg::TsCfg() +{} + +TsCfg::~TsCfg() +{} + +bool TsCfg::init(void) { + ex_astr file_content; + if (!ex_read_text_file(g_env.m_cfg_file, file_content)) { + EXLOGE("can not load config file.\n"); + return false; + } + + if (!_load(file_content)) + return false; + + return true; +} + +bool TsCfg::save(const ex_astr& new_value) +{ + if (!_load(new_value)) + return false; + + //Json::StyledWriter jwriter; + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(m_root, &os); + ex_astr val = os.str(); + + if (!ex_write_text_file(g_env.m_cfg_file, val)) { + EXLOGE("can not save config file.\n"); + 
return false; + } + + return true; +} + +bool TsCfg::_load(const ex_astr& str_json) { + //Json::Reader jreader; + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); + const char *str_json_begin = str_json.c_str(); + + ex_astr err; + if (!jreader->parse(str_json_begin, str_json_begin + str_json.length(), &m_root, &err)) { + EXLOGE("can not parse new config data, not in json format? %s\n", err.c_str()); + return false; + } + + ex_astr sel_name; + size_t i = 0; + ex_astr tmp; + + //=================================== + // check ssh config + //=================================== + + if (!m_root["ssh"].isObject()) { + EXLOGE("invalid config, error 1.\n"); + return false; + } + + if (!m_root["ssh"]["selected"].isString()) { + EXLOGE("invalid config, error 2.\n"); + return false; + } + + sel_name = m_root["ssh"]["selected"].asCString(); + + if (!m_root["ssh"]["available"].isArray() || m_root["ssh"]["available"].size() == 0) { + EXLOGE("invalid config, error 3.\n"); + return false; + } + + for (i = 0; i < m_root["ssh"]["available"].size(); ++i) { + + if ( + !m_root["ssh"]["available"][i]["name"].isString() + || !m_root["ssh"]["available"][i]["app"].isString() + || !m_root["ssh"]["available"][i]["cmdline"].isString() + ) { + EXLOGE("invalid config, error 4.\n"); + return false; + } + + if (m_root["ssh"]["available"][i]["display"].isNull()) { + m_root["ssh"]["available"][i]["display"] = m_root["ssh"]["available"][i]["name"]; + } + + if (m_root["ssh"]["available"][i]["name"].asCString() != sel_name) + continue; + + tmp = m_root["ssh"]["available"][i]["app"].asCString(); + ex_astr2wstr(tmp, ssh_app, EX_CODEPAGE_UTF8); + tmp = m_root["ssh"]["available"][i]["cmdline"].asCString(); + ex_astr2wstr(tmp, ssh_cmdline, EX_CODEPAGE_UTF8); + + break; + } + + if (ssh_app.length() == 0 || ssh_cmdline.length() == 0) { + EXLOGE("invalid config, error 6.\n"); + return false; + } + + //=================================== + // check sftp config + //=================================== + + if (!m_root["scp"].isObject()) { + EXLOGE("invalid config, error 1.\n"); + return false; + } + + if (!m_root["scp"]["selected"].isString()) { + EXLOGE("invalid config, error 2.\n"); + return false; + } + + sel_name = m_root["scp"]["selected"].asCString(); + + if (!m_root["scp"]["available"].isArray() || m_root["scp"]["available"].size() == 0) { + EXLOGE("invalid config, error 3.\n"); + return false; + } + + for (i = 0; i < m_root["scp"]["available"].size(); ++i) { + + if ( + !m_root["scp"]["available"][i]["name"].isString() + || !m_root["scp"]["available"][i]["app"].isString() + || !m_root["scp"]["available"][i]["cmdline"].isString() + ) { + EXLOGE("invalid config, error 4.\n"); + return false; + } + + if (m_root["scp"]["available"][i]["display"].isNull()) { + m_root["scp"]["available"][i]["display"] = m_root["scp"]["available"][i]["name"]; + } + + if (m_root["scp"]["available"][i]["name"].asCString() != sel_name) + continue; + + tmp = m_root["scp"]["available"][i]["app"].asCString(); + ex_astr2wstr(tmp, scp_app, EX_CODEPAGE_UTF8); + tmp = m_root["scp"]["available"][i]["cmdline"].asCString(); + ex_astr2wstr(tmp, scp_cmdline, EX_CODEPAGE_UTF8); + + break; + } + + if (scp_app.length() == 0 || scp_cmdline.length() == 0) { + EXLOGE("invalid config, error 6.\n"); + return false; + } + + //=================================== + // check telnet config + //=================================== + + if (!m_root["telnet"].isObject()) { + EXLOGE("invalid config, error 1.\n"); + return false; + } + + if 
(!m_root["telnet"]["selected"].isString()) { + EXLOGE("invalid config, error 2.\n"); + return false; + } + + sel_name = m_root["telnet"]["selected"].asCString(); + + if (!m_root["telnet"]["available"].isArray() || m_root["telnet"]["available"].size() == 0) { + EXLOGE("invalid config, error 3.\n"); + return false; + } + + for (i = 0; i < m_root["telnet"]["available"].size(); ++i) { + + if ( + !m_root["telnet"]["available"][i]["name"].isString() + || !m_root["telnet"]["available"][i]["app"].isString() + || !m_root["telnet"]["available"][i]["cmdline"].isString() + ) { + EXLOGE("invalid config, error 4.\n"); + return false; + } + + if (m_root["telnet"]["available"][i]["display"].isNull()) { + m_root["telnet"]["available"][i]["display"] = m_root["telnet"]["available"][i]["name"]; + } + + if (m_root["telnet"]["available"][i]["name"].asCString() != sel_name) + continue; + + tmp = m_root["telnet"]["available"][i]["app"].asCString(); + ex_astr2wstr(tmp, telnet_app, EX_CODEPAGE_UTF8); + tmp = m_root["telnet"]["available"][i]["cmdline"].asCString(); + ex_astr2wstr(tmp, telnet_cmdline, EX_CODEPAGE_UTF8); + + break; + } + + if (telnet_app.length() == 0 || telnet_cmdline.length() == 0) { + EXLOGE("invalid config, error 6.\n"); + return false; + } + + //=================================== + // check rdp config + //=================================== + + if (!m_root["rdp"].isObject()) { + EXLOGE("invalid config, error 1.\n"); + return false; + } + + if (!m_root["rdp"]["selected"].isString()) { + EXLOGE("invalid config, error 2.\n"); + return false; + } + + sel_name = m_root["rdp"]["selected"].asCString(); + + if (!m_root["rdp"]["available"].isArray() || m_root["rdp"]["available"].size() == 0) { + EXLOGE("invalid config, error 3.\n"); + return false; + } + + for (i = 0; i < m_root["rdp"]["available"].size(); ++i) { + + if ( + !m_root["rdp"]["available"][i]["name"].isString() + || !m_root["rdp"]["available"][i]["app"].isString() + || !m_root["rdp"]["available"][i]["cmdline"].isString() + ) { + EXLOGE("invalid config, error 4.\n"); + return false; + } + + if (m_root["rdp"]["available"][i]["display"].isNull()) { + m_root["rdp"]["available"][i]["display"] = m_root["rdp"]["available"][i]["name"]; + } + + if (m_root["rdp"]["available"][i]["name"].asCString() != sel_name) + continue; + + tmp = m_root["rdp"]["available"][i]["app"].asCString(); + ex_astr2wstr(tmp, rdp_app, EX_CODEPAGE_UTF8); + tmp = m_root["rdp"]["available"][i]["cmdline"].asCString(); + ex_astr2wstr(tmp, rdp_cmdline, EX_CODEPAGE_UTF8); + tmp = m_root["rdp"]["available"][i]["name"].asCString(); + ex_astr2wstr(tmp, rdp_name, EX_CODEPAGE_UTF8); + + break; + } + + if (rdp_app.length() == 0 || rdp_cmdline.length() == 0 || rdp_name.length() == 0) { + EXLOGE("invalid config, error 6.\n"); + return false; + } + + return true; +} diff --git a/client/tp_assist_win/ts_env.cpp b/client/tp_assist_win/ts_env.cpp index db08129..767ef7b 100644 --- a/client/tp_assist_win/ts_env.cpp +++ b/client/tp_assist_win/ts_env.cpp @@ -1,71 +1,74 @@ -#include "stdafx.h" -#include "ts_env.h" - -#include -#ifdef EX_OS_WIN32 -# include -//# include -#endif - -TsEnv g_env; - -//======================================================= -// -//======================================================= - -TsEnv::TsEnv() -{} - -TsEnv::~TsEnv() -{} - -bool TsEnv::init(void) -{ - if (!ex_exec_file(m_exec_file)) - return false; - - m_exec_path = m_exec_file; - if (!ex_dirname(m_exec_path)) - return false; - - m_cfg_file = m_exec_path; - ex_path_join(m_cfg_file, false, L"cfg", 
L"tp-assist.json", NULL); - - m_log_path = m_exec_path; - ex_path_join(m_log_path, false, L"log", NULL); - - ex_wstr cfg_default; - -#ifdef _DEBUG - m_site_path = m_exec_path; - ex_path_join(m_site_path, true, L"..", L"..", L"..", L"..", L"client", L"tp_assist_win", L"site", NULL); - - m_tools_path = m_exec_path; - ex_path_join(m_tools_path, true, L"..", L"..", L"..", L"..", L"client", L"tools", NULL); - - cfg_default = m_exec_path; - ex_path_join(cfg_default, true, L"..", L"..", L"..", L"..", L"client", L"tp_assist_win", L"cfg", L"tp-assist.default.json", NULL); - -#else - m_site_path = m_exec_path; - ex_path_join(m_site_path, false, L"site", NULL); - - m_tools_path = m_exec_path; - ex_path_join(m_tools_path, false, L"tools", NULL); - - cfg_default = m_exec_path; - ex_path_join(cfg_default, false, L"tp-assist.default.json", NULL); -#endif - - if (!ex_is_file_exists(m_cfg_file.c_str())) { - ex_wstr cfg_path = m_exec_path; - ex_path_join(cfg_path, false, L"cfg", NULL); - - ex_mkdirs(cfg_path); - - if (!ex_copy_file(cfg_default.c_str(), m_cfg_file.c_str())) - return false; -} - - return true; -} +#include "stdafx.h" +#include "ts_env.h" + +#include +#ifdef EX_OS_WIN32 +# include +//# include +#endif + +TsEnv g_env; + +//======================================================= +// +//======================================================= + +TsEnv::TsEnv() +{} + +TsEnv::~TsEnv() +{} + +bool TsEnv::init(void) +{ + if (!ex_exec_file(m_exec_file)) + return false; + + m_exec_path = m_exec_file; + if (!ex_dirname(m_exec_path)) + return false; + + m_cfg_file = m_exec_path; + ex_path_join(m_cfg_file, false, L"cfg", L"tp-assist.json", NULL); + + m_log_path = m_exec_path; + ex_path_join(m_log_path, false, L"log", NULL); + + ex_wstr cfg_default; + +#ifdef _DEBUG + m_site_path = m_exec_path; + ex_path_join(m_site_path, true, L"..", L"..", L"..", L"..", L"client", L"tp_assist_win", L"site", NULL); + +// m_tools_path = m_exec_path; +// ex_path_join(m_tools_path, true, L"..", L"..", L"..", L"..", L"client", L"tools", NULL); + + cfg_default = m_exec_path; + ex_path_join(cfg_default, true, L"..", L"..", L"..", L"..", L"client", L"tp_assist_win", L"cfg", L"tp-assist.default.json", NULL); + +#else + m_site_path = m_exec_path; + ex_path_join(m_site_path, false, L"site", NULL); + +// m_tools_path = m_exec_path; +// ex_path_join(m_tools_path, false, L"tools", NULL); + + cfg_default = m_exec_path; + ex_path_join(cfg_default, false, L"tp-assist.default.json", NULL); +#endif + + m_tools_path = m_exec_path; + ex_path_join(m_tools_path, false, L"tools", NULL); + + if (!ex_is_file_exists(m_cfg_file.c_str())) { + ex_wstr cfg_path = m_exec_path; + ex_path_join(cfg_path, false, L"cfg", NULL); + + ex_mkdirs(cfg_path); + + if (!ex_copy_file(cfg_default.c_str(), m_cfg_file.c_str())) + return false; +} + + return true; +} diff --git a/client/tp_assist_win/ts_http_rpc.cpp b/client/tp_assist_win/ts_http_rpc.cpp index 8c57c25..3cebf9c 100644 --- a/client/tp_assist_win/ts_http_rpc.cpp +++ b/client/tp_assist_win/ts_http_rpc.cpp @@ -1,1207 +1,1191 @@ -#include "stdafx.h" - -#pragma warning(disable:4091) - -#include -#include -#include - -#pragma comment(lib, "Crypt32.lib") - -#include - -#include "ts_http_rpc.h" -#include "dlg_main.h" -#include "ts_ver.h" -#include "ts_env.h" - -/* -1. -SecureCRT֧ñǩҳı⣬в /N "tab name"Ϳ -Example: -To launch a new Telnet session, displaying the name "Houston, TX" on the tab, use the following: -/T /N "Houston, TX" /TELNET 192.168.0.6 - -2. 
-SecureCRTŵһڵIJͬǩҳУʹò /T - SecureCRT.exe /T /N "TP#ssh://192.168.1.3" /SSH2 /L root /PASSWORD 1234 120.26.109.25 - -3. -telnetͻ˵ - putty.exe telnet://administrator@127.0.0.1:52389 -SecureCRTҪ - SecureCRT.exe /T /N "TP#telnet://192.168.1.3" /SCRIPT X:\path\to\startup.vbs /TELNET 127.0.0.1 52389 -Уstartup.vbsΪ ----------ļʼ--------- -#$language = "VBScript" -#$interface = "1.0" -Sub main - crt.Screen.Synchronous = True - crt.Screen.WaitForString "ogin: " - crt.Screen.Send "SESSION-ID" & VbCr - crt.Screen.Synchronous = False -End Sub ----------ļ--------- - -4. ΪputtyĴڱǩʾIPԳӳɹ˷ - PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@192.168.1.2: \w\a\]$PS1" -ֹˣubuntuԣ֪Ƿܹ֧еLinuxSecureCRTԴ˱ʾԡ -*/ - -//#define RDP_CLIENT_SYSTEM_BUILTIN -// #define RDP_CLIENT_SYSTEM_ACTIVE_CONTROL -//#define RDP_CLIENT_FREERDP - - -//#ifdef RDP_CLIENT_SYSTEM_BUILTIN - -//connect to console:i:%d -//compression:i:1 -//bitmapcachepersistenable:i:1 - -std::string rdp_content = "\ -administrative session:i:%d\n\ -screen mode id:i:%d\n\ -use multimon:i:0\n\ -desktopwidth:i:%d\n\ -desktopheight:i:%d\n\ -session bpp:i:16\n\ -winposstr:s:0,1,%d,%d,%d,%d\n\ -compression:i:1\n\ -bitmapcachepersistenable:i:1\n\ -keyboardhook:i:2\n\ -audiocapturemode:i:0\n\ -videoplaybackmode:i:1\n\ -connection type:i:7\n\ -networkautodetect:i:1\n\ -bandwidthautodetect:i:1\n\ -displayconnectionbar:i:1\n\ -enableworkspacereconnect:i:0\n\ -disable wallpaper:i:1\n\ -allow font smoothing:i:0\n\ -allow desktop composition:i:0\n\ -disable full window drag:i:1\n\ -disable menu anims:i:1\n\ -disable themes:i:1\n\ -disable cursor setting:i:1\n\ -full address:s:%s:%d\n\ -audiomode:i:0\n\ -redirectprinters:i:0\n\ -redirectcomports:i:0\n\ -redirectsmartcards:i:0\n\ -redirectclipboard:i:%d\n\ -redirectposdevices:i:0\n\ -autoreconnection enabled:i:0\n\ -authentication level:i:2\n\ -prompt for credentials:i:0\n\ -negotiate security layer:i:1\n\ -remoteapplicationmode:i:0\n\ -alternate shell:s:\n\ -shell working directory:s:\n\ -gatewayhostname:s:\n\ -gatewayusagemethod:i:4\n\ -gatewaycredentialssource:i:4\n\ -gatewayprofileusagemethod:i:0\n\ -promptcredentialonce:i:0\n\ -gatewaybrokeringtype:i:0\n\ -use redirection server name:i:0\n\ -rdgiskdcproxy:i:0\n\ -kdcproxyname:s:\n\ -drivestoredirect:s:%s\n\ -username:s:%s\n\ -password 51:b:%s\n\ -"; - -//redirectdirectx:i:0\n\ -//prompt for credentials on client:i:0\n\ - -//#endif - - -TsHttpRpc g_http_interface; -TsHttpRpc g_https_interface; - -void http_rpc_main_loop(bool is_https) { - if (is_https) { - if (!g_https_interface.init_https()) { - EXLOGE("[ERROR] can not start HTTPS-RPC listener, maybe port %d is already in use.\n", TS_HTTPS_RPC_PORT); - return; - } - - EXLOGW("======================================================\n"); - EXLOGW("[rpc] TeleportAssist-HTTPS-RPC ready on 127.0.0.1:%d\n", TS_HTTPS_RPC_PORT); - - g_https_interface.run(); - - EXLOGW("[rpc] HTTPS-Server main loop end.\n"); - } else { - if (!g_http_interface.init_http()) { - EXLOGE("[ERROR] can not start HTTP-RPC listener, maybe port %d is already in use.\n", TS_HTTP_RPC_PORT); - return; - } - - EXLOGW("======================================================\n"); - EXLOGW("[rpc] TeleportAssist-HTTP-RPC ready on 127.0.0.1:%d\n", TS_HTTP_RPC_PORT); - - g_http_interface.run(); - - EXLOGW("[rpc] HTTP-Server main loop end.\n"); - } -} - -void http_rpc_stop(bool is_https) { - if (is_https) - g_https_interface.stop(); - else - g_http_interface.stop(); -} - -#define HEXTOI(x) (isdigit(x) ? 
x - '0' : x - 'W') - -int ts_url_decode(const char *src, int src_len, char *dst, int dst_len, int is_form_url_encoded) { - int i, j, a, b; - - for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) { - if (src[i] == '%') { - if (i < src_len - 2 && isxdigit(*(const unsigned char *)(src + i + 1)) && - isxdigit(*(const unsigned char *)(src + i + 2))) { - a = tolower(*(const unsigned char *)(src + i + 1)); - b = tolower(*(const unsigned char *)(src + i + 2)); - dst[j] = (char)((HEXTOI(a) << 4) | HEXTOI(b)); - i += 2; - } else { - return -1; - } - } else if (is_form_url_encoded && src[i] == '+') { - dst[j] = ' '; - } else { - dst[j] = src[i]; - } - } - - dst[j] = '\0'; /* Null-terminate the destination */ - - return i >= src_len ? j : -1; -} - -bool calc_psw51b(const char* password, std::string& ret) { - DATA_BLOB DataIn; - DATA_BLOB DataOut; - - ex_wstr w_pswd; - ex_astr2wstr(password, w_pswd, EX_CODEPAGE_ACP); - - DataIn.cbData = w_pswd.length() * sizeof(wchar_t); - DataIn.pbData = (BYTE*)w_pswd.c_str(); - - - if (!CryptProtectData(&DataIn, L"psw", nullptr, nullptr, nullptr, 0, &DataOut)) - return false; - - char szRet[5] = { 0 }; - for (DWORD i = 0; i < DataOut.cbData; ++i) { - sprintf_s(szRet, 5, "%02X", DataOut.pbData[i]); - ret += szRet; - } - - LocalFree(DataOut.pbData); - return true; -} - -bool isDegital(std::string str) { - for (int i = 0; i < str.size(); i++) { - if (str.at(i) == '-' && str.size() > 1) // пָܳ - continue; - if (str.at(i) > '9' || str.at(i) < '0') - return false; - } - return true; -} - -std::string strtolower(std::string str) { - for (int i = 0; i < str.size(); i++) - { - str[i] = tolower(str[i]); - } - return str; -} - -void SplitString(const std::string& s, std::vector& v, const std::string& c) -{ - std::string::size_type pos1, pos2; - pos2 = s.find(c); - pos1 = 0; - while (std::string::npos != pos2) - { - v.push_back(s.substr(pos1, pos2 - pos1)); - - pos1 = pos2 + c.size(); - pos2 = s.find(c, pos1); - } - if (pos1 != s.length()) - v.push_back(s.substr(pos1)); -} - -TsHttpRpc::TsHttpRpc() { - m_stop = false; - mg_mgr_init(&m_mg_mgr, nullptr); -} - -TsHttpRpc::~TsHttpRpc() { - mg_mgr_free(&m_mg_mgr); -} - -bool TsHttpRpc::init_http() { - struct mg_connection* nc = nullptr; - - char addr[128] = { 0 }; - ex_strformat(addr, 128, "tcp://127.0.0.1:%d", TS_HTTP_RPC_PORT); - - nc = mg_bind(&m_mg_mgr, addr, _mg_event_handler); - if (!nc) { - EXLOGE("[rpc] TsHttpRpc::init 127.0.0.1:%d\n", TS_HTTP_RPC_PORT); - return false; - } - nc->user_data = this; - - mg_set_protocol_http_websocket(nc); - - return _on_init(); -} - -bool TsHttpRpc::init_https() { - ex_wstr file_ssl_cert = g_env.m_exec_path; - ex_path_join(file_ssl_cert, true, L"cfg", L"localhost.pem", NULL); - ex_wstr file_ssl_key = g_env.m_exec_path; - ex_path_join(file_ssl_key, true, L"cfg", L"localhost.key", NULL); - ex_astr _ssl_cert; - ex_wstr2astr(file_ssl_cert, _ssl_cert); - ex_astr _ssl_key; - ex_wstr2astr(file_ssl_key, _ssl_key); - - const char *err = NULL; - struct mg_bind_opts bind_opts; - memset(&bind_opts, 0, sizeof(bind_opts)); - bind_opts.ssl_cert = _ssl_cert.c_str(); - bind_opts.ssl_key = _ssl_key.c_str(); - bind_opts.error_string = &err; - - - char addr[128] = { 0 }; - ex_strformat(addr, 128, "tcp://127.0.0.1:%d", TS_HTTPS_RPC_PORT); - - struct mg_connection* nc = nullptr; - nc = mg_bind_opt(&m_mg_mgr, addr, _mg_event_handler, bind_opts); - if (!nc) { - EXLOGE("[rpc] TsHttpRpc::init 127.0.0.1:%d\n", TS_HTTPS_RPC_PORT); - return false; - } - nc->user_data = this; - - mg_set_protocol_http_websocket(nc); - 
- return _on_init(); -} - -bool TsHttpRpc::_on_init() { - char file_name[MAX_PATH] = { 0 }; - if (!GetModuleFileNameA(nullptr, file_name, MAX_PATH)) - return false; - - int len = strlen(file_name); - - if (file_name[len] == '\\') - file_name[len] = '\0'; - - char* match = strrchr(file_name, '\\'); - if (match) - *match = '\0'; - - m_content_type_map[".js"] = "application/javascript"; - m_content_type_map[".png"] = "image/png"; - m_content_type_map[".jpeg"] = "image/jpeg"; - m_content_type_map[".jpg"] = "image/jpeg"; - m_content_type_map[".gif"] = "image/gif"; - m_content_type_map[".ico"] = "image/x-icon"; - m_content_type_map[".json"] = "image/json"; - m_content_type_map[".html"] = "text/html"; - m_content_type_map[".css"] = "text/css"; - m_content_type_map[".tif"] = "image/tiff"; - m_content_type_map[".tiff"] = "image/tiff"; - m_content_type_map[".svg"] = "text/html"; - - return true; -} - -void TsHttpRpc::run(void) { - while (!m_stop) { - mg_mgr_poll(&m_mg_mgr, 500); - } -} - -void TsHttpRpc::stop(void) { - m_stop = true; -} - -void TsHttpRpc::_mg_event_handler(struct mg_connection *nc, int ev, void *ev_data) { - struct http_message *hm = (struct http_message*)ev_data; - - TsHttpRpc* _this = (TsHttpRpc*)nc->user_data; - if (!_this) { - EXLOGE("[ERROR] invalid http request.\n"); - return; - } - - switch (ev) { - case MG_EV_HTTP_REQUEST: - { - ex_astr uri; - ex_chars _uri; - _uri.resize(hm->uri.len + 1); - memset(&_uri[0], 0, hm->uri.len + 1); - memcpy(&_uri[0], hm->uri.p, hm->uri.len); - uri = &_uri[0]; - -#ifdef EX_DEBUG - char* dbg_method = nullptr; - if (hm->method.len == 3 && 0 == memcmp(hm->method.p, "GET", hm->method.len)) - dbg_method = "GET"; - else if (hm->method.len == 4 && 0 == memcmp(hm->method.p, "POST", hm->method.len)) - dbg_method = "POST"; - else - dbg_method = "UNSUPPORTED-HTTP-METHOD"; - - EXLOGV("[rpc] got %s request: %s\n", dbg_method, uri.c_str()); -#endif - ex_astr ret_buf; - bool b_is_html = false; - -// if (uri == "/") { -// ex_wstr page = L"Teleport\n\n
Teleport Assist is running!
"; -// ex_wstr2astr(page, ret_buf, EX_CODEPAGE_UTF8); -// -// mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n%s", ret_buf.size() - 1, &ret_buf[0]); -// nc->flags |= MG_F_SEND_AND_CLOSE; -// return; -// } - - if (uri == "/") { - uri = "/status.html"; - b_is_html = true; - } - else if (uri == "/config") { - uri = "/index.html"; - b_is_html = true; - } - - ex_astr temp; - int offset = uri.find("/", 1); - if (offset > 0) { - temp = uri.substr(1, offset - 1); - - if (temp == "api") { - ex_astr method; - ex_astr json_param; - int rv = _this->_parse_request(hm, method, json_param); - if (0 != rv) { - EXLOGE("[ERROR] http-rpc got invalid request.\n"); - _this->_create_json_ret(ret_buf, rv); - } else { - _this->_process_js_request(method, json_param, ret_buf); - } - - mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %ld\r\nContent-Type: application/json\r\n\r\n%s", ret_buf.size() - 1, &ret_buf[0]); - nc->flags |= MG_F_SEND_AND_CLOSE; - return; - } - } - - - ex_astr file_suffix; - offset = uri.rfind("."); - if (offset > 0) { - file_suffix = uri.substr(offset, uri.length()); - } - - ex_wstr2astr(g_env.m_site_path, temp); - ex_astr index_path = temp + uri; - - - FILE* file = ex_fopen(index_path.c_str(), "rb"); - if (file) { - unsigned long file_size = 0; - char* buf = nullptr; - size_t ret = 0; - - fseek(file, 0, SEEK_END); - file_size = ftell(file); - buf = new char[file_size]; - memset(buf, 0, file_size); - fseek(file, 0, SEEK_SET); - ret = fread(buf, 1, file_size, file); - fclose(file); - - ex_astr content_type = _this->get_content_type(file_suffix); - - mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %ld\r\nContent-Type: %s\r\n\r\n", file_size, content_type.c_str()); - mg_send(nc, buf, (int)file_size); - delete[]buf; - nc->flags |= MG_F_SEND_AND_CLOSE; - return; - } else if (b_is_html) { - ex_wstr page = L"404 Not Found

404 Not Found - Teleport Assistor configuration page not found.

"; - ex_wstr2astr(page, ret_buf, EX_CODEPAGE_UTF8); - - mg_printf(nc, "HTTP/1.0 404 File Not Found\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %ld\r\nContent-Type: text/html\r\n\r\n%s", ret_buf.size() - 1, &ret_buf[0]); - nc->flags |= MG_F_SEND_AND_CLOSE; - return; - } - - } - break; - default: - break; - } -} - -int TsHttpRpc::_parse_request(struct http_message* req, ex_astr& func_cmd, ex_astr& func_args) { - if (!req) - return TPE_FAILED; - - bool is_get = true; - if (req->method.len == 3 && 0 == memcmp(req->method.p, "GET", req->method.len)) - is_get = true; - else if (req->method.len == 4 && 0 == memcmp(req->method.p, "POST", req->method.len)) - is_get = false; - else - return TPE_HTTP_METHOD; - - ex_astrs strs; - - size_t pos_start = 1; // һֽڣһ '/' - - size_t i = 0; - for (i = pos_start; i < req->uri.len; ++i) { - if (req->uri.p[i] == '/') { - if (i - pos_start > 0) { - ex_astr tmp_uri; - tmp_uri.assign(req->uri.p + pos_start, i - pos_start); - strs.push_back(tmp_uri); - } - pos_start = i + 1; // ǰҵķָ - } - } - if (pos_start < req->uri.len) { - ex_astr tmp_uri; - tmp_uri.assign(req->uri.p + pos_start, req->uri.len - pos_start); - strs.push_back(tmp_uri); - } - - if (strs.empty() || strs[0] != "api") - return TPE_PARAM; - - if (is_get) { - if (2 == strs.size()) { - func_cmd = strs[1]; - } else if (3 == strs.size()) { - func_cmd = strs[1]; - func_args = strs[2]; - } else { - return TPE_PARAM; - } - } else { - if (2 == strs.size()) { - func_cmd = strs[1]; - } else { - return TPE_PARAM; - } - - if (req->body.len > 0) { - func_args.assign(req->body.p, req->body.len); - } - } - - if (func_args.length() > 0) { - // url-decode - int len = func_args.length() * 2; - ex_chars sztmp; - sztmp.resize(len); - memset(&sztmp[0], 0, len); - if (-1 == ts_url_decode(func_args.c_str(), func_args.length(), &sztmp[0], len, 0)) - return TPE_HTTP_URL_ENCODE; - - func_args = &sztmp[0]; - } - - EXLOGV("[rpc] method=%s, json_param=%s\n", func_cmd.c_str(), func_args.c_str()); - - return TPE_OK; -} - -void TsHttpRpc::_process_js_request(const ex_astr& func_cmd, const ex_astr& func_args, ex_astr& buf) { - if (func_cmd == "get_version") { - _rpc_func_get_version(func_args, buf); - } else if (func_cmd == "run") { - _rpc_func_run_client(func_args, buf); - } else if (func_cmd == "rdp_play") { - _rpc_func_rdp_play(func_args, buf); - } else if (func_cmd == "get_config") { - _rpc_func_get_config(func_args, buf); - } else if (func_cmd == "set_config") { - _rpc_func_set_config(func_args, buf); - } else if (func_cmd == "file_action") { - _rpc_func_file_action(func_args, buf); - } else { - EXLOGE("[rpc] got unknown command: %s\n", func_cmd.c_str()); - _create_json_ret(buf, TPE_UNKNOWN_CMD); - } -} - -void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode) { - // أ {"code":123} - - Json::FastWriter jr_writer; - Json::Value jr_root; - - jr_root["code"] = errcode; - buf = jr_writer.write(jr_root); -} - -void TsHttpRpc::_create_json_ret(ex_astr& buf, Json::Value& jr_root) { - Json::FastWriter jr_writer; - buf = jr_writer.write(jr_root); -} - -void TsHttpRpc::_rpc_func_url_protocol(const ex_astr& args, ex_astr& buf) -{ - //urlprotocol÷ʽ - // url-decode - std::string func_args = args; - if (func_args.length() > 0) - { - int len = func_args.length() * 2; - ex_chars sztmp; - sztmp.resize(len); - memset(&sztmp[0], 0, len); - if (-1 == ts_url_decode(func_args.c_str(), func_args.length(), &sztmp[0], len, 0)) - return ; - - func_args = &sztmp[0]; - } - EXLOGD(("%s\n"), func_args.c_str()); - //ιteleport://{}/,ֻ - std::string 
urlproto_appname = TP_URLPROTO_APP_NAME; - urlproto_appname += "://{"; - func_args.erase(0, urlproto_appname.length());//ȥһURLPROTO_APP_NAMEԼ://ַ - int pos = func_args.length() - 1; - if (func_args.substr(pos, 1) == "/") - func_args.erase(pos - 1, 2);//ȥһ}/ַ - else - func_args.erase(pos, 1); - - //Сieʱԭjsonṹе"ȥҪ¸ʽΪjsonʽ - if (func_args.find("\"", 0) == std::string::npos) { - std::vector strv; - SplitString(func_args, strv, ","); - func_args = ""; - for (std::vector::size_type i = 0; i < strv.size(); i++) { - std::vector strv1; - SplitString(strv[i], strv1, ":"); - strv1[0] = "\"" + strv1[0] + "\""; - if (!isDegital(strv1[1]) && strtolower(strv1[1]) != "true" && strtolower(strv1[1]) != "false") - strv1[1] = "\"" + strv1[1] + "\""; - - strv[i] = strv1[0] + ":" + strv1[1]; - if (i == 0) - func_args = strv[i]; - else - func_args += "," + strv[i]; - } - } - func_args = "{" + func_args + "}"; - EXLOGD(("%s\n"), func_args.c_str()); - //TsHttpRpc_rpc_func_run_clientͻ - _rpc_func_run_client(func_args, buf); -} - -void TsHttpRpc::_rpc_func_run_client(const ex_astr& func_args, ex_astr& buf) { - // Σ{"ip":"192.168.5.11","port":22,"uname":"root","uauth":"abcdefg","authmode":1,"protocol":2} - // authmode: 1=password, 2=private-key - // protocol: 1=rdp, 2=ssh - // SSHأ {"code":0, "data":{"sid":"0123abcde"}} - // RDPأ {"code":0, "data":{"sid":"0123abcde0A"}} - - Json::Reader jreader; - Json::Value jsRoot; - - if (!jreader.parse(func_args.c_str(), jsRoot)) { - _create_json_ret(buf, TPE_JSON_FORMAT); - return; - } - if (!jsRoot.isObject()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - // жϲǷȷ - if (!jsRoot["teleport_ip"].isString() - || !jsRoot["teleport_port"].isNumeric() || !jsRoot["remote_host_ip"].isString() - || !jsRoot["session_id"].isString() || !jsRoot["protocol_type"].isNumeric() || !jsRoot["protocol_sub_type"].isNumeric() - || !jsRoot["protocol_flag"].isNumeric() - ) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - int pro_type = jsRoot["protocol_type"].asUInt(); - int pro_sub = jsRoot["protocol_sub_type"].asInt(); - ex_u32 protocol_flag = jsRoot["protocol_flag"].asUInt(); - - ex_astr teleport_ip = jsRoot["teleport_ip"].asCString(); - int teleport_port = jsRoot["teleport_port"].asUInt(); - - ex_astr remote_host_name = jsRoot["remote_host_name"].asCString(); - - ex_astr real_host_ip = jsRoot["remote_host_ip"].asCString(); - ex_astr sid = jsRoot["session_id"].asCString(); - - ex_wstr w_exe_path; - WCHAR w_szCommandLine[MAX_PATH] = { 0 }; - - - ex_wstr w_sid; - ex_astr2wstr(sid, w_sid); - ex_wstr w_teleport_ip; - ex_astr2wstr(teleport_ip, w_teleport_ip); - ex_wstr w_real_host_ip; - ex_astr2wstr(real_host_ip, w_real_host_ip); - ex_wstr w_remote_host_name; - ex_astr2wstr(remote_host_name, w_remote_host_name); - WCHAR w_port[32] = { 0 }; - swprintf_s(w_port, _T("%d"), teleport_port); - - ex_wstr tmp_rdp_file; // for .rdp file - - if (pro_type == TP_PROTOCOL_TYPE_RDP) { - //============================================== - // RDP - //============================================== - - bool flag_clipboard = ((protocol_flag & TP_FLAG_RDP_CLIPBOARD) == TP_FLAG_RDP_CLIPBOARD); - bool flag_disk = ((protocol_flag & TP_FLAG_RDP_DISK) == TP_FLAG_RDP_DISK); - bool flag_console = ((protocol_flag & TP_FLAG_RDP_CONSOLE) == TP_FLAG_RDP_CONSOLE); - - int rdp_w = 800; - int rdp_h = 640; - bool rdp_console = false; - - if (!jsRoot["rdp_width"].isNull()) { - if (jsRoot["rdp_width"].isNumeric()) { - rdp_w = jsRoot["rdp_width"].asUInt(); - } else { - _create_json_ret(buf, TPE_PARAM); - return; - } - } - - if 
(!jsRoot["rdp_height"].isNull()) { - if (jsRoot["rdp_height"].isNumeric()) { - rdp_h = jsRoot["rdp_height"].asUInt(); - } else { - _create_json_ret(buf, TPE_PARAM); - return; - } - } - - if (!jsRoot["rdp_console"].isNull()) { - if (jsRoot["rdp_console"].isBool()) { - rdp_console = jsRoot["rdp_console"].asBool(); - } else { - _create_json_ret(buf, TPE_PARAM); - return; - } - } - - if (!flag_console) - rdp_console = false; - - - int split_pos = sid.length() - 2; - ex_astr real_sid = sid.substr(0, split_pos); - ex_astr str_pwd_len = sid.substr(split_pos, sid.length()); - int n_pwd_len = strtol(str_pwd_len.c_str(), nullptr, 16); - n_pwd_len -= real_sid.length(); - n_pwd_len -= 2; - char szPwd[256] = { 0 }; - for (int i = 0; i < n_pwd_len; i++) { - szPwd[i] = '*'; - } - - ex_astr2wstr(real_sid, w_sid); - - w_exe_path = _T("\""); - w_exe_path += g_cfg.rdp_app + _T("\" "); - - ex_wstr rdp_name = g_cfg.rdp_name; - if (rdp_name == L"mstsc") { - w_exe_path += g_cfg.rdp_cmdline; - - int width = 0; - int higth = 0; - int cx = 0; - int cy = 0; - - int display = 1; - int iWidth = GetSystemMetrics(SM_CXSCREEN); - int iHeight = GetSystemMetrics(SM_CYSCREEN); - - if (rdp_w == 0 || rdp_h == 0) { - //ȫ - width = iWidth; - higth = iHeight; - display = 2; - } else { - width = rdp_w; - higth = rdp_h; - display = 1; - } - - cx = (iWidth - width) / 2; - cy = (iHeight - higth) / 2; - if (cx < 0) { - cx = 0; - } - if (cy < 0) { - cy = 0; - } - - // int console_mode = 0; - // if (rdp_console) - // console_mode = 1; - - std::string psw51b; - if (!calc_psw51b(szPwd, psw51b)) { - EXLOGE("calc password failed.\n"); - _create_json_ret(buf, TPE_FAILED); - return; - } - - real_sid = "01" + real_sid; - - char sz_rdp_file_content[4096] = { 0 }; - sprintf_s(sz_rdp_file_content, 4096, rdp_content.c_str() - , (flag_console && rdp_console) ? 1 : 0 - , display, width, higth - , cx, cy, cx + width + 100, cy + higth + 100 - , teleport_ip.c_str(), teleport_port - , flag_clipboard ? 1 : 0 - , flag_disk ? "*" : "" - , real_sid.c_str() - , psw51b.c_str() - ); - - char sz_file_name[MAX_PATH] = { 0 }; - char temp_path[MAX_PATH] = { 0 }; - DWORD ret = GetTempPathA(MAX_PATH, temp_path); - if (ret <= 0) { - EXLOGE("fopen failed (%d).\n", GetLastError()); - _create_json_ret(buf, TPE_FAILED); - return; - } - - ex_astr temp_host_ip = real_host_ip; - ex_replace_all(temp_host_ip, ".", "-"); - - sprintf_s(sz_file_name, MAX_PATH, ("%s%s.rdp"), temp_path, temp_host_ip.c_str()); - - FILE* f = NULL; - if (fopen_s(&f, sz_file_name, "wt") != 0) { - EXLOGE("fopen failed (%d).\n", GetLastError()); - _create_json_ret(buf, TPE_OPENFILE); - return; - } - // Write a string into the file. 
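
For context on the file being written here: the buffer holds the generated .rdp document, and its "password 51:b:" line was produced by calc_psw51b() earlier in this file, i.e. the UTF-16 password is encrypted with the Windows DPAPI (CryptProtectData) and the resulting blob is hex-encoded. A condensed, self-contained sketch of that transformation (the helper name is illustrative):

    #include <windows.h>
    #include <wincrypt.h>
    #include <string>
    #pragma comment(lib, "Crypt32.lib")

    // Mirrors calc_psw51b(): DPAPI-protect the UTF-16 password, then render
    // the blob as uppercase hex -- the format mstsc accepts for "password 51:b:...".
    static bool make_rdp_password51(const std::wstring& pwd, std::string& hex_out) {
        DATA_BLOB in, out;
        in.cbData = (DWORD)(pwd.size() * sizeof(wchar_t));
        in.pbData = (BYTE*)pwd.c_str();
        if (!CryptProtectData(&in, L"psw", nullptr, nullptr, nullptr, 0, &out))
            return false;
        char hx[3] = {0};
        for (DWORD i = 0; i < out.cbData; ++i) {
            sprintf_s(hx, sizeof(hx), "%02X", out.pbData[i]);
            hex_out += hx;
        }
        LocalFree(out.pbData);
        return true;
    }

Because CryptProtectData() binds the ciphertext to the current Windows user, a .rdp file generated this way only decrypts for the account that created it.
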
- fwrite(sz_rdp_file_content, strlen(sz_rdp_file_content), 1, f); - fclose(f); - ex_astr2wstr(sz_file_name, tmp_rdp_file); - - // 滻 - ex_replace_all(w_exe_path, _T("{tmp_rdp_file}"), tmp_rdp_file); - } else if (g_cfg.rdp_name == L"freerdp") { - w_exe_path += L"{size} {console} {clipboard} {drives} "; - w_exe_path += g_cfg.rdp_cmdline; - - ex_wstr w_screen; - - if (rdp_w == 0 || rdp_h == 0) { - //ȫ - w_screen = _T("/f"); - } else { - char sz_size[64] = { 0 }; - ex_strformat(sz_size, 63, "/size:%dx%d", rdp_w, rdp_h); - ex_astr2wstr(sz_size, w_screen); - } - - // wchar_t* w_console = NULL; - // - // if (flag_console && rdp_console) - // { - // w_console = L"/admin"; - // } - // else - // { - // w_console = L""; - // } - - ex_wstr w_password; - ex_astr2wstr(szPwd, w_password); - w_exe_path += L" /p:"; - w_exe_path += w_password; - - w_sid = L"02" + w_sid; - - w_exe_path += L" /gdi:sw"; // ʹȾgdi:hwʹӲ٣ǻֺܶڿ飨¼طʱģ - w_exe_path += L" -grab-keyboard"; // [new style] ֹFreeRDPʧȥؼӦСһFreeRDPڣòƲã - - // 滻 - ex_replace_all(w_exe_path, _T("{size}"), w_screen); - - if (flag_console && rdp_console) - ex_replace_all(w_exe_path, _T("{console}"), L"/admin"); - else - ex_replace_all(w_exe_path, _T("{console}"), L""); - - //ex_replace_all(w_exe_path, _T("{clipboard}"), L"+clipboard"); - - if (flag_clipboard) - ex_replace_all(w_exe_path, _T("{clipboard}"), L"/clipboard"); - else - ex_replace_all(w_exe_path, _T("{clipboard}"), L"-clipboard"); - - if (flag_disk) - ex_replace_all(w_exe_path, _T("{drives}"), L"/drives"); - else - ex_replace_all(w_exe_path, _T("{drives}"), L"-drives"); - } else { - _create_json_ret(buf, TPE_FAILED); - return; - } - } else if (pro_type == TP_PROTOCOL_TYPE_SSH) { - //============================================== - // SSH - //============================================== - - if (pro_sub == TP_PROTOCOL_TYPE_SSH_SHELL) { - w_exe_path = _T("\""); - w_exe_path += g_cfg.ssh_app + _T("\" "); - w_exe_path += g_cfg.ssh_cmdline; - } else { - w_exe_path = _T("\""); - w_exe_path += g_cfg.scp_app + _T("\" "); - w_exe_path += g_cfg.scp_cmdline; - } - } else if (pro_type == TP_PROTOCOL_TYPE_TELNET) { - //============================================== - // TELNET - //============================================== - w_exe_path = _T("\""); - w_exe_path += g_cfg.telnet_app + _T("\" "); - w_exe_path += g_cfg.telnet_cmdline; - } - - ex_replace_all(w_exe_path, _T("{host_ip}"), w_teleport_ip.c_str()); - ex_replace_all(w_exe_path, _T("{host_port}"), w_port); - ex_replace_all(w_exe_path, _T("{user_name}"), w_sid.c_str()); - ex_replace_all(w_exe_path, _T("{host_name}"), w_remote_host_name.c_str()); - ex_replace_all(w_exe_path, _T("{real_ip}"), w_real_host_ip.c_str()); - ex_replace_all(w_exe_path, _T("{assist_tools_path}"), g_env.m_tools_path.c_str()); - - - STARTUPINFO si; - PROCESS_INFORMATION pi; - - ZeroMemory(&si, sizeof(si)); - si.cb = sizeof(si); - ZeroMemory(&pi, sizeof(pi)); - - Json::Value root_ret; - ex_astr utf8_path; - ex_wstr2astr(w_exe_path, utf8_path, EX_CODEPAGE_UTF8); - root_ret["path"] = utf8_path; - - if (!CreateProcess(NULL, (wchar_t *)w_exe_path.c_str(), NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) { - EXLOGE(_T("CreateProcess() failed. 
Error=0x%08X.\n %s\n"), GetLastError(), w_exe_path.c_str()); - root_ret["code"] = TPE_START_CLIENT; - _create_json_ret(buf, root_ret); - return; - } - - root_ret["code"] = TPE_OK; - _create_json_ret(buf, root_ret); -} - -void TsHttpRpc::_rpc_func_rdp_play(const ex_astr& func_args, ex_astr& buf) { - Json::Reader jreader; - Json::Value jsRoot; - - if (!jreader.parse(func_args.c_str(), jsRoot)) { - _create_json_ret(buf, TPE_JSON_FORMAT); - return; - } - - // жϲǷȷ - if (!jsRoot["rid"].isInt() - || !jsRoot["web"].isString() - || !jsRoot["sid"].isString() - || !jsRoot["user"].isString() - || !jsRoot["acc"].isString() - || !jsRoot["host"].isString() - || !jsRoot["start"].isString() - ) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - int rid = jsRoot["rid"].asInt(); - ex_astr a_url_base = jsRoot["web"].asCString(); - ex_astr a_sid = jsRoot["sid"].asCString(); - ex_astr a_user = jsRoot["user"].asCString(); - ex_astr a_acc = jsRoot["acc"].asCString(); - ex_astr a_host = jsRoot["host"].asCString(); - ex_astr a_start = jsRoot["start"].asCString(); - - char cmd_args[1024] = { 0 }; - ex_strformat(cmd_args, 1023, "%d \"%s\" \"%09d-%s-%s-%s-%s\"", rid, a_sid.c_str(), rid, a_user.c_str(), a_acc.c_str(), a_host.c_str(), a_start.c_str()); - - // TODO: ϲӦǰתΪIPIJӦý͸ɲԼȥ - // ڸFreeRDPIJʱΪ˴ӷļʹMongoose⣬⣨ò첽ѯDNS⣩ - // ʱֽIPת - { - unsigned int port_i = 0; - struct mg_str scheme, query, fragment, user_info, host, path; - - if (mg_parse_uri(mg_mk_str(a_url_base.c_str()), &scheme, &user_info, &host, &port_i, &path, &query, &fragment) != 0) { - EXLOGE(_T("parse url failed.\n")); - Json::Value root_ret; - root_ret["code"] = TPE_PARAM; - _create_json_ret(buf, root_ret); - return; - } - - ex_astr _scheme; - _scheme.assign(scheme.p, scheme.len); - - // hostתΪIP - ex_astr str_tp_host; - str_tp_host.assign(host.p, host.len); - struct hostent *tp_host = gethostbyname(str_tp_host.c_str()); - if (NULL == tp_host) { - EXLOGE(_T("resolve host name failed.\n")); - Json::Value root_ret; - root_ret["code"] = TPE_PARAM; - _create_json_ret(buf, root_ret); - return; - } - - int i = 0; - char* _ip = NULL; - if (tp_host->h_addrtype == AF_INET) { - struct in_addr addr; - while (tp_host->h_addr_list[i] != 0) { - addr.s_addr = *(u_long *)tp_host->h_addr_list[i++]; - _ip = inet_ntoa(addr); - break; - } - } - - if (NULL == _ip) { - EXLOGE(_T("resolve host name failed.\n")); - Json::Value root_ret; - root_ret["code"] = TPE_PARAM; - _create_json_ret(buf, root_ret); - return; - } - - char _url_base[256]; - ex_strformat(_url_base, 255, "%s://%s:%d", _scheme.c_str(), _ip, port_i); - a_url_base = _url_base; - } - - ex_wstr w_url_base; - ex_astr2wstr(a_url_base, w_url_base); - ex_wstr w_cmd_args; - ex_astr2wstr(cmd_args, w_cmd_args); - - ex_wstr w_exe_path; - w_exe_path = _T("\""); - w_exe_path += g_env.m_tools_path + _T("\\tprdp\\tprdp-replay.exe\""); - w_exe_path += _T(" \""); - w_exe_path += w_url_base; - w_exe_path += _T("\" "); - w_exe_path += w_cmd_args; - - Json::Value root_ret; - ex_astr utf8_path; - ex_wstr2astr(w_exe_path, utf8_path, EX_CODEPAGE_UTF8); - root_ret["cmdline"] = utf8_path; - - EXLOGD(w_exe_path.c_str()); - - STARTUPINFO si; - PROCESS_INFORMATION pi; - - ZeroMemory(&si, sizeof(si)); - si.cb = sizeof(si); - ZeroMemory(&pi, sizeof(pi)); - if (!CreateProcess(NULL, (wchar_t *)w_exe_path.c_str(), NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) { - EXLOGE(_T("CreateProcess() failed. 
Error=0x%08X.\n %s\n"), GetLastError(), w_exe_path.c_str()); - root_ret["code"] = TPE_START_CLIENT; - _create_json_ret(buf, root_ret); - return; - } - - root_ret["code"] = TPE_OK; - _create_json_ret(buf, root_ret); - return; -} - -void TsHttpRpc::_rpc_func_get_config(const ex_astr& func_args, ex_astr& buf) { - Json::Value jr_root; - jr_root["code"] = 0; - jr_root["data"] = g_cfg.get_root(); - _create_json_ret(buf, jr_root); -} - -void TsHttpRpc::_rpc_func_set_config(const ex_astr& func_args, ex_astr& buf) { - Json::Reader jreader; - Json::Value jsRoot; - if (!jreader.parse(func_args.c_str(), jsRoot)) { - _create_json_ret(buf, TPE_JSON_FORMAT); - return; - } - - if (!g_cfg.save(func_args)) - _create_json_ret(buf, TPE_FAILED); - else - _create_json_ret(buf, TPE_OK); -} - -void TsHttpRpc::_rpc_func_file_action(const ex_astr& func_args, ex_astr& buf) { - - Json::Reader jreader; - Json::Value jsRoot; - - if (!jreader.parse(func_args.c_str(), jsRoot)) { - _create_json_ret(buf, TPE_JSON_FORMAT); - return; - } - // жϲǷȷ - if (!jsRoot["action"].isNumeric()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - int action = jsRoot["action"].asUInt(); - - HWND hParent = GetForegroundWindow(); - if (NULL == hParent) - hParent = g_hDlgMain; - - BOOL ret = FALSE; - wchar_t wszReturnPath[MAX_PATH] = _T(""); - - if (action == 1 || action == 2) { - OPENFILENAME ofn; - ex_wstr wsDefaultName; - ex_wstr wsDefaultPath; - StringCchCopy(wszReturnPath, MAX_PATH, wsDefaultName.c_str()); - - ZeroMemory(&ofn, sizeof(ofn)); - - ofn.lStructSize = sizeof(ofn); - ofn.lpstrTitle = _T("ѡļ"); - ofn.hwndOwner = hParent; - ofn.lpstrFilter = _T("ִг (*.exe)\0*.exe\0"); - ofn.lpstrFile = wszReturnPath; - ofn.nMaxFile = MAX_PATH; - ofn.lpstrInitialDir = wsDefaultPath.c_str(); - ofn.Flags = OFN_EXPLORER | OFN_PATHMUSTEXIST; - - if (action == 1) { - ofn.Flags |= OFN_FILEMUSTEXIST; - ret = GetOpenFileName(&ofn); - } else { - ofn.Flags |= OFN_OVERWRITEPROMPT; - ret = GetSaveFileName(&ofn); - } - } else if (action == 3) { - BROWSEINFO bi; - ZeroMemory(&bi, sizeof(BROWSEINFO)); - bi.hwndOwner = NULL; - bi.pidlRoot = NULL; - bi.pszDisplayName = wszReturnPath; //˲ΪNULLʾԻ - bi.lpszTitle = _T("ѡĿ¼"); - bi.ulFlags = BIF_RETURNONLYFSDIRS; - bi.lpfn = NULL; - bi.iImage = 0; //ʼڲbi - LPITEMIDLIST pIDList = SHBrowseForFolder(&bi);//ʾѡԻ - if (pIDList) { - ret = true; - SHGetPathFromIDList(pIDList, wszReturnPath); - } else { - ret = false; - } - } else if (action == 4) { - ex_wstr wsDefaultName; - ex_wstr wsDefaultPath; - - if (wsDefaultPath.length() == 0) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - ex_wstr::size_type pos = 0; - - while (ex_wstr::npos != (pos = wsDefaultPath.find(L"/", pos))) { - wsDefaultPath.replace(pos, 1, L"\\"); - pos += 1; - } - - ex_wstr wArg = L"/select, \""; - wArg += wsDefaultPath; - wArg += L"\""; - if ((int)ShellExecute(hParent, _T("open"), _T("explorer"), wArg.c_str(), NULL, SW_SHOW) > 32) - ret = true; - else - ret = false; - } - - if (ret) { - if (action == 1 || action == 2 || action == 3) { - ex_astr utf8_path; - ex_wstr2astr(wszReturnPath, utf8_path, EX_CODEPAGE_UTF8); - Json::Value root; - root["code"] = TPE_OK; - root["path"] = utf8_path; - _create_json_ret(buf, root); - - return; - } else { - _create_json_ret(buf, TPE_OK); - return; - } - } else { - _create_json_ret(buf, TPE_DATA); - return; - } -} - -void TsHttpRpc::_rpc_func_get_version(const ex_astr& func_args, ex_astr& buf) { - Json::Value root_ret; - ex_wstr w_version = TP_ASSIST_VER; - ex_astr version; - ex_wstr2astr(w_version, version, 
EX_CODEPAGE_UTF8);
-	root_ret["version"] = version;
-	root_ret["code"] = TPE_OK;
-	_create_json_ret(buf, root_ret);
-	return;
-}
+#include "stdafx.h"
+
+#pragma warning(disable:4091)
+
+#include <commdlg.h>    // GetOpenFileName() / GetSaveFileName()
+#include <ShlObj.h>     // SHBrowseForFolder()
+#include <strsafe.h>    // StringCchCopy()
+
+#pragma comment(lib, "Crypt32.lib")
+
+#include <wincrypt.h>   // CryptProtectData()
+
+#include "ts_http_rpc.h"
+#include "dlg_main.h"
+#include "ts_ver.h"
+#include "ts_env.h"
+
+/*
+1.
+SecureCRT supports setting the tab title with the command-line argument /N "tab name".
+Example:
+To launch a new Telnet session, displaying the name "Houston, TX" on the tab, use the following:
+/T /N "Houston, TX" /TELNET 192.168.0.6
+
+2.
+To open multiple SecureCRT instances as tabs of a single window, use the /T argument:
+    SecureCRT.exe /T /N "TP#ssh://192.168.1.3" /SSH2 /L root /PASSWORD 1234 120.26.109.25
+
+3.
+Launching a telnet client:
+    putty.exe telnet://administrator@127.0.0.1:52389
+With SecureCRT, this becomes:
+    SecureCRT.exe /T /N "TP#telnet://192.168.1.3" /SCRIPT X:\path\to\startup.vbs /TELNET 127.0.0.1 52389
+where the content of startup.vbs is:
+--------- file begins ---------
+#$language = "VBScript"
+#$interface = "1.0"
+Sub main
+    crt.Screen.Synchronous = True
+    crt.Screen.WaitForString "ogin: "
+    crt.Screen.Send "SESSION-ID" & VbCr
+    crt.Screen.Synchronous = False
+End Sub
+--------- file ends ---------
+
+4. To make putty's window title show the real IP, try sending the following command to the server right after the connection succeeds:
+   PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@192.168.1.2: \w\a\]$PS1"
+Tested by hand: it works on an ubuntu server; whether every Linux supports it is unknown. SecureCRT simply ignores it.
+*/
+
+//#define RDP_CLIENT_SYSTEM_BUILTIN
+// #define RDP_CLIENT_SYSTEM_ACTIVE_CONTROL
+//#define RDP_CLIENT_FREERDP
+
+
+//#ifdef RDP_CLIENT_SYSTEM_BUILTIN
+
+//connect to console:i:%d
+//compression:i:1
+//bitmapcachepersistenable:i:1
+
+std::string rdp_content = "\
+administrative session:i:%d\n\
+screen mode id:i:%d\n\
+use multimon:i:0\n\
+desktopwidth:i:%d\n\
+desktopheight:i:%d\n\
+session bpp:i:16\n\
+winposstr:s:0,1,%d,%d,%d,%d\n\
+bitmapcachepersistenable:i:1\n\
+bitmapcachesize:i:32000\n\
+compression:i:1\n\
+keyboardhook:i:2\n\
+audiocapturemode:i:0\n\
+videoplaybackmode:i:1\n\
+connection type:i:7\n\
+networkautodetect:i:1\n\
+bandwidthautodetect:i:1\n\
+disableclipboardredirection:i:0\n\
+displayconnectionbar:i:1\n\
+enableworkspacereconnect:i:0\n\
+disable wallpaper:i:1\n\
+allow font smoothing:i:0\n\
+allow desktop composition:i:0\n\
+disable full window drag:i:1\n\
+disable menu anims:i:1\n\
+disable themes:i:1\n\
+disable cursor setting:i:1\n\
+full address:s:%s:%d\n\
+audiomode:i:0\n\
+redirectprinters:i:0\n\
+redirectcomports:i:0\n\
+redirectsmartcards:i:0\n\
+redirectclipboard:i:%d\n\
+redirectposdevices:i:0\n\
+autoreconnection enabled:i:0\n\
+authentication level:i:2\n\
+prompt for credentials:i:0\n\
+negotiate security layer:i:1\n\
+remoteapplicationmode:i:0\n\
+alternate shell:s:\n\
+shell working directory:s:\n\
+gatewayhostname:s:\n\
+gatewayusagemethod:i:4\n\
+gatewaycredentialssource:i:4\n\
+gatewayprofileusagemethod:i:0\n\
+promptcredentialonce:i:0\n\
+gatewaybrokeringtype:i:0\n\
+use redirection server name:i:0\n\
+rdgiskdcproxy:i:0\n\
+kdcproxyname:s:\n\
+drivestoredirect:s:%s\n\
+username:s:%s\n\
+password 51:b:%s\n\
+";
+
+// https://www.donkz.nl/overview-rdp-file-settings/
+//
+// authentication level:i:2\n
+//
+//
+// negotiate security layer:i:1\n
+// 0 = negotiation is not enabled and the session is started by using Secure Sockets Layer (SSL).
+// 1 = negotiation is enabled and the session is started by using x.224 encryption.
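+//
+// Editor's note: an illustrative sketch, not part of the original patch. It shows
+// how the fourteen %-slots of the rdp_content template above line up with the
+// arguments of the sprintf_s() call made later in _rpc_func_run_client(); all
+// concrete values below are examples only:
+//
+//   char content[4096] = { 0 };
+//   sprintf_s(content, 4096, rdp_content.c_str(),
+//       0,                      // administrative session:i:%d (1 = console/admin session)
+//       1, 1024, 768,           // screen mode id (1 = windowed, 2 = full screen), desktop width, height
+//       100, 100, 1224, 968,    // winposstr: left, top, right, bottom
+//       "127.0.0.1", 52089,     // full address:s:%s:%d (teleport server and port; example values)
+//       1,                      // redirectclipboard:i:%d
+//       "*",                    // drivestoredirect:s:%s ("*" redirects all drives)
+//       "010123abcde",          // username:s:%s (session id; the "01" prefix marks the mstsc path)
+//       psw51b.c_str());        // password 51:b:%s (hex-encoded CryptProtectData blob from calc_psw51b())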
+ + + +//redirectdirectx:i:0\n\ +//prompt for credentials on client:i:0\n\ + +//#endif + + +TsHttpRpc g_http_interface; +TsHttpRpc g_https_interface; + +void http_rpc_main_loop(bool is_https) { + if (is_https) { + if (!g_https_interface.init_https()) { + EXLOGE("[ERROR] can not start HTTPS-RPC listener, maybe port %d is already in use.\n", TS_HTTPS_RPC_PORT); + return; + } + + EXLOGW("======================================================\n"); + EXLOGW("[rpc] TeleportAssist-HTTPS-RPC ready on 127.0.0.1:%d\n", TS_HTTPS_RPC_PORT); + + g_https_interface.run(); + + EXLOGW("[rpc] HTTPS-Server main loop end.\n"); + } else { + if (!g_http_interface.init_http()) { + EXLOGE("[ERROR] can not start HTTP-RPC listener, maybe port %d is already in use.\n", TS_HTTP_RPC_PORT); + return; + } + + EXLOGW("======================================================\n"); + EXLOGW("[rpc] TeleportAssist-HTTP-RPC ready on 127.0.0.1:%d\n", TS_HTTP_RPC_PORT); + + g_http_interface.run(); + + EXLOGW("[rpc] HTTP-Server main loop end.\n"); + } +} + +void http_rpc_stop(bool is_https) { + if (is_https) + g_https_interface.stop(); + else + g_http_interface.stop(); +} + +#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W') + +int ts_url_decode(const char *src, int src_len, char *dst, int dst_len, int is_form_url_encoded) { + int i, j, a, b; + + for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) { + if (src[i] == '%') { + if (i < src_len - 2 && isxdigit(*(const unsigned char *)(src + i + 1)) && + isxdigit(*(const unsigned char *)(src + i + 2))) { + a = tolower(*(const unsigned char *)(src + i + 1)); + b = tolower(*(const unsigned char *)(src + i + 2)); + dst[j] = (char)((HEXTOI(a) << 4) | HEXTOI(b)); + i += 2; + } else { + return -1; + } + } else if (is_form_url_encoded && src[i] == '+') { + dst[j] = ' '; + } else { + dst[j] = src[i]; + } + } + + dst[j] = '\0'; /* Null-terminate the destination */ + + return i >= src_len ? 
j : -1;
+}
+
+bool calc_psw51b(const char* password, std::string& ret) {
+    DATA_BLOB DataIn;
+    DATA_BLOB DataOut;
+
+    ex_wstr w_pswd;
+    ex_astr2wstr(password, w_pswd, EX_CODEPAGE_ACP);
+
+    DataIn.cbData = w_pswd.length() * sizeof(wchar_t);
+    DataIn.pbData = (BYTE*)w_pswd.c_str();
+
+
+    if (!CryptProtectData(&DataIn, L"psw", nullptr, nullptr, nullptr, 0, &DataOut))
+        return false;
+
+    char szRet[5] = { 0 };
+    for (DWORD i = 0; i < DataOut.cbData; ++i) {
+        sprintf_s(szRet, 5, "%02X", DataOut.pbData[i]);
+        ret += szRet;
+    }
+
+    LocalFree(DataOut.pbData);
+    return true;
+}
+
+bool isDigital(std::string str) {
+    for (int i = 0; i < str.size(); i++) {
+        if (str.at(i) == '-' && str.size() > 1) // negative numbers are possible
+            continue;
+        if (str.at(i) > '9' || str.at(i) < '0')
+            return false;
+    }
+    return true;
+}
+
+std::string strtolower(std::string str) {
+    for (int i = 0; i < str.size(); i++)
+    {
+        str[i] = tolower(str[i]);
+    }
+    return str;
+}
+
+void SplitString(const std::string& s, std::vector<std::string>& v, const std::string& c)
+{
+    std::string::size_type pos1, pos2;
+    pos2 = s.find(c);
+    pos1 = 0;
+    while (std::string::npos != pos2)
+    {
+        v.push_back(s.substr(pos1, pos2 - pos1));
+
+        pos1 = pos2 + c.size();
+        pos2 = s.find(c, pos1);
+    }
+    if (pos1 != s.length())
+        v.push_back(s.substr(pos1));
+}
+
+TsHttpRpc::TsHttpRpc() {
+    m_stop = false;
+    mg_mgr_init(&m_mg_mgr, nullptr);
+}
+
+TsHttpRpc::~TsHttpRpc() {
+    mg_mgr_free(&m_mg_mgr);
+}
+
+bool TsHttpRpc::init_http() {
+    struct mg_connection* nc = nullptr;
+
+    char addr[128] = { 0 };
+    ex_strformat(addr, 128, "tcp://127.0.0.1:%d", TS_HTTP_RPC_PORT);
+
+    nc = mg_bind(&m_mg_mgr, addr, _mg_event_handler);
+    if (!nc) {
+        EXLOGE("[rpc] TsHttpRpc::init_http() failed to bind 127.0.0.1:%d\n", TS_HTTP_RPC_PORT);
+        return false;
+    }
+    nc->user_data = this;
+
+    mg_set_protocol_http_websocket(nc);
+
+    return _on_init();
+}
+
+bool TsHttpRpc::init_https() {
+    ex_wstr file_ssl_cert = g_env.m_exec_path;
+    ex_path_join(file_ssl_cert, true, L"cfg", L"localhost.pem", NULL);
+    ex_wstr file_ssl_key = g_env.m_exec_path;
+    ex_path_join(file_ssl_key, true, L"cfg", L"localhost.key", NULL);
+    ex_astr _ssl_cert;
+    ex_wstr2astr(file_ssl_cert, _ssl_cert);
+    ex_astr _ssl_key;
+    ex_wstr2astr(file_ssl_key, _ssl_key);
+
+    const char *err = NULL;
+    struct mg_bind_opts bind_opts;
+    memset(&bind_opts, 0, sizeof(bind_opts));
+    bind_opts.ssl_cert = _ssl_cert.c_str();
+    bind_opts.ssl_key = _ssl_key.c_str();
+    bind_opts.error_string = &err;
+
+
+    char addr[128] = { 0 };
+    ex_strformat(addr, 128, "tcp://127.0.0.1:%d", TS_HTTPS_RPC_PORT);
+
+    struct mg_connection* nc = nullptr;
+    nc = mg_bind_opt(&m_mg_mgr, addr, _mg_event_handler, bind_opts);
+    if (!nc) {
+        EXLOGE("[rpc] TsHttpRpc::init_https() failed to bind 127.0.0.1:%d\n", TS_HTTPS_RPC_PORT);
+        return false;
+    }
+    nc->user_data = this;
+
+    mg_set_protocol_http_websocket(nc);
+
+    return _on_init();
+}
+
+bool TsHttpRpc::_on_init() {
+    char file_name[MAX_PATH] = { 0 };
+    if (!GetModuleFileNameA(nullptr, file_name, MAX_PATH))
+        return false;
+
+    size_t len = strlen(file_name);
+
+    // strip a trailing backslash, if any (the last character is at len - 1;
+    // file_name[len] is the NUL terminator)
+    if (len > 0 && file_name[len - 1] == '\\')
+        file_name[len - 1] = '\0';
+
+    char* match = strrchr(file_name, '\\');
+    if (match)
+        *match = '\0';
+
+    m_content_type_map[".js"] = "application/javascript";
+    m_content_type_map[".png"] = "image/png";
+    m_content_type_map[".jpeg"] = "image/jpeg";
+    m_content_type_map[".jpg"] = "image/jpeg";
+    m_content_type_map[".gif"] = "image/gif";
+    m_content_type_map[".ico"] = "image/x-icon";
+    m_content_type_map[".json"] = "application/json";
+    m_content_type_map[".html"] = "text/html";
+    m_content_type_map[".css"] = "text/css";
"text/css"; + m_content_type_map[".tif"] = "image/tiff"; + m_content_type_map[".tiff"] = "image/tiff"; + m_content_type_map[".svg"] = "text/html"; + + return true; +} + +void TsHttpRpc::run(void) { + while (!m_stop) { + mg_mgr_poll(&m_mg_mgr, 500); + } +} + +void TsHttpRpc::stop(void) { + m_stop = true; +} + +void TsHttpRpc::_mg_event_handler(struct mg_connection *nc, int ev, void *ev_data) { + struct http_message *hm = (struct http_message*)ev_data; + + TsHttpRpc* _this = (TsHttpRpc*)nc->user_data; + if (!_this) { + EXLOGE("[ERROR] invalid http request.\n"); + return; + } + + switch (ev) { + case MG_EV_HTTP_REQUEST: + { + ex_astr uri; + ex_chars _uri; + _uri.resize(hm->uri.len + 1); + memset(&_uri[0], 0, hm->uri.len + 1); + memcpy(&_uri[0], hm->uri.p, hm->uri.len); + uri = &_uri[0]; + +#ifdef EX_DEBUG + char* dbg_method = nullptr; + if (hm->method.len == 3 && 0 == memcmp(hm->method.p, "GET", hm->method.len)) + dbg_method = "GET"; + else if (hm->method.len == 4 && 0 == memcmp(hm->method.p, "POST", hm->method.len)) + dbg_method = "POST"; + else + dbg_method = "UNSUPPORTED-HTTP-METHOD"; + + EXLOGV("[rpc] got %s request: %s\n", dbg_method, uri.c_str()); +#endif + ex_astr ret_buf; + bool b_is_html = false; + +// if (uri == "/") { +// ex_wstr page = L"Teleport助手\n\n
Teleport助手工作正常!
"; +// ex_wstr2astr(page, ret_buf, EX_CODEPAGE_UTF8); +// +// mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n%s", ret_buf.size() - 1, &ret_buf[0]); +// nc->flags |= MG_F_SEND_AND_CLOSE; +// return; +// } + + if (uri == "/") { + uri = "/status.html"; + b_is_html = true; + } + else if (uri == "/config") { + uri = "/index.html"; + b_is_html = true; + } + + ex_astr temp; + int offset = uri.find("/", 1); + if (offset > 0) { + temp = uri.substr(1, offset - 1); + + if (temp == "api") { + ex_astr method; + ex_astr json_param; + int rv = _this->_parse_request(hm, method, json_param); + if (0 != rv) { + EXLOGE("[ERROR] http-rpc got invalid request.\n"); + _this->_create_json_ret(ret_buf, rv); + } else { + _this->_process_js_request(method, json_param, ret_buf); + } + + mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %ld\r\nContent-Type: application/json\r\n\r\n%s", ret_buf.length(), &ret_buf[0]); + nc->flags |= MG_F_SEND_AND_CLOSE; + return; + } + } + + + ex_astr file_suffix; + offset = uri.rfind("."); + if (offset > 0) { + file_suffix = uri.substr(offset, uri.length()); + } + + ex_wstr2astr(g_env.m_site_path, temp); + ex_astr index_path = temp + uri; + + + FILE* file = ex_fopen(index_path.c_str(), "rb"); + if (file) { + unsigned long file_size = 0; + char* buf = nullptr; + size_t ret = 0; + + fseek(file, 0, SEEK_END); + file_size = ftell(file); + buf = new char[file_size]; + memset(buf, 0, file_size); + fseek(file, 0, SEEK_SET); + ret = fread(buf, 1, file_size, file); + fclose(file); + + ex_astr content_type = _this->get_content_type(file_suffix); + + mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %ld\r\nContent-Type: %s\r\n\r\n", file_size, content_type.c_str()); + mg_send(nc, buf, (int)file_size); + delete[]buf; + nc->flags |= MG_F_SEND_AND_CLOSE; + return; + } else if (b_is_html) { + ex_wstr page = L"404 Not Found

404 Not Found


Teleport Assistor configuration page not found.

"; + ex_wstr2astr(page, ret_buf, EX_CODEPAGE_UTF8); + + mg_printf(nc, "HTTP/1.0 404 File Not Found\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %ld\r\nContent-Type: text/html\r\n\r\n%s", ret_buf.size() - 1, &ret_buf[0]); + nc->flags |= MG_F_SEND_AND_CLOSE; + return; + } + + } + break; + default: + break; + } +} + +int TsHttpRpc::_parse_request(struct http_message* req, ex_astr& func_cmd, ex_astr& func_args) { + if (!req) + return TPE_FAILED; + + bool is_get = true; + if (req->method.len == 3 && 0 == memcmp(req->method.p, "GET", req->method.len)) + is_get = true; + else if (req->method.len == 4 && 0 == memcmp(req->method.p, "POST", req->method.len)) + is_get = false; + else + return TPE_HTTP_METHOD; + + ex_astrs strs; + + size_t pos_start = 1; // 跳过第一个字节,一定是 '/' + + size_t i = 0; + for (i = pos_start; i < req->uri.len; ++i) { + if (req->uri.p[i] == '/') { + if (i - pos_start > 0) { + ex_astr tmp_uri; + tmp_uri.assign(req->uri.p + pos_start, i - pos_start); + strs.push_back(tmp_uri); + } + pos_start = i + 1; // 跳过当前找到的分隔符 + } + } + if (pos_start < req->uri.len) { + ex_astr tmp_uri; + tmp_uri.assign(req->uri.p + pos_start, req->uri.len - pos_start); + strs.push_back(tmp_uri); + } + + if (strs.empty() || strs[0] != "api") + return TPE_PARAM; + + if (is_get) { + if (2 == strs.size()) { + func_cmd = strs[1]; + } else if (3 == strs.size()) { + func_cmd = strs[1]; + func_args = strs[2]; + } else { + return TPE_PARAM; + } + } else { + if (2 == strs.size()) { + func_cmd = strs[1]; + } else { + return TPE_PARAM; + } + + if (req->body.len > 0) { + func_args.assign(req->body.p, req->body.len); + } + } + + if (func_args.length() > 0) { + // 将参数进行 url-decode 解码 + int len = func_args.length() * 2; + ex_chars sztmp; + sztmp.resize(len); + memset(&sztmp[0], 0, len); + if (-1 == ts_url_decode(func_args.c_str(), func_args.length(), &sztmp[0], len, 0)) + return TPE_HTTP_URL_ENCODE; + + func_args = &sztmp[0]; + } + + EXLOGV("[rpc] method=%s, json_param=%s\n", func_cmd.c_str(), func_args.c_str()); + + return TPE_OK; +} + +void TsHttpRpc::_process_js_request(const ex_astr& func_cmd, const ex_astr& func_args, ex_astr& buf) { + if (func_cmd == "get_version") { + _rpc_func_get_version(func_args, buf); + } else if (func_cmd == "run") { + _rpc_func_run_client(func_args, buf); + } else if (func_cmd == "rdp_play") { + _rpc_func_rdp_play(func_args, buf); + } else if (func_cmd == "get_config") { + _rpc_func_get_config(func_args, buf); + } else if (func_cmd == "set_config") { + _rpc_func_set_config(func_args, buf); + } else if (func_cmd == "file_action") { + _rpc_func_file_action(func_args, buf); + } else { + EXLOGE("[rpc] got unknown command: %s\n", func_cmd.c_str()); + _create_json_ret(buf, TPE_UNKNOWN_CMD); + } +} + +void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode) { + // 返回: {"code":123} + + Json::Value jr_root; + jr_root["code"] = errcode; + + // buf = jr_writer.write(jr_root); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jr_root, &os); + buf = os.str(); +} + +void TsHttpRpc::_create_json_ret(ex_astr& buf, Json::Value& jr_root) { +// Json::FastWriter jr_writer; +// buf = jr_writer.write(jr_root); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jr_root, &os); + buf = os.str(); +} + +void TsHttpRpc::_rpc_func_url_protocol(const ex_astr& args, ex_astr& buf) +{ + //处理urlprotocol调用访式 + // 将参数进行 url-decode 解码 + std::string func_args = args; + if (func_args.length() > 0) + 
+    {
+        int len = func_args.length() * 2;
+        ex_chars sztmp;
+        sztmp.resize(len);
+        memset(&sztmp[0], 0, len);
+        if (-1 == ts_url_decode(func_args.c_str(), func_args.length(), &sztmp[0], len, 0))
+            return;
+
+        func_args = &sztmp[0];
+    }
+    EXLOGD(("%s\n"), func_args.c_str());
+    // the caller passes teleport://{...}/, keep only the parameter part
+    std::string urlproto_appname = TP_URLPROTO_APP_NAME;
+    urlproto_appname += "://{";
+    func_args.erase(0, urlproto_appname.length());    // strip the leading URLPROTO_APP_NAME and the "://{" characters
+    int pos = func_args.length() - 1;
+    if (func_args.substr(pos, 1) == "/")
+        func_args.erase(pos - 1, 2);    // strip the trailing "}/" characters
+    else
+        func_args.erase(pos, 1);
+
+    // the command line and the IE browser strip the double quotes from the original
+    // JSON structure when passing it along, so the parameters must be re-formatted
+    // into valid JSON here
+    if (func_args.find("\"", 0) == std::string::npos) {
+        std::vector<std::string> strv;
+        SplitString(func_args, strv, ",");
+        func_args = "";
+        for (std::vector<std::string>::size_type i = 0; i < strv.size(); i++) {
+            std::vector<std::string> strv1;
+            SplitString(strv[i], strv1, ":");
+            strv1[0] = "\"" + strv1[0] + "\"";
+            if (!isDigital(strv1[1]) && strtolower(strv1[1]) != "true" && strtolower(strv1[1]) != "false")
+                strv1[1] = "\"" + strv1[1] + "\"";
+
+            strv[i] = strv1[0] + ":" + strv1[1];
+            if (i == 0)
+                func_args = strv[i];
+            else
+                func_args += "," + strv[i];
+        }
+    }
+    func_args = "{" + func_args + "}";
+    EXLOGD(("%s\n"), func_args.c_str());
+    // launch the client via TsHttpRpc::_rpc_func_run_client()
+    _rpc_func_run_client(func_args, buf);
+}
+
+void TsHttpRpc::_rpc_func_run_client(const ex_astr& func_args, ex_astr& buf) {
+    // input:   {"ip":"192.168.5.11","port":22,"uname":"root","uauth":"abcdefg","authmode":1,"protocol":2}
+    // authmode: 1=password, 2=private-key
+    // protocol: 1=rdp, 2=ssh
+    // SSH returns: {"code":0, "data":{"sid":"0123abcde"}}
+    // RDP returns: {"code":0, "data":{"sid":"0123abcde0A"}}
+
+    //Json::Reader jreader;
+    Json::Value jsRoot;
+
+    Json::CharReaderBuilder jcrb;
+    std::unique_ptr<Json::CharReader> const jreader(jcrb.newCharReader());
+    const char *str_json_begin = func_args.c_str();
+    ex_astr err;
+
+    //if (!jreader.parse(func_args.c_str(), jsRoot)) {
+    if (!jreader->parse(str_json_begin, str_json_begin + func_args.length(), &jsRoot, &err)) {
+        _create_json_ret(buf, TPE_JSON_FORMAT);
+        return;
+    }
+    if (!jsRoot.isObject()) {
+        _create_json_ret(buf, TPE_PARAM);
+        return;
+    }
+
+    // validate the parameters
+    if (!jsRoot["teleport_ip"].isString()
+        || !jsRoot["teleport_port"].isNumeric() || !jsRoot["remote_host_ip"].isString()
+        || !jsRoot["session_id"].isString() || !jsRoot["protocol_type"].isNumeric() || !jsRoot["protocol_sub_type"].isNumeric()
+        || !jsRoot["protocol_flag"].isNumeric()
+        ) {
+        _create_json_ret(buf, TPE_PARAM);
+        return;
+    }
+
+    int pro_type = jsRoot["protocol_type"].asUInt();
+    int pro_sub = jsRoot["protocol_sub_type"].asInt();
+    ex_u32 protocol_flag = jsRoot["protocol_flag"].asUInt();
+
+    ex_astr teleport_ip = jsRoot["teleport_ip"].asCString();
+    int teleport_port = jsRoot["teleport_port"].asUInt();
+
+    ex_astr remote_host_name = jsRoot["remote_host_name"].asCString();
+
+    ex_astr real_host_ip = jsRoot["remote_host_ip"].asCString();
+    ex_astr sid = jsRoot["session_id"].asCString();
+
+    ex_wstr w_exe_path;
+    WCHAR w_szCommandLine[MAX_PATH] = { 0 };
+
+
+    ex_wstr w_sid;
+    ex_astr2wstr(sid, w_sid);
+    ex_wstr w_teleport_ip;
+    ex_astr2wstr(teleport_ip, w_teleport_ip);
+    ex_wstr w_real_host_ip;
+    ex_astr2wstr(real_host_ip, w_real_host_ip);
+    ex_wstr w_remote_host_name;
+    ex_astr2wstr(remote_host_name, w_remote_host_name);
+    WCHAR w_port[32] = { 0 };
+    swprintf_s(w_port, _T("%d"), teleport_port);
+
+    ex_wstr tmp_rdp_file;    // for .rdp file
+
+    if (pro_type == TP_PROTOCOL_TYPE_RDP) {
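+        // Editor's note: an illustrative sketch, not part of the original patch.
+        // protocol_flag is a bitmask of per-protocol capabilities; each one is
+        // tested with the usual (flags & BIT) == BIT idiom, as done just below.
+        // The TP_FLAG_RDP_* constants are defined elsewhere (teleport_const.h);
+        // the value shown here is only an assumption for the example:
+        //
+        //   const ex_u32 TP_FLAG_RDP_CLIPBOARD = 0x0002;   // assumed value
+        //   bool clipboard_allowed =
+        //       (protocol_flag & TP_FLAG_RDP_CLIPBOARD) == TP_FLAG_RDP_CLIPBOARD;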
+        //==============================================
+        // RDP
+        //==============================================
+
+        bool flag_clipboard = ((protocol_flag & TP_FLAG_RDP_CLIPBOARD) == TP_FLAG_RDP_CLIPBOARD);
+        bool flag_disk = ((protocol_flag & TP_FLAG_RDP_DISK) == TP_FLAG_RDP_DISK);
+        bool flag_console = ((protocol_flag & TP_FLAG_RDP_CONSOLE) == TP_FLAG_RDP_CONSOLE);
+
+        int rdp_w = 800;
+        int rdp_h = 640;
+        bool rdp_console = false;
+
+        if (!jsRoot["rdp_width"].isNull()) {
+            if (jsRoot["rdp_width"].isNumeric()) {
+                rdp_w = jsRoot["rdp_width"].asUInt();
+            } else {
+                _create_json_ret(buf, TPE_PARAM);
+                return;
+            }
+        }
+
+        if (!jsRoot["rdp_height"].isNull()) {
+            if (jsRoot["rdp_height"].isNumeric()) {
+                rdp_h = jsRoot["rdp_height"].asUInt();
+            } else {
+                _create_json_ret(buf, TPE_PARAM);
+                return;
+            }
+        }
+
+        if (!jsRoot["rdp_console"].isNull()) {
+            if (jsRoot["rdp_console"].isBool()) {
+                rdp_console = jsRoot["rdp_console"].asBool();
+            } else {
+                _create_json_ret(buf, TPE_PARAM);
+                return;
+            }
+        }
+
+        if (!flag_console)
+            rdp_console = false;
+
+
+        int split_pos = sid.length() - 2;
+        ex_astr real_sid = sid.substr(0, split_pos);
+        ex_astr str_pwd_len = sid.substr(split_pos, sid.length());
+        int n_pwd_len = strtol(str_pwd_len.c_str(), nullptr, 16);
+        n_pwd_len -= real_sid.length();
+        n_pwd_len -= 2;
+        char szPwd[256] = { 0 };
+        for (int i = 0; i < n_pwd_len; i++) {
+            szPwd[i] = '*';
+        }
+
+        ex_astr2wstr(real_sid, w_sid);
+
+        w_exe_path = _T("\"");
+        w_exe_path += g_cfg.rdp_app + _T("\" ");
+
+        ex_wstr rdp_name = g_cfg.rdp_name;
+        if (rdp_name == L"mstsc") {
+            w_exe_path += g_cfg.rdp_cmdline;
+
+            int width = 0;
+            int height = 0;
+            int cx = 0;
+            int cy = 0;
+
+            int display = 1;
+            int iWidth = GetSystemMetrics(SM_CXSCREEN);
+            int iHeight = GetSystemMetrics(SM_CYSCREEN);
+
+            if (rdp_w == 0 || rdp_h == 0) {
+                // full screen
+                width = iWidth;
+                height = iHeight;
+                display = 2;
+            } else {
+                width = rdp_w;
+                height = rdp_h;
+                display = 1;
+            }
+
+            cx = (iWidth - width) / 2;
+            cy = (iHeight - height) / 2;
+            if (cx < 0) {
+                cx = 0;
+            }
+            if (cy < 0) {
+                cy = 0;
+            }
+
+            // int console_mode = 0;
+            // if (rdp_console)
+            //     console_mode = 1;
+
+            std::string psw51b;
+            if (!calc_psw51b(szPwd, psw51b)) {
+                EXLOGE("calc password failed.\n");
+                _create_json_ret(buf, TPE_FAILED);
+                return;
+            }
+
+            real_sid = "01" + real_sid;
+
+            char sz_rdp_file_content[4096] = { 0 };
+            sprintf_s(sz_rdp_file_content, 4096, rdp_content.c_str()
+                , (flag_console && rdp_console) ? 1 : 0
+                , display, width, height
+                , cx, cy, cx + width + 100, cy + height + 100
+                , teleport_ip.c_str(), teleport_port
+                , flag_clipboard ? 1 : 0
+                , flag_disk ? "*" : ""
+                , real_sid.c_str()
+                , psw51b.c_str()
+            );
+
+            char sz_file_name[MAX_PATH] = { 0 };
+            char temp_path[MAX_PATH] = { 0 };
+            DWORD ret = GetTempPathA(MAX_PATH, temp_path);
+            if (ret <= 0) {
+                EXLOGE("GetTempPathA() failed (%d).\n", GetLastError());
+                _create_json_ret(buf, TPE_FAILED);
+                return;
+            }
+
+            ex_astr temp_host_ip = real_host_ip;
+            ex_replace_all(temp_host_ip, ".", "-");
+
+            sprintf_s(sz_file_name, MAX_PATH, ("%s%s.rdp"), temp_path, temp_host_ip.c_str());
+
+            FILE* f = NULL;
+            if (fopen_s(&f, sz_file_name, "wt") != 0) {
+                EXLOGE("fopen failed (%d).\n", GetLastError());
+                _create_json_ret(buf, TPE_OPENFILE);
+                return;
+            }
+            // Write a string into the file.
+            fwrite(sz_rdp_file_content, strlen(sz_rdp_file_content), 1, f);
+            fclose(f);
+            ex_astr2wstr(sz_file_name, tmp_rdp_file);
+
+            // substitute variables
+            ex_replace_all(w_exe_path, _T("{tmp_rdp_file}"), tmp_rdp_file);
+        } else if (g_cfg.rdp_name == L"freerdp") {
+            w_exe_path += L"{size} {console} {clipboard} {drives} ";
+            w_exe_path += g_cfg.rdp_cmdline;
+
+            ex_wstr w_screen;
+
+            if (rdp_w == 0 || rdp_h == 0) {
+                // full screen
+                w_screen = _T("/f");
+            } else {
+                char sz_size[64] = { 0 };
+                ex_strformat(sz_size, 63, "/size:%dx%d", rdp_w, rdp_h);
+                ex_astr2wstr(sz_size, w_screen);
+            }
+
+            // wchar_t* w_console = NULL;
+            //
+            // if (flag_console && rdp_console)
+            // {
+            //     w_console = L"/admin";
+            // }
+            // else
+            // {
+            //     w_console = L"";
+            // }
+
+            ex_wstr w_password;
+            ex_astr2wstr(szPwd, w_password);
+            w_exe_path += L" /p:";
+            w_exe_path += w_password;
+
+            w_sid = L"02" + w_sid;
+
+            w_exe_path += L" /gdi:sw";    // use software rendering; /gdi:hw enables hardware acceleration but shows lots of black blocks (the session replay looks fine, though!)
+            w_exe_path += L" -grab-keyboard";    // [new style] keeps the local keyboard responsive after FreeRDP starts, which otherwise requires minimizing the FreeRDP window once first (does not seem to work, though)
+
+            // substitute variables
+            ex_replace_all(w_exe_path, _T("{size}"), w_screen);
+
+            if (flag_console && rdp_console)
+                ex_replace_all(w_exe_path, _T("{console}"), L"/admin");
+            else
+                ex_replace_all(w_exe_path, _T("{console}"), L"");
+
+            //ex_replace_all(w_exe_path, _T("{clipboard}"), L"+clipboard");
+
+            if (flag_clipboard)
+                ex_replace_all(w_exe_path, _T("{clipboard}"), L"/clipboard");
+            else
+                ex_replace_all(w_exe_path, _T("{clipboard}"), L"-clipboard");
+
+            if (flag_disk)
+                ex_replace_all(w_exe_path, _T("{drives}"), L"/drives");
+            else
+                ex_replace_all(w_exe_path, _T("{drives}"), L"-drives");
+        } else {
+            _create_json_ret(buf, TPE_FAILED);
+            return;
+        }
+    } else if (pro_type == TP_PROTOCOL_TYPE_SSH) {
+        //==============================================
+        // SSH
+        //==============================================
+
+        if (pro_sub == TP_PROTOCOL_TYPE_SSH_SHELL) {
+            w_exe_path = _T("\"");
+            w_exe_path += g_cfg.ssh_app + _T("\" ");
+            w_exe_path += g_cfg.ssh_cmdline;
+        } else {
+            w_exe_path = _T("\"");
+            w_exe_path += g_cfg.scp_app + _T("\" ");
+            w_exe_path += g_cfg.scp_cmdline;
+        }
+    } else if (pro_type == TP_PROTOCOL_TYPE_TELNET) {
+        //==============================================
+        // TELNET
+        //==============================================
+        w_exe_path = _T("\"");
+        w_exe_path += g_cfg.telnet_app + _T("\" ");
+        w_exe_path += g_cfg.telnet_cmdline;
+    }
+
+    ex_replace_all(w_exe_path, _T("{host_ip}"), w_teleport_ip.c_str());
+    ex_replace_all(w_exe_path, _T("{host_port}"), w_port);
+    ex_replace_all(w_exe_path, _T("{user_name}"), w_sid.c_str());
+    ex_replace_all(w_exe_path, _T("{host_name}"), w_remote_host_name.c_str());
+    ex_replace_all(w_exe_path, _T("{real_ip}"), w_real_host_ip.c_str());
+    ex_replace_all(w_exe_path, _T("{assist_tools_path}"), g_env.m_tools_path.c_str());
+
+
+    STARTUPINFO si;
+    PROCESS_INFORMATION pi;
+
+    ZeroMemory(&si, sizeof(si));
+    si.cb = sizeof(si);
+    ZeroMemory(&pi, sizeof(pi));
+
+    Json::Value root_ret;
+    ex_astr utf8_path;
+    ex_wstr2astr(w_exe_path, utf8_path, EX_CODEPAGE_UTF8);
+    root_ret["path"] = utf8_path;
+
+    if (!CreateProcess(NULL, (wchar_t *)w_exe_path.c_str(), NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) {
+        EXLOGE(_T("CreateProcess() failed. Error=0x%08X.\n %s\n"), GetLastError(), w_exe_path.c_str());
+        root_ret["code"] = TPE_START_CLIENT;
+        _create_json_ret(buf, root_ret);
+        return;
+    }
+
+    root_ret["code"] = TPE_OK;
+    _create_json_ret(buf, root_ret);
+}
+
+void TsHttpRpc::_rpc_func_rdp_play(const ex_astr& func_args, ex_astr& buf) {
+    //Json::Reader jreader;
+    Json::Value jsRoot;
+
+    Json::CharReaderBuilder jcrb;
+    std::unique_ptr<Json::CharReader> const jreader(jcrb.newCharReader());
+    const char *str_json_begin = func_args.c_str();
+    ex_astr err;
+
+    //if (!jreader.parse(func_args.c_str(), jsRoot)) {
+    if (!jreader->parse(str_json_begin, str_json_begin + func_args.length(), &jsRoot, &err)) {
+        _create_json_ret(buf, TPE_JSON_FORMAT);
+        return;
+    }
+
+    // validate the parameters
+    if (!jsRoot["rid"].isInt()
+        || !jsRoot["web"].isString()
+        || !jsRoot["sid"].isString()
+        ) {
+        _create_json_ret(buf, TPE_PARAM);
+        return;
+    }
+
+    int rid = jsRoot["rid"].asInt();
+    ex_astr a_url_base = jsRoot["web"].asCString();
+    ex_astr a_sid = jsRoot["sid"].asCString();
+
+    char cmd_args[1024] = { 0 };
+    ex_strformat(cmd_args, 1023, "%s/%d", a_sid.c_str(), rid);
+
+    ex_wstr w_url_base;
+    ex_astr2wstr(a_url_base, w_url_base);
+    ex_wstr w_cmd_args;
+    ex_astr2wstr(cmd_args, w_cmd_args);
+
+    ex_wstr w_exe_path;
+    w_exe_path = _T("\"");
+    w_exe_path += g_env.m_exec_path + _T("\\tp-player.exe\"");
+    w_exe_path += _T(" \"");
+    w_exe_path += w_url_base;
+    w_exe_path += _T("/");
+    w_exe_path += w_cmd_args;
+
+    Json::Value root_ret;
+    ex_astr utf8_path;
+    ex_wstr2astr(w_exe_path, utf8_path, EX_CODEPAGE_UTF8);
+    root_ret["cmdline"] = utf8_path;
+
+    EXLOGD(w_exe_path.c_str());
+
+    STARTUPINFO si;
+    PROCESS_INFORMATION pi;
+
+    ZeroMemory(&si, sizeof(si));
+    si.cb = sizeof(si);
+    ZeroMemory(&pi, sizeof(pi));
+    if (!CreateProcess(NULL, (wchar_t *)w_exe_path.c_str(), NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) {
+        EXLOGE(_T("CreateProcess() failed. Error=0x%08X.\n %s\n"), GetLastError(), w_exe_path.c_str());
+        root_ret["code"] = TPE_START_CLIENT;
+        _create_json_ret(buf, root_ret);
+        return;
+    }
+
+    root_ret["code"] = TPE_OK;
+    _create_json_ret(buf, root_ret);
+    return;
+}
+
+void TsHttpRpc::_rpc_func_get_config(const ex_astr& func_args, ex_astr& buf) {
+    Json::Value jr_root;
+    jr_root["code"] = 0;
+    jr_root["data"] = g_cfg.get_root();
+    _create_json_ret(buf, jr_root);
+}
+
+void TsHttpRpc::_rpc_func_set_config(const ex_astr& func_args, ex_astr& buf) {
+    //Json::Reader jreader;
+    Json::Value jsRoot;
+    Json::CharReaderBuilder jcrb;
+    std::unique_ptr<Json::CharReader> const jreader(jcrb.newCharReader());
+    const char *str_json_begin = func_args.c_str();
+    ex_astr err;
+
+    //if (!jreader.parse(func_args.c_str(), jsRoot)) {
+    if (!jreader->parse(str_json_begin, str_json_begin + func_args.length(), &jsRoot, &err)) {
+        _create_json_ret(buf, TPE_JSON_FORMAT);
+        return;
+    }
+
+    if (!g_cfg.save(func_args))
+        _create_json_ret(buf, TPE_FAILED);
+    else
+        _create_json_ret(buf, TPE_OK);
+}
+
+void TsHttpRpc::_rpc_func_file_action(const ex_astr& func_args, ex_astr& buf) {
+
+    //Json::Reader jreader;
+    Json::Value jsRoot;
+
+    Json::CharReaderBuilder jcrb;
+    std::unique_ptr<Json::CharReader> const jreader(jcrb.newCharReader());
+    const char *str_json_begin = func_args.c_str();
+    ex_astr err;
+
+    //if (!jreader.parse(func_args.c_str(), jsRoot)) {
+    if (!jreader->parse(str_json_begin, str_json_begin + func_args.length(), &jsRoot, &err)) {
+        _create_json_ret(buf, TPE_JSON_FORMAT);
+        return;
+    }
+    // validate the parameters
+    if (!jsRoot["action"].isNumeric()) {
+        _create_json_ret(buf, TPE_PARAM);
+        return;
+    }
+    int action = jsRoot["action"].asUInt();
+
+    HWND hParent = GetForegroundWindow();
+    if (NULL == hParent)
+        hParent = g_hDlgMain;
+
+    BOOL ret = FALSE;
+    wchar_t wszReturnPath[MAX_PATH] = _T("");
+
+    if (action == 1 || action == 2) {
+        OPENFILENAME ofn;
+        ex_wstr wsDefaultName;
+        ex_wstr wsDefaultPath;
+        StringCchCopy(wszReturnPath, MAX_PATH, wsDefaultName.c_str());
+
+        ZeroMemory(&ofn, sizeof(ofn));
+
+        ofn.lStructSize = sizeof(ofn);
+        ofn.lpstrTitle = _T("选择文件");
+        ofn.hwndOwner = hParent;
+        ofn.lpstrFilter = _T("可执行程序 (*.exe)\0*.exe\0");
+        ofn.lpstrFile = wszReturnPath;
+        ofn.nMaxFile = MAX_PATH;
+        ofn.lpstrInitialDir = wsDefaultPath.c_str();
+        ofn.Flags = OFN_EXPLORER | OFN_PATHMUSTEXIST;
+
+        if (action == 1) {
+            ofn.Flags |= OFN_FILEMUSTEXIST;
+            ret = GetOpenFileName(&ofn);
+        } else {
+            ofn.Flags |= OFN_OVERWRITEPROMPT;
+            ret = GetSaveFileName(&ofn);
+        }
+    } else if (action == 3) {
+        BROWSEINFO bi;
+        ZeroMemory(&bi, sizeof(BROWSEINFO));
+        bi.hwndOwner = NULL;
+        bi.pidlRoot = NULL;
+        bi.pszDisplayName = wszReturnPath;    // if this member is NULL, the dialog cannot be shown
+        bi.lpszTitle = _T("选择目录");
+        bi.ulFlags = BIF_RETURNONLYFSDIRS;
+        bi.lpfn = NULL;
+        bi.iImage = 0;    // end of BROWSEINFO initialization
+        LPITEMIDLIST pIDList = SHBrowseForFolder(&bi);    // show the folder-selection dialog
+        if (pIDList) {
+            ret = true;
+            SHGetPathFromIDList(pIDList, wszReturnPath);
+        } else {
+            ret = false;
+        }
+    } else if (action == 4) {
+        ex_wstr wsDefaultName;
+        ex_wstr wsDefaultPath;
+
+        if (wsDefaultPath.length() == 0) {
+            _create_json_ret(buf, TPE_PARAM);
+            return;
+        }
+
+        ex_wstr::size_type pos = 0;
+
+        while (ex_wstr::npos != (pos = wsDefaultPath.find(L"/", pos))) {
+            wsDefaultPath.replace(pos, 1, L"\\");
+            pos += 1;
+        }
+
+        ex_wstr wArg = L"/select, \"";
+        wArg += wsDefaultPath;
+        wArg += L"\"";
+        if ((int)ShellExecute(hParent, _T("open"), _T("explorer"), wArg.c_str(), NULL, SW_SHOW) > 32)
+            ret = true;
+        else
+            ret = false;
+    }
+
+    if (ret) {
+        if (action == 1 || action == 2 || action == 3) {
+            ex_astr utf8_path;
+            ex_wstr2astr(wszReturnPath, utf8_path, EX_CODEPAGE_UTF8);
+            Json::Value root;
+            root["code"] = TPE_OK;
+            root["path"] = utf8_path;
+            _create_json_ret(buf, root);
+
+            return;
+        } else {
+            _create_json_ret(buf, TPE_OK);
+            return;
+        }
+    } else {
+        _create_json_ret(buf, TPE_DATA);
+        return;
+    }
+}
+
+void TsHttpRpc::_rpc_func_get_version(const ex_astr& func_args, ex_astr& buf) {
+    Json::Value root_ret;
+    ex_wstr w_version = TP_ASSIST_VER;
+    ex_astr version;
+    ex_wstr2astr(w_version, version, EX_CODEPAGE_UTF8);
+    root_ret["version"] = version;
+    root_ret["code"] = TPE_OK;
+    _create_json_ret(buf, root_ret);
+    return;
+}
diff --git a/client/tp_assist_win/ts_ver.h b/client/tp_assist_win/ts_ver.h
index 6aa10dc..9c55de2 100644
--- a/client/tp_assist_win/ts_ver.h
+++ b/client/tp_assist_win/ts_ver.h
@@ -1,6 +1,6 @@
 #ifndef __TS_ASSIST_VER_H__
 #define __TS_ASSIST_VER_H__
 
-#define TP_ASSIST_VER L"3.3.1"
+#define TP_ASSIST_VER L"3.5.1"
 
 #endif // __TS_ASSIST_VER_H__
diff --git a/common/libex/include/ex/ex_platform.h b/common/libex/include/ex/ex_platform.h
index 6a23715..099cfbf 100644
--- a/common/libex/include/ex/ex_platform.h
+++ b/common/libex/include/ex/ex_platform.h
@@ -67,6 +67,7 @@
 #	include <fcntl.h>	// O_RDONLY, etc.
 #	include 
 #	include 
+#	include 
 #	include 
 #	include 
 #	include 
diff --git a/common/libex/include/ex/ex_str.h b/common/libex/include/ex/ex_str.h
index 070b999..4fc9ce4 100644
--- a/common/libex/include/ex/ex_str.h
+++ b/common/libex/include/ex/ex_str.h
@@ -55,9 +55,13 @@ int ex_wcsformat(wchar_t* out_buf, size_t buf_size, const wchar_t* fmt, ...);
 
 #include <string>
 #include <vector>
+//#include 
+#include <sstream>
 
 typedef std::string ex_astr;
 typedef std::wstring ex_wstr;
+typedef std::ostringstream ex_aoss;
+typedef std::wostringstream ex_woss;
 
 typedef std::vector<ex_astr> ex_astrs;
 typedef std::vector<ex_wstr> ex_wstrs;
diff --git a/common/libex/src/ex_thread.cpp b/common/libex/src/ex_thread.cpp
index 9358a77..395dfba 100644
--- a/common/libex/src/ex_thread.cpp
+++ b/common/libex/src/ex_thread.cpp
@@ -80,8 +80,10 @@ bool ExThreadBase::stop(void) {
 		return false;
 	}
 #else
-	if (pthread_join(m_handle, NULL) != 0) {
-		return false;
+	if(m_handle != 0) {
+		if (pthread_join(m_handle, NULL) != 0) {
+			return false;
+		}
 	}
 #endif
 
diff --git a/config.ini.in b/config.ini.in
index fbc4f5e..27cb9fa 100644
--- a/config.ini.in
+++ b/config.ini.in
@@ -22,6 +22,8 @@ wget = C:\Program Files (x86)\wget\wget.exe
 # if not set msbuild path, default to get it by register.
 #msbuild = C:\Program Files (x86)\MSBuild\14.0\bin\MSBuild.exe
 
+# need qt to build tp-player.
+qt = C:\Qt\Qt5.12.0\5.12.0\msvc2017
 
 # ============================================
 # for Linux and macOS
diff --git a/dist/client/windows/assist/installer.nsi b/dist/client/windows/assist/installer.nsi
index f6c82e0..f6fc853 100644
Binary files a/dist/client/windows/assist/installer.nsi and b/dist/client/windows/assist/installer.nsi differ
diff --git a/external/fix-external/Python-3.7.5/Modules/Setup.dist b/external/fix-external/Python-3.7.5/Modules/Setup.dist
new file mode 100755
index 0000000..730619e
--- /dev/null
+++ b/external/fix-external/Python-3.7.5/Modules/Setup.dist
@@ -0,0 +1,400 @@
+# -*- makefile -*-
+# The file Setup is used by the makesetup script to construct the files
+# Makefile and config.c, from Makefile.pre and config.c.in,
+# respectively.  The file Setup itself is initially copied from
+# Setup.dist; once it exists it will not be overwritten, so you can edit
+# Setup to your heart's content.  Note that Makefile.pre is created
+# from Makefile.pre.in by the toplevel configure script.
+
+# (VPATH notes: Setup and Makefile.pre are in the build directory, as
+# are Makefile and config.c; the *.in and *.dist files are in the source
+# directory.)
+
+# Each line in this file describes one or more optional modules.
+# Modules configured here will not be compiled by the setup.py script,
+# so the file can be used to override setup.py's behavior.
+# Tag lines containing just the word "*static*", "*shared*" or "*disabled*"
+# (without the quotes but with the stars) are used to tag the following module
+# descriptions. Tag lines may alternate throughout this file.  Modules are
+# built statically when they are preceded by a "*static*" tag line or when
+# there is no tag line between the start of the file and the module
+# description.  Modules are built as a shared library when they are preceded by
+# a "*shared*" tag line.  Modules are not built at all, not by the Makefile,
+# nor by the setup.py script, when they are preceded by a "*disabled*" tag
+# line.
+
+# Lines have the following structure:
+#
+# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
+#
+# <sourcefile> is anything ending in .c (.C, .cc, .c++ are C++ files)
+# <cpparg> is anything starting with -I, -D, -U or -C
+# <library> is anything ending in .a or beginning with -l or -L
+# <module> is anything else but should be a valid Python
+# identifier (letters, digits, underscores, beginning with non-digit)
+#
+# (As the makesetup script changes, it may recognize some other
+# arguments as well, e.g. *.so and *.sl as libraries.  See the big
+# case statement in the makesetup script.)
+#
+# Lines can also have the form
+#
+# <name> = <value>
+#
+# which defines a Make variable definition inserted into Makefile.in
+#
+# The build process works like this:
+#
+# 1. Build all modules that are declared as static in Modules/Setup,
+#    combine them into libpythonxy.a, combine that into python.
+# 2. Build all modules that are listed as shared in Modules/Setup.
+# 3. Invoke setup.py. That builds all modules that
+#    a) are not builtin, and
+#    b) are not listed in Modules/Setup, and
+#    c) can be built on the target
+#
+# Therefore, modules declared to be shared will not be
+# included in the config.c file, nor in the list of objects to be
+# added to the library archive, and their linker options won't be
+# added to the linker options. Rules to create their .o files and
+# their shared libraries will still be added to the Makefile, and
+# their names will be collected in the Make variable SHAREDMODS. This
+# is used to build modules as shared libraries. (They can be
+# installed using "make sharedinstall", which is implied by the
+# toplevel "make install" target.) (For compatibility,
+# *noconfig* has the same effect as *shared*.)
+#
+# NOTE: As a standard policy, as many modules as can be supported by a
+# platform should be present. The distribution comes with all modules
+# enabled that are supported by most platforms and don't require you
+# to ftp sources from elsewhere.
+
+
+# Some special rules to define PYTHONPATH.
+# Edit the definitions below to indicate which options you are using.
+# Don't add any whitespace or comments!
+
+# Directories where library files get installed.
+# DESTLIB is for Python modules; MACHDESTLIB for shared libraries.
+DESTLIB=$(LIBDEST)
+MACHDESTLIB=$(BINLIBDEST)
+
+# NOTE: all the paths are now relative to the prefix that is computed
+# at run time!
+
+# Standard path -- don't edit.
+# No leading colon since this is the first entry.
+# Empty since this is now just the runtime prefix.
+DESTPATH= + +# Site specific path components -- should begin with : if non-empty +SITEPATH= + +# Standard path components for test modules +TESTPATH= + +COREPYTHONPATH=$(DESTPATH)$(SITEPATH)$(TESTPATH) +PYTHONPATH=$(COREPYTHONPATH) + + +# The modules listed here can't be built as shared libraries for +# various reasons; therefore they are listed here instead of in the +# normal order. + +# This only contains the minimal set of modules required to run the +# setup.py script in the root of the Python source tree. + +posix -DPy_BUILD_CORE posixmodule.c # posix (UNIX) system calls +errno errnomodule.c # posix (UNIX) errno values +pwd pwdmodule.c # this is needed to find out the user's home dir + # if $HOME is not set +_sre _sre.c # Fredrik Lundh's new regular expressions +_codecs _codecsmodule.c # access to the builtin codecs and codec registry +_weakref _weakref.c # weak references +_functools -DPy_BUILD_CORE _functoolsmodule.c # Tools for working with functions and callable objects +_operator _operator.c # operator.add() and similar goodies +_collections _collectionsmodule.c # Container types +_abc _abc.c # Abstract base classes +itertools itertoolsmodule.c # Functions creating iterators for efficient looping +atexit atexitmodule.c # Register functions to be run at interpreter-shutdown +_signal -DPy_BUILD_CORE signalmodule.c +_stat _stat.c # stat.h interface +time -DPy_BUILD_CORE timemodule.c # -lm # time operations and variables +_thread -DPy_BUILD_CORE _threadmodule.c # low-level threading interface + +# access to ISO C locale support +_locale _localemodule.c # -lintl + +# Standard I/O baseline +_io -DPy_BUILD_CORE -I$(srcdir)/Modules/_io _io/_iomodule.c _io/iobase.c _io/fileio.c _io/bytesio.c _io/bufferedio.c _io/textio.c _io/stringio.c + +# The zipimport module is always imported at startup. Having it as a +# builtin module avoids some bootstrapping problems and reduces overhead. +zipimport -DPy_BUILD_CORE zipimport.c + +# faulthandler module +faulthandler faulthandler.c + +# debug tool to trace memory blocks allocated by Python +_tracemalloc _tracemalloc.c hashtable.c + +# The rest of the modules listed in this file are all commented out by +# default. Usually they can be detected and built as dynamically +# loaded modules by the new setup.py script added in Python 2.1. If +# you're on a platform that doesn't support dynamic loading, want to +# compile modules statically into the Python binary, or need to +# specify some odd set of compiler switches, you can uncomment the +# appropriate lines below. + +# ====================================================================== + +# The Python symtable module depends on .h files that setup.py doesn't track +_symtable symtablemodule.c + +# Uncommenting the following line tells makesetup that all following +# modules are to be built as shared libraries (see above for more +# detail; also note that *static* or *disabled* cancels this effect): + +#*shared* +*static* + +# GNU readline. Unlike previous Python incarnations, GNU readline is +# now incorporated in an optional module, configured in the Setup file +# instead of by a configure script switch. You may have to insert a +# -L option pointing to the directory where libreadline.* lives, +# and you may have to change -ltermcap to -ltermlib or perhaps remove +# it, depending on your system -- see the GNU readline instructions. +# It's okay for this to be a shared library, too. 
+ +#readline readline.c -lreadline -ltermcap + + +# Modules that should always be present (non UNIX dependent): + +array arraymodule.c # array objects +cmath cmathmodule.c _math.c # -lm # complex math library functions +math mathmodule.c _math.c # -lm # math library functions, e.g. sin() +_contextvars _contextvarsmodule.c # Context Variables +_struct _struct.c # binary structure packing/unpacking +_weakref _weakref.c # basic weak reference support +#_testcapi _testcapimodule.c # Python C API test module +_random _randommodule.c # Random number generator +_elementtree -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI _elementtree.c # elementtree accelerator +_pickle _pickle.c # pickle accelerator +_datetime _datetimemodule.c # datetime accelerator +_bisect _bisectmodule.c # Bisection algorithms +_heapq _heapqmodule.c # Heap queue algorithm +_asyncio _asynciomodule.c # Fast asyncio Future + +unicodedata unicodedata.c # static Unicode character database + + +# Modules with some UNIX dependencies -- on by default: +# (If you have a really backward UNIX, select and socket may not be +# supported...) + +fcntl fcntlmodule.c # fcntl(2) and ioctl(2) +spwd spwdmodule.c # spwd(3) +grp grpmodule.c # grp(3) +select selectmodule.c # select(2); not on ancient System V + +# Memory-mapped files (also works on Win32). +mmap mmapmodule.c + +# CSV file helper +_csv _csv.c + +# Socket module helper for socket(2) +_socket socketmodule.c + +# Socket module helper for SSL support; you must comment out the other +# socket line above, and possibly edit the SSL variable: +SSL=$(srcdir)/../../release +_ssl _ssl.c \ + -DUSE_SSL -I$(SSL)/include \ + $(SSL)/lib/libssl.a $(SSL)/lib/libcrypto.a +# -lssl -lcrypto + +# The crypt module is now disabled by default because it breaks builds +# on many systems (where -lcrypt is needed), e.g. Linux (I believe). + +#_crypt _cryptmodule.c # -lcrypt # crypt(3); needs -lcrypt on some systems + + +# Some more UNIX dependent modules -- off by default, since these +# are not supported by all UNIX systems: + +#nis nismodule.c -lnsl # Sun yellow pages -- not everywhere +termios termios.c # Steen Lumholt's termios module +resource resource.c # Jeremy Hylton's rlimit interface + +_posixsubprocess _posixsubprocess.c # POSIX subprocess module helper + +# Multimedia modules -- off by default. +# These don't work for 64-bit platforms!!! +# #993173 says audioop works on 64-bit platforms, though. +# These represent audio samples or images as strings: + +#audioop audioop.c # Operations on audio samples + + +# Note that the _md5 and _sha modules are normally only built if the +# system does not have the OpenSSL libs containing an optimized version. + +# The _md5 module implements the RSA Data Security, Inc. MD5 +# Message-Digest Algorithm, described in RFC 1321. + +_md5 md5module.c + + +# The _sha module implements the SHA checksum algorithms. +# (NIST's Secure Hash Algorithms.) +_sha1 sha1module.c +_sha256 sha256module.c +_sha512 sha512module.c +_sha3 _sha3/sha3module.c + +# _blake module +_blake2 _blake2/blake2module.c _blake2/blake2b_impl.c _blake2/blake2s_impl.c + +# The _tkinter module. +# +# The command for _tkinter is long and site specific. Please +# uncomment and/or edit those parts as indicated. If you don't have a +# specific extension (e.g. Tix or BLT), leave the corresponding line +# commented out. (Leave the trailing backslashes in! 
If you +# experience strange errors, you may want to join all uncommented +# lines and remove the backslashes -- the backslash interpretation is +# done by the shell's "read" command and it may not be implemented on +# every system. + +# *** Always uncomment this (leave the leading underscore in!): +# _tkinter _tkinter.c tkappinit.c -DWITH_APPINIT \ +# *** Uncomment and edit to reflect where your Tcl/Tk libraries are: +# -L/usr/local/lib \ +# *** Uncomment and edit to reflect where your Tcl/Tk headers are: +# -I/usr/local/include \ +# *** Uncomment and edit to reflect where your X11 header files are: +# -I/usr/X11R6/include \ +# *** Or uncomment this for Solaris: +# -I/usr/openwin/include \ +# *** Uncomment and edit for Tix extension only: +# -DWITH_TIX -ltix8.1.8.2 \ +# *** Uncomment and edit for BLT extension only: +# -DWITH_BLT -I/usr/local/blt/blt8.0-unoff/include -lBLT8.0 \ +# *** Uncomment and edit for PIL (TkImaging) extension only: +# (See http://www.pythonware.com/products/pil/ for more info) +# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \ +# *** Uncomment and edit for TOGL extension only: +# -DWITH_TOGL togl.c \ +# *** Uncomment and edit to reflect your Tcl/Tk versions: +# -ltk8.2 -ltcl8.2 \ +# *** Uncomment and edit to reflect where your X11 libraries are: +# -L/usr/X11R6/lib \ +# *** Or uncomment this for Solaris: +# -L/usr/openwin/lib \ +# *** Uncomment these for TOGL extension only: +# -lGL -lGLU -lXext -lXmu \ +# *** Uncomment for AIX: +# -lld \ +# *** Always uncomment this; X11 libraries to link with: +# -lX11 + +# Lance Ellinghaus's syslog module +syslog syslogmodule.c # syslog daemon interface + + +# Curses support, requiring the System V version of curses, often +# provided by the ncurses library. e.g. on Linux, link with -lncurses +# instead of -lcurses). + +#_curses _cursesmodule.c -lcurses -ltermcap +# Wrapper for the panel library that's part of ncurses and SYSV curses. +#_curses_panel _curses_panel.c -lpanel -lncurses + + +# Modules that provide persistent dictionary-like semantics. You will +# probably want to arrange for at least one of them to be available on +# your machine, though none are defined by default because of library +# dependencies. The Python module dbm/__init__.py provides an +# implementation independent wrapper for these; dbm/dumb.py provides +# similar functionality (but slower of course) implemented in Python. + +#_dbm _dbmmodule.c # dbm(3) may require -lndbm or similar + +# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm: + +#_gdbm _gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm + + +# Helper module for various ascii-encoders +binascii binascii.c + +# Fred Drake's interface to the Python parser +#parser parsermodule.c + + +# Andrew Kuchling's zlib module. +# This require zlib 1.1.3 (or later). +# See http://www.gzip.org/zlib/ +#zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz + +# Interface to the Expat XML parser +# +# Expat was written by James Clark and is now maintained by a group of +# developers on SourceForge; see www.libexpat.org for more +# information. The pyexpat module was written by Paul Prescod after a +# prototype by Jack Jansen. Source of Expat 1.95.2 is included in +# Modules/expat/. Usage of a system shared libexpat.so/expat.dll is +# not advised. +# +# More information on Expat can be found at www.libexpat.org. 
+#
+#pyexpat expat/xmlparse.c expat/xmlrole.c expat/xmltok.c pyexpat.c -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+
+# Hye-Shik Chang's CJKCodecs
+
+# multibytecodec is required for all the other CJK codec modules
+_multibytecodec cjkcodecs/multibytecodec.c
+
+_codecs_cn cjkcodecs/_codecs_cn.c
+_codecs_hk cjkcodecs/_codecs_hk.c
+_codecs_iso2022 cjkcodecs/_codecs_iso2022.c
+_codecs_jp cjkcodecs/_codecs_jp.c
+_codecs_kr cjkcodecs/_codecs_kr.c
+_codecs_tw cjkcodecs/_codecs_tw.c
+
+# Example -- included for reference only:
+# xx xxmodule.c
+
+# Another example -- the 'xxsubtype' module shows C-level subtyping in action
+#xxsubtype xxsubtype.c
+
+
+
+########################################################################
+# add extra-builtin-module by Apex Liu.
+########################################################################
+
+zlib zlibmodule.c -I$(srcdir)/Modules/zlib \
+    zlib/adler32.c zlib/crc32.c zlib/deflate.c zlib/infback.c zlib/inffast.c zlib/inflate.c zlib/inftrees.c zlib/trees.c zlib/zutil.c \
+    zlib/compress.c zlib/uncompr.c zlib/gzclose.c zlib/gzlib.c zlib/gzread.c zlib/gzwrite.c
+
+_json _json.c
+
+_sqlite3 -I$(srcdir)/Modules/_sqlite/sqlite3 -I$(srcdir)/Modules/_sqlite \
+    $(srcdir)/Modules/_sqlite/sqlite3/sqlite3.c \
+    _sqlite/cache.c _sqlite/connection.c _sqlite/cursor.c _sqlite/microprotocols.c _sqlite/module.c \
+    _sqlite/prepare_protocol.c _sqlite/row.c _sqlite/statement.c _sqlite/util.c
+
+# Uncommenting the following line tells makesetup that all following modules
+# are not built (see above for more detail).
+#
+#*disabled*
+#
+#_sqlite3 _tkinter _curses pyexpat
+#_codecs_jp _codecs_kr _codecs_tw unicodedata
+
+*disabled*
+_tkinter _curses
+
diff --git a/external/fix-external/Python-3.7.5/Modules/_sqlite/cache.h b/external/fix-external/Python-3.7.5/Modules/_sqlite/cache.h
new file mode 100644
index 0000000..9d9a95b
--- /dev/null
+++ b/external/fix-external/Python-3.7.5/Modules/_sqlite/cache.h
@@ -0,0 +1,75 @@
+/* cache.h - definitions for the LRU cache
+ *
+ * Copyright (C) 2004-2010 Gerhard Häring 
+ *
+ * This file is part of pysqlite.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty.  In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ *    claim that you wrote the original software. If you use this software
+ *    in a product, an acknowledgment in the product documentation would be
+ *    appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ *    misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+#ifndef PYSQLITE_CACHE_H
+#define PYSQLITE_CACHE_H
+#include "Python.h"
+
+#define MODULE_NAME "sqlite3"
+
+/* The LRU cache is implemented as a combination of a doubly-linked list
+ * with a dictionary. The list items are of type 'Node' and the dictionary
+ * has the nodes as values.
*/ + +typedef struct _pysqlite_Node +{ + PyObject_HEAD + PyObject* key; + PyObject* data; + long count; + struct _pysqlite_Node* prev; + struct _pysqlite_Node* next; +} pysqlite_Node; + +typedef struct +{ + PyObject_HEAD + int size; + + /* a dictionary mapping keys to Node entries */ + PyObject* mapping; + + /* the factory callable */ + PyObject* factory; + + pysqlite_Node* first; + pysqlite_Node* last; + + /* if set, decrement the factory function when the Cache is deallocated. + * this is almost always desirable, but not in the pysqlite context */ + int decref_factory; +} pysqlite_Cache; + +extern PyTypeObject pysqlite_NodeType; +extern PyTypeObject pysqlite_CacheType; + +int pysqlite_node_init(pysqlite_Node* self, PyObject* args, PyObject* kwargs); +void pysqlite_node_dealloc(pysqlite_Node* self); + +int pysqlite_cache_init(pysqlite_Cache* self, PyObject* args, PyObject* kwargs); +void pysqlite_cache_dealloc(pysqlite_Cache* self); +PyObject* pysqlite_cache_get(pysqlite_Cache* self, PyObject* args); + +int pysqlite_cache_setup_types(void); + +#endif diff --git a/external/fix-external/Python-3.7.5/Modules/_sqlite/prepare_protocol.h b/external/fix-external/Python-3.7.5/Modules/_sqlite/prepare_protocol.h new file mode 100644 index 0000000..2de505c --- /dev/null +++ b/external/fix-external/Python-3.7.5/Modules/_sqlite/prepare_protocol.h @@ -0,0 +1,43 @@ +/* prepare_protocol.h - the protocol for preparing values for SQLite + * + * Copyright (C) 2005-2010 Gerhard Häring + * + * This file is part of pysqlite. + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + */ + +#ifndef PYSQLITE_PREPARE_PROTOCOL_H +#define PYSQLITE_PREPARE_PROTOCOL_H +#include "Python.h" + +#define MODULE_NAME "sqlite3" + +typedef struct +{ + PyObject_HEAD +} pysqlite_PrepareProtocol; + +extern PyTypeObject pysqlite_PrepareProtocolType; + +int pysqlite_prepare_protocol_init(pysqlite_PrepareProtocol* self, PyObject* args, PyObject* kwargs); +void pysqlite_prepare_protocol_dealloc(pysqlite_PrepareProtocol* self); + +int pysqlite_prepare_protocol_setup_types(void); + +#define UNKNOWN (-1) +#endif diff --git a/external/fix-external/libssh/libssh-0.9.2/src/libcrypto-compat.c b/external/fix-external/libssh/libssh-0.9.2/src/libcrypto-compat.c new file mode 100644 index 0000000..36de72f --- /dev/null +++ b/external/fix-external/libssh/libssh-0.9.2/src/libcrypto-compat.c @@ -0,0 +1,398 @@ +/* + * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "config.h"
+
+#include <string.h>
+#include "libcrypto-compat.h"
+
+#ifndef OPENSSL_NO_ENGINE
+#include <openssl/engine.h>
+#endif
+
+static void *OPENSSL_zalloc(size_t num)
+{
+    void *ret = OPENSSL_malloc(num);
+
+    if (ret != NULL)
+        memset(ret, 0, num);
+    return ret;
+}
+
+int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d)
+{
+    /* If the fields n and e in r are NULL, the corresponding input
+     * parameters MUST be non-NULL for n and e.  d may be
+     * left NULL (in case only the public key is used).
+     */
+    if ((r->n == NULL && n == NULL)
+        || (r->e == NULL && e == NULL))
+        return 0;
+
+    if (n != NULL) {
+        BN_free(r->n);
+        r->n = n;
+    }
+    if (e != NULL) {
+        BN_free(r->e);
+        r->e = e;
+    }
+    if (d != NULL) {
+        BN_free(r->d);
+        r->d = d;
+    }
+
+    return 1;
+}
+
+int RSA_set0_factors(RSA *r, BIGNUM *p, BIGNUM *q)
+{
+    /* If the fields p and q in r are NULL, the corresponding input
+     * parameters MUST be non-NULL.
+     */
+    if ((r->p == NULL && p == NULL)
+        || (r->q == NULL && q == NULL))
+        return 0;
+
+    if (p != NULL) {
+        BN_free(r->p);
+        r->p = p;
+    }
+    if (q != NULL) {
+        BN_free(r->q);
+        r->q = q;
+    }
+
+    return 1;
+}
+
+int RSA_set0_crt_params(RSA *r, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp)
+{
+    /* If the fields dmp1, dmq1 and iqmp in r are NULL, the corresponding input
+     * parameters MUST be non-NULL.
+     */
+    if ((r->dmp1 == NULL && dmp1 == NULL)
+        || (r->dmq1 == NULL && dmq1 == NULL)
+        || (r->iqmp == NULL && iqmp == NULL))
+        return 0;
+
+    if (dmp1 != NULL) {
+        BN_free(r->dmp1);
+        r->dmp1 = dmp1;
+    }
+    if (dmq1 != NULL) {
+        BN_free(r->dmq1);
+        r->dmq1 = dmq1;
+    }
+    if (iqmp != NULL) {
+        BN_free(r->iqmp);
+        r->iqmp = iqmp;
+    }
+
+    return 1;
+}
+
+void RSA_get0_key(const RSA *r,
+                  const BIGNUM **n, const BIGNUM **e, const BIGNUM **d)
+{
+    if (n != NULL)
+        *n = r->n;
+    if (e != NULL)
+        *e = r->e;
+    if (d != NULL)
+        *d = r->d;
+}
+
+void RSA_get0_factors(const RSA *r, const BIGNUM **p, const BIGNUM **q)
+{
+    if (p != NULL)
+        *p = r->p;
+    if (q != NULL)
+        *q = r->q;
+}
+
+void RSA_get0_crt_params(const RSA *r,
+                         const BIGNUM **dmp1, const BIGNUM **dmq1,
+                         const BIGNUM **iqmp)
+{
+    if (dmp1 != NULL)
+        *dmp1 = r->dmp1;
+    if (dmq1 != NULL)
+        *dmq1 = r->dmq1;
+    if (iqmp != NULL)
+        *iqmp = r->iqmp;
+}
+
+void DSA_get0_pqg(const DSA *d,
+                  const BIGNUM **p, const BIGNUM **q, const BIGNUM **g)
+{
+    if (p != NULL)
+        *p = d->p;
+    if (q != NULL)
+        *q = d->q;
+    if (g != NULL)
+        *g = d->g;
+}
+
+int DSA_set0_pqg(DSA *d, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+{
+    /* If the fields p, q and g in d are NULL, the corresponding input
+     * parameters MUST be non-NULL.
+     */
+    if ((d->p == NULL && p == NULL)
+        || (d->q == NULL && q == NULL)
+        || (d->g == NULL && g == NULL))
+        return 0;
+
+    if (p != NULL) {
+        BN_free(d->p);
+        d->p = p;
+    }
+    if (q != NULL) {
+        BN_free(d->q);
+        d->q = q;
+    }
+    if (g != NULL) {
+        BN_free(d->g);
+        d->g = g;
+    }
+
+    return 1;
+}
+
+void DSA_get0_key(const DSA *d,
+                  const BIGNUM **pub_key, const BIGNUM **priv_key)
+{
+    if (pub_key != NULL)
+        *pub_key = d->pub_key;
+    if (priv_key != NULL)
+        *priv_key = d->priv_key;
+}
+
+int DSA_set0_key(DSA *d, BIGNUM *pub_key, BIGNUM *priv_key)
+{
+    /* If the field pub_key in d is NULL, the corresponding input
+     * parameters MUST be non-NULL.  The priv_key field may
+     * be left NULL.
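These set0/get0 shims exist because OpenSSL 1.1.x made the RSA, DSA, DH and digest/HMAC context structs opaque; this file back-fills the 1.1-style accessors on top of 1.0.x so libssh can use one API everywhere. A minimal caller's sketch of the ownership rules (fill_public_key is a hypothetical helper; the OpenSSL calls are real):

```cpp
#include <openssl/rsa.h>
#include <openssl/bn.h>

// Hypothetical helper: install a public modulus/exponent into an RSA key.
static int fill_public_key(RSA *r, BIGNUM *n, BIGNUM *e)
{
    // RSA_set0_key takes ownership of n and e on success; d may stay
    // NULL for a public-only key, as the comments in this file state.
    if (RSA_set0_key(r, n, e, NULL) != 1)
        return 0;

    // The matching getter hands back borrowed const pointers only.
    const BIGNUM *n2 = NULL, *e2 = NULL, *d2 = NULL;
    RSA_get0_key(r, &n2, &e2, &d2);
    return (n2 != NULL && e2 != NULL && d2 == NULL);
}
```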
+ */ + if (d->pub_key == NULL && pub_key == NULL) + return 0; + + if (pub_key != NULL) { + BN_free(d->pub_key); + d->pub_key = pub_key; + } + if (priv_key != NULL) { + BN_free(d->priv_key); + d->priv_key = priv_key; + } + + return 1; +} + +void DSA_SIG_get0(const DSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) +{ + if (pr != NULL) + *pr = sig->r; + if (ps != NULL) + *ps = sig->s; +} + +int DSA_SIG_set0(DSA_SIG *sig, BIGNUM *r, BIGNUM *s) +{ + if (r == NULL || s == NULL) + return 0; + BN_clear_free(sig->r); + BN_clear_free(sig->s); + sig->r = r; + sig->s = s; + return 1; +} + +void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) +{ + if (pr != NULL) + *pr = sig->r; + if (ps != NULL) + *ps = sig->s; +} + +int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s) +{ + if (r == NULL || s == NULL) + return 0; + BN_clear_free(sig->r); + BN_clear_free(sig->s); + sig->r = r; + sig->s = s; + return 1; +} + +EVP_MD_CTX *EVP_MD_CTX_new(void) +{ + return OPENSSL_zalloc(sizeof(EVP_MD_CTX)); +} + +static void OPENSSL_clear_free(void *str, size_t num) +{ + if (str == NULL) + return; + if (num) + OPENSSL_cleanse(str, num); + OPENSSL_free(str); +} + +/* This call frees resources associated with the context */ +int EVP_MD_CTX_reset(EVP_MD_CTX *ctx) +{ + if (ctx == NULL) + return 1; + + /* + * Don't assume ctx->md_data was cleaned in EVP_Digest_Final, because + * sometimes only copies of the context are ever finalised. + */ + if (ctx->digest && ctx->digest->cleanup + && !EVP_MD_CTX_test_flags(ctx, EVP_MD_CTX_FLAG_CLEANED)) + ctx->digest->cleanup(ctx); + if (ctx->digest && ctx->digest->ctx_size && ctx->md_data + && !EVP_MD_CTX_test_flags(ctx, EVP_MD_CTX_FLAG_REUSE)) { + OPENSSL_clear_free(ctx->md_data, ctx->digest->ctx_size); + } + EVP_PKEY_CTX_free(ctx->pctx); +#ifndef OPENSSL_NO_ENGINE + ENGINE_finish(ctx->engine); +#endif + OPENSSL_cleanse(ctx, sizeof(*ctx)); + + return 1; +} + +void EVP_MD_CTX_free(EVP_MD_CTX *ctx) +{ + EVP_MD_CTX_reset(ctx); + OPENSSL_free(ctx); +} + +HMAC_CTX *HMAC_CTX_new(void) +{ + HMAC_CTX *ctx = OPENSSL_zalloc(sizeof(HMAC_CTX)); + + if (ctx != NULL) { + if (!HMAC_CTX_reset(ctx)) { + HMAC_CTX_free(ctx); + return NULL; + } + } + return ctx; +} + +static void hmac_ctx_cleanup(HMAC_CTX *ctx) +{ + EVP_MD_CTX_reset(&ctx->i_ctx); + EVP_MD_CTX_reset(&ctx->o_ctx); + EVP_MD_CTX_reset(&ctx->md_ctx); + ctx->md = NULL; + ctx->key_length = 0; + OPENSSL_cleanse(ctx->key, sizeof(ctx->key)); +} + +void HMAC_CTX_free(HMAC_CTX *ctx) +{ + if (ctx != NULL) { + hmac_ctx_cleanup(ctx); +#if OPENSSL_VERSION_NUMBER > 0x10100000L + EVP_MD_CTX_free(&ctx->i_ctx); + EVP_MD_CTX_free(&ctx->o_ctx); + EVP_MD_CTX_free(&ctx->md_ctx); +#endif + OPENSSL_free(ctx); + } +} + +int HMAC_CTX_reset(HMAC_CTX *ctx) +{ + HMAC_CTX_init(ctx); + return 1; +} + +#if 0 // by apex.liu +#ifndef HAVE_OPENSSL_EVP_CIPHER_CTX_NEW +EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void) +{ + return OPENSSL_zalloc(sizeof(EVP_CIPHER_CTX)); +} + +void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) +{ + /* EVP_CIPHER_CTX_reset(ctx); alias */ + EVP_CIPHER_CTX_init(ctx); + OPENSSL_free(ctx); +} +#endif +#endif // endif by apex.liu + +void DH_get0_pqg(const DH *dh, + const BIGNUM **p, const BIGNUM **q, const BIGNUM **g) +{ + if (p) { + *p = dh->p; + } + if (q) { + *q = NULL; + } + if (g) { + *g = dh->g; + } +} + +int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) +{ + if (p) { + if (dh->p) { + BN_free(dh->p); + } + dh->p = p; + } + if (g) { + if (dh->g) { + BN_free(dh->g); + } + dh->g = g; + } + return 1; +} + +void 
DH_get0_key(const DH *dh,
+                 const BIGNUM **pub_key, const BIGNUM **priv_key)
+{
+    if (pub_key) {
+        *pub_key = dh->pub_key;
+    }
+    if (priv_key) {
+        *priv_key = dh->priv_key;
+    }
+}
+
+int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key)
+{
+    if (pub_key) {
+        if (dh->pub_key) {
+            BN_free(dh->pub_key);
+        }
+        dh->pub_key = pub_key;
+    }
+    if (priv_key) {
+        if (dh->priv_key) {
+            BN_free(dh->priv_key);
+        }
+        dh->priv_key = priv_key;
+    }
+    return 1;
+}
diff --git a/external/fix-external/libssh/libssh-0.9.2/src/session.c b/external/fix-external/libssh/libssh-0.9.2/src/session.c
new file mode 100644
index 0000000..7ec5522
--- /dev/null
+++ b/external/fix-external/libssh/libssh-0.9.2/src/session.c
@@ -0,0 +1,1210 @@
+/*
+ * session.c - non-networking functions
+ *
+ * This file is part of the SSH Library
+ *
+ * Copyright (c) 2005-2013 by Aris Adamantiadis
+ *
+ * The SSH Library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at your
+ * option) any later version.
+ *
+ * The SSH Library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with the SSH Library; see the file COPYING.  If not, write to
+ * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ */
+
+#include "config.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "libssh/priv.h"
+#include "libssh/libssh.h"
+#include "libssh/crypto.h"
+#include "libssh/server.h"
+#include "libssh/socket.h"
+#include "libssh/ssh2.h"
+#include "libssh/agent.h"
+#include "libssh/packet.h"
+#include "libssh/session.h"
+#include "libssh/misc.h"
+#include "libssh/buffer.h"
+#include "libssh/poll.h"
+#include "libssh/pki.h"
+
+#define FIRST_CHANNEL 42 // why not ? it helps to find bugs.
+
+/**
+ * @defgroup libssh_session The SSH session functions.
+ * @ingroup libssh
+ *
+ * Functions that manage a session.
+ *
+ * @{
+ */
+
+/**
+ * @brief Create a new ssh session.
+ *
+ * @returns             A new ssh_session pointer, NULL on error.
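Before the allocator itself, a caller's-eye sketch of the lifecycle this function anchors (standard public libssh client API; error reporting trimmed):

```cpp
#include <libssh/libssh.h>

int connect_once(const char *host)
{
    ssh_session s = ssh_new();               // allocate + default options
    if (s == NULL)
        return -1;

    ssh_options_set(s, SSH_OPTIONS_HOST, host);
    int rc = ssh_connect(s);
    if (rc == SSH_OK)
        ssh_disconnect(s);                   // polite protocol-level close
    ssh_free(s);                             // tears down what ssh_new built
    return (rc == SSH_OK) ? 0 : -1;
}
```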
+ */ +ssh_session ssh_new(void) { + ssh_session session; + char *id = NULL; + int rc; + + session = calloc(1, sizeof (struct ssh_session_struct)); + if (session == NULL) { + return NULL; + } + + session->next_crypto = crypto_new(); + if (session->next_crypto == NULL) { + goto err; + } + + session->socket = ssh_socket_new(session); + if (session->socket == NULL) { + goto err; + } + + session->out_buffer = ssh_buffer_new(); + if (session->out_buffer == NULL) { + goto err; + } + + session->in_buffer=ssh_buffer_new(); + if (session->in_buffer == NULL) { + goto err; + } + + session->out_queue = ssh_list_new(); + if (session->out_queue == NULL) { + goto err; + } + + session->alive = 0; + session->auth.supported_methods = 0; + ssh_set_blocking(session, 1); + session->maxchannel = FIRST_CHANNEL; + +#ifndef _WIN32 + session->agent = ssh_agent_new(session); + if (session->agent == NULL) { + goto err; + } +#endif /* _WIN32 */ + + /* OPTIONS */ + session->opts.StrictHostKeyChecking = 1; + session->opts.port = 0; + session->opts.fd = -1; + session->opts.compressionlevel=7; + session->opts.nodelay = 0; + session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH | SSH_OPT_FLAG_PUBKEY_AUTH | + SSH_OPT_FLAG_KBDINT_AUTH | SSH_OPT_FLAG_GSSAPI_AUTH; + session->opts.identity = ssh_list_new(); + if (session->opts.identity == NULL) { + goto err; + } + + id = strdup("%d/id_ed25519"); + if (id == NULL) { + goto err; + } + rc = ssh_list_append(session->opts.identity, id); + if (rc == SSH_ERROR) { + goto err; + } + +#ifdef HAVE_ECC + id = strdup("%d/id_ecdsa"); + if (id == NULL) { + goto err; + } + rc = ssh_list_append(session->opts.identity, id); + if (rc == SSH_ERROR) { + goto err; + } +#endif + + id = strdup("%d/id_rsa"); + if (id == NULL) { + goto err; + } + rc = ssh_list_append(session->opts.identity, id); + if (rc == SSH_ERROR) { + goto err; + } + +#ifdef HAVE_DSA + id = strdup("%d/id_dsa"); + if (id == NULL) { + goto err; + } + rc = ssh_list_append(session->opts.identity, id); + if (rc == SSH_ERROR) { + goto err; + } +#endif + + return session; + +err: + free(id); + ssh_free(session); + return NULL; +} + +/** + * @brief Deallocate a SSH session handle. + * + * @param[in] session The SSH session to free. + * + * @see ssh_disconnect() + * @see ssh_new() + */ +void ssh_free(ssh_session session) +{ + int i; + struct ssh_iterator *it = NULL; + struct ssh_buffer_struct *b = NULL; + + if (session == NULL) { + return; + } + + /* + * Delete all channels + * + * This needs the first thing we clean up cause if there is still an open + * channel we call ssh_channel_close() first. So we need a working socket + * and poll context for it. 
+ */ + for (it = ssh_list_get_iterator(session->channels); + it != NULL; + it = ssh_list_get_iterator(session->channels)) { + ssh_channel_do_free(ssh_iterator_value(ssh_channel,it)); + ssh_list_remove(session->channels, it); + } + ssh_list_free(session->channels); + session->channels = NULL; + +#ifdef WITH_PCAP + if (session->pcap_ctx) { + ssh_pcap_context_free(session->pcap_ctx); + session->pcap_ctx = NULL; + } +#endif + + ssh_socket_free(session->socket); + session->socket = NULL; + + if (session->default_poll_ctx) { + ssh_poll_ctx_free(session->default_poll_ctx); + } + + ssh_buffer_free(session->in_buffer); + ssh_buffer_free(session->out_buffer); + session->in_buffer = session->out_buffer = NULL; + + if (session->in_hashbuf != NULL) { + ssh_buffer_free(session->in_hashbuf); + } + if (session->out_hashbuf != NULL) { + ssh_buffer_free(session->out_hashbuf); + } + + crypto_free(session->current_crypto); + crypto_free(session->next_crypto); + +#ifndef _WIN32 + ssh_agent_free(session->agent); +#endif /* _WIN32 */ + + ssh_key_free(session->srv.dsa_key); + session->srv.dsa_key = NULL; + ssh_key_free(session->srv.rsa_key); + session->srv.rsa_key = NULL; + ssh_key_free(session->srv.ecdsa_key); + session->srv.ecdsa_key = NULL; + ssh_key_free(session->srv.ed25519_key); + session->srv.ed25519_key = NULL; + + if (session->ssh_message_list) { + ssh_message msg; + + for (msg = ssh_list_pop_head(ssh_message, session->ssh_message_list); + msg != NULL; + msg = ssh_list_pop_head(ssh_message, session->ssh_message_list)) { + ssh_message_free(msg); + } + ssh_list_free(session->ssh_message_list); + } + + if (session->kbdint != NULL) { + ssh_kbdint_free(session->kbdint); + } + + if (session->packet_callbacks) { + ssh_list_free(session->packet_callbacks); + } + + /* options */ + if (session->opts.identity) { + char *id; + + for (id = ssh_list_pop_head(char *, session->opts.identity); + id != NULL; + id = ssh_list_pop_head(char *, session->opts.identity)) { + SAFE_FREE(id); + } + ssh_list_free(session->opts.identity); + } + + while ((b = ssh_list_pop_head(struct ssh_buffer_struct *, + session->out_queue)) != NULL) { + ssh_buffer_free(b); + } + ssh_list_free(session->out_queue); + +#ifndef _WIN32 + ssh_agent_state_free (session->agent_state); +#endif + session->agent_state = NULL; + + SAFE_FREE(session->auth.auto_state); + SAFE_FREE(session->serverbanner); + SAFE_FREE(session->clientbanner); + SAFE_FREE(session->banner); + + SAFE_FREE(session->opts.bindaddr); + SAFE_FREE(session->opts.custombanner); + SAFE_FREE(session->opts.username); + SAFE_FREE(session->opts.host); + SAFE_FREE(session->opts.sshdir); + SAFE_FREE(session->opts.knownhosts); + SAFE_FREE(session->opts.global_knownhosts); + SAFE_FREE(session->opts.ProxyCommand); + SAFE_FREE(session->opts.gss_server_identity); + SAFE_FREE(session->opts.gss_client_identity); + SAFE_FREE(session->opts.pubkey_accepted_types); + + for (i = 0; i < 10; i++) { + if (session->opts.wanted_methods[i]) { + SAFE_FREE(session->opts.wanted_methods[i]); + } + } + + /* burn connection, it could contain sensitive data */ + explicit_bzero(session, sizeof(struct ssh_session_struct)); + SAFE_FREE(session); +} + +/** + * @brief get the client banner + * + * @param[in] session The SSH session + * + * @return Returns the client banner string or NULL. 
+ */ +const char* ssh_get_clientbanner(ssh_session session) { + if (session == NULL) { + return NULL; + } + + return session->clientbanner; +} + +/** + * @brief get the server banner + * + * @param[in] session The SSH session + * + * @return Returns the server banner string or NULL. + */ +const char* ssh_get_serverbanner(ssh_session session) { + if(!session) { + return NULL; + } + return session->serverbanner; +} + +/** + * @brief get the name of the current key exchange algorithm. + * + * @param[in] session The SSH session + * + * @return Returns the key exchange algorithm string or NULL. + */ +const char* ssh_get_kex_algo(ssh_session session) { + if ((session == NULL) || + (session->current_crypto == NULL)) { + return NULL; + } + + switch (session->current_crypto->kex_type) { + case SSH_KEX_DH_GROUP1_SHA1: + return "diffie-hellman-group1-sha1"; + case SSH_KEX_DH_GROUP14_SHA1: + return "diffie-hellman-group14-sha1"; + case SSH_KEX_DH_GROUP16_SHA512: + return "diffie-hellman-group16-sha512"; + case SSH_KEX_DH_GROUP18_SHA512: + return "diffie-hellman-group18-sha512"; + case SSH_KEX_ECDH_SHA2_NISTP256: + return "ecdh-sha2-nistp256"; + case SSH_KEX_ECDH_SHA2_NISTP384: + return "ecdh-sha2-nistp384"; + case SSH_KEX_ECDH_SHA2_NISTP521: + return "ecdh-sha2-nistp521"; + case SSH_KEX_CURVE25519_SHA256: + return "curve25519-sha256"; + case SSH_KEX_CURVE25519_SHA256_LIBSSH_ORG: + return "curve25519-sha256@libssh.org"; + default: + break; + } + + return NULL; +} + +/** + * @brief get the name of the input cipher for the given session. + * + * @param[in] session The SSH session. + * + * @return Returns cipher name or NULL. + */ +const char* ssh_get_cipher_in(ssh_session session) { + if ((session != NULL) && + (session->current_crypto != NULL) && + (session->current_crypto->in_cipher != NULL)) { + return session->current_crypto->in_cipher->name; + } + return NULL; +} + +/** + * @brief get the name of the output cipher for the given session. + * + * @param[in] session The SSH session. + * + * @return Returns cipher name or NULL. + */ +const char* ssh_get_cipher_out(ssh_session session) { + if ((session != NULL) && + (session->current_crypto != NULL) && + (session->current_crypto->out_cipher != NULL)) { + return session->current_crypto->out_cipher->name; + } + return NULL; +} + +/** + * @brief get the name of the input HMAC algorithm for the given session. + * + * @param[in] session The SSH session. + * + * @return Returns HMAC algorithm name or NULL if unknown. + */ +const char* ssh_get_hmac_in(ssh_session session) { + if ((session != NULL) && + (session->current_crypto != NULL)) { + return ssh_hmac_type_to_string(session->current_crypto->in_hmac, session->current_crypto->in_hmac_etm); + } + return NULL; +} + +/** + * @brief get the name of the output HMAC algorithm for the given session. + * + * @param[in] session The SSH session. + * + * @return Returns HMAC algorithm name or NULL if unknown. + */ +const char* ssh_get_hmac_out(ssh_session session) { + if ((session != NULL) && + (session->current_crypto != NULL)) { + return ssh_hmac_type_to_string(session->current_crypto->out_hmac, session->current_crypto->out_hmac_etm); + } + return NULL; +} + +/** + * @brief Disconnect impolitely from a remote host by closing the socket. + * + * Suitable if you forked and want to destroy this session. + * + * @param[in] session The SSH session to disconnect. 
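Taken together, the getters above let an application log the negotiated parameters once key exchange has finished; a small sketch (the returned strings are borrowed, never free them):

```cpp
#include <libssh/libssh.h>
#include <cstdio>

void log_negotiated(ssh_session s)
{
    const char *kex = ssh_get_kex_algo(s);    // NULL before kex completes
    const char *cin = ssh_get_cipher_in(s);
    const char *out = ssh_get_cipher_out(s);
    std::printf("kex=%s, cipher-in=%s, cipher-out=%s\n",
                kex ? kex : "?", cin ? cin : "?", out ? out : "?");
}
```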
+ */ +void ssh_silent_disconnect(ssh_session session) { + if (session == NULL) { + return; + } + + ssh_socket_close(session->socket); + session->alive = 0; + ssh_disconnect(session); +} + +/** + * @brief Set the session in blocking/nonblocking mode. + * + * @param[in] session The ssh session to change. + * + * @param[in] blocking Zero for nonblocking mode. + */ +void ssh_set_blocking(ssh_session session, int blocking) +{ + if (session == NULL) { + return; + } + session->flags &= ~SSH_SESSION_FLAG_BLOCKING; + session->flags |= blocking ? SSH_SESSION_FLAG_BLOCKING : 0; +} + +/** + * @brief Return the blocking mode of libssh + * @param[in] session The SSH session + * @returns 0 if the session is nonblocking, + * @returns 1 if the functions may block. + */ +int ssh_is_blocking(ssh_session session) +{ + return (session->flags & SSH_SESSION_FLAG_BLOCKING) ? 1 : 0; +} + +/* Waits until the output socket is empty */ +static int ssh_flush_termination(void *c){ + ssh_session session = c; + if (ssh_socket_buffered_write_bytes(session->socket) == 0 || + session->session_state == SSH_SESSION_STATE_ERROR) + return 1; + else + return 0; +} + +/** + * @brief Blocking flush of the outgoing buffer + * @param[in] session The SSH session + * @param[in] timeout Set an upper limit on the time for which this function + * will block, in milliseconds. Specifying -1 + * means an infinite timeout. This parameter is passed to + * the poll() function. + * @returns SSH_OK on success, SSH_AGAIN if timeout occurred, + * SSH_ERROR otherwise. + */ + +int ssh_blocking_flush(ssh_session session, int timeout){ + int rc; + if (session == NULL) { + return SSH_ERROR; + } + + rc = ssh_handle_packets_termination(session, timeout, + ssh_flush_termination, session); + if (rc == SSH_ERROR) { + return rc; + } + if (!ssh_flush_termination(session)) { + rc = SSH_AGAIN; + } + + return rc; +} + +/** + * @brief Check if we are connected. + * + * @param[in] session The session to check if it is connected. + * + * @return 1 if we are connected, 0 if not. + */ +int ssh_is_connected(ssh_session session) { + if (session == NULL) { + return 0; + } + + return session->alive; +} + +/** + * @brief Get the fd of a connection. + * + * In case you'd need the file descriptor of the connection to the server/client. + * + * @param[in] session The ssh session to use. + * + * @return The file descriptor of the connection, or -1 if it is + * not connected + */ +socket_t ssh_get_fd(ssh_session session) { + if (session == NULL) { + return -1; + } + + return ssh_socket_get_fd(session->socket); +} + +/** + * @brief Tell the session it has data to read on the file descriptor without + * blocking. + * + * @param[in] session The ssh session to use. + */ +void ssh_set_fd_toread(ssh_session session) { + if (session == NULL) { + return; + } + + ssh_socket_set_read_wontblock(session->socket); +} + +/** + * @brief Tell the session it may write to the file descriptor without blocking. + * + * @param[in] session The ssh session to use. + */ +void ssh_set_fd_towrite(ssh_session session) { + if (session == NULL) { + return; + } + + ssh_socket_set_write_wontblock(session->socket); +} + +/** + * @brief Tell the session it has an exception to catch on the file descriptor. + * + * \param[in] session The ssh session to use. + */ +void ssh_set_fd_except(ssh_session session) { + if (session == NULL) { + return; + } + + ssh_socket_set_except(session->socket); +} + +/** + * @internal + * + * @brief Poll the current session for an event and call the appropriate + * callbacks. 
This function will not loop until the timeout is expired. + * + * This will block until one event happens. + * + * @param[in] session The session handle to use. + * + * @param[in] timeout Set an upper limit on the time for which this function + * will block, in milliseconds. Specifying SSH_TIMEOUT_INFINITE + * (-1) means an infinite timeout. + * Specifying SSH_TIMEOUT_USER means to use the timeout + * specified in options. 0 means poll will return immediately. + * This parameter is passed to the poll() function. + * + * @return SSH_OK on success, SSH_ERROR otherwise. + */ +int ssh_handle_packets(ssh_session session, int timeout) { + ssh_poll_handle spoll; + ssh_poll_ctx ctx; + int tm = timeout; + int rc; + + if (session == NULL || session->socket == NULL) { + return SSH_ERROR; + } + + spoll = ssh_socket_get_poll_handle(session->socket); + // apex.liu + // ssh_poll_add_events(spoll, POLLIN); + ssh_poll_add_events(spoll, POLLIN|POLLOUT); + ctx = ssh_poll_get_ctx(spoll); + + if (!ctx) { + ctx = ssh_poll_get_default_ctx(session); + ssh_poll_ctx_add(ctx, spoll); + } + + if (timeout == SSH_TIMEOUT_USER) { + if (ssh_is_blocking(session)) + tm = ssh_make_milliseconds(session->opts.timeout, + session->opts.timeout_usec); + else + tm = 0; + } + rc = ssh_poll_ctx_dopoll(ctx, tm); + if (rc == SSH_ERROR) { + session->session_state = SSH_SESSION_STATE_ERROR; + } + + return rc; +} + +/** + * @internal + * + * @brief Poll the current session for an event and call the appropriate + * callbacks. + * + * This will block until termination function returns true, or timeout expired. + * + * @param[in] session The session handle to use. + * + * @param[in] timeout Set an upper limit on the time for which this function + * will block, in milliseconds. Specifying + * SSH_TIMEOUT_INFINITE (-1) means an infinite timeout. + * Specifying SSH_TIMEOUT_USER means to use the timeout + * specified in options. 0 means poll will return + * immediately. + * SSH_TIMEOUT_DEFAULT uses the session timeout if set or + * uses blocking parameters of the session. + * This parameter is passed to the poll() function. + * + * @param[in] fct Termination function to be used to determine if it is + * possible to stop polling. + * @param[in] user User parameter to be passed to fct termination function. + * @return SSH_OK on success, SSH_ERROR otherwise. + */ +int ssh_handle_packets_termination(ssh_session session, + long timeout, + ssh_termination_function fct, + void *user) +{ + struct ssh_timestamp ts; + long timeout_ms = SSH_TIMEOUT_INFINITE; + long tm; + int ret = SSH_OK; + + // apex.liu + timeout = SSH_TIMEOUT_USER; + + /* If a timeout has been provided, use it */ + if (timeout >= 0) { + timeout_ms = timeout; + } else { + if (ssh_is_blocking(session)) { + if (timeout == SSH_TIMEOUT_USER || timeout == SSH_TIMEOUT_DEFAULT) { + if (session->opts.timeout > 0 || + session->opts.timeout_usec > 0) { + timeout_ms = + ssh_make_milliseconds(session->opts.timeout, + session->opts.timeout_usec); + } + } + } else { + timeout_ms = SSH_TIMEOUT_NONBLOCKING; + } + } + + /* avoid unnecessary syscall for the SSH_TIMEOUT_NONBLOCKING case */ + if (timeout_ms != SSH_TIMEOUT_NONBLOCKING) { + ssh_timestamp_init(&ts); + } + + tm = timeout_ms; + while(!fct(user)) { + ret = ssh_handle_packets(session, tm); + if (ret == SSH_ERROR) { + break; + } + if (ssh_timeout_elapsed(&ts, timeout_ms)) { + ret = fct(user) ? 
SSH_OK : SSH_AGAIN; + break; + } + + tm = ssh_timeout_update(&ts, timeout_ms); + } + + return ret; +} + +/** + * @brief Get session status + * + * @param session The ssh session to use. + * + * @returns A bitmask including SSH_CLOSED, SSH_READ_PENDING, SSH_WRITE_PENDING + * or SSH_CLOSED_ERROR which respectively means the session is closed, + * has data to read on the connection socket and session was closed + * due to an error. + */ +int ssh_get_status(ssh_session session) { + int socketstate; + int r = 0; + + if (session == NULL) { + return 0; + } + + socketstate = ssh_socket_get_status(session->socket); + + if (session->session_state == SSH_SESSION_STATE_DISCONNECTED) { + r |= SSH_CLOSED; + } + if (socketstate & SSH_READ_PENDING) { + r |= SSH_READ_PENDING; + } + if (socketstate & SSH_WRITE_PENDING) { + r |= SSH_WRITE_PENDING; + } + if ((session->session_state == SSH_SESSION_STATE_DISCONNECTED && + (socketstate & SSH_CLOSED_ERROR)) || + session->session_state == SSH_SESSION_STATE_ERROR) { + r |= SSH_CLOSED_ERROR; + } + + return r; +} + +/** + * @brief Get poll flags for an external mainloop + * + * @param session The ssh session to use. + * + * @returns A bitmask including SSH_READ_PENDING or SSH_WRITE_PENDING. + * For SSH_READ_PENDING, your invocation of poll() should include + * POLLIN. For SSH_WRITE_PENDING, your invocation of poll() should + * include POLLOUT. + */ +int ssh_get_poll_flags(ssh_session session) +{ + if (session == NULL) { + return 0; + } + + return ssh_socket_get_poll_flags (session->socket); +} + +/** + * @brief Get the disconnect message from the server. + * + * @param[in] session The ssh session to use. + * + * @return The message sent by the server along with the + * disconnect, or NULL in which case the reason of the + * disconnect may be found with ssh_get_error. + * + * @see ssh_get_error() + */ +const char *ssh_get_disconnect_message(ssh_session session) { + if (session == NULL) { + return NULL; + } + + if (session->session_state != SSH_SESSION_STATE_DISCONNECTED) { + ssh_set_error(session, SSH_REQUEST_DENIED, + "Connection not closed yet"); + } else if(!session->discon_msg) { + ssh_set_error(session, SSH_FATAL, + "Connection correctly closed but no disconnect message"); + } else { + return session->discon_msg; + } + + return NULL; +} + +/** + * @brief Get the protocol version of the session. + * + * @param session The ssh session to use. + * + * @return The SSH version as integer, < 0 on error. + */ +int ssh_get_version(ssh_session session) { + if (session == NULL) { + return -1; + } + + return 2; +} + +/** + * @internal + * @brief Callback to be called when the socket received an exception code. + * @param user is a pointer to session + */ +void ssh_socket_exception_callback(int code, int errno_code, void *user){ + ssh_session session=(ssh_session)user; + + SSH_LOG(SSH_LOG_RARE,"Socket exception callback: %d (%d)",code, errno_code); + session->session_state = SSH_SESSION_STATE_ERROR; + if (errno_code == 0 && code == SSH_SOCKET_EXCEPTION_EOF) { + ssh_set_error(session, SSH_FATAL, "Socket error: disconnected"); + } else { + ssh_set_error(session, SSH_FATAL, "Socket error: %s", strerror(errno_code)); + } + + session->ssh_connection_callback(session); +} + +/** + * @brief Send a message that should be ignored + * + * @param[in] session The SSH session + * @param[in] data Data to be sent + * + * @return SSH_OK on success, SSH_ERROR otherwise. 
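The status and poll-flag getters above are what an external main loop consumes; a sketch of the documented contract, assuming a POSIX poll() loop that hands control back to libssh once the fd is ready:

```cpp
#include <libssh/libssh.h>
#include <poll.h>

int wait_for_ssh(ssh_session s, int timeout_ms)
{
    struct pollfd pfd;
    pfd.fd      = ssh_get_fd(s);
    pfd.events  = 0;
    pfd.revents = 0;

    int flags = ssh_get_poll_flags(s);
    if (flags & SSH_READ_PENDING)
        pfd.events |= POLLIN;             // as the doc comment requires
    if (flags & SSH_WRITE_PENDING)
        pfd.events |= POLLOUT;

    return poll(&pfd, 1, timeout_ms);     // >0: fd ready, re-enter libssh
}
```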
+ */ +int ssh_send_ignore (ssh_session session, const char *data) { + const int type = SSH2_MSG_IGNORE; + int rc; + + if (ssh_socket_is_open(session->socket)) { + rc = ssh_buffer_pack(session->out_buffer, + "bs", + type, + data); + if (rc != SSH_OK){ + ssh_set_error_oom(session); + goto error; + } + ssh_packet_send(session); + ssh_handle_packets(session, 0); + } + + return SSH_OK; + +error: + ssh_buffer_reinit(session->out_buffer); + return SSH_ERROR; +} + +/** + * @brief Send a debug message + * + * @param[in] session The SSH session + * @param[in] message Data to be sent + * @param[in] always_display Message SHOULD be displayed by the server. It + * SHOULD NOT be displayed unless debugging + * information has been explicitly requested. + * + * @return SSH_OK on success, SSH_ERROR otherwise. + */ +int ssh_send_debug (ssh_session session, const char *message, int always_display) { + int rc; + + if (ssh_socket_is_open(session->socket)) { + rc = ssh_buffer_pack(session->out_buffer, + "bbsd", + SSH2_MSG_DEBUG, + always_display != 0 ? 1 : 0, + message, + 0); /* empty language tag */ + if (rc != SSH_OK) { + ssh_set_error_oom(session); + goto error; + } + ssh_packet_send(session); + ssh_handle_packets(session, 0); + } + + return SSH_OK; + +error: + ssh_buffer_reinit(session->out_buffer); + return SSH_ERROR; +} + + /** + * @brief Set the session data counters. + * + * This functions sets the counter structures to be used to calculate data + * which comes in and goes out through the session at different levels. + * + * @code + * struct ssh_counter_struct scounter = { + * .in_bytes = 0, + * .out_bytes = 0, + * .in_packets = 0, + * .out_packets = 0 + * }; + * + * struct ssh_counter_struct rcounter = { + * .in_bytes = 0, + * .out_bytes = 0, + * .in_packets = 0, + * .out_packets = 0 + * }; + * + * ssh_set_counters(session, &scounter, &rcounter); + * @endcode + * + * @param[in] session The SSH session. + * + * @param[in] scounter Counter for byte data handled by the session sockets. + * + * @param[in] rcounter Counter for byte and packet data handled by the session, + * prior compression and SSH overhead. + */ +void ssh_set_counters(ssh_session session, ssh_counter scounter, + ssh_counter rcounter) { + if (session != NULL) { + session->socket_counter = scounter; + session->raw_counter = rcounter; + } +} + +/** + * @deprecated Use ssh_get_publickey_hash() + */ +int ssh_get_pubkey_hash(ssh_session session, unsigned char **hash) +{ + ssh_key pubkey = NULL; + ssh_string pubkey_blob = NULL; + MD5CTX ctx; + unsigned char *h; + int rc; + + if (session == NULL || hash == NULL) { + return SSH_ERROR; + } + + /* In FIPS mode, we cannot use MD5 */ + if (ssh_fips_mode()) { + ssh_set_error(session, + SSH_FATAL, + "In FIPS mode MD5 is not allowed." 
+ "Try ssh_get_publickey_hash() with" + "SSH_PUBLICKEY_HASH_SHA256"); + return SSH_ERROR; + } + + *hash = NULL; + if (session->current_crypto == NULL || + session->current_crypto->server_pubkey == NULL) { + ssh_set_error(session,SSH_FATAL,"No current cryptographic context"); + return SSH_ERROR; + } + + h = calloc(MD5_DIGEST_LEN, sizeof(unsigned char)); + if (h == NULL) { + return SSH_ERROR; + } + + ctx = md5_init(); + if (ctx == NULL) { + SAFE_FREE(h); + return SSH_ERROR; + } + + rc = ssh_get_server_publickey(session, &pubkey); + if (rc != SSH_OK) { + md5_final(h, ctx); + SAFE_FREE(h); + return SSH_ERROR; + } + + rc = ssh_pki_export_pubkey_blob(pubkey, &pubkey_blob); + ssh_key_free(pubkey); + if (rc != SSH_OK) { + md5_final(h, ctx); + SAFE_FREE(h); + return SSH_ERROR; + } + + md5_update(ctx, ssh_string_data(pubkey_blob), ssh_string_len(pubkey_blob)); + ssh_string_free(pubkey_blob); + md5_final(h, ctx); + + *hash = h; + + return MD5_DIGEST_LEN; +} + +/** + * @brief Deallocate the hash obtained by ssh_get_pubkey_hash. + * + * This is required under Microsoft platform as this library might use a + * different C library than your software, hence a different heap. + * + * @param[in] hash The buffer to deallocate. + * + * @see ssh_get_pubkey_hash() + */ +void ssh_clean_pubkey_hash(unsigned char **hash) { + SAFE_FREE(*hash); +} + +/** + * @brief Get the server public key from a session. + * + * @param[in] session The session to get the key from. + * + * @param[out] key A pointer to store the allocated key. You need to free + * the key. + * + * @return SSH_OK on success, SSH_ERROR on errror. + * + * @see ssh_key_free() + */ +int ssh_get_server_publickey(ssh_session session, ssh_key *key) +{ + ssh_key pubkey = NULL; + + if (session == NULL || + session->current_crypto == NULL || + session->current_crypto->server_pubkey == NULL) { + return SSH_ERROR; + } + + pubkey = ssh_key_dup(session->current_crypto->server_pubkey); + if (pubkey == NULL) { + return SSH_ERROR; + } + + *key = pubkey; + return SSH_OK; +} + +/** + * @deprecated Use ssh_get_server_publickey() + */ +int ssh_get_publickey(ssh_session session, ssh_key *key) +{ + return ssh_get_server_publickey(session, key); +} + +/** + * @brief Allocates a buffer with the hash of the public key. + * + * This function allows you to get a hash of the public key. You can then + * print this hash in a human-readable form to the user so that he is able to + * verify it. Use ssh_get_hexa() or ssh_print_hash() to display it. + * + * @param[in] key The public key to create the hash for. + * + * @param[in] type The type of the hash you want. + * + * @param[in] hash A pointer to store the allocated buffer. It can be + * freed using ssh_clean_pubkey_hash(). + * + * @param[in] hlen The length of the hash. + * + * @return 0 on success, -1 if an error occured. + * + * @warning It is very important that you verify at some moment that the hash + * matches a known server. If you don't do it, cryptography wont help + * you at making things secure. + * OpenSSH uses SHA256 to print public key digests. 
+ * + * @see ssh_session_update_known_hosts() + * @see ssh_get_hexa() + * @see ssh_print_hash() + * @see ssh_clean_pubkey_hash() + */ +int ssh_get_publickey_hash(const ssh_key key, + enum ssh_publickey_hash_type type, + unsigned char **hash, + size_t *hlen) +{ + ssh_string blob; + unsigned char *h; + int rc; + + rc = ssh_pki_export_pubkey_blob(key, &blob); + if (rc < 0) { + return rc; + } + + switch (type) { + case SSH_PUBLICKEY_HASH_SHA1: + { + SHACTX ctx; + + h = calloc(1, SHA_DIGEST_LEN); + if (h == NULL) { + rc = -1; + goto out; + } + + ctx = sha1_init(); + if (ctx == NULL) { + free(h); + rc = -1; + goto out; + } + + sha1_update(ctx, ssh_string_data(blob), ssh_string_len(blob)); + sha1_final(h, ctx); + + *hlen = SHA_DIGEST_LEN; + } + break; + case SSH_PUBLICKEY_HASH_SHA256: + { + SHA256CTX ctx; + + h = calloc(1, SHA256_DIGEST_LEN); + if (h == NULL) { + rc = -1; + goto out; + } + + ctx = sha256_init(); + if (ctx == NULL) { + free(h); + rc = -1; + goto out; + } + + sha256_update(ctx, ssh_string_data(blob), ssh_string_len(blob)); + sha256_final(h, ctx); + + *hlen = SHA256_DIGEST_LEN; + } + break; + case SSH_PUBLICKEY_HASH_MD5: + { + MD5CTX ctx; + + /* In FIPS mode, we cannot use MD5 */ + if (ssh_fips_mode()) { + SSH_LOG(SSH_LOG_WARN, "In FIPS mode MD5 is not allowed." + "Try using SSH_PUBLICKEY_HASH_SHA256"); + rc = SSH_ERROR; + goto out; + } + + h = calloc(1, MD5_DIGEST_LEN); + if (h == NULL) { + rc = -1; + goto out; + } + + ctx = md5_init(); + if (ctx == NULL) { + free(h); + rc = -1; + goto out; + } + + md5_update(ctx, ssh_string_data(blob), ssh_string_len(blob)); + md5_final(h, ctx); + + *hlen = MD5_DIGEST_LEN; + } + break; + default: + rc = -1; + goto out; + } + + *hash = h; + rc = 0; +out: + ssh_string_free(blob); + return rc; +} + +/** @} */ diff --git a/external/version.ini b/external/version.ini index 78dacf0..481d0a0 100644 --- a/external/version.ini +++ b/external/version.ini @@ -1,8 +1,16 @@ [external_ver] -openssl = 1.0.2s,1000210f -libuv = 1.28.0 -mbedtls = 2.12.0 -libssh = 0.9.0 -jsoncpp = 0.10.6 -mongoose = 6.12 - +; https://github.com/openssl/openssl/releases +; http://slproweb.com/download/Win32OpenSSL-1_1_1d.exe +openssl = 1.1.1d,1010104f +; https://github.com/libuv/libuv/releases +libuv = 1.33.1 +; https://github.com/ARMmbed/mbedtls/releases +mbedtls = 2.16.3 +; https://github.com/open-source-parsers/jsoncpp/releases +jsoncpp = 1.9.2 +; https://github.com/cesanta/mongoose/releases +mongoose = 6.16 +; https://www.zlib.net/zlib1211.zip +zlib = 1.2.11,1211 +; https://git.libssh.org/projects/libssh.git/ +libssh = 0.9.2 diff --git a/server/tp_core/common/base_record.h b/server/tp_core/common/base_record.h index f4ac222..4e614d1 100644 --- a/server/tp_core/common/base_record.h +++ b/server/tp_core/common/base_record.h @@ -39,7 +39,6 @@ typedef struct TS_RECORD_HEADER_INFO { // ex_u32 packages; // 总包数 ex_u32 time_ms; // 总耗时(毫秒) ex_u32 dat_file_count; // 数据文件数量 - ex_u8 _reserve[64-4-2-2-4-4]; }TS_RECORD_HEADER_INFO; #define ts_record_header_info_size sizeof(TS_RECORD_HEADER_INFO) @@ -62,13 +61,14 @@ typedef struct TS_RECORD_HEADER_BASIC { // // RDP专有 - v3.5.0废弃并移除 // ex_u8 rdp_security; // 0 = RDP, 1 = TLS - ex_u8 _reserve[512 - ts_record_header_info_size - 2 - 2 - 8 - 2 - 2 - 64 - 64 - 40 - 40 - 2 - 40]; }TS_RECORD_HEADER_BASIC; #define ts_record_header_basic_size sizeof(TS_RECORD_HEADER_BASIC) typedef struct TS_RECORD_HEADER { TS_RECORD_HEADER_INFO info; + ex_u8 _reserve1[64 - ts_record_header_info_size]; TS_RECORD_HEADER_BASIC basic; + ex_u8 _reserve2[512 - 64 - 
ts_record_header_basic_size]; }TS_RECORD_HEADER; // header部分(header-info + header-basic) = 512B @@ -77,10 +77,9 @@ typedef struct TS_RECORD_HEADER { // 一个数据包的头 typedef struct TS_RECORD_PKG { ex_u8 type; // 包的数据类型 - ex_u8 _reserve[3]; // 保留 ex_u32 size; // 这个包的总大小(不含包头) ex_u32 time_ms; // 这个包距起始时间的时间差(毫秒,意味着一个连接不能持续超过49天) - //ex_u32 index; // 这个包的序号(最后一个包的序号与TS_RECORD_HEADER_INFO::packages数量匹配) + ex_u8 _reserve[3]; // 保留 }TS_RECORD_PKG; #pragma pack(pop) diff --git a/server/tp_core/core/tp_core.rc b/server/tp_core/core/tp_core.rc index 85a2b2f..0be0f40 100644 Binary files a/server/tp_core/core/tp_core.rc and b/server/tp_core/core/tp_core.rc differ diff --git a/server/tp_core/core/ts_http_rpc.cpp b/server/tp_core/core/ts_http_rpc.cpp index e8bd3f6..4340e1d 100644 --- a/server/tp_core/core/ts_http_rpc.cpp +++ b/server/tp_core/core/ts_http_rpc.cpp @@ -1,553 +1,571 @@ -#include "ts_http_rpc.h" -#include "ts_ver.h" -#include "ts_env.h" -#include "ts_session.h" -#include "ts_crypto.h" -#include "ts_web_rpc.h" -#include "tp_tpp_mgr.h" - -extern TppManager g_tpp_mgr; - -#include - - -#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W') -int ts_url_decode(const char *src, int src_len, char *dst, int dst_len, int is_form_url_encoded) -{ - int i, j, a, b; - - for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) - { - if (src[i] == '%') - { - if (i < src_len - 2 && isxdigit(*(const unsigned char *)(src + i + 1)) && - isxdigit(*(const unsigned char *)(src + i + 2))) { - a = tolower(*(const unsigned char *)(src + i + 1)); - b = tolower(*(const unsigned char *)(src + i + 2)); - dst[j] = (char)((HEXTOI(a) << 4) | HEXTOI(b)); - i += 2; - } - else - { - return -1; - } - } - else if (is_form_url_encoded && src[i] == '+') - { - dst[j] = ' '; - } - else - { - dst[j] = src[i]; - } - } - - dst[j] = '\0'; /* Null-terminate the destination */ - - return i >= src_len ? j : -1; -} - -TsHttpRpc::TsHttpRpc() : - ExThreadBase("http-rpc-thread") -{ - mg_mgr_init(&m_mg_mgr, NULL); -} - -TsHttpRpc::~TsHttpRpc() -{ - mg_mgr_free(&m_mg_mgr); -} - -void TsHttpRpc::_thread_loop(void) -{ - EXLOGI("[core] TeleportServer-RPC ready on %s:%d\n", m_host_ip.c_str(), m_host_port); - - while (!m_need_stop) - { - mg_mgr_poll(&m_mg_mgr, 500); - } - - EXLOGV("[core] rpc main loop end.\n"); -} - - -bool TsHttpRpc::init(void) -{ - struct mg_connection* nc = NULL; - - m_host_ip = g_env.rpc_bind_ip; - m_host_port = g_env.rpc_bind_port; - - char addr[128] = { 0 }; - // if (0 == strcmp(m_host_ip.c_str(), "127.0.0.1") || 0 == strcmp(m_host_ip.c_str(), "localhost")) - // ex_strformat(addr, 128, ":%d", m_host_port); - // else - // ex_strformat(addr, 128, "%s:%d", m_host_ip.c_str(), m_host_port); - if (0 == strcmp(m_host_ip.c_str(), "0.0.0.0")) - ex_strformat(addr, 128, ":%d", m_host_port); - else - ex_strformat(addr, 128, "%s:%d", m_host_ip.c_str(), m_host_port); - - nc = mg_bind(&m_mg_mgr, addr, _mg_event_handler); - if (NULL == nc) - { - EXLOGE("[core] rpc listener failed to bind at %s.\n", addr); - return false; - } - - nc->user_data = this; - - mg_set_protocol_http_websocket(nc); - - // ڴй¶ĵطÿԼ1KBڴ棩 - // DO NOT USE MULTITHREADING OF MG. - // cpq (one of the authors of MG) commented on 3 Feb: Multithreading support has been removed. 
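Stepping back to the base_record.h hunk above: moving the padding out of the two sub-structs and into TS_RECORD_HEADER makes the 64/512-byte on-disk layout explicit. A compile-time check of that invariant, assuming the teleport record-header definitions are in scope:

```cpp
#include <cstddef>   // offsetof

static_assert(offsetof(TS_RECORD_HEADER, basic) == 64,
              "header-info block plus _reserve1 must fill exactly 64 bytes");
static_assert(sizeof(TS_RECORD_HEADER) == 512,
              "the on-disk record header must stay exactly 512 bytes");
```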
- // https://github.com/cesanta/mongoose/commit/707b9ed2d6f177b3ad8787cb16a1bff90ddad992 - //mg_enable_multithreading(nc); - - return true; -} - -void TsHttpRpc::_mg_event_handler(struct mg_connection *nc, int ev, void *ev_data) -{ - struct http_message *hm = (struct http_message*)ev_data; - - TsHttpRpc* _this = (TsHttpRpc*)nc->user_data; - if (NULL == _this) - { - EXLOGE("[core] rpc invalid http request.\n"); - return; - } - - switch (ev) - { - case MG_EV_HTTP_REQUEST: - { - ex_astr ret_buf; - - ex_astr uri; - uri.assign(hm->uri.p, hm->uri.len); - - //EXLOGD("[core] rpc got request: %s\n", uri.c_str()); - - if (uri == "/rpc") - { - ex_astr method; - Json::Value json_param; - - ex_rv rv = _this->_parse_request(hm, method, json_param); - if (TPE_OK != rv) - { - EXLOGE("[core] rpc got invalid request.\n"); - _this->_create_json_ret(ret_buf, rv); - } - else - { - EXLOGD("[core] rpc got request method `%s`\n", method.c_str()); - _this->_process_request(method, json_param, ret_buf); - } - } - else - { - EXLOGE("[core] rpc got invalid request: not `rpc` uri.\n"); - _this->_create_json_ret(ret_buf, TPE_PARAM, "not a `rpc` request."); - } - - mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\nContent-Type: application/json\r\n\r\n%s", (int)ret_buf.size() - 1, &ret_buf[0]); - nc->flags |= MG_F_SEND_AND_CLOSE; - } - break; - default: - break; - } -} - -ex_rv TsHttpRpc::_parse_request(struct http_message* req, ex_astr& func_cmd, Json::Value& json_param) -{ - if (NULL == req) - return TPE_PARAM; - - bool is_get = true; - if (req->method.len == 3 && 0 == memcmp(req->method.p, "GET", req->method.len)) - is_get = true; - else if (req->method.len == 4 && 0 == memcmp(req->method.p, "POST", req->method.len)) - is_get = false; - else - return TPE_HTTP_METHOD; - - ex_astr json_str; - bool need_decode = false; - if (is_get) { - json_str.assign(req->query_string.p, req->query_string.len); - need_decode = true; - } - else { - json_str.assign(req->body.p, req->body.len); - if (json_str.length() > 0 && json_str[0] == '%') - need_decode = true; - } - - if (need_decode) { - // url-decode - int len = json_str.length() * 2; - ex_chars sztmp; - sztmp.resize(len); - memset(&sztmp[0], 0, len); - if (-1 == ts_url_decode(json_str.c_str(), json_str.length(), &sztmp[0], len, 0)) - return TPE_HTTP_URL_ENCODE; - - json_str = &sztmp[0]; - } - - if (0 == json_str.length()) - return TPE_PARAM; - - Json::Reader jreader; - - if (!jreader.parse(json_str.c_str(), json_param)) - return TPE_JSON_FORMAT; - - if (json_param.isArray()) - return TPE_PARAM; - - if (json_param["method"].isNull() || !json_param["method"].isString()) - return TPE_PARAM; - - func_cmd = json_param["method"].asCString(); - json_param = json_param["param"]; - - return TPE_OK; -} - -void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode, const Json::Value& jr_data) -{ - // أ {"code":errcode, "data":{jr_data}} - - Json::FastWriter jr_writer; - Json::Value jr_root; - - jr_root["code"] = errcode; - jr_root["data"] = jr_data; - buf = jr_writer.write(jr_root); -} - -void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode) -{ - // أ {"code":errcode} - - Json::FastWriter jr_writer; - Json::Value jr_root; - - jr_root["code"] = errcode; - buf = jr_writer.write(jr_root); -} - -void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode, const char* message) -{ - // أ {"code":errcode, "message":message} - - Json::FastWriter jr_writer; - Json::Value jr_root; - - jr_root["code"] = errcode; - jr_root["message"] = message; - buf = 
jr_writer.write(jr_root); -} - -void TsHttpRpc::_process_request(const ex_astr& func_cmd, const Json::Value& json_param, ex_astr& buf) -{ - if (func_cmd == "request_session") { - _rpc_func_request_session(json_param, buf); - } - else if (func_cmd == "kill_sessions") { - _rpc_func_kill_sessions(json_param, buf); - } - else if (func_cmd == "get_config") { - _rpc_func_get_config(json_param, buf); - } - else if (func_cmd == "set_config") { - _rpc_func_set_config(json_param, buf); - } - else if (func_cmd == "enc") { - _rpc_func_enc(json_param, buf); - } - else if (func_cmd == "exit") { - _rpc_func_exit(json_param, buf); - } - else { - EXLOGE("[core] rpc got unknown command: %s\n", func_cmd.c_str()); - _create_json_ret(buf, TPE_UNKNOWN_CMD); - } -} - -extern bool g_exit_flag; // ҪTS˳ı־ֹ̣ͣ߳ -void TsHttpRpc::_rpc_func_exit(const Json::Value& json_param, ex_astr& buf) -{ - // һȫ˳־ - g_exit_flag = true; - _create_json_ret(buf, TPE_OK); -} - -void TsHttpRpc::_rpc_func_get_config(const Json::Value& json_param, ex_astr& buf) -{ - Json::Value jr_data; - - ex_astr _replay_name; - ex_wstr2astr(g_env.m_replay_path, _replay_name); - jr_data["replay-path"] = _replay_name; - - jr_data["web-server-rpc"] = g_env.web_server_rpc; - - ex_astr _version; - ex_wstr2astr(TP_SERVER_VER, _version); - jr_data["version"] = _version; - - ExIniFile& ini = g_env.get_ini(); - ex_ini_sections& secs = ini.GetAllSections(); - ex_ini_sections::iterator it = secs.begin(); - for (; it != secs.end(); ++it) - { - if (it->first.length() > 9 && 0 == wcsncmp(it->first.c_str(), L"protocol-", 9)) - { - ex_wstr name; - name.assign(it->first, 9, it->first.length() - 9); - ex_astr _name; - ex_wstr2astr(name, _name); - - bool enabled = false; - it->second->GetBool(L"enabled", enabled, false); - - ex_wstr ip; - if (!it->second->GetStr(L"bind-ip", ip)) - continue; - ex_astr _ip; - ex_wstr2astr(ip, _ip); - - int port; - it->second->GetInt(L"bind-port", port, 52189); - - jr_data[_name.c_str()]["enable"] = enabled; - jr_data[_name.c_str()]["ip"] = _ip; - jr_data[_name.c_str()]["port"] = port; - } - } - - _create_json_ret(buf, TPE_OK, jr_data); -} - -void TsHttpRpc::_rpc_func_request_session(const Json::Value& json_param, ex_astr& buf) -{ - // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#request_session - - int conn_id = 0; - ex_rv rv = TPE_OK; - - if (json_param["conn_id"].isNull()) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - if (!json_param["conn_id"].isInt()) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - - conn_id = json_param["conn_id"].asInt(); - if (0 == conn_id) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - - TS_CONNECT_INFO* info = new TS_CONNECT_INFO; - if ((rv = ts_web_rpc_get_conn_info(conn_id, *info)) != TPE_OK) - { - _create_json_ret(buf, rv); - return; - } - -// info->ref_count = 0; -// info->ticket_start = ex_get_tick_count(); -// - // һsession-idڲظ - ex_astr sid; - if (!g_session_mgr.request_session(sid, info)) { - _create_json_ret(buf, TPE_FAILED); - return; - } - - EXLOGD("[core] rpc new session-id: %s\n", sid.c_str()); - - Json::Value jr_data; - jr_data["sid"] = sid; - - _create_json_ret(buf, TPE_OK, jr_data); -} - -void TsHttpRpc::_rpc_func_kill_sessions(const Json::Value& json_param, ex_astr& buf) { - /* - { - "sessions": ["0123456", "ABCDEF", ...] 
- } - */ - - if (json_param.isArray()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - if (json_param["sessions"].isNull() || !json_param["sessions"].isArray()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - Json::Value s = json_param["sessions"]; - int cnt = s.size(); - for (int i = 0; i < cnt; ++i) { - if (!s[i].isString()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - } - - EXLOGV("[core] try to kill %d sessions.\n", cnt); - ex_astr sp = s.toStyledString(); - g_tpp_mgr.kill_sessions(sp); - - _create_json_ret(buf, TPE_OK); -} - -void TsHttpRpc::_rpc_func_enc(const Json::Value& json_param, ex_astr& buf) -{ - // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#enc - // һַ [ p=plain-text, c=cipher-text ] - // : {"p":"need be encrypt"} - // ʾ: {"p":"this-is-a-password"} - // p: ַܵ - // أ - // dataе"c"Ǽܺĵbase64 - // ʾ: {"code":0, "data":{"c":"Mxs340a9r3fs+3sdf=="}} - // 󷵻أ {"code":1234} - - if (json_param.isArray()) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - - ex_astr plain_text; - - if (json_param["p"].isNull() || !json_param["p"].isString()) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - - plain_text = json_param["p"].asCString(); - if (plain_text.length() == 0) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - ex_astr cipher_text; - - if (!ts_db_field_encrypt(plain_text, cipher_text)) - { - _create_json_ret(buf, TPE_FAILED); - return; - } - - Json::Value jr_data; - jr_data["c"] = cipher_text; - _create_json_ret(buf, TPE_OK, jr_data); -} - -void TsHttpRpc::_rpc_func_set_config(const Json::Value& json_param, ex_astr& buf) -{ - // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#set_config - /* - { - "noop-timeout": 15 # Ӽ - } - */ - - if (json_param.isArray()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - if (json_param["noop_timeout"].isNull() || !json_param["noop_timeout"].isUInt()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - int noop_timeout = json_param["noop_timeout"].asUInt(); - EXLOGV("[core] set run-time config:\n"); - EXLOGV("[core] noop_timeout = %dm\n", noop_timeout); - - ex_astr sp = json_param.toStyledString(); - g_tpp_mgr.set_runtime_config(sp); - - _create_json_ret(buf, TPE_OK); -} - - -/* -void TsHttpRpc::_rpc_func_enc(const Json::Value& json_param, ex_astr& buf) -{ - // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#enc - // ַܶ [ p=plain-text, c=cipher-text ] - // : {"p":["need be encrypt", "plain to cipher"]} - // ʾ: {"p":["password-for-A"]} - // p: ַܵ - // أ - // dataе"c"Ǽܺĵbase64 - // ʾ: {"code":0, "data":{"c":["Mxs340a9r3fs+3sdf=="]}} - // 󷵻أ {"code":1234} - - if (json_param.isArray()) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - - ex_astr plain_text; - - if (json_param["p"].isNull() || !json_param["p"].isArray()) - { - _create_json_ret(buf, TPE_PARAM); - return; - } - - Json::Value c; - - Json::Value p = json_param["p"]; - int cnt = p.size(); - for (int i = 0; i < cnt; ++i) - { - if (!p[i].isString()) { - _create_json_ret(buf, TPE_PARAM); - return; - } - - ex_astr p_txt = p[i].asCString(); - if (p_txt.length() == 0) { - c["c"].append(""); - } - - ex_astr c_txt; - if (!ts_db_field_encrypt(p_txt, c_txt)) - { - _create_json_ret(buf, TPE_FAILED); - return; - } - - c["c"].append(c_txt); - } - - Json::Value jr_data; - jr_data["c"] = c; - _create_json_ret(buf, TPE_OK, jr_data); -} -*/ +#include "ts_http_rpc.h" +#include "ts_ver.h" +#include "ts_env.h" +#include "ts_session.h" +#include "ts_crypto.h" +#include "ts_web_rpc.h" +#include "tp_tpp_mgr.h" + 
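For reference, the wire format this RPC speaks: the caller POSTs `{"method":..., "param":{...}}` to `/rpc` and receives `{"code":..., "data":{...}}` back, as the `_parse_request`/`_create_json_ret` pair below implements. A client-side sketch of building the `enc` request with the same jsoncpp 1.9 writer API the rewritten file adopts (make_rpc_request is a hypothetical helper; the HTTP transport is omitted):

```cpp
#include <json/json.h>
#include <memory>
#include <sstream>
#include <string>

std::string make_rpc_request(const std::string &plain_text)
{
    Json::Value root;
    root["method"]     = "enc";
    root["param"]["p"] = plain_text;   // plain text to be encrypted

    Json::StreamWriterBuilder wb;
    std::unique_ptr<Json::StreamWriter> writer(wb.newStreamWriter());
    std::ostringstream os;
    writer->write(root, &os);
    return os.str();   // POST this body to the core's /rpc endpoint
}
// Success response: {"code":0, "data":{"c":"<base64 cipher text>"}}
```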
+extern TppManager g_tpp_mgr; + +#include +#include + + +#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W') +int ts_url_decode(const char *src, int src_len, char *dst, int dst_len, int is_form_url_encoded) +{ + int i, j, a, b; + + for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) + { + if (src[i] == '%') + { + if (i < src_len - 2 && isxdigit(*(const unsigned char *)(src + i + 1)) && + isxdigit(*(const unsigned char *)(src + i + 2))) { + a = tolower(*(const unsigned char *)(src + i + 1)); + b = tolower(*(const unsigned char *)(src + i + 2)); + dst[j] = (char)((HEXTOI(a) << 4) | HEXTOI(b)); + i += 2; + } + else + { + return -1; + } + } + else if (is_form_url_encoded && src[i] == '+') + { + dst[j] = ' '; + } + else + { + dst[j] = src[i]; + } + } + + dst[j] = '\0'; /* Null-terminate the destination */ + + return i >= src_len ? j : -1; +} + +TsHttpRpc::TsHttpRpc() : + ExThreadBase("http-rpc-thread") +{ + mg_mgr_init(&m_mg_mgr, NULL); +} + +TsHttpRpc::~TsHttpRpc() +{ + mg_mgr_free(&m_mg_mgr); +} + +void TsHttpRpc::_thread_loop(void) +{ + EXLOGI("[core] TeleportServer-RPC ready on %s:%d\n", m_host_ip.c_str(), m_host_port); + + while (!m_need_stop) + { + mg_mgr_poll(&m_mg_mgr, 500); + } + + EXLOGV("[core] rpc main loop end.\n"); +} + + +bool TsHttpRpc::init(void) +{ + struct mg_connection* nc = NULL; + + m_host_ip = g_env.rpc_bind_ip; + m_host_port = g_env.rpc_bind_port; + + char addr[128] = { 0 }; + // if (0 == strcmp(m_host_ip.c_str(), "127.0.0.1") || 0 == strcmp(m_host_ip.c_str(), "localhost")) + // ex_strformat(addr, 128, ":%d", m_host_port); + // else + // ex_strformat(addr, 128, "%s:%d", m_host_ip.c_str(), m_host_port); + if (0 == strcmp(m_host_ip.c_str(), "0.0.0.0")) + ex_strformat(addr, 128, ":%d", m_host_port); + else + ex_strformat(addr, 128, "%s:%d", m_host_ip.c_str(), m_host_port); + + nc = mg_bind(&m_mg_mgr, addr, _mg_event_handler); + if (NULL == nc) + { + EXLOGE("[core] rpc listener failed to bind at %s.\n", addr); + return false; + } + + nc->user_data = this; + + mg_set_protocol_http_websocket(nc); + + // 导致内存泄露的地方(每次请求约消耗1KB内存) + // DO NOT USE MULTITHREADING OF MG. + // cpq (one of the authors of MG) commented on 3 Feb: Multithreading support has been removed. 
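A worked example of the ts_url_decode defined above, with form-encoding enabled so '+' decodes to a space:

```cpp
#include <cstdio>

// Decodes "a%3D1+b%3D2" (11 bytes) into "a=1 b=2" and returns 7, the
// length of the decoded string; -1 would signal a malformed %xx escape.
void demo_url_decode()
{
    char out[64];
    int n = ts_url_decode("a%3D1+b%3D2", 11, out, sizeof(out), 1);
    std::printf("%d: %s\n", n, out);   // prints "7: a=1 b=2"
}
```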
+ // https://github.com/cesanta/mongoose/commit/707b9ed2d6f177b3ad8787cb16a1bff90ddad992
+ //mg_enable_multithreading(nc);
+
+ return true;
+}
+
+void TsHttpRpc::_mg_event_handler(struct mg_connection *nc, int ev, void *ev_data)
+{
+ struct http_message *hm = (struct http_message*)ev_data;
+
+ TsHttpRpc* _this = (TsHttpRpc*)nc->user_data;
+ if (NULL == _this)
+ {
+     EXLOGE("[core] rpc invalid http request.\n");
+     return;
+ }
+
+ switch (ev)
+ {
+ case MG_EV_HTTP_REQUEST:
+ {
+     ex_astr ret_buf;
+
+     ex_astr uri;
+     uri.assign(hm->uri.p, hm->uri.len);
+
+     //EXLOGD("[core] rpc got request: %s\n", uri.c_str());
+
+     if (uri == "/rpc")
+     {
+         ex_astr method;
+         Json::Value json_param;
+
+         ex_rv rv = _this->_parse_request(hm, method, json_param);
+         if (TPE_OK != rv)
+         {
+             EXLOGE("[core] rpc got invalid request.\n");
+             _this->_create_json_ret(ret_buf, rv);
+         }
+         else
+         {
+             EXLOGD("[core] rpc got request method `%s`\n", method.c_str());
+             _this->_process_request(method, json_param, ret_buf);
+         }
+     }
+     else
+     {
+         EXLOGE("[core] rpc got invalid request: not `rpc` uri.\n");
+         _this->_create_json_ret(ret_buf, TPE_PARAM, "not a `rpc` request.");
+     }
+
+     mg_printf(nc, "HTTP/1.0 200 OK\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\nContent-Type: application/json\r\n\r\n%s", (int)ret_buf.length(), &ret_buf[0]);
+     nc->flags |= MG_F_SEND_AND_CLOSE;
+ }
+ break;
+ default:
+     break;
+ }
+}
+
+ex_rv TsHttpRpc::_parse_request(struct http_message* req, ex_astr& func_cmd, Json::Value& json_param)
+{
+ if (NULL == req)
+     return TPE_PARAM;
+
+ bool is_get = true;
+ if (req->method.len == 3 && 0 == memcmp(req->method.p, "GET", req->method.len))
+     is_get = true;
+ else if (req->method.len == 4 && 0 == memcmp(req->method.p, "POST", req->method.len))
+     is_get = false;
+ else
+     return TPE_HTTP_METHOD;
+
+ ex_astr json_str;
+ bool need_decode = false;
+ if (is_get) {
+     json_str.assign(req->query_string.p, req->query_string.len);
+     need_decode = true;
+ }
+ else {
+     json_str.assign(req->body.p, req->body.len);
+     if (json_str.length() > 0 && json_str[0] == '%')
+         need_decode = true;
+ }
+
+ if (need_decode) {
+     // url-decode the parameters
+     int len = json_str.length() * 2;
+     ex_chars sztmp;
+     sztmp.resize(len);
+     memset(&sztmp[0], 0, len);
+     if (-1 == ts_url_decode(json_str.c_str(), json_str.length(), &sztmp[0], len, 0))
+         return TPE_HTTP_URL_ENCODE;
+
+     json_str = &sztmp[0];
+ }
+
+ if (0 == json_str.length())
+     return TPE_PARAM;
+
+ //Json::Reader jreader;
+ Json::CharReaderBuilder jcrb;
+ std::unique_ptr<Json::CharReader> const jreader(jcrb.newCharReader());
+ const char *str_json_begin = json_str.c_str();
+ ex_astr err;
+
+ //if (!jreader.parse(json_str.c_str(), json_param))
+ if (!jreader->parse(str_json_begin, str_json_begin + json_str.length(), &json_param, &err))
+     return TPE_JSON_FORMAT;
+
+ if (json_param.isArray())
+     return TPE_PARAM;
+
+ if (json_param["method"].isNull() || !json_param["method"].isString())
+     return TPE_PARAM;
+
+ func_cmd = json_param["method"].asCString();
+ json_param = json_param["param"];
+
+ return TPE_OK;
+}
+
+void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode, const Json::Value& jr_data)
+{
+ // returns: {"code":errcode, "data":{jr_data}}
+
+ //Json::FastWriter jr_writer;
+ Json::Value jr_root;
+ jr_root["code"] = errcode;
+ jr_root["data"] = jr_data;
+ //buf = jr_writer.write(jr_root);
+ Json::StreamWriterBuilder jwb;
+ std::unique_ptr<Json::StreamWriter> jwriter(jwb.newStreamWriter());
+ ex_aoss os;
+ jwriter->write(jr_root, &os);
+ buf = os.str();
+}
+
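The parse in `_parse_request()` and the writer in `_create_json_ret()` above are the jsoncpp 1.y replacements for the deprecated `Json::Reader`/`Json::FastWriter` pair; the same idiom recurs throughout this patch. Factored into helpers it looks as follows (a sketch only: `std::string` stands in for the project's `ex_astr`/`ex_aoss` aliases, and the patch itself deliberately inlines the pattern at every call site):

    #include <json/json.h>
    #include <memory>
    #include <string>

    // Parse UTF-8 text into `out`; returns false on malformed JSON.
    static bool parse_json(const std::string& text, Json::Value& out, std::string& err) {
        Json::CharReaderBuilder rb;
        const std::unique_ptr<Json::CharReader> reader(rb.newCharReader());
        return reader->parse(text.data(), text.data() + text.size(), &out, &err);
    }

    // Serialize `v`; Json::writeString wraps the StreamWriter + ostringstream steps.
    static std::string write_json(const Json::Value& v) {
        Json::StreamWriterBuilder wb;
        return Json::writeString(wb, v);
    }

`Json::writeString()` is simply a convenience wrapper around the `newStreamWriter()`/`write()` sequence spelled out in the surrounding functions.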
+ // 返回: {"code":errcode} + + //Json::FastWriter jr_writer; + Json::Value jr_root; + jr_root["code"] = errcode; + //buf = jr_writer.write(jr_root); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jr_root, &os); + buf = os.str(); +} + +void TsHttpRpc::_create_json_ret(ex_astr& buf, int errcode, const char* message) +{ + // 返回: {"code":errcode, "message":message} + + //Json::FastWriter jr_writer; + Json::Value jr_root; + jr_root["code"] = errcode; + jr_root["message"] = message; + //buf = jr_writer.write(jr_root); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jr_root, &os); + buf = os.str(); +} + +void TsHttpRpc::_process_request(const ex_astr& func_cmd, const Json::Value& json_param, ex_astr& buf) +{ + if (func_cmd == "request_session") { + _rpc_func_request_session(json_param, buf); + } + else if (func_cmd == "kill_sessions") { + _rpc_func_kill_sessions(json_param, buf); + } + else if (func_cmd == "get_config") { + _rpc_func_get_config(json_param, buf); + } + else if (func_cmd == "set_config") { + _rpc_func_set_config(json_param, buf); + } + else if (func_cmd == "enc") { + _rpc_func_enc(json_param, buf); + } + else if (func_cmd == "exit") { + _rpc_func_exit(json_param, buf); + } + else { + EXLOGE("[core] rpc got unknown command: %s\n", func_cmd.c_str()); + _create_json_ret(buf, TPE_UNKNOWN_CMD); + } +} + +extern bool g_exit_flag; // 要求整个TS退出的标志(用于停止各个工作线程) +void TsHttpRpc::_rpc_func_exit(const Json::Value& json_param, ex_astr& buf) +{ + // 设置一个全局退出标志 + g_exit_flag = true; + _create_json_ret(buf, TPE_OK); +} + +void TsHttpRpc::_rpc_func_get_config(const Json::Value& json_param, ex_astr& buf) +{ + Json::Value jr_data; + + ex_astr _replay_name; + ex_wstr2astr(g_env.m_replay_path, _replay_name); + jr_data["replay-path"] = _replay_name; + + jr_data["web-server-rpc"] = g_env.web_server_rpc; + + ex_astr _version; + ex_wstr2astr(TP_SERVER_VER, _version); + jr_data["version"] = _version; + + ExIniFile& ini = g_env.get_ini(); + ex_ini_sections& secs = ini.GetAllSections(); + ex_ini_sections::iterator it = secs.begin(); + for (; it != secs.end(); ++it) + { + if (it->first.length() > 9 && 0 == wcsncmp(it->first.c_str(), L"protocol-", 9)) + { + ex_wstr name; + name.assign(it->first, 9, it->first.length() - 9); + ex_astr _name; + ex_wstr2astr(name, _name); + + bool enabled = false; + it->second->GetBool(L"enabled", enabled, false); + + ex_wstr ip; + if (!it->second->GetStr(L"bind-ip", ip)) + continue; + ex_astr _ip; + ex_wstr2astr(ip, _ip); + + int port; + it->second->GetInt(L"bind-port", port, 52189); + + jr_data[_name.c_str()]["enable"] = enabled; + jr_data[_name.c_str()]["ip"] = _ip; + jr_data[_name.c_str()]["port"] = port; + } + } + + _create_json_ret(buf, TPE_OK, jr_data); +} + +void TsHttpRpc::_rpc_func_request_session(const Json::Value& json_param, ex_astr& buf) +{ + // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#request_session + + int conn_id = 0; + ex_rv rv = TPE_OK; + + if (json_param["conn_id"].isNull()) + { + _create_json_ret(buf, TPE_PARAM); + return; + } + if (!json_param["conn_id"].isInt()) + { + _create_json_ret(buf, TPE_PARAM); + return; + } + + conn_id = json_param["conn_id"].asInt(); + if (0 == conn_id) + { + _create_json_ret(buf, TPE_PARAM); + return; + } + + TS_CONNECT_INFO* info = new TS_CONNECT_INFO; + if ((rv = ts_web_rpc_get_conn_info(conn_id, *info)) != TPE_OK) + { + _create_json_ret(buf, rv); + return; + } + +// info->ref_count 
+void TsHttpRpc::_rpc_func_request_session(const Json::Value& json_param, ex_astr& buf)
+{
+ // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#request_session
+
+ int conn_id = 0;
+ ex_rv rv = TPE_OK;
+
+ if (json_param["conn_id"].isNull())
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+ if (!json_param["conn_id"].isInt())
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ conn_id = json_param["conn_id"].asInt();
+ if (0 == conn_id)
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ TS_CONNECT_INFO* info = new TS_CONNECT_INFO;
+ if ((rv = ts_web_rpc_get_conn_info(conn_id, *info)) != TPE_OK)
+ {
+     _create_json_ret(buf, rv);
+     return;
+ }
+
+// info->ref_count = 0;
+// info->ticket_start = ex_get_tick_count();
+//
+ // generate a session-id (duplicates are avoided internally)
+ ex_astr sid;
+ if (!g_session_mgr.request_session(sid, info)) {
+     _create_json_ret(buf, TPE_FAILED);
+     return;
+ }
+
+ EXLOGD("[core] rpc new session-id: %s\n", sid.c_str());
+
+ Json::Value jr_data;
+ jr_data["sid"] = sid;
+
+ _create_json_ret(buf, TPE_OK, jr_data);
+}
+
+void TsHttpRpc::_rpc_func_kill_sessions(const Json::Value& json_param, ex_astr& buf) {
+ /*
+ {
+    "sessions": ["0123456", "ABCDEF", ...]
+ }
+ */
+
+ if (json_param.isArray()) {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ if (json_param["sessions"].isNull() || !json_param["sessions"].isArray()) {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ Json::Value s = json_param["sessions"];
+ int cnt = s.size();
+ for (int i = 0; i < cnt; ++i) {
+     if (!s[i].isString()) {
+         _create_json_ret(buf, TPE_PARAM);
+         return;
+     }
+ }
+
+ EXLOGV("[core] try to kill %d sessions.\n", cnt);
+ ex_astr sp = s.toStyledString();
+ g_tpp_mgr.kill_sessions(sp);
+
+ _create_json_ret(buf, TPE_OK);
+}
+
+void TsHttpRpc::_rpc_func_enc(const Json::Value& json_param, ex_astr& buf)
+{
+ // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#enc
+ // encrypt a single string [ p=plain-text, c=cipher-text ]
+ // input: {"p":"need be encrypt"}
+ // example: {"p":"this-is-a-password"}
+ // p: the string to be encrypted
+ // returns:
+ //   "c" in the data field is the base64-encoded ciphertext
+ //   example: {"code":0, "data":{"c":"Mxs340a9r3fs+3sdf=="}}
+ // on error: {"code":1234}
+
+ if (json_param.isArray())
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ ex_astr plain_text;
+
+ if (json_param["p"].isNull() || !json_param["p"].isString())
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ plain_text = json_param["p"].asCString();
+ if (plain_text.length() == 0)
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+ ex_astr cipher_text;
+
+ if (!ts_db_field_encrypt(plain_text, cipher_text))
+ {
+     _create_json_ret(buf, TPE_FAILED);
+     return;
+ }
+
+ Json::Value jr_data;
+ jr_data["c"] = cipher_text;
+ _create_json_ret(buf, TPE_OK, jr_data);
+}
+
+void TsHttpRpc::_rpc_func_set_config(const Json::Value& json_param, ex_astr& buf)
+{
+ // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#set_config
+ /*
+ {
+    "noop-timeout": 15    # in minutes
+ }
+ */
+
+ if (json_param.isArray()) {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ if (json_param["noop_timeout"].isNull() || !json_param["noop_timeout"].isUInt()) {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ int noop_timeout = json_param["noop_timeout"].asUInt();
+ EXLOGV("[core] set run-time config:\n");
+ EXLOGV("[core]   noop_timeout = %dm\n", noop_timeout);
+
+ ex_astr sp = json_param.toStyledString();
+ g_tpp_mgr.set_runtime_config(sp);
+
+ _create_json_ret(buf, TPE_OK);
+}
+
+
+/*
+void TsHttpRpc::_rpc_func_enc(const Json::Value& json_param, ex_astr& buf)
+{
+ // https://github.com/tp4a/teleport/wiki/TELEPORT-CORE-JSON-RPC#enc
+ // encrypt multiple strings [ p=plain-text, c=cipher-text ]
+ // input: {"p":["need be encrypt", "plain to cipher"]}
+ // example: {"p":["password-for-A"]}
+ // p: the strings to be encrypted
+ // returns:
+ //   "c" in the data field holds the base64-encoded ciphertexts
+ //   example: {"code":0, "data":{"c":["Mxs340a9r3fs+3sdf=="]}}
+ // on error: {"code":1234}
+
+ if (json_param.isArray())
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ ex_astr plain_text;
+
+ if (json_param["p"].isNull() || !json_param["p"].isArray())
+ {
+     _create_json_ret(buf, TPE_PARAM);
+     return;
+ }
+
+ Json::Value c;
+
+ Json::Value p = json_param["p"];
+ int cnt = p.size();
+ for (int i = 0; i < cnt; ++i)
+ {
+     if
(!p[i].isString()) { + _create_json_ret(buf, TPE_PARAM); + return; + } + + ex_astr p_txt = p[i].asCString(); + if (p_txt.length() == 0) { + c["c"].append(""); + } + + ex_astr c_txt; + if (!ts_db_field_encrypt(p_txt, c_txt)) + { + _create_json_ret(buf, TPE_FAILED); + return; + } + + c["c"].append(c_txt); + } + + Json::Value jr_data; + jr_data["c"] = c; + _create_json_ret(buf, TPE_OK, jr_data); +} +*/ diff --git a/server/tp_core/core/ts_ver.h b/server/tp_core/core/ts_ver.h index 4da7b50..dda936f 100644 --- a/server/tp_core/core/ts_ver.h +++ b/server/tp_core/core/ts_ver.h @@ -1,6 +1,6 @@ #ifndef __TS_SERVER_VER_H__ #define __TS_SERVER_VER_H__ -#define TP_SERVER_VER L"3.3.0" +#define TP_SERVER_VER L"3.5.0" #endif // __TS_SERVER_VER_H__ diff --git a/server/tp_core/core/ts_web_rpc.cpp b/server/tp_core/core/ts_web_rpc.cpp index cf5b899..1ec813a 100644 --- a/server/tp_core/core/ts_web_rpc.cpp +++ b/server/tp_core/core/ts_web_rpc.cpp @@ -1,320 +1,359 @@ -#include "ts_web_rpc.h" -#include "ts_env.h" -#include "ts_crypto.h" -#include "ts_http_client.h" - -#include "../common/ts_const.h" - -#include -#include - -bool ts_web_rpc_register_core() -{ - Json::FastWriter json_writer; - Json::Value jreq; - jreq["method"] = "register_core"; - jreq["param"]["rpc"] = g_env.core_server_rpc; - - ex_astr json_param; - json_param = json_writer.write(jreq); - - ex_astr param; - ts_url_encode(json_param.c_str(), param); - - ex_astr url = g_env.web_server_rpc; - url += "?"; - url += param; - - ex_astr body; - return ts_http_get(url, body); -} - -int ts_web_rpc_get_conn_info(int conn_id, TS_CONNECT_INFO& info) -{ - Json::FastWriter json_writer; - Json::Value jreq; - jreq["method"] = "get_conn_info"; - jreq["param"]["conn_id"] = conn_id; - - ex_astr json_param; - json_param = json_writer.write(jreq); - - ex_astr param; - ts_url_encode(json_param.c_str(), param); - - ex_astr url = g_env.web_server_rpc; - url += "?"; - url += param; - - ex_astr body; - if (!ts_http_get(url, body)) - { - EXLOGE("[core] get conn info from web-server failed: can not connect to web-server.\n"); - return TPE_NETWORK; - } - if (body.length() == 0) { - EXLOGE("[core] get conn info from web-server failed: got nothing.\n"); - return TPE_NETWORK; - } - - Json::Reader jreader; - Json::Value jret; - - if (!jreader.parse(body.c_str(), jret)) - return TPE_PARAM; - if (!jret.isObject()) - return TPE_PARAM; - if (!jret["data"].isObject()) - return TPE_PARAM; - - Json::Value& _jret = jret["data"]; - - if(!_jret["user_id"].isInt()) - EXLOGE("connection info: need `user_id`.\n"); - if(!_jret["host_id"].isInt()) - EXLOGE("connection info: need `host_id`.\n"); - if(!_jret["acc_id"].isInt()) - EXLOGE("connection info: need `acc_id`.\n"); - if(!_jret["conn_port"].isInt()) - EXLOGE("connection info: need `conn_port`.\n"); - if(!_jret["protocol_type"].isInt()) - EXLOGE("connection info: need `protocol_type`.\n"); - if(!_jret["protocol_sub_type"].isInt()) - EXLOGE("connection info: need `protocol_sub_type`.\n"); - if(!_jret["auth_type"].isInt()) - EXLOGE("connection info: need `auth_type`.\n"); - if (!_jret["protocol_flag"].isUInt()) - EXLOGE("connection info: need `protocol_flag`.\n"); - if (!_jret["record_flag"].isUInt()) - EXLOGE("connection info: need `record_flag`.\n"); - if (!_jret["_enc"].isInt()) - EXLOGE("connection info: need `_enc`.\n"); - if(!_jret["user_username"].isString()) - EXLOGE("connection info: need `user_username`.\n"); - if(!_jret["host_ip"].isString()) - EXLOGE("connection info: need `host_ip`.\n"); - if(!_jret["conn_ip"].isString()) - 
EXLOGE("connection info: need `conn_ip`.\n"); - if(!_jret["client_ip"].isString()) - EXLOGE("connection info: need `client_ip`.\n"); - if(!_jret["acc_username"].isString()) - EXLOGE("connection info: need `acc_username`.\n"); - if(!_jret["acc_secret"].isString()) - EXLOGE("connection info: need `acc_secret`.\n"); - if(!_jret["username_prompt"].isString()) - EXLOGE("connection info: need `username_prompt`.\n"); - if(!_jret["password_prompt"].isString()) - EXLOGE("connection info: need `password_prompt`.\n"); - - if ( - !_jret["user_id"].isInt() - || !_jret["host_id"].isInt() - || !_jret["acc_id"].isInt() - || !_jret["conn_port"].isInt() - || !_jret["protocol_type"].isInt() - || !_jret["protocol_sub_type"].isInt() - || !_jret["auth_type"].isInt() - || !_jret["protocol_flag"].isUInt() - || !_jret["record_flag"].isUInt() - || !_jret["_enc"].isInt() - - || !_jret["user_username"].isString() - || !_jret["host_ip"].isString() - || !_jret["conn_ip"].isString() - || !_jret["client_ip"].isString() - || !_jret["acc_username"].isString() - || !_jret["acc_secret"].isString() - || !_jret["username_prompt"].isString() - || !_jret["password_prompt"].isString() - ) - { - EXLOGE("got connection info from web-server, but not all info valid.\n"); - return TPE_PARAM; - } - - int user_id; - int host_id; - int acc_id; - ex_astr user_username;// 뱾ӵû - ex_astr host_ip;// ԶIPֱģʽremote_host_ipͬ - ex_astr conn_ip;// ҪӵԶIPǶ˿ӳģʽΪ·IP - int conn_port;// ҪӵԶĶ˿ڣǶ˿ӳģʽΪ·Ķ˿ڣ - ex_astr client_ip; - ex_astr acc_username; // Զ˺ - ex_astr acc_secret;// Զ˺ŵ루˽Կ - ex_astr username_prompt; - ex_astr password_prompt; - int protocol_type = 0; - int protocol_sub_type = 0; - int auth_type = 0; - int protocol_flag = 0; - int record_flag = 0; - bool _enc; - - user_id = _jret["user_id"].asInt(); - host_id = _jret["host_id"].asInt(); - acc_id = _jret["acc_id"].asInt(); - user_username = _jret["user_username"].asString(); - host_ip = _jret["host_ip"].asString(); - conn_ip = _jret["conn_ip"].asString(); - conn_port = _jret["conn_port"].asInt(); - client_ip = _jret["client_ip"].asString(); - acc_username = _jret["acc_username"].asString(); - acc_secret = _jret["acc_secret"].asString(); - username_prompt = _jret["username_prompt"].asString(); - password_prompt = _jret["password_prompt"].asString(); - protocol_type = _jret["protocol_type"].asInt(); - protocol_sub_type = _jret["protocol_sub_type"].asInt(); - protocol_flag = _jret["protocol_flag"].asUInt(); - record_flag = _jret["record_flag"].asUInt(); - auth_type = _jret["auth_type"].asInt(); - _enc = _jret["_enc"].asBool(); - - - // һжϲǷϷ - // ע⣬account_idΪ-1ʾһβӡ - if (user_id <= 0 || host_id <= 0 - || user_username.length() == 0 - || host_ip.length() == 0 || conn_ip.length() == 0 || client_ip.length() == 0 - || conn_port <= 0 || conn_port >= 65535 - || acc_username.length() == 0 || acc_secret.length() == 0 - || !(protocol_type == TP_PROTOCOL_TYPE_RDP || protocol_type == TP_PROTOCOL_TYPE_SSH || protocol_type == TP_PROTOCOL_TYPE_TELNET) - || !(auth_type == TP_AUTH_TYPE_NONE || auth_type == TP_AUTH_TYPE_PASSWORD || auth_type == TP_AUTH_TYPE_PRIVATE_KEY) - ) - { - return TPE_PARAM; - } - - if (_enc) { - ex_astr _auth; - if (!ts_db_field_decrypt(acc_secret, _auth)) - return TPE_FAILED; - - acc_secret = _auth; - } - - info.user_id = user_id; - info.host_id = host_id; - info.acc_id = acc_id; - info.user_username = user_username; - info.host_ip = host_ip; - info.conn_ip = conn_ip; - info.conn_port = conn_port; - info.client_ip = client_ip; - info.acc_username = acc_username; - info.acc_secret = 
-
-bool ts_web_rpc_session_begin(TS_CONNECT_INFO& info, int& record_id)
-{
- Json::FastWriter json_writer;
- Json::Value jreq;
-
- jreq["method"] = "session_begin";
- jreq["param"]["sid"] = info.sid.c_str();
- jreq["param"]["user_id"] = info.user_id;
- jreq["param"]["host_id"] = info.host_id;
- jreq["param"]["acc_id"] = info.acc_id;
- jreq["param"]["user_username"] = info.user_username.c_str();
- jreq["param"]["acc_username"] = info.acc_username.c_str();
- jreq["param"]["host_ip"] = info.host_ip.c_str();
- jreq["param"]["conn_ip"] = info.conn_ip.c_str();
- jreq["param"]["client_ip"] = info.client_ip.c_str();
- //jreq["param"]["sys_type"] = info.sys_type;
- jreq["param"]["conn_port"] = info.conn_port;
- jreq["param"]["auth_type"] = info.auth_type;
- jreq["param"]["protocol_type"] = info.protocol_type;
- jreq["param"]["protocol_sub_type"] = info.protocol_sub_type;
-
- ex_astr json_param;
- json_param = json_writer.write(jreq);
-
- ex_astr param;
- ts_url_encode(json_param.c_str(), param);
-
- ex_astr url = g_env.web_server_rpc;
- url += "?";
- url += param;
-
- ex_astr body;
- if (!ts_http_get(url, body))
- {
-     // EXLOGV("request `rpc::session_begin` from web return: ");
-     // EXLOGV(body.c_str());
-     // EXLOGV("\n");
-     return false;
- }
-
- Json::Reader jreader;
- Json::Value jret;
-
- if (!jreader.parse(body.c_str(), jret))
-     return false;
- if (!jret.isObject())
-     return false;
- if (!jret["data"].isObject())
-     return false;
- if (!jret["data"]["rid"].isUInt())
-     return false;
-
- record_id = jret["data"]["rid"].asUInt();
-
- return true;
-}
-
-bool ts_web_rpc_session_update(int record_id, int protocol_sub_type, int state) {
- Json::FastWriter json_writer;
- Json::Value jreq;
- jreq["method"] = "session_update";
- jreq["param"]["rid"] = record_id;
- jreq["param"]["protocol_sub_type"] = protocol_sub_type;
- jreq["param"]["code"] = state;
-
- ex_astr json_param;
- json_param = json_writer.write(jreq);
-
- ex_astr param;
- ts_url_encode(json_param.c_str(), param);
-
- ex_astr url = g_env.web_server_rpc;
- url += "?";
- url += param;
-
- ex_astr body;
- return ts_http_get(url, body);
-}
-
-
-//session end
-bool ts_web_rpc_session_end(const char* sid, int record_id, int ret_code)
-{
- // TODO: decrement the ref-count of the sessions tied to the given sid (destroy when it reaches 0)
-
- Json::FastWriter json_writer;
- Json::Value jreq;
- jreq["method"] = "session_end";
- jreq["param"]["rid"] = record_id;
- jreq["param"]["code"] = ret_code;
-
- ex_astr json_param;
- json_param = json_writer.write(jreq);
-
- ex_astr param;
- ts_url_encode(json_param.c_str(), param);
-
- ex_astr url = g_env.web_server_rpc;
- url += "?";
- url += param;
-
- ex_astr body;
- return ts_http_get(url, body);
-}
+#include "ts_web_rpc.h"
+#include "ts_env.h"
+#include "ts_crypto.h"
+#include "ts_http_client.h"
+
+#include "../common/ts_const.h"
+
+#include
+#include
+
+bool ts_web_rpc_register_core()
+{
+ //Json::FastWriter json_writer;
+ Json::Value jreq;
+ jreq["method"] = "register_core";
+ jreq["param"]["rpc"] = g_env.core_server_rpc;
+
+ ex_astr json_param;
+ //json_param = json_writer.write(jreq);
+ Json::StreamWriterBuilder jwb;
+ std::unique_ptr<Json::StreamWriter> jwriter(jwb.newStreamWriter());
+ ex_aoss os;
+ jwriter->write(jreq, &os);
+ json_param = os.str();
+
+ ex_astr param;
+ ts_url_encode(json_param.c_str(),
param); + + ex_astr url = g_env.web_server_rpc; + url += "?"; + url += param; + + ex_astr body; + return ts_http_get(url, body); +} + +int ts_web_rpc_get_conn_info(int conn_id, TS_CONNECT_INFO& info) +{ + //Json::FastWriter json_writer; + Json::Value jreq; + jreq["method"] = "get_conn_info"; + jreq["param"]["conn_id"] = conn_id; + + ex_astr json_param; + //json_param = json_writer.write(jreq); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jreq, &os); + json_param = os.str(); + + ex_astr param; + ts_url_encode(json_param.c_str(), param); + + ex_astr url = g_env.web_server_rpc; + url += "?"; + url += param; + + ex_astr body; + if (!ts_http_get(url, body)) + { + EXLOGE("[core] get conn info from web-server failed: can not connect to web-server.\n"); + return TPE_NETWORK; + } + if (body.length() == 0) { + EXLOGE("[core] get conn info from web-server failed: got nothing.\n"); + return TPE_NETWORK; + } + + //Json::Reader jreader; + Json::Value jret; + + //if (!jreader.parse(body.c_str(), jret)) + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); + const char *str_json_begin = body.c_str(); + ex_astr err; + + //if (!jreader.parse(func_args.c_str(), jsRoot)) { + if (!jreader->parse(str_json_begin, str_json_begin + body.length(), &jret, &err)) + return TPE_PARAM; + if (!jret.isObject()) + return TPE_PARAM; + if (!jret["data"].isObject()) + return TPE_PARAM; + + Json::Value& _jret = jret["data"]; + + if(!_jret["user_id"].isInt()) + EXLOGE("connection info: need `user_id`.\n"); + if(!_jret["host_id"].isInt()) + EXLOGE("connection info: need `host_id`.\n"); + if(!_jret["acc_id"].isInt()) + EXLOGE("connection info: need `acc_id`.\n"); + if(!_jret["conn_port"].isInt()) + EXLOGE("connection info: need `conn_port`.\n"); + if(!_jret["protocol_type"].isInt()) + EXLOGE("connection info: need `protocol_type`.\n"); + if(!_jret["protocol_sub_type"].isInt()) + EXLOGE("connection info: need `protocol_sub_type`.\n"); + if(!_jret["auth_type"].isInt()) + EXLOGE("connection info: need `auth_type`.\n"); + if (!_jret["protocol_flag"].isUInt()) + EXLOGE("connection info: need `protocol_flag`.\n"); + if (!_jret["record_flag"].isUInt()) + EXLOGE("connection info: need `record_flag`.\n"); + if (!_jret["_enc"].isInt()) + EXLOGE("connection info: need `_enc`.\n"); + if(!_jret["user_username"].isString()) + EXLOGE("connection info: need `user_username`.\n"); + if(!_jret["host_ip"].isString()) + EXLOGE("connection info: need `host_ip`.\n"); + if(!_jret["conn_ip"].isString()) + EXLOGE("connection info: need `conn_ip`.\n"); + if(!_jret["client_ip"].isString()) + EXLOGE("connection info: need `client_ip`.\n"); + if(!_jret["acc_username"].isString()) + EXLOGE("connection info: need `acc_username`.\n"); + if(!_jret["acc_secret"].isString()) + EXLOGE("connection info: need `acc_secret`.\n"); + if(!_jret["username_prompt"].isString()) + EXLOGE("connection info: need `username_prompt`.\n"); + if(!_jret["password_prompt"].isString()) + EXLOGE("connection info: need `password_prompt`.\n"); + + if ( + !_jret["user_id"].isInt() + || !_jret["host_id"].isInt() + || !_jret["acc_id"].isInt() + || !_jret["conn_port"].isInt() + || !_jret["protocol_type"].isInt() + || !_jret["protocol_sub_type"].isInt() + || !_jret["auth_type"].isInt() + || !_jret["protocol_flag"].isUInt() + || !_jret["record_flag"].isUInt() + || !_jret["_enc"].isInt() + + || !_jret["user_username"].isString() + || !_jret["host_ip"].isString() + || !_jret["conn_ip"].isString() 
+ || !_jret["client_ip"].isString() + || !_jret["acc_username"].isString() + || !_jret["acc_secret"].isString() + || !_jret["username_prompt"].isString() + || !_jret["password_prompt"].isString() + ) + { + EXLOGE("got connection info from web-server, but not all info valid.\n"); + return TPE_PARAM; + } + + int user_id; + int host_id; + int acc_id; + ex_astr user_username;// 申请本次连接的用户名 + ex_astr host_ip;// 真正的远程主机IP(如果是直接连接模式,则与remote_host_ip相同) + ex_astr conn_ip;// 要连接的远程主机的IP(如果是端口映射模式,则为路由主机的IP) + int conn_port;// 要连接的远程主机的端口(如果是端口映射模式,则为路由主机的端口) + ex_astr client_ip; + ex_astr acc_username; // 远程主机的账号 + ex_astr acc_secret;// 远程主机账号的密码(或者私钥) + ex_astr username_prompt; + ex_astr password_prompt; + int protocol_type = 0; + int protocol_sub_type = 0; + int auth_type = 0; + int protocol_flag = 0; + int record_flag = 0; + bool _enc; + + user_id = _jret["user_id"].asInt(); + host_id = _jret["host_id"].asInt(); + acc_id = _jret["acc_id"].asInt(); + user_username = _jret["user_username"].asString(); + host_ip = _jret["host_ip"].asString(); + conn_ip = _jret["conn_ip"].asString(); + conn_port = _jret["conn_port"].asInt(); + client_ip = _jret["client_ip"].asString(); + acc_username = _jret["acc_username"].asString(); + acc_secret = _jret["acc_secret"].asString(); + username_prompt = _jret["username_prompt"].asString(); + password_prompt = _jret["password_prompt"].asString(); + protocol_type = _jret["protocol_type"].asInt(); + protocol_sub_type = _jret["protocol_sub_type"].asInt(); + protocol_flag = _jret["protocol_flag"].asUInt(); + record_flag = _jret["record_flag"].asUInt(); + auth_type = _jret["auth_type"].asInt(); + _enc = _jret["_enc"].asBool(); + + + // 进一步判断参数是否合法 + // 注意,account_id可以为-1,表示这是一次测试连接。 + if (user_id <= 0 || host_id <= 0 + || user_username.length() == 0 + || host_ip.length() == 0 || conn_ip.length() == 0 || client_ip.length() == 0 + || conn_port <= 0 || conn_port >= 65535 + || acc_username.length() == 0 || acc_secret.length() == 0 + || !(protocol_type == TP_PROTOCOL_TYPE_RDP || protocol_type == TP_PROTOCOL_TYPE_SSH || protocol_type == TP_PROTOCOL_TYPE_TELNET) + || !(auth_type == TP_AUTH_TYPE_NONE || auth_type == TP_AUTH_TYPE_PASSWORD || auth_type == TP_AUTH_TYPE_PRIVATE_KEY) + ) + { + return TPE_PARAM; + } + + if (_enc) { + ex_astr _auth; + if (!ts_db_field_decrypt(acc_secret, _auth)) + return TPE_FAILED; + + acc_secret = _auth; + } + + info.user_id = user_id; + info.host_id = host_id; + info.acc_id = acc_id; + info.user_username = user_username; + info.host_ip = host_ip; + info.conn_ip = conn_ip; + info.conn_port = conn_port; + info.client_ip = client_ip; + info.acc_username = acc_username; + info.acc_secret = acc_secret; + info.username_prompt = username_prompt; + info.password_prompt = password_prompt; + info.protocol_type = protocol_type; + info.protocol_sub_type = protocol_sub_type; + info.auth_type = auth_type; + info.protocol_flag = protocol_flag; + info.record_flag = record_flag; + + return TPE_OK; +} + +bool ts_web_rpc_session_begin(TS_CONNECT_INFO& info, int& record_id) +{ + //Json::FastWriter json_writer; + Json::Value jreq; + + jreq["method"] = "session_begin"; + jreq["param"]["sid"] = info.sid.c_str(); + jreq["param"]["user_id"] = info.user_id; + jreq["param"]["host_id"] = info.host_id; + jreq["param"]["acc_id"] = info.acc_id; + jreq["param"]["user_username"] = info.user_username.c_str(); + jreq["param"]["acc_username"] = info.acc_username.c_str(); + jreq["param"]["host_ip"] = info.host_ip.c_str(); + jreq["param"]["conn_ip"] = info.conn_ip.c_str(); + 
jreq["param"]["client_ip"] = info.client_ip.c_str(); + //jreq["param"]["sys_type"] = info.sys_type; + jreq["param"]["conn_port"] = info.conn_port; + jreq["param"]["auth_type"] = info.auth_type; + jreq["param"]["protocol_type"] = info.protocol_type; + jreq["param"]["protocol_sub_type"] = info.protocol_sub_type; + + ex_astr json_param; + //json_param = json_writer.write(jreq); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jreq, &os); + json_param = os.str(); + + ex_astr param; + ts_url_encode(json_param.c_str(), param); + + ex_astr url = g_env.web_server_rpc; + url += "?"; + url += param; + + ex_astr body; + if (!ts_http_get(url, body)) + { + // EXLOGV("request `rpc::session_begin` from web return: "); + // EXLOGV(body.c_str()); + // EXLOGV("\n"); + return false; + } + + //Json::Reader jreader; + Json::Value jret; + + //if (!jreader.parse(body.c_str(), jret)) + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); + const char *str_json_begin = body.c_str(); + ex_astr err; + + //if (!jreader.parse(func_args.c_str(), jsRoot)) { + if (!jreader->parse(str_json_begin, str_json_begin + body.length(), &jret, &err)) + return false; + if (!jret.isObject()) + return false; + if (!jret["data"].isObject()) + return false; + if (!jret["data"]["rid"].isUInt()) + return false; + + record_id = jret["data"]["rid"].asUInt(); + + return true; +} + +bool ts_web_rpc_session_update(int record_id, int protocol_sub_type, int state) { + //Json::FastWriter json_writer; + Json::Value jreq; + jreq["method"] = "session_update"; + jreq["param"]["rid"] = record_id; + jreq["param"]["protocol_sub_type"] = protocol_sub_type; + jreq["param"]["code"] = state; + + ex_astr json_param; + //json_param = json_writer.write(jreq); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jreq, &os); + json_param = os.str(); + + ex_astr param; + ts_url_encode(json_param.c_str(), param); + + ex_astr url = g_env.web_server_rpc; + url += "?"; + url += param; + + ex_astr body; + return ts_http_get(url, body); +} + + +//session 结束 +bool ts_web_rpc_session_end(const char* sid, int record_id, int ret_code) +{ + // TODO: 对指定的sid相关的会话的引用计数减一(但减到0时销毁) + + //Json::FastWriter json_writer; + Json::Value jreq; + jreq["method"] = "session_end"; + jreq["param"]["rid"] = record_id; + jreq["param"]["code"] = ret_code; + + ex_astr json_param; + //json_param = json_writer.write(jreq); + Json::StreamWriterBuilder jwb; + std::unique_ptr jwriter(jwb.newStreamWriter()); + ex_aoss os; + jwriter->write(jreq, &os); + json_param = os.str(); + + ex_astr param; + ts_url_encode(json_param.c_str(), param); + + ex_astr url = g_env.web_server_rpc; + url += "?"; + url += param; + + ex_astr body; + return ts_http_get(url, body); +} diff --git a/server/tp_core/protocol/ssh/CMakeLists.txt b/server/tp_core/protocol/ssh/CMakeLists.txt index 98f736b..4d122e0 100644 --- a/server/tp_core/protocol/ssh/CMakeLists.txt +++ b/server/tp_core/protocol/ssh/CMakeLists.txt @@ -8,8 +8,9 @@ MESSAGE(STATUS "=======================================================") include(../../../../CMakeCfg.txt) -set(CMAKE_CXX_FLAGS "-fPIC") -set(CMAKE_C_FLAGS "-fPIC") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") + aux_source_directory(. 
DIR_SSH_SRCS)
 aux_source_directory(../../common DIR_SSH_SRCS)
@@ -27,15 +28,18 @@ include_directories(
 )
 
 include_directories(
-    ${TP_EXTERNAL_RELEASE_DIR}/include
-)
-link_directories(${TP_EXTERNAL_RELEASE_DIR}/lib)
+    ${TP_EXTERNAL_RELEASE_DIR}/include
+    )
+link_directories(
+    ${TP_EXTERNAL_RELEASE_DIR}/lib
+    ${TP_EXTERNAL_RELEASE_DIR}/lib64
+    )
 
 add_library(tpssh SHARED ${DIR_SSH_SRCS})
 
 if (OS_LINUX)
-    target_link_libraries(tpssh ssh ssl crypto mbedx509 mbedtls mbedcrypto dl pthread rt util)
+    target_link_libraries(tpssh ssh ssl crypto mbedx509 mbedtls mbedcrypto z dl pthread rt util)
 elseif (OS_MACOS)
-    target_link_libraries(tpssh ssh ssl crypto mbedx509 mbedtls mbedcrypto dl pthread util)
+    target_link_libraries(tpssh ssh ssl crypto mbedx509 mbedtls mbedcrypto z dl pthread util)
 endif()
diff --git a/server/tp_core/protocol/ssh/ssh_proxy.cpp b/server/tp_core/protocol/ssh/ssh_proxy.cpp
index 3a8f4d4..330ccaf 100644
--- a/server/tp_core/protocol/ssh/ssh_proxy.cpp
+++ b/server/tp_core/protocol/ssh/ssh_proxy.cpp
@@ -86,7 +86,7 @@ void SshProxy::kill_sessions(const ex_astrs &sessions) {
 for (size_t i = 0; i < sessions.size(); ++i) {
 if (it->first->sid() == sessions[i]) {
 EXLOGW("[ssh] try to kill %s\n", sessions[i].c_str());
- it->first->check_noop_timeout(0, 0); //
+ it->first->check_noop_timeout(0, 0); // end immediately
 }
 }
 }
@@ -96,13 +96,15 @@ void SshProxy::_thread_loop() {
 EXLOGI("[ssh] TeleportServer-SSH ready on %s:%d\n", m_host_ip.c_str(), m_host_port);
 
 for (;;) {
- // note: the pointer from ssh_new() is freed inside this function if the stop flag is seen; otherwise it is handed to a SshSession instance, whose destructor releases it.
+ // note: the pointer from ssh_new() is freed inside this function if the stop flag is seen; otherwise it is handed to a SshSession instance, whose destructor releases it.
 ssh_session sess_to_client = ssh_new();
 
- // int flag = SSH_LOG_FUNCTIONS;
- // ssh_options_set(sess_to_client, SSH_OPTIONS_LOG_VERBOSITY, &flag);
+// #ifdef EX_DEBUG
+// int flag = SSH_LOG_FUNCTIONS;
+// ssh_options_set(sess_to_client, SSH_OPTIONS_LOG_VERBOSITY, &flag);
+// #endif
 
- ssh_set_blocking(sess_to_client, 1);
+ //ssh_set_blocking(sess_to_client, 1);
 
 struct sockaddr_storage sock_client;
 char ip[32] = {0};
@@ -145,14 +147,14 @@ void SshProxy::_thread_loop() {
 sess->start();
 }
 
- // wait for all worker threads to exit
+ // wait for all worker threads to exit
 //m_thread_mgr.stop_all();
 {
 ExThreadSmartLock locker(m_lock);
 ts_ssh_sessions::iterator it = m_sessions.begin();
 for (; it != m_sessions.end(); ++it) {
- it->first->check_noop_timeout(0, 0); //
+ it->first->check_noop_timeout(0, 0); // end immediately
 }
 }
 
@@ -173,7 +175,7 @@ void SshProxy::_on_stop() {
 ExThreadBase::_on_stop();
 
 if (m_is_running) {
- // a workaround to end the blocking listen: simply connect to it once.
+ // a workaround to end the blocking listen: simply connect to it once.
 ex_astr host_ip = m_host_ip;
 if (host_ip == "0.0.0.0")
 host_ip = "127.0.0.1";
@@ -185,14 +187,17 @@ void SshProxy::_on_stop() {
 int _timeout_us = 10;
 ssh_options_set(_session, SSH_OPTIONS_TIMEOUT, &_timeout_us);
 ssh_connect(_session);
+ ssh_disconnect(_session);
 ssh_free(_session);
+
+ ex_sleep_ms(100);
 }
 
 // m_thread_mgr.stop_all();
 }
 
 void SshProxy::session_finished(SshSession *sess) {
- // TODO: report this session's termination to the core module so the ref-count of the corresponding connection info can be decremented
+ // TODO: report this session's termination to the core module so the ref-count of the corresponding connection info can be decremented
 ExThreadSmartLock locker(m_lock);
 
 ts_ssh_sessions::iterator it = m_sessions.find(sess);
diff --git a/server/tp_core/protocol/ssh/ssh_recorder.cpp b/server/tp_core/protocol/ssh/ssh_recorder.cpp
index 9d66b86..0190493 100644
--- a/server/tp_core/protocol/ssh/ssh_recorder.cpp
+++ b/server/tp_core/protocol/ssh/ssh_recorder.cpp
@@ -1,4 +1,4 @@
-#include "ssh_recorder.h"
+#include "ssh_recorder.h"
 //#include 
 
 static ex_u8 TPP_RECORD_MAGIC[4] = {'T', 'P', 'P', 'R'};
@@ -8,7 +8,8 @@ TppSshRec::TppSshRec() {
 memset(&m_head, 0, sizeof(TS_RECORD_HEADER));
 memcpy((ex_u8 *) (&m_head.info.magic),
TPP_RECORD_MAGIC, sizeof(ex_u32)); - m_head.info.ver = 0x03; + m_head.info.ver = 0x04; + m_head.info.type = TS_TPPR_TYPE_SSH; m_header_changed = false; m_save_full_header = false; @@ -40,7 +41,7 @@ bool TppSshRec::_on_begin(const TPP_CONNECT_INFO *info) { } bool TppSshRec::_on_end() { - // ʣδдݣдļС + // 如果还有剩下未写入的数据,写入文件中。 save_record(); if (m_file_info != NULL) @@ -73,13 +74,14 @@ void TppSshRec::record(ex_u8 type, const ex_u8 *data, size_t size) { if (m_start_time > 0) { pkg.time_ms = (ex_u32) (ex_get_tick_count() - m_start_time); m_head.info.time_ms = pkg.time_ms; + m_header_changed = true; } m_cache.append((ex_u8 *) &pkg, sizeof(TS_RECORD_PKG)); m_cache.append(data, size); - m_head.info.packages++; - m_header_changed = true; + //m_head.info.packages++; + //m_header_changed = true; } void TppSshRec::record_win_size_startup(int width, int height) { @@ -95,7 +97,7 @@ void TppSshRec::record_win_size_change(int width, int height) { record(TS_RECORD_TYPE_SSH_TERM_SIZE, (ex_u8 *) &pkg, sizeof(TS_RECORD_WIN_SIZE)); } -// Ϊ¼طźʷܹӦֱ¼ĶӦʱ㣩¼ݰķʽ¼ʱƫƣǾʱ䡣 +// 为了录像回放和命令历史能够对应(比如点击命令直接跳到录像的对应时点),仿照录像数据包的方式记录相对时间偏移,而不是绝对时间。 void TppSshRec::record_command(int flag, const ex_astr &cmd) { char szTime[100] = {0}; #ifdef EX_OS_WIN32 diff --git a/server/tp_core/protocol/ssh/ssh_session.cpp b/server/tp_core/protocol/ssh/ssh_session.cpp index 0585442..8b35d89 100644 --- a/server/tp_core/protocol/ssh/ssh_session.cpp +++ b/server/tp_core/protocol/ssh/ssh_session.cpp @@ -129,7 +129,7 @@ void SshSession::_record_end(TP_SSH_CHANNEL_PAIR *cp) { if (cp->db_id > 0) { //EXLOGD("[ssh] [channel:%d] channel end with code: %d\n", cp->channel_id, cp->state); - // Ựûз״̬Ϊ¼´ֵ + // 如果会话过程中没有发生错误,则将其状态改为结束,否则记录下错误值 if (cp->state == TP_SESS_STAT_RUNNING || cp->state == TP_SESS_STAT_STARTED) cp->state = TP_SESS_STAT_END; @@ -257,7 +257,7 @@ void SshSession::_run(void) { int err = SSH_OK; - // ȫӣԿ + // 安全连接(密钥交换) err = ssh_handle_key_exchange(m_cli_session); if (err != SSH_OK) { EXLOGE("[ssh] key exchange with client failed: %s\n", ssh_get_error(m_cli_session)); @@ -275,7 +275,7 @@ void SshSession::_run(void) { return; } - // ֤һͨ + // 认证,并打开一个通道 while (!(m_is_logon && !m_channels.empty())) { if (m_have_error) break; @@ -296,7 +296,7 @@ void SshSession::_run(void) { EXLOGW("[ssh] authenticated and got a channel.\n"); - // ˫Ѿˣʼת + // 现在双方的连接已经建立好了,开始转发 ssh_event_add_session(event_loop, m_srv_session); do { //err = ssh_event_dopoll(event_loop, 5000); @@ -333,11 +333,11 @@ void SshSession::_run(void) { ssh_event_free(event_loop); - // һSSHv1һSSHv2ͬһevent_loopʱSSHv1ղݣŵѭʱSSHv2ò - // ԣSSHv1ԶӺ󣬵shell֮󣬾ͽһֶȡݵѭʹssh_event_dopoll()ˡ + // 如果一边是走SSHv1,另一边是SSHv2,放在同一个event_loop时,SSHv1会收不到数据,放到循环中时,SSHv2得不到数据 + // 所以,当SSHv1的远程主机连接后,到建立好shell环境之后,就进入另一种读取数据的循环,不再使用ssh_event_dopoll()了。 if (m_ssh_ver == 1) { - tp_channels::iterator it = m_channels.begin(); // SSHv1ֻܴһchannel + tp_channels::iterator it = m_channels.begin(); // SSHv1只能打开一个channel ssh_channel cli = (*it)->cli_channel; ssh_channel srv = (*it)->srv_channel; @@ -447,7 +447,7 @@ int SshSession::_on_auth_password_request(ssh_session session, const char *user, EXLOGV("[ssh] try to connect to real SSH server %s:%d\n", _this->m_conn_ip.c_str(), _this->m_conn_port); _this->m_srv_session = ssh_new(); - ssh_set_blocking(_this->m_srv_session, 1); +// ssh_set_blocking(_this->m_srv_session, 1); ssh_options_set(_this->m_srv_session, SSH_OPTIONS_HOST, _this->m_conn_ip.c_str()); int port = (int) _this->m_conn_port; @@ -460,12 +460,16 @@ int SshSession::_on_auth_password_request(ssh_session session, const char 
*user, // ssh_options_set(_this->m_srv_session, SSH_OPTIONS_LOG_VERBOSITY, &flag); //#endif + int _timeout_cli = 120; // 120 sec. + ssh_options_set(_this->m_cli_session, SSH_OPTIONS_TIMEOUT, &_timeout_cli); + if (_this->m_auth_type != TP_AUTH_TYPE_NONE) ssh_options_set(_this->m_srv_session, SSH_OPTIONS_USER, _this->m_acc_name.c_str()); - // default timeout is 10 seconds, it is too short for connect progress, so set it to 60 sec. - int _timeout = 60; // 60 sec. + // default timeout is 10 seconds, it is too short for connect progress, so set it to 120 sec. + // usually when sshd config to UseDNS. + int _timeout = 120; // 120 sec. ssh_options_set(_this->m_srv_session, SSH_OPTIONS_TIMEOUT, &_timeout); int rc = 0; @@ -477,14 +481,19 @@ int SshSession::_on_auth_password_request(ssh_session session, const char *user, return SSH_AUTH_ERROR; } + if(ssh_is_blocking(_this->m_cli_session)) + EXLOGD("[ssh] client session is blocking.\n"); + if(ssh_is_blocking(_this->m_srv_session)) + EXLOGD("[ssh] server session is blocking.\n"); + // once the server are connected, change the timeout back to default. - _timeout = 30; // in seconds. + _timeout = 120; // in seconds. ssh_options_set(_this->m_srv_session, SSH_OPTIONS_TIMEOUT, &_timeout); // get ssh version of host, v1 or v2 // TODO: libssh-0.8.5 does not support sshv1 anymore. _this->m_ssh_ver = ssh_get_version(_this->m_srv_session); - EXLOGW("[ssh] real host is SSHv%d\n", _this->m_ssh_ver); + //EXLOGW("[ssh] real host is SSHv%d\n", _this->m_ssh_ver); #if 0 // check supported auth type by host @@ -634,13 +643,13 @@ int SshSession::_on_auth_password_request(ssh_session session, const char *user, } ssh_channel SshSession::_on_new_channel_request(ssh_session session, void *userdata) { - // ͻ˳ԴһͨȻͨͨշݣ + // 客户端尝试打开一个通道(然后才能通过这个通道发控制命令或者收发数据) EXLOGV("[ssh] client open channel\n"); SshSession *_this = (SshSession *) userdata; - // TODO: ͻTPʹõSSHv2Э飬Ϊʼʱ֪ԶDzSSHv1 - // ˴˴ΪͻֱԶЩһֱʱSecureCRTĿ¡ỰܻΪΪӵSSHv1ԶӣǴͨ + // TODO: 客户端与TP连接使用的总是SSHv2协议,因为最开始连接时还不知道真正的远程主机是不是SSHv1。 + // 因此此处行为与客户端直连远程主机有些不一样。直连时,SecureCRT的克隆会话功能会因为以为连接的是SSHv1而自动重新连接,而不是打开新通道。 if (_this->m_ssh_ver == 1 && _this->m_channels.size() != 0) { EXLOGE("[ssh] SSH1 supports only one execution channel. One has already been opened.\n"); return NULL; @@ -653,7 +662,7 @@ ssh_channel SshSession::_on_new_channel_request(ssh_session session, void *userd } ssh_set_channel_callbacks(cli_channel, &_this->m_cli_channel_cb); - // ҲҪķһͨת + // 我们也要向真正的服务器申请打开一个通道,来进行转发 ssh_channel srv_channel = ssh_channel_new(_this->m_srv_session); if (srv_channel == NULL) { EXLOGE("[ssh] can not create channel for server.\n"); @@ -682,7 +691,7 @@ ssh_channel SshSession::_on_new_channel_request(ssh_session session, void *userd return NULL; } - // ͻ˺ͷ˵ͨ + // 将客户端和服务端的通道关联起来 { ExThreadSmartLock locker(_this->m_lock); _this->m_channels.push_back(cp); @@ -794,7 +803,7 @@ int SshSession::_on_client_channel_data(ssh_session session, ssh_channel channel SshSession *_this = (SshSession *) userdata; - // ǰ߳ڽշ˷صݣֱӷأŻٷʹݵ + // 当前线程正在接收服务端返回的数据,因此我们直接返回,这样紧跟着会重新再发送此数据的 if (_this->m_recving_from_srv) { // EXLOGD("recving from srv...try again later...\n"); return 0; @@ -815,14 +824,14 @@ int SshSession::_on_client_channel_data(ssh_session session, ssh_channel channel int _len = len; if (cp->type == TS_SSH_CHANNEL_TYPE_SHELL) { - // ȡֱʾʾ֮ǰͿͻݵˣ־¼ҡ + // 在收取服务端数据直到显示命令行提示符之前,不允许发送客户端数据到服务端,避免日志记录混乱。 if (!cp->server_ready) { _this->m_recving_from_cli = false; return 0; } - // Բִ֣ rz - // xxxx ûճıǽΪÿһзһݰ + // 不可以拆分!!否则执行 rz 命令会出错! 
+ // xxxx 如果用户复制粘贴多行文本,我们将其拆分为每一行发送一次数据包 // for (unsigned int i = 0; i < len; ++i) { // if (((ex_u8 *) data)[i] == 0x0d) { // _len = i + 1; @@ -889,7 +898,7 @@ int SshSession::_on_client_channel_subsystem_request(ssh_session session, ssh_ch cp->last_access_timestamp = (ex_u32) time(NULL); - // Ŀǰֻ֧SFTPϵͳ + // 目前只支持SFTP子系统 if (strcmp(subsystem, "sftp") != 0) { EXLOGE("[ssh] support `sftp` subsystem only, but got `%s`.\n", subsystem); cp->state = TP_SESS_STAT_ERR_UNSUPPORT_PROTOCOL; @@ -961,7 +970,7 @@ int SshSession::_on_server_channel_data(ssh_session session, ssh_channel channel int ret = 0; - // յһ˷صʱ֮ǰʾһЩԶϢ + // 收到第一包服务端返回的数据时,在输出数据之前显示一些自定义的信息 #if 1 if (!is_stderr && cp->is_first_server_data) { cp->is_first_server_data = false; @@ -988,13 +997,15 @@ int SshSession::_on_server_channel_data(ssh_session session, ssh_channel channel "\r\n"\ "%s\r\n"\ "Teleport SSH Bastion Server...\r\n"\ - " - teleport to %s:%d\r\n"\ + " - teleport to %s:%d [%d]\r\n"\ " - authroized by %s\r\n"\ "%s\r\n"\ "\r\n\r\n", line.c_str(), _this->m_conn_ip.c_str(), - _this->m_conn_port, auth_mode, + _this->m_conn_port, + cp->db_id, + auth_mode, line.c_str() ); @@ -1013,15 +1024,45 @@ int SshSession::_on_server_channel_data(ssh_session session, ssh_channel channel #endif #if 1 - // ֱתݵͻ - if (is_stderr) - ret = ssh_channel_write_stderr(cp->cli_channel, data, len); - else - ret = ssh_channel_write(cp->cli_channel, data, len); + //static int idx = 0; + + ssh_set_blocking(_this->m_cli_session, 0); + + int xx = 0; + for(xx = 0; xx < 10; ++xx) { + +// idx++; +// EXLOGD(">>>>> %d . %d\n", cp->db_id, idx); + + // 直接转发数据到客户端 + if (is_stderr) + ret = ssh_channel_write_stderr(cp->cli_channel, data, len); + else + ret = ssh_channel_write(cp->cli_channel, data, len); + +// EXLOGD("<<<<< %d . 
%d\n", cp->db_id, idx); + + if(ret == SSH_OK) { +// EXLOGD("ssh_channel_write() ok.\n"); + break; + } + else if(ret == SSH_AGAIN) { +// EXLOGD("ssh_channel_write() need again, %d.\n", xx); + ex_sleep_ms(500); + continue; + } + else { +// EXLOGD("ssh_channel_write() failed.\n"); + break; + } + } + + ssh_set_blocking(_this->m_cli_session, 1); + #else - // յķݰ \033]0;AABB\007 ݣͻ˻ݴ˸ı䴰ڱ - // Ҫ滻ⲿݣʹ֮ʾ \033]0;TP#ssh://remote-ip\007 ı⡣ - // ήһЩܣĿǰã˲ִ뱸á + // 分析收到的服务端数据包,如果包含类似 \033]0;AABB\007 这样的数据,客户端会根据此改变窗口标题 + // 我们需要替换这部分数据,使之显示类似 \033]0;TP#ssh://remote-ip\007 这样的标题。 + // 但是这样会降低一些性能,因此目前不启用,保留此部分代码备用。 if (is_stderr) { ret = ssh_channel_write_stderr(cp->cli_channel, data, len); } @@ -1038,7 +1079,7 @@ int SshSession::_on_server_channel_data(ssh_session session, ssh_channel channel { _end++; - // киıݣ⻻ΪҪ + // 这个包中含有改变标题的数据,将标题换为我们想要的 EXLOGD("-- found title\n"); size_t len_end = len - (_end - (const ex_u8*)data); MemBuffer mbuf; @@ -1060,7 +1101,7 @@ int SshSession::_on_server_channel_data(ssh_session session, ssh_channel channel if (ret == SSH_ERROR) break; if (ret == mbuf.size()) { - ret = len; // ʾѾеˡ + ret = len; // 表示我们已经处理了所有的数据了。 break; } else { @@ -1139,7 +1180,7 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e if (TP_SSH_CLIENT_SIDE == from) { if (len >= 2) { if (((ex_u8 *) data)[len - 1] == 0x0d) { - // ƸճһִУ¼־ļ + // 疑似复制粘贴多行命令一次性执行,将其记录到日志文件中 ex_astr str((const char *) data, len - 1); cp->rec.record_command(1, str); @@ -1148,13 +1189,13 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e } } - // ͻسʱʱִһҪݷ˷صݽнһж + // 客户端输入回车时,可能时执行了一条命令,需要根据服务端返回的数据进行进一步判断 cp->maybe_cmd = (data[len - 1] == 0x0d); // if (cp->maybe_cmd) // EXLOGD("[ssh] maybe cmd.\n"); - // ʱִtop£һĸ'q'˳ûسܻᵼº¼ʱصʾΪ - // ¼ˣҪųķʽǣͻ˵ĸյǿ 1b 5b xx xxͲ + // 有时在执行类似top命令的情况下,输入一个字母'q'就退出程序,没有输入回车,可能会导致后续记录命令时将返回的命令行提示符作为命令 + // 记录下来了,要避免这种情况,排除的方式是:客户端单个字母,后续服务端如果收到的是控制序列 1b 5b xx xx,就不计做命令。 cp->client_single_char = (len == 1 && isprint(data[0])); cp->process_srv = true; @@ -1193,15 +1234,15 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e case 0x4b: { // 'K' if (0 == esc_arg) { - // ɾ굽βַ + // 删除光标到行尾的字符串 cp->cmd_char_list.erase(cp->cmd_char_pos, cp->cmd_char_list.end()); cp->cmd_char_pos = cp->cmd_char_list.end(); } else if (1 == esc_arg) { - // ɾӿʼ괦ַ + // 删除从开始到光标处的字符串 cp->cmd_char_list.erase(cp->cmd_char_list.begin(), cp->cmd_char_pos); cp->cmd_char_pos = cp->cmd_char_list.end(); } else if (2 == esc_arg) { - // ɾ + // 删除整行 cp->cmd_char_list.clear(); cp->cmd_char_pos = cp->cmd_char_list.begin(); } @@ -1210,7 +1251,7 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e break; } case 0x43: {// ^[C - // + // 光标右移 if (esc_arg == 0) esc_arg = 1; for (int j = 0; j < esc_arg; ++j) { @@ -1221,7 +1262,7 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e break; } case 0x44: { // ^[D - // + // 光标左移 if (esc_arg == 0) esc_arg = 1; for (int j = 0; j < esc_arg; ++j) { @@ -1233,7 +1274,7 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e break; } - case 0x50: {// 'P' ɾַָ + case 0x50: {// 'P' 删除指定数量的字符 if (esc_arg == 0) esc_arg = 1; @@ -1245,7 +1286,7 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR *cp, int from, const e break; } - case 0x40: { // '@' ָĿհַ + case 0x40: { // '@' 插入指定数量的空白字符 if (esc_arg == 0) esc_arg = 1; for (int j = 0; j < esc_arg; ++j) @@ -1267,10 +1308,10 @@ void SshSession::_process_ssh_command(TP_SSH_CHANNEL_PAIR 
*cp, int from, const e switch (ch) { case 0x07: - // + // 响铃 break; case 0x08: { - // + // 光标左移 if (cp->cmd_char_pos != cp->cmd_char_list.begin()) cp->cmd_char_pos--; break; @@ -1343,10 +1384,10 @@ void SshSession::_process_sftp_command(TP_SSH_CHANNEL_PAIR *cp, const ex_u8 *dat // SFTP protocol: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13 //EXLOG_BIN(data, len, "[sftp] client channel data"); - // TODO: ݿͻ˵ͷ˵ķأԽһжûβļģдȵȣԼĽdzɹʧܡ - // ¼ʽ time-offset,flag,action,result,file-path,[file-path] - // УflagĿǰΪ0ԺԣΪ֤ssh-cmdʽһ£time-offset/action/result - // file-pathDZĶ󣬹Ϊ :ʵݣ磬 13:/root/abc.txt + // TODO: 根据客户端的请求和服务端的返回,可以进一步判断用户是如何操作文件的,比如读、写等等,以及操作的结果是成功还是失败。 + // 记录格式: time-offset,flag,action,result,file-path,[file-path] + // 其中,flag目前总是为0,可以忽略(为保证与ssh-cmd格式一致),time-offset/action/result 都是数字 + // file-path是被操作的对象,规格为 长度:实际内容,例如, 13:/root/abc.txt if (len < 9) @@ -1364,7 +1405,7 @@ void SshSession::_process_sftp_command(TP_SSH_CHANNEL_PAIR *cp, const ex_u8 *dat return; } - // Ҫ14ֽ + // 需要的数据至少14字节 // uint32 + byte + uint32 + (uint32 + char + ...) // pkg_len + cmd + req_id + string( length + content...) if (len < 14) @@ -1397,13 +1438,13 @@ void SshSession::_process_sftp_command(TP_SSH_CHANNEL_PAIR *cp, const ex_u8 *dat break; case 0x12: // 0x12 = 18 = SSH_FXP_RENAME - // renameаַ + // rename操作数据中包含两个字符串 str2_ptr = str1_ptr + str1_len + 4; str2_len = (int) ((str2_ptr[0] << 24) | (str2_ptr[1] << 16) | (str2_ptr[2] << 8) | str2_ptr[3]); break; case 0x15: // 0x15 = 21 = SSH_FXP_LINK - // linkаַǰµļбӵļ + // link操作数据中包含两个字符串,前者是新的链接文件名,后者是现有被链接的文件名 str2_ptr = str1_ptr + str1_len + 4; str2_len = (int) ((str2_ptr[0] << 24) | (str2_ptr[1] << 16) | (str2_ptr[2] << 8) | str2_ptr[3]); break; diff --git a/server/tp_core/protocol/ssh/stdafx.cpp b/server/tp_core/protocol/ssh/stdafx.cpp index 46afe00..faf90a3 100644 --- a/server/tp_core/protocol/ssh/stdafx.cpp +++ b/server/tp_core/protocol/ssh/stdafx.cpp @@ -1,4 +1,4 @@ -// stdafx.cpp : source file that includes just the standard includes +// stdafx.cpp : source file that includes just the standard includes // tpssh.pch will be the pre-compiled header // stdafx.obj will contain the pre-compiled type information @@ -11,11 +11,17 @@ #ifdef EX_OS_WIN32 # ifdef EX_DEBUG -# pragma comment(lib, "debug/ssh.lib") +# pragma comment(lib, "debug/ssh.lib") +# pragma comment(lib, "libcrypto32MTd.lib") +# pragma comment(lib, "libssl32MTd.lib") # else -# pragma comment(lib, "release/ssh.lib") +# pragma comment(lib, "release/ssh.lib") +# pragma comment(lib, "libcrypto32MT.lib") +# pragma comment(lib, "libssl32MT.lib") # endif -# pragma comment(lib, "libeay32.lib") +// # pragma comment(lib, "libcrypto.lib") +// # pragma comment(lib, "libeay32.lib") # pragma comment(lib, "ws2_32.lib") +# pragma comment(lib, "crypt32.lib") #endif diff --git a/server/tp_core/protocol/ssh/tpssh.cpp b/server/tp_core/protocol/ssh/tpssh.cpp index 5bfa7d1..a605635 100644 --- a/server/tp_core/protocol/ssh/tpssh.cpp +++ b/server/tp_core/protocol/ssh/tpssh.cpp @@ -1,4 +1,4 @@ -#include "ssh_proxy.h" +#include "ssh_proxy.h" #include "tpp_env.h" #include @@ -46,10 +46,15 @@ TPP_API void tpp_timer(void) { static ex_rv _set_runtime_config(const char* param) { Json::Value jp; - Json::Reader jreader; + //Json::Reader jreader; + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); + const char *str_json_begin = param; + ex_astr err; - if (!jreader.parse(param, jp)) - return TPE_JSON_FORMAT; + //if (!jreader.parse(param, jp)) + if (!jreader->parse(str_json_begin, param + 
strlen(param), &jp, &err)) + return TPE_JSON_FORMAT; if (!jp.isObject()) return TPE_PARAM; @@ -68,10 +73,16 @@ static ex_rv _set_runtime_config(const char* param) { static ex_rv _kill_sessions(const char* param) { Json::Value jp; - Json::Reader jreader; +// Json::Reader jreader; +// if (!jreader.parse(param, jp)) + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); + const char *str_json_begin = param; + ex_astr err; - if (!jreader.parse(param, jp)) - return TPE_JSON_FORMAT; + //if (!jreader.parse(param, jp)) + if (!jreader->parse(str_json_begin, param + strlen(param), &jp, &err)) + return TPE_JSON_FORMAT; if (!jp.isArray()) return TPE_PARAM; diff --git a/server/tp_core/protocol/ssh/tpssh.vs2017.vcxproj b/server/tp_core/protocol/ssh/tpssh.vs2017.vcxproj index c2c091e..cd5eb27 100644 --- a/server/tp_core/protocol/ssh/tpssh.vs2017.vcxproj +++ b/server/tp_core/protocol/ssh/tpssh.vs2017.vcxproj @@ -66,7 +66,7 @@ Windows Debug - ..\..\..\..\external\libssh\build\src\static;..\..\..\..\external\openssl\out32;%(AdditionalLibraryDirectories) + ..\..\..\..\external\libssh\build\src;..\..\..\..\external\openssl\lib\VC\static;%(AdditionalLibraryDirectories) @@ -86,7 +86,7 @@ true true true - ..\..\..\..\external\libssh\build\src\static;..\..\..\..\external\openssl\out32;%(AdditionalLibraryDirectories) + ..\..\..\..\external\libssh\build\src;..\..\..\..\external\openssl\lib\VC\static;%(AdditionalLibraryDirectories) diff --git a/server/tp_core/protocol/telnet/telnet_recorder.cpp b/server/tp_core/protocol/telnet/telnet_recorder.cpp index 985e896..c1700ee 100644 --- a/server/tp_core/protocol/telnet/telnet_recorder.cpp +++ b/server/tp_core/protocol/telnet/telnet_recorder.cpp @@ -8,7 +8,7 @@ TppTelnetRec::TppTelnetRec() memset(&m_head, 0, sizeof(TS_RECORD_HEADER)); memcpy((ex_u8*)(&m_head.info.magic), TPP_RECORD_MAGIC, sizeof(ex_u32)); - m_head.info.ver = 0x03; + m_head.info.ver = 0x04; m_header_changed = false; m_save_full_header = false; @@ -42,7 +42,7 @@ bool TppTelnetRec::_on_begin(const TPP_CONNECT_INFO* info) bool TppTelnetRec::_on_end() { - // ʣδдݣдļС + // 如果还有剩下未写入的数据,写入文件中。 save_record(); if (m_file_info != NULL) @@ -76,13 +76,14 @@ void TppTelnetRec::record(ex_u8 type, const ex_u8* data, size_t size) { pkg.time_ms = (ex_u32)(ex_get_tick_count() - m_start_time); m_head.info.time_ms = pkg.time_ms; + m_header_changed = true; } m_cache.append((ex_u8*)&pkg, sizeof(TS_RECORD_PKG)); m_cache.append(data, size); - m_head.info.packages++; - m_header_changed = true; + // m_head.info.packages++; + // m_header_changed = true; } // void TppTelnetRec::record_win_size(int width, int height) @@ -93,20 +94,20 @@ void TppTelnetRec::record(ex_u8 type, const ex_u8* data, size_t size) // m_header_changed = true; // } -void TppTelnetRec::record_win_size_startup(int width, int height) -{ - m_head.basic.width = (ex_u16)width; - m_head.basic.height = (ex_u16)height; - m_save_full_header = true; -} - -void TppTelnetRec::record_win_size_change(int width, int height) -{ - TS_RECORD_WIN_SIZE pkg = { 0 }; - pkg.width = (ex_u16)width; - pkg.height = (ex_u16)height; - record(TS_RECORD_TYPE_TELNET_TERM_SIZE, (ex_u8*)&pkg, sizeof(TS_RECORD_WIN_SIZE)); -} +void TppTelnetRec::record_win_size_startup(int width, int height) +{ + m_head.basic.width = (ex_u16)width; + m_head.basic.height = (ex_u16)height; + m_save_full_header = true; +} + +void TppTelnetRec::record_win_size_change(int width, int height) +{ + TS_RECORD_WIN_SIZE pkg = { 0 }; + pkg.width = (ex_u16)width; + pkg.height = (ex_u16)height; + 
record(TS_RECORD_TYPE_TELNET_TERM_SIZE, (ex_u8*)&pkg, sizeof(TS_RECORD_WIN_SIZE)); +} bool TppTelnetRec::_save_to_info_file() { if (!m_header_changed) diff --git a/server/tp_core/protocol/telnet/tptelnet.cpp b/server/tp_core/protocol/telnet/tptelnet.cpp index 70cf6ed..4ec8976 100644 --- a/server/tp_core/protocol/telnet/tptelnet.cpp +++ b/server/tp_core/protocol/telnet/tptelnet.cpp @@ -1,60 +1,66 @@ -#include "telnet_proxy.h" -#include "tpp_env.h" - -#include -#include - -TPP_API ex_rv tpp_init(TPP_INIT_ARGS* init_args) -{ - if (!g_telnet_env.init(init_args)) - return TPE_FAILED; - - return 0; -} - -TPP_API ex_rv tpp_start(void) -{ - if (!g_telnet_proxy.init()) - return TPE_FAILED; - if (!g_telnet_proxy.start()) - return TPE_FAILED; - - return 0; -} - -TPP_API ex_rv tpp_stop(void) -{ - g_telnet_proxy.stop(); - return 0; -} - -TPP_API void tpp_timer(void) { - // be called per one second. - g_telnet_proxy.timer(); -} - +#include "telnet_proxy.h" +#include "tpp_env.h" + +#include +#include + +TPP_API ex_rv tpp_init(TPP_INIT_ARGS* init_args) +{ + if (!g_telnet_env.init(init_args)) + return TPE_FAILED; + + return 0; +} + +TPP_API ex_rv tpp_start(void) +{ + if (!g_telnet_proxy.init()) + return TPE_FAILED; + if (!g_telnet_proxy.start()) + return TPE_FAILED; + + return 0; +} + +TPP_API ex_rv tpp_stop(void) +{ + g_telnet_proxy.stop(); + return 0; +} + +TPP_API void tpp_timer(void) { + // be called per one second. + g_telnet_proxy.timer(); +} + + +// TPP_API void tpp_set_cfg(TPP_SET_CFG_ARGS* cfg_args) { +// g_telnet_proxy.set_cfg(cfg_args); +// } -// TPP_API void tpp_set_cfg(TPP_SET_CFG_ARGS* cfg_args) { -// g_telnet_proxy.set_cfg(cfg_args); -// } - static ex_rv _set_runtime_config(const char* param) { - Json::Value jp; - Json::Reader jreader; +// Json::Value jp; +// Json::Reader jreader; +// +// if (!jreader.parse(param, jp)) + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); - if (!jreader.parse(param, jp)) - return TPE_JSON_FORMAT; + ex_astr err; + Json::Value jp; + if (!jreader->parse(param, param + strlen(param), &jp, &err)) + return TPE_JSON_FORMAT; if (!jp.isObject()) return TPE_PARAM; - if (jp["noop_timeout"].isNull() || !jp["noop_timeout"].isUInt()) - return TPE_PARAM; - - ex_u32 noop_timeout = jp["noop_timeout"].asUInt(); - if (noop_timeout == 0) - return TPE_PARAM; + if (jp["noop_timeout"].isNull() || !jp["noop_timeout"].isUInt()) + return TPE_PARAM; + + ex_u32 noop_timeout = jp["noop_timeout"].asUInt(); + if (noop_timeout == 0) + return TPE_PARAM; g_telnet_proxy.set_cfg(noop_timeout * 60); @@ -62,25 +68,31 @@ static ex_rv _set_runtime_config(const char* param) { } static ex_rv _kill_sessions(const char* param) { - Json::Value jp; - Json::Reader jreader; +// Json::Value jp; +// Json::Reader jreader; +// +// if (!jreader.parse(param, jp)) + Json::CharReaderBuilder jcrb; + std::unique_ptr const jreader(jcrb.newCharReader()); - if (!jreader.parse(param, jp)) - return TPE_JSON_FORMAT; + ex_astr err; + Json::Value jp; + if (!jreader->parse(param, param + strlen(param), &jp, &err)) + return TPE_JSON_FORMAT; if (!jp.isArray()) return TPE_PARAM; ex_astrs ss; - int cnt = jp.size(); - for (int i = 0; i < cnt; ++i) - { - if (!jp[i].isString()) { - return TPE_PARAM; - } - - ss.push_back(jp[i].asString()); - } + int cnt = jp.size(); + for (int i = 0; i < cnt; ++i) + { + if (!jp[i].isString()) { + return TPE_PARAM; + } + + ss.push_back(jp[i].asString()); + } g_telnet_proxy.kill_sessions(ss); diff --git a/server/tp_core/testssh/CMakeLists.txt 
b/server/tp_core/testssh/CMakeLists.txt index 4a23a43..c5fd10f 100644 --- a/server/tp_core/testssh/CMakeLists.txt +++ b/server/tp_core/testssh/CMakeLists.txt @@ -17,14 +17,17 @@ include_directories( include_directories( ${TP_EXTERNAL_RELEASE_DIR}/include ) -link_directories(${TP_EXTERNAL_RELEASE_DIR}/lib) +link_directories( + ${TP_EXTERNAL_RELEASE_DIR}/lib + ${TP_EXTERNAL_RELEASE_DIR}/lib64 + ) add_executable(testssh ${DIR_SRCS}) if (OS_LINUX) set(CMAKE_EXE_LINKER_FLAGS "-export-dynamic") - target_link_libraries(testssh ssh ssl crypto dl pthread rt util) + target_link_libraries(testssh ssh ssl z crypto dl pthread rt util) elseif (OS_MACOS) - target_link_libraries(testssh ssh ssl crypto dl pthread util) + target_link_libraries(testssh ssh ssl z crypto dl pthread util) endif () diff --git a/server/tp_core/testssh/stdafx.cpp b/server/tp_core/testssh/stdafx.cpp index 846b803..68931a3 100644 --- a/server/tp_core/testssh/stdafx.cpp +++ b/server/tp_core/testssh/stdafx.cpp @@ -1,17 +1,24 @@ -// stdafx.cpp : source file that includes just the standard includes -// testssh.pch will be the pre-compiled header -// stdafx.obj will contain the pre-compiled type information - -#include "stdafx.h" - -// TODO: reference any additional headers you need in STDAFX.H -// and not in this file - -#ifdef _DEBUG -# pragma comment(lib, "debug/ssh.lib") -#else -# pragma comment(lib, "release/ssh.lib") -#endif -#pragma comment(lib, "libeay32.lib") -#pragma comment(lib, "ws2_32.lib") - +// stdafx.cpp : source file that includes just the standard includes +// testssh.pch will be the pre-compiled header +// stdafx.obj will contain the pre-compiled type information + +#include "stdafx.h" +#include + +// TODO: reference any additional headers you need in STDAFX.H +// and not in this file + +#ifdef _DEBUG +# pragma comment(lib, "debug/ssh.lib") +# pragma comment(lib, "libcrypto32MTd.lib") +# pragma comment(lib, "libssl32MTd.lib") +#else +# pragma comment(lib, "release/ssh.lib") +# pragma comment(lib, "libcrypto32MT.lib") +# pragma comment(lib, "libssl32MT.lib") +#endif +// #pragma comment(lib, "libeay32.lib") +// #pragma comment(lib, "libcrypto.lib") +#pragma comment(lib, "ws2_32.lib") +#pragma comment(lib, "crypt32.lib") + diff --git a/server/tp_core/testssh/testssh.cpp b/server/tp_core/testssh/testssh.cpp index 4d8c8a4..5a6a40b 100644 --- a/server/tp_core/testssh/testssh.cpp +++ b/server/tp_core/testssh/testssh.cpp @@ -1,163 +1,176 @@ -// testssh.cpp : Defines the entry point for the console application. -// - -//#include "stdafx.h" - -#include -#include - -void show_usage() { - printf("Usage:\n"); - printf(" testssh USERNAME PASSWORD IP PORT\n"); -} - -int main(int argc, char** argv) -{ - if (argc != 5) { - show_usage(); - return -1; - } - - ssh_init(); - - ssh_session sess = ssh_new(); - ssh_set_blocking(sess, 1); - - char* username = argv[1]; - char* password = argv[2]; - - char* ip = argv[3]; - ssh_options_set(sess, SSH_OPTIONS_HOST, ip); - - int port = atoi(argv[4]); - ssh_options_set(sess, SSH_OPTIONS_PORT, &port); - - int flag = SSH_LOG_FUNCTIONS; - ssh_options_set(sess, SSH_OPTIONS_LOG_VERBOSITY, &flag); - - int val = 0; - ssh_options_set(sess, SSH_OPTIONS_STRICTHOSTKEYCHECK, &val); - - ssh_options_set(sess, SSH_OPTIONS_USER, username); - - int _timeout = 60; // 60 sec. - ssh_options_set(sess, SSH_OPTIONS_TIMEOUT, &_timeout); - - // connect to real SSH host. - int rc = 0; - rc = ssh_connect(sess); - if (rc != SSH_OK) { - printf("[ERROR] can not connect to SSH server %s:%d. 
[%d] %s\n", ip, port, rc, ssh_get_error(sess)); - ssh_free(sess); - return -1; - } - - _timeout = 10; // 60 sec. - ssh_options_set(sess, SSH_OPTIONS_TIMEOUT, &_timeout); - - // get version of SSH server. - int ver = ssh_get_version(sess); - printf("[INFO] host is SSHv%d\n", ver); - - // get supported auth-type of SSH server. - //ssh_userauth_none(sess, username); - rc = ssh_userauth_none(sess, NULL); - if (rc == SSH_AUTH_ERROR) { - printf("[ERROR] can not got auth type supported by SSH server.\n"); - ssh_free(sess); - return -1; - } - - int auth_methods = ssh_userauth_list(sess, username); - printf("[INFO] supported auth-type: 0x%08x\n", auth_methods); - - // get banner. - const char* banner = ssh_get_issue_banner(sess); - if (banner != NULL) { - printf("[INFO] server issue banner: %s\n", banner); - } - - // try auth. - bool ok = false; - int retry_count = 0; - - // first try interactive login mode if server allow. - if (!ok && (auth_methods & SSH_AUTH_METHOD_INTERACTIVE) == SSH_AUTH_METHOD_INTERACTIVE) { - retry_count = 0; - rc = ssh_userauth_kbdint(sess, NULL, NULL); - for (;;) { - if (rc == SSH_AUTH_SUCCESS) { - ok = true; - break; - } - - if (rc == SSH_AUTH_AGAIN) { - retry_count += 1; - if (retry_count >= 5) - break; - ex_sleep_ms(500); - // Sleep(500); - rc = ssh_userauth_kbdint(sess, NULL, NULL); - continue; - } - - if (rc != SSH_AUTH_INFO) - break; - - int nprompts = ssh_userauth_kbdint_getnprompts(sess); - if (0 == nprompts) { - rc = ssh_userauth_kbdint(sess, NULL, NULL); - continue; - } - - for (int iprompt = 0; iprompt < nprompts; ++iprompt) { - char echo = 0; - const char *prompt = ssh_userauth_kbdint_getprompt(sess, iprompt, &echo); - printf("[INFO] interactive login prompt: %s\n", prompt); - - rc = ssh_userauth_kbdint_setanswer(sess, iprompt, password); - if (rc < 0) { - printf("[ERROR] invalid password for interactive mode to login to SSH server.\n"); - ssh_free(sess); - return -1; - } - } - - rc = ssh_userauth_kbdint(sess, NULL, NULL); - } - } - - // and then try password login mode if server allow. - if (!ok && (auth_methods & SSH_AUTH_METHOD_PASSWORD) == SSH_AUTH_METHOD_PASSWORD) { - retry_count = 0; - rc = ssh_userauth_password(sess, NULL, password); - for (;;) { - if (rc == SSH_AUTH_AGAIN) { - retry_count += 1; - if (retry_count >= 3) - break; - ex_sleep_ms(100); - // Sleep(100); - rc = ssh_userauth_password(sess, NULL, password); - continue; - } - if (rc == SSH_AUTH_SUCCESS) { - ok = true; - printf("[INFO] login with password mode OK.\n"); - break; - } else { - printf("[ERROR] failed to login with password mode, got %d.\n", rc); - break; - } - } - } - - if (!ok) { - printf("[ERROR] can not use password mode or interactive mode to login to SSH server.\n"); - } - - ssh_disconnect(sess); - ssh_free(sess); - return 0; -} - +// testssh.cpp : Defines the entry point for the console application. 
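// One libssh detail worth calling out before the listing: ssh_options_set()
// takes a void*, so integer-valued options must be passed by address, while
// string options take the char* itself. A short sketch of the pattern used
// throughout main() below (the values are placeholders):
//
//   int port = 22;                                        // must be an lvalue
//   ssh_options_set(sess, SSH_OPTIONS_PORT, &port);       // address of the int
//   ssh_options_set(sess, SSH_OPTIONS_HOST, "127.0.0.1"); // string pointer
//
// The code below follows this for SSH_OPTIONS_PORT, SSH_OPTIONS_TIMEOUT,
// SSH_OPTIONS_LOG_VERBOSITY and SSH_OPTIONS_STRICTHOSTKEYCHECK.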
+// + +#ifdef _WIN32 +# include "stdafx.h" +#endif + +#include +#include + +void show_usage() { + printf("Usage:\n"); + printf(" testssh USERNAME PASSWORD IP PORT\n"); +} + +int main(int argc, char** argv) +{ + if (argc != 5) { + show_usage(); + return -1; + } + + ssh_init(); + + ssh_session sess = ssh_new(); + ssh_set_blocking(sess, 1); + + char* username = argv[1]; + char* password = argv[2]; + + char* ip = argv[3]; + ssh_options_set(sess, SSH_OPTIONS_HOST, ip); + + int port = atoi(argv[4]); + ssh_options_set(sess, SSH_OPTIONS_PORT, &port); + + int flag = SSH_LOG_FUNCTIONS; + ssh_options_set(sess, SSH_OPTIONS_LOG_VERBOSITY, &flag); + + int val = 0; + ssh_options_set(sess, SSH_OPTIONS_STRICTHOSTKEYCHECK, &val); + + ssh_options_set(sess, SSH_OPTIONS_USER, username); + + int _timeout = 120; // 120 sec. + ssh_options_set(sess, SSH_OPTIONS_TIMEOUT, &_timeout); + + // connect to real SSH host. + int rc = 0; + rc = ssh_connect(sess); + if (rc != SSH_OK) { + printf("[ERROR] can not connect to SSH server %s:%d. [%d] %s\n", ip, port, rc, ssh_get_error(sess)); + ssh_free(sess); + return -1; + } + + _timeout = 120; // 120 sec. + ssh_options_set(sess, SSH_OPTIONS_TIMEOUT, &_timeout); + + // get version of SSH server. + int ver = ssh_get_version(sess); + printf("[INFO] host is SSHv%d\n", ver); + + // get supported auth-type of SSH server. + //ssh_userauth_none(sess, username); + rc = ssh_userauth_none(sess, NULL); + if (rc == SSH_AUTH_ERROR) { + printf("[ERROR] can not get auth type supported by SSH server.\n"); + ssh_free(sess); + return -1; + } + + int auth_methods = ssh_userauth_list(sess, username); + printf("[INFO] supported auth-type: 0x%08x\n", auth_methods); + if(auth_methods == SSH_AUTH_METHOD_UNKNOWN) { +// auth_methods = SSH_AUTH_METHOD_PASSWORD|SSH_AUTH_METHOD_INTERACTIVE; +// printf("[WRN] unknown auth-type, try PASSWORD and INTERACTIVE\n"); + auth_methods = SSH_AUTH_METHOD_PASSWORD; + printf("[WRN] unknown auth-type, try PASSWORD mode.\n"); + } + + // get banner. + const char* banner = ssh_get_issue_banner(sess); + if (banner != NULL) { + printf("[INFO] server issue banner: %s\n", banner); + } + + // try auth. + bool ok = false; + int retry_count = 0; + + // first try interactive login mode if server allow. + if (!ok && (auth_methods & SSH_AUTH_METHOD_INTERACTIVE) == SSH_AUTH_METHOD_INTERACTIVE) { + retry_count = 0; + rc = ssh_userauth_kbdint(sess, NULL, NULL); + for (;;) { + if (rc == SSH_AUTH_SUCCESS) { + ok = true; + break; + } + + if (rc == SSH_AUTH_AGAIN) { + retry_count += 1; + if (retry_count >= 5) + break; + ex_sleep_ms(500); + // Sleep(500); + rc = ssh_userauth_kbdint(sess, NULL, NULL); + continue; + } + + if (rc != SSH_AUTH_INFO) + break; + + int nprompts = ssh_userauth_kbdint_getnprompts(sess); + if (0 == nprompts) { + rc = ssh_userauth_kbdint(sess, NULL, NULL); + continue; + } + + for (int iprompt = 0; iprompt < nprompts; ++iprompt) { + char echo = 0; + const char *prompt = ssh_userauth_kbdint_getprompt(sess, iprompt, &echo); + printf("[INFO] interactive login prompt: %s\n", prompt); + + rc = ssh_userauth_kbdint_setanswer(sess, iprompt, password); + if (rc < 0) { + printf("[ERROR] invalid password for interactive mode to login to SSH server.\n"); + ssh_free(sess); + return -1; + } + } + + rc = ssh_userauth_kbdint(sess, NULL, NULL); + } + } + + // and then try password login mode if server allow.
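// (ssh_userauth_password() may report SSH_AUTH_AGAIN while the server is not
// ready to answer yet, so the loop below retries a few times with a short
// pause instead of failing immediately. The same pattern, condensed into a
// helper -- a sketch only, reusing the ex_sleep_ms() wrapper this file
// already uses:
//
//   static int try_password_auth(ssh_session s, const char* pwd) {
//       for (int i = 0; i < 3; ++i) {
//           int rc = ssh_userauth_password(s, NULL, pwd);
//           if (rc != SSH_AUTH_AGAIN)
//               return rc;        // SSH_AUTH_SUCCESS, _DENIED, _PARTIAL or _ERROR
//           ex_sleep_ms(100);     // brief back-off before the next attempt
//       }
//       return SSH_AUTH_AGAIN;    // still busy after three tries
//   }
// )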
+ if (!ok && (auth_methods & SSH_AUTH_METHOD_PASSWORD) == SSH_AUTH_METHOD_PASSWORD) { + retry_count = 0; + rc = ssh_userauth_password(sess, NULL, password); + for (;;) { + if (rc == SSH_AUTH_AGAIN) { + retry_count += 1; + if (retry_count >= 3) + break; + ex_sleep_ms(100); + // Sleep(100); + rc = ssh_userauth_password(sess, NULL, password); + continue; + } + if (rc == SSH_AUTH_SUCCESS) { + ok = true; + printf("[INFO] login with password mode OK.\n"); + break; + } else { + printf("[ERROR] failed to login with password mode, got %d.\n", rc); + break; + } + } + } + + if (!ok) { + printf("[ERROR] can not use password mode or interactive mode to login to SSH server.\n"); + } + else { + printf("[INFO] login success.\n"); + } + + ssh_disconnect(sess); + ssh_free(sess); + ssh_finalize(); + + return 0; +} + diff --git a/server/tp_core/testssh/testssh.vcxproj b/server/tp_core/testssh/testssh.vcxproj index fb14cbc..d6e2ed7 100644 --- a/server/tp_core/testssh/testssh.vcxproj +++ b/server/tp_core/testssh/testssh.vcxproj @@ -55,13 +55,13 @@ Disabled WIN32;_DEBUG;_CONSOLE;LIBSSH_STATIC;%(PreprocessorDefinitions) true - ..\..\..\external\libssh\include;%(AdditionalIncludeDirectories) + ..\..\..\external\libssh\include;..\..\..\common\libex\include;%(AdditionalIncludeDirectories) MultiThreadedDebug Console true - ..\..\..\external\libssh\build\src\static;..\..\..\external\openssl\out32;%(AdditionalLibraryDirectories) + ..\..\..\external\libssh\build\src;..\..\..\external\openssl\lib\VC\static;%(AdditionalLibraryDirectories) @@ -73,7 +73,7 @@ true WIN32;NDEBUG;_CONSOLE;LIBSSH_STATIC;%(PreprocessorDefinitions) true - ..\..\..\external\libssh\include;C:\Program Files (x86)\Visual Leak Detector\include;%(AdditionalIncludeDirectories) + ..\..\..\external\libssh\include;..\..\..\common\libex\include;%(AdditionalIncludeDirectories) MultiThreaded @@ -92,6 +92,26 @@ + + NotUsing + NotUsing + + + NotUsing + NotUsing + + + NotUsing + NotUsing + + + NotUsing + NotUsing + + + NotUsing + NotUsing + Create Create diff --git a/server/tp_core/testssh/testssh.vcxproj.filters b/server/tp_core/testssh/testssh.vcxproj.filters index ac6addb..0388b1a 100644 --- a/server/tp_core/testssh/testssh.vcxproj.filters +++ b/server/tp_core/testssh/testssh.vcxproj.filters @@ -13,6 +13,9 @@ {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + {d9087583-83c7-4d47-bba3-fd86b0cb3901} + @@ -32,5 +35,20 @@ Source Files + + libex + + + libex + + + libex + + + libex + + + libex +
\ No newline at end of file diff --git a/server/tp_web/src/CMakeLists.txt b/server/tp_web/src/CMakeLists.txt index 33f81ea..c8ba2fb 100644 --- a/server/tp_web/src/CMakeLists.txt +++ b/server/tp_web/src/CMakeLists.txt @@ -1,15 +1,13 @@ cmake_minimum_required(VERSION 3.5) MESSAGE(STATUS "=======================================================") -MESSAGE(STATUS " libtptelnet") +MESSAGE(STATUS " tp_web") MESSAGE(STATUS "=======================================================") #MESSAGE(STATUS "operation system is ${CMAKE_SYSTEM}") #MESSAGE(STATUS "current source directory is ${CMAKE_CURRENT_SOURCE_DIR}") include(../../../CMakeCfg.txt) -#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - set(CMAKE_EXE_LINKER_FLAGS "-export-dynamic") aux_source_directory(. DIR_SRCS) @@ -23,7 +21,7 @@ include_directories( include_directories( ${TP_EXTERNAL_RELEASE_DIR}/include - ${TP_EXTERNAL_RELEASE_DIR}/include/python + ${TP_EXTERNAL_RELEASE_DIR}/include/python3.7m ) link_directories(${TP_EXTERNAL_RELEASE_DIR}/lib) diff --git a/server/www/packages/packages-linux/x64/.libs_cffi_backend/libffi-806b1a9d.so.6.0.4 b/server/www/packages/packages-linux/x64/.libs_cffi_backend/libffi-806b1a9d.so.6.0.4 new file mode 100755 index 0000000..a74aa90 Binary files /dev/null and b/server/www/packages/packages-linux/x64/.libs_cffi_backend/libffi-806b1a9d.so.6.0.4 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libfreetype-5766911d.so.6.17.1 b/server/www/packages/packages-linux/x64/PIL/.libs/libfreetype-5766911d.so.6.17.1 new file mode 100755 index 0000000..ac7bae1 Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libfreetype-5766911d.so.6.17.1 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libfreetype-7ce95de6.so.6.16.1 b/server/www/packages/packages-linux/x64/PIL/.libs/libfreetype-7ce95de6.so.6.16.1 deleted file mode 100755 index 179f60c..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libfreetype-7ce95de6.so.6.16.1 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libjpeg-3b10b538.so.9.3.0 b/server/www/packages/packages-linux/x64/PIL/.libs/libjpeg-3b10b538.so.9.3.0 new file mode 100755 index 0000000..19484f6 Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libjpeg-3b10b538.so.9.3.0 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0 b/server/www/packages/packages-linux/x64/PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0 deleted file mode 100755 index 835b57b..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libopenjp2-b3d7668a.so.2.3.1 b/server/www/packages/packages-linux/x64/PIL/.libs/libopenjp2-b3d7668a.so.2.3.1 new file mode 100755 index 0000000..c676915 Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libopenjp2-b3d7668a.so.2.3.1 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libopenjp2-e366d6b0.so.2.1.0 b/server/www/packages/packages-linux/x64/PIL/.libs/libopenjp2-e366d6b0.so.2.1.0 deleted file mode 100755 index c31cea4..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libopenjp2-e366d6b0.so.2.1.0 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libpng16-898afbbd.so.16.35.0 b/server/www/packages/packages-linux/x64/PIL/.libs/libpng16-898afbbd.so.16.35.0 deleted file mode 100755 index 
9cf16ab..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libpng16-898afbbd.so.16.35.0 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libpng16-bedcb7ea.so.16.37.0 b/server/www/packages/packages-linux/x64/PIL/.libs/libpng16-bedcb7ea.so.16.37.0 new file mode 100755 index 0000000..99a0058 Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libpng16-bedcb7ea.so.16.37.0 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libtiff-8a6d997d.so.5.3.0 b/server/www/packages/packages-linux/x64/PIL/.libs/libtiff-8267adfe.so.5.4.0 similarity index 52% rename from server/www/packages/packages-linux/x64/PIL/.libs/libtiff-8a6d997d.so.5.3.0 rename to server/www/packages/packages-linux/x64/PIL/.libs/libtiff-8267adfe.so.5.4.0 index ff0a9dd..b0e5d57 100755 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libtiff-8a6d997d.so.5.3.0 and b/server/www/packages/packages-linux/x64/PIL/.libs/libtiff-8267adfe.so.5.4.0 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libwebp-3a2aeecf.so.7.0.5 b/server/www/packages/packages-linux/x64/PIL/.libs/libwebp-3a2aeecf.so.7.0.5 new file mode 100755 index 0000000..61005d4 Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libwebp-3a2aeecf.so.7.0.5 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libwebp-8ccd29fd.so.7.0.2 b/server/www/packages/packages-linux/x64/PIL/.libs/libwebp-8ccd29fd.so.7.0.2 deleted file mode 100755 index b2f0763..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libwebp-8ccd29fd.so.7.0.2 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libwebpdemux-e9ec482e.so.2.0.6 b/server/www/packages/packages-linux/x64/PIL/.libs/libwebpdemux-e9ec482e.so.2.0.6 new file mode 100755 index 0000000..3cde1b8 Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libwebpdemux-e9ec482e.so.2.0.6 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4 b/server/www/packages/packages-linux/x64/PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4 deleted file mode 100755 index 66adccb..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libwebpmux-1c63fe99.so.3.0.2 b/server/www/packages/packages-linux/x64/PIL/.libs/libwebpmux-1c63fe99.so.3.0.2 deleted file mode 100755 index 2045a77..0000000 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libwebpmux-1c63fe99.so.3.0.2 and /dev/null differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libwebpmux-40630b44.so.3.0.4 b/server/www/packages/packages-linux/x64/PIL/.libs/libwebpmux-40630b44.so.3.0.4 new file mode 100755 index 0000000..a04c69c Binary files /dev/null and b/server/www/packages/packages-linux/x64/PIL/.libs/libwebpmux-40630b44.so.3.0.4 differ diff --git a/server/www/packages/packages-linux/x64/PIL/.libs/libz-a147dcb0.so.1.2.3 b/server/www/packages/packages-linux/x64/PIL/.libs/libz-a147dcb0.so.1.2.3 index c123f89..1ce02c2 100755 Binary files a/server/www/packages/packages-linux/x64/PIL/.libs/libz-a147dcb0.so.1.2.3 and b/server/www/packages/packages-linux/x64/PIL/.libs/libz-a147dcb0.so.1.2.3 differ diff --git a/server/www/packages/packages-linux/x64/PIL/BdfFontFile.py b/server/www/packages/packages-linux/x64/PIL/BdfFontFile.py index eac19bd..fdf2c09 100644 --- 
a/server/www/packages/packages-linux/x64/PIL/BdfFontFile.py +++ b/server/www/packages/packages-linux/x64/PIL/BdfFontFile.py @@ -19,8 +19,7 @@ from __future__ import print_function -from . import Image, FontFile - +from . import FontFile, Image # -------------------------------------------------------------------- # parse X Bitmap Distribution Format (BDF) @@ -32,14 +31,10 @@ bdf_slant = { "O": "Oblique", "RI": "Reverse Italic", "RO": "Reverse Oblique", - "OT": "Other" + "OT": "Other", } -bdf_spacing = { - "P": "Proportional", - "M": "Monospaced", - "C": "Cell" -} +bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"} def bdf_char(f): @@ -50,7 +45,7 @@ def bdf_char(f): return None if s[:9] == b"STARTCHAR": break - id = s[9:].strip().decode('ascii') + id = s[9:].strip().decode("ascii") # load symbol properties props = {} @@ -59,7 +54,7 @@ def bdf_char(f): if not s or s[:6] == b"BITMAP": break i = s.find(b" ") - props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii') + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") # load bitmap bitmap = [] @@ -73,7 +68,7 @@ def bdf_char(f): [x, y, l, d] = [int(p) for p in props["BBX"].split()] [dx, dy] = [int(p) for p in props["DWIDTH"].split()] - bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y) + bbox = (dx, dy), (l, -d - y, x + l, -d), (0, 0, x, y) try: im = Image.frombytes("1", (x, y), bitmap, "hex", "1") @@ -87,8 +82,8 @@ def bdf_char(f): ## # Font file plugin for the X11 BDF format. -class BdfFontFile(FontFile.FontFile): +class BdfFontFile(FontFile.FontFile): def __init__(self, fp): FontFile.FontFile.__init__(self) @@ -105,10 +100,10 @@ class BdfFontFile(FontFile.FontFile): if not s or s[:13] == b"ENDPROPERTIES": break i = s.find(b" ") - props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii') + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") if s[:i] in [b"COMMENT", b"COPYRIGHT"]: if s.find(b"LogicalFontDescription") < 0: - comments.append(s[i+1:-1].decode('ascii')) + comments.append(s[i + 1 : -1].decode("ascii")) while True: c = bdf_char(fp) diff --git a/server/www/packages/packages-linux/x64/PIL/BlpImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/BlpImagePlugin.py index 398e0fa..7b97964 100644 --- a/server/www/packages/packages-linux/x64/PIL/BlpImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/BlpImagePlugin.py @@ -34,7 +34,6 @@ from io import BytesIO from . import Image, ImageFile - BLP_FORMAT_JPEG = 0 BLP_ENCODING_UNCOMPRESSED = 1 @@ -47,11 +46,7 @@ BLP_ALPHA_ENCODING_DXT5 = 7 def unpack_565(i): - return ( - ((i >> 11) & 0x1f) << 3, - ((i >> 5) & 0x3f) << 2, - (i & 0x1f) << 3 - ) + return (((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3) def decode_dxt1(data, alpha=False): @@ -119,7 +114,7 @@ def decode_dxt3(data): for block in range(blocks): idx = block * 16 - block = data[idx:idx + 16] + block = data[idx : idx + 16] # Decode next 16-byte block. bits = struct.unpack_from("<8B", block) color0, color1 = struct.unpack_from(">= 4 else: high = True - a &= 0xf + a &= 0xF a *= 17 # We get a value between 0 and 15 color_code = (code >> 2 * (4 * j + i)) & 0x03 @@ -172,14 +167,12 @@ def decode_dxt5(data): for block in range(blocks): idx = block * 16 - block = data[idx:idx + 16] + block = data[idx : idx + 16] # Decode next 16-byte block. 
a0, a1 = struct.unpack_from("= 40: # v3 and OS/2 - file_info['y_flip'] = i8(header_data[7]) == 0xff - file_info['direction'] = 1 if file_info['y_flip'] else -1 - file_info['width'] = i32(header_data[0:4]) - file_info['height'] = (i32(header_data[4:8]) - if not file_info['y_flip'] - else 2**32 - i32(header_data[4:8])) - file_info['planes'] = i16(header_data[8:10]) - file_info['bits'] = i16(header_data[10:12]) - file_info['compression'] = i32(header_data[12:16]) - # byte size of pixel data - file_info['data_size'] = i32(header_data[16:20]) - file_info['pixels_per_meter'] = (i32(header_data[20:24]), - i32(header_data[24:28])) - file_info['colors'] = i32(header_data[28:32]) - file_info['palette_padding'] = 4 - self.info["dpi"] = tuple( - map(lambda x: int(math.ceil(x / 39.3701)), - file_info['pixels_per_meter'])) - if file_info['compression'] == self.BITFIELDS: - if len(header_data) >= 52: - for idx, mask in enumerate(['r_mask', - 'g_mask', - 'b_mask', - 'a_mask']): - file_info[mask] = i32(header_data[36+idx*4:40+idx*4]) - else: - # 40 byte headers only have the three components in the - # bitfields masks, - # ref: https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx - # See also https://github.com/python-pillow/Pillow/issues/1293 - # There is a 4th component in the RGBQuad, in the alpha - # location, but it is listed as a reserved component, - # and it is not generally an alpha channel - file_info['a_mask'] = 0x0 - for mask in ['r_mask', 'g_mask', 'b_mask']: - file_info[mask] = i32(read(4)) - file_info['rgb_mask'] = (file_info['r_mask'], - file_info['g_mask'], - file_info['b_mask']) - file_info['rgba_mask'] = (file_info['r_mask'], - file_info['g_mask'], - file_info['b_mask'], - file_info['a_mask']) + header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4) + + # -------------------------------------------------- IBM OS/2 Bitmap v1 + # ----- This format has different offsets because of width/height types + if file_info["header_size"] == 12: + file_info["width"] = i16(header_data[0:2]) + file_info["height"] = i16(header_data[2:4]) + file_info["planes"] = i16(header_data[4:6]) + file_info["bits"] = i16(header_data[6:8]) + file_info["compression"] = self.RAW + file_info["palette_padding"] = 3 + + # --------------------------------------------- Windows Bitmap v2 to v5 + # v3, OS/2 v2, v4, v5 + elif file_info["header_size"] in (40, 64, 108, 124): + file_info["y_flip"] = i8(header_data[7]) == 0xFF + file_info["direction"] = 1 if file_info["y_flip"] else -1 + file_info["width"] = i32(header_data[0:4]) + file_info["height"] = ( + i32(header_data[4:8]) + if not file_info["y_flip"] + else 2 ** 32 - i32(header_data[4:8]) + ) + file_info["planes"] = i16(header_data[8:10]) + file_info["bits"] = i16(header_data[10:12]) + file_info["compression"] = i32(header_data[12:16]) + # byte size of pixel data + file_info["data_size"] = i32(header_data[16:20]) + file_info["pixels_per_meter"] = ( + i32(header_data[20:24]), + i32(header_data[24:28]), + ) + file_info["colors"] = i32(header_data[28:32]) + file_info["palette_padding"] = 4 + self.info["dpi"] = tuple( + int(x / 39.3701 + 0.5) for x in file_info["pixels_per_meter"] + ) + if file_info["compression"] == self.BITFIELDS: + if len(header_data) >= 52: + for idx, mask in enumerate( + ["r_mask", "g_mask", "b_mask", "a_mask"] + ): + file_info[mask] = i32(header_data[36 + idx * 4 : 40 + idx * 4]) + else: + # 40 byte headers only have the three components in the + # bitfields masks, ref: + # 
https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx + # See also + # https://github.com/python-pillow/Pillow/issues/1293 + # There is a 4th component in the RGBQuad, in the alpha + # location, but it is listed as a reserved component, + # and it is not generally an alpha channel + file_info["a_mask"] = 0x0 + for mask in ["r_mask", "g_mask", "b_mask"]: + file_info[mask] = i32(read(4)) + file_info["rgb_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + ) + file_info["rgba_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + file_info["a_mask"], + ) else: - raise IOError("Unsupported BMP header type (%d)" % - file_info['header_size']) + raise IOError("Unsupported BMP header type (%d)" % file_info["header_size"]) + # ------------------ Special case : header is reported 40, which # ---------------------- is shorter than real size for bpp >= 16 - self._size = file_info['width'], file_info['height'] - # -------- If color count was not found in the header, compute from bits - file_info['colors'] = file_info['colors'] if file_info.get('colors', 0) else (1 << file_info['bits']) - # -------------------------------- Check abnormal values for DOS attacks - if file_info['width'] * file_info['height'] > 2**31: + self._size = file_info["width"], file_info["height"] + + # ------- If color count was not found in the header, compute from bits + file_info["colors"] = ( + file_info["colors"] + if file_info.get("colors", 0) + else (1 << file_info["bits"]) + ) + + # ------------------------------- Check abnormal values for DOS attacks + if file_info["width"] * file_info["height"] > 2 ** 31: raise IOError("Unsupported BMP Size: (%dx%d)" % self.size) - # ----------------------- Check bit depth for unusual unsupported values - self.mode, raw_mode = BIT2MODE.get(file_info['bits'], (None, None)) + + # ---------------------- Check bit depth for unusual unsupported values + self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None)) if self.mode is None: - raise IOError("Unsupported BMP pixel depth (%d)" - % file_info['bits']) - # ----------------- Process BMP with Bitfields compression (not palette) - if file_info['compression'] == self.BITFIELDS: + raise IOError("Unsupported BMP pixel depth (%d)" % file_info["bits"]) + + # ---------------- Process BMP with Bitfields compression (not palette) + if file_info["compression"] == self.BITFIELDS: SUPPORTED = { - 32: [(0xff0000, 0xff00, 0xff, 0x0), - (0xff0000, 0xff00, 0xff, 0xff000000), - (0x0, 0x0, 0x0, 0x0), - (0xff000000, 0xff0000, 0xff00, 0x0)], - 24: [(0xff0000, 0xff00, 0xff)], - 16: [(0xf800, 0x7e0, 0x1f), (0x7c00, 0x3e0, 0x1f)] + 32: [ + (0xFF0000, 0xFF00, 0xFF, 0x0), + (0xFF0000, 0xFF00, 0xFF, 0xFF000000), + (0xFF, 0xFF00, 0xFF0000, 0xFF000000), + (0x0, 0x0, 0x0, 0x0), + (0xFF000000, 0xFF0000, 0xFF00, 0x0), + ], + 24: [(0xFF0000, 0xFF00, 0xFF)], + 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)], } MASK_MODES = { - (32, (0xff0000, 0xff00, 0xff, 0x0)): "BGRX", - (32, (0xff000000, 0xff0000, 0xff00, 0x0)): "XBGR", - (32, (0xff0000, 0xff00, 0xff, 0xff000000)): "BGRA", + (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX", + (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR", + (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA", + (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA", (32, (0x0, 0x0, 0x0, 0x0)): "BGRA", - (24, (0xff0000, 0xff00, 0xff)): "BGR", - (16, (0xf800, 0x7e0, 0x1f)): "BGR;16", - (16, (0x7c00, 0x3e0, 0x1f)): "BGR;15" + (24, (0xFF0000, 0xFF00, 0xFF)): "BGR", + (16, 
(0xF800, 0x7E0, 0x1F)): "BGR;16", + (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15", } - if file_info['bits'] in SUPPORTED: - if file_info['bits'] == 32 and \ - file_info['rgba_mask'] in SUPPORTED[file_info['bits']]: - raw_mode = MASK_MODES[(file_info['bits'], file_info['rgba_mask'])] - self.mode = "RGBA" if raw_mode in ("BGRA",) else self.mode - elif (file_info['bits'] in (24, 16) and - file_info['rgb_mask'] in SUPPORTED[file_info['bits']]): - raw_mode = MASK_MODES[ - (file_info['bits'], file_info['rgb_mask']) - ] + if file_info["bits"] in SUPPORTED: + if ( + file_info["bits"] == 32 + and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]] + ): + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])] + self.mode = "RGBA" if "A" in raw_mode else self.mode + elif ( + file_info["bits"] in (24, 16) + and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]] + ): + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])] else: raise IOError("Unsupported BMP bitfields layout") else: raise IOError("Unsupported BMP bitfields layout") - elif file_info['compression'] == self.RAW: - if file_info['bits'] == 32 and header == 22: # 32-bit .cur offset + elif file_info["compression"] == self.RAW: + if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset raw_mode, self.mode = "BGRA", "RGBA" else: - raise IOError("Unsupported BMP compression (%d)" % - file_info['compression']) - # ---------------- Once the header is processed, process the palette/LUT + raise IOError("Unsupported BMP compression (%d)" % file_info["compression"]) + + # --------------- Once the header is processed, process the palette/LUT if self.mode == "P": # Paletted for 1, 4 and 8 bit images - # ----------------------------------------------------- 1-bit images - if not (0 < file_info['colors'] <= 65536): - raise IOError("Unsupported BMP Palette size (%d)" % - file_info['colors']) + + # ---------------------------------------------------- 1-bit images + if not (0 < file_info["colors"] <= 65536): + raise IOError("Unsupported BMP Palette size (%d)" % file_info["colors"]) else: - padding = file_info['palette_padding'] - palette = read(padding * file_info['colors']) + padding = file_info["palette_padding"] + palette = read(padding * file_info["colors"]) greyscale = True - indices = (0, 255) if file_info['colors'] == 2 else \ - list(range(file_info['colors'])) - # ------------------ Check if greyscale and ignore palette if so + indices = ( + (0, 255) + if file_info["colors"] == 2 + else list(range(file_info["colors"])) + ) + + # ----------------- Check if greyscale and ignore palette if so for ind, val in enumerate(indices): - rgb = palette[ind*padding:ind*padding + 3] + rgb = palette[ind * padding : ind * padding + 3] if rgb != o8(val) * 3: greyscale = False - # -------- If all colors are grey, white or black, ditch palette + + # ------- If all colors are grey, white or black, ditch palette if greyscale: - self.mode = "1" if file_info['colors'] == 2 else "L" + self.mode = "1" if file_info["colors"] == 2 else "L" raw_mode = self.mode else: self.mode = "P" self.palette = ImagePalette.raw( - "BGRX" if padding == 4 else "BGR", palette) + "BGRX" if padding == 4 else "BGR", palette + ) - # ----------------------------- Finally set the tile data for the plugin - self.info['compression'] = file_info['compression'] + # ---------------------------- Finally set the tile data for the plugin + self.info["compression"] = file_info["compression"] self.tile = [ - ('raw', - (0, 0, file_info['width'], file_info['height']), - offset or 
self.fp.tell(), - (raw_mode, - ((file_info['width'] * file_info['bits'] + 31) >> 3) & (~3), - file_info['direction'])) + ( + "raw", + (0, 0, file_info["width"], file_info["height"]), + offset or self.fp.tell(), + ( + raw_mode, + ((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3), + file_info["direction"], + ), + ) ] def _open(self): @@ -243,9 +275,9 @@ class BmpImageFile(ImageFile.ImageFile): self._bitmap(offset=offset) -# ============================================================================== +# ============================================================================= # Image plugin for the DIB format (BMP alias) -# ============================================================================== +# ============================================================================= class DibImageFile(BmpImageFile): format = "DIB" @@ -254,6 +286,7 @@ class DibImageFile(BmpImageFile): def _open(self): self._bitmap() + # # -------------------------------------------------------------------- # Write BMP file @@ -268,7 +301,11 @@ SAVE = { } -def _save(im, fp, filename): +def _dib_save(im, fp, filename): + _save(im, fp, filename, False) + + +def _save(im, fp, filename, bitmap_header=True): try: rawmode, bits, colors = SAVE[im.mode] except KeyError: @@ -279,32 +316,38 @@ def _save(im, fp, filename): dpi = info.get("dpi", (96, 96)) # 1 meter == 39.3701 inches - ppm = tuple(map(lambda x: int(x * 39.3701), dpi)) + ppm = tuple(map(lambda x: int(x * 39.3701 + 0.5), dpi)) - stride = ((im.size[0]*bits+7)//8+3) & (~3) + stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3) header = 40 # or 64 for OS/2 version 2 - offset = 14 + header + colors * 4 image = stride * im.size[1] # bitmap header - fp.write(b"BM" + # file type (magic) - o32(offset+image) + # file size - o32(0) + # reserved - o32(offset)) # image data offset + if bitmap_header: + offset = 14 + header + colors * 4 + fp.write( + b"BM" + + o32(offset + image) # file type (magic) + + o32(0) # file size + + o32(offset) # reserved + ) # image data offset # bitmap info header - fp.write(o32(header) + # info header size - o32(im.size[0]) + # width - o32(im.size[1]) + # height - o16(1) + # planes - o16(bits) + # depth - o32(0) + # compression (0=uncompressed) - o32(image) + # size of bitmap - o32(ppm[0]) + o32(ppm[1]) + # resolution - o32(colors) + # colors used - o32(colors)) # colors important + fp.write( + o32(header) # info header size + + o32(im.size[0]) # width + + o32(im.size[1]) # height + + o16(1) # planes + + o16(bits) # depth + + o32(0) # compression (0=uncompressed) + + o32(image) # size of bitmap + + o32(ppm[0]) # resolution + + o32(ppm[1]) # resolution + + o32(colors) # colors used + + o32(colors) # colors important + ) - fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) + fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) if im.mode == "1": for i in (0, 255): @@ -315,8 +358,8 @@ def _save(im, fp, filename): elif im.mode == "P": fp.write(im.im.getpalette("RGB", "BGRX")) - ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, - (rawmode, stride, -1))]) + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]) + # # -------------------------------------------------------------------- @@ -329,3 +372,10 @@ Image.register_save(BmpImageFile.format, _save) Image.register_extension(BmpImageFile.format, ".bmp") Image.register_mime(BmpImageFile.format, "image/bmp") + +Image.register_open(DibImageFile.format, DibImageFile, _dib_accept) +Image.register_save(DibImageFile.format, _dib_save) + 
+Image.register_extension(DibImageFile.format, ".dib") + +Image.register_mime(DibImageFile.format, "image/bmp") diff --git a/server/www/packages/packages-linux/x64/PIL/BufrStubImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/BufrStubImagePlugin.py index a1957b3..56cac3b 100644 --- a/server/www/packages/packages-linux/x64/PIL/BufrStubImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/BufrStubImagePlugin.py @@ -27,6 +27,7 @@ def register_handler(handler): # -------------------------------------------------------------------- # Image adapter + def _accept(prefix): return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC" diff --git a/server/www/packages/packages-linux/x64/PIL/ContainerIO.py b/server/www/packages/packages-linux/x64/PIL/ContainerIO.py index 682ad90..3cf9d82 100644 --- a/server/www/packages/packages-linux/x64/PIL/ContainerIO.py +++ b/server/www/packages/packages-linux/x64/PIL/ContainerIO.py @@ -18,9 +18,10 @@ # A file object that provides read access to a part of an existing # file (for example a TAR file). +import io + class ContainerIO(object): - def __init__(self, file, offset, length): """ Create file object. @@ -39,9 +40,9 @@ class ContainerIO(object): # Always false. def isatty(self): - return 0 + return False - def seek(self, offset, mode=0): + def seek(self, offset, mode=io.SEEK_SET): """ Move file pointer. diff --git a/server/www/packages/packages-linux/x64/PIL/CurImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/CurImagePlugin.py index 3e8f321..9e2d8c9 100644 --- a/server/www/packages/packages-linux/x64/PIL/CurImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/CurImagePlugin.py @@ -18,9 +18,11 @@ from __future__ import print_function -from . import Image, BmpImagePlugin +from . import BmpImagePlugin, Image from ._binary import i8, i16le as i16, i32le as i32 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" # @@ -34,6 +36,7 @@ def _accept(prefix): ## # Image plugin for Windows Cursor files. + class CurImageFile(BmpImagePlugin.BmpImageFile): format = "CUR" @@ -63,9 +66,9 @@ class CurImageFile(BmpImagePlugin.BmpImageFile): self._bitmap(i32(m[12:]) + offset) # patch up the bitmap height - self._size = self.size[0], self.size[1]//2 + self._size = self.size[0], self.size[1] // 2 d, e, o, a = self.tile[0] - self.tile[0] = d, (0, 0)+self.size, o, a + self.tile[0] = d, (0, 0) + self.size, o, a return diff --git a/server/www/packages/packages-linux/x64/PIL/DcxImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/DcxImagePlugin.py index 2045927..57c3214 100644 --- a/server/www/packages/packages-linux/x64/PIL/DcxImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/DcxImagePlugin.py @@ -25,6 +25,8 @@ from . import Image from ._binary import i32le as i32 from .PcxImagePlugin import PcxImageFile +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.2" MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then? @@ -37,6 +39,7 @@ def _accept(prefix): ## # Image plugin for the Intel DCX format. 
+ class DcxImageFile(PcxImageFile): format = "DCX" @@ -81,6 +84,15 @@ class DcxImageFile(PcxImageFile): def tell(self): return self.frame + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + Image.register_open(DcxImageFile.format, DcxImageFile, _accept) diff --git a/server/www/packages/packages-linux/x64/PIL/DdsImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/DdsImagePlugin.py index 7660827..b2d5089 100644 --- a/server/www/packages/packages-linux/x64/PIL/DdsImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/DdsImagePlugin.py @@ -12,8 +12,8 @@ Full text of the CC0 license: import struct from io import BytesIO -from . import Image, ImageFile +from . import Image, ImageFile # Magic ("DDS ") DDS_MAGIC = 0x20534444 @@ -61,8 +61,7 @@ DDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS DDS_ALPHA = DDPF_ALPHA DDS_PAL8 = DDPF_PALETTEINDEXED8 -DDS_HEADER_FLAGS_TEXTURE = (DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | - DDSD_PIXELFORMAT) +DDS_HEADER_FLAGS_TEXTURE = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | DDSD_PIXELFORMAT DDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT DDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH DDS_HEADER_FLAGS_PITCH = DDSD_PITCH @@ -118,48 +117,54 @@ class DdsImageFile(ImageFile.ImageFile): self.mode = "RGBA" pitch, depth, mipmaps = struct.unpack("<3I", header.read(12)) - reserved = struct.unpack("<11I", header.read(44)) + struct.unpack("<11I", header.read(44)) # reserved # pixel format pfsize, pfflags = struct.unpack("<2I", header.read(8)) fourcc = header.read(4) - bitcount, rmask, gmask, bmask, amask = struct.unpack("<5I", - header.read(20)) + bitcount, = struct.unpack(" 0: - s = fp.read(min(lengthfile, 100*1024)) + s = fp.read(min(lengthfile, 100 * 1024)) if not s: break lengthfile -= len(s) f.write(s) # Build Ghostscript command - command = ["gs", - "-q", # quiet mode - "-g%dx%d" % size, # set output geometry (pixels) - "-r%fx%f" % res, # set input DPI (dots per inch) - "-dBATCH", # exit after processing - "-dNOPAUSE", # don't pause between pages - "-dSAFER", # safe mode - "-sDEVICE=ppmraw", # ppm driver - "-sOutputFile=%s" % outfile, # output file - "-c", "%d %d translate" % (-bbox[0], -bbox[1]), - # adjust for image origin - "-f", infile, # input file - "-c", "showpage", # showpage (see: https://bugs.ghostscript.com/show_bug.cgi?id=698272) - ] + command = [ + "gs", + "-q", # quiet mode + "-g%dx%d" % size, # set output geometry (pixels) + "-r%fx%f" % res, # set input DPI (dots per inch) + "-dBATCH", # exit after processing + "-dNOPAUSE", # don't pause between pages + "-dSAFER", # safe mode + "-sDEVICE=ppmraw", # ppm driver + "-sOutputFile=%s" % outfile, # output file + # adjust for image origin + "-c", + "%d %d translate" % (-bbox[0], -bbox[1]), + "-f", + infile, # input file + # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272) + "-c", + "showpage", + ] if gs_windows_binary is not None: if not gs_windows_binary: - raise WindowsError('Unable to locate Ghostscript on paths') + raise WindowsError("Unable to locate Ghostscript on paths") command[0] = gs_windows_binary # push data through Ghostscript try: - with open(os.devnull, 'w+b') as devnull: - startupinfo = None - if sys.platform.startswith('win'): - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - subprocess.check_call(command, stdin=devnull, stdout=devnull, - startupinfo=startupinfo) + startupinfo = None + if sys.platform.startswith("win"): + startupinfo = 
subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + subprocess.check_call(command, startupinfo=startupinfo) im = Image.open(outfile) im.load() finally: @@ -162,11 +173,12 @@ class PSFile(object): """ Wrapper for bytesio object that treats either CR or LF as end of line. """ + def __init__(self, fp): self.fp = fp self.char = None - def seek(self, offset, whence=0): + def seek(self, offset, whence=io.SEEK_SET): self.char = None self.fp.seek(offset, whence) @@ -184,12 +196,12 @@ class PSFile(object): if self.char in b"\r\n": self.char = None - return s.decode('latin-1') + return s.decode("latin-1") def _accept(prefix): - return prefix[:4] == b"%!PS" or \ - (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5) + return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5) + ## # Image plugin for Encapsulated Postscript. This plugin supports only @@ -223,7 +235,7 @@ class EpsImageFile(ImageFile.ImageFile): # Load EPS header s_raw = fp.readline() - s = s_raw.strip('\r\n') + s = s_raw.strip("\r\n") while s_raw: if s: @@ -245,8 +257,9 @@ class EpsImageFile(ImageFile.ImageFile): # put floating point values there anyway. box = [int(float(i)) for i in v.split()] self._size = box[2] - box[0], box[3] - box[1] - self.tile = [("eps", (0, 0) + self.size, offset, - (length, box))] + self.tile = [ + ("eps", (0, 0) + self.size, offset, (length, box)) + ] except Exception: pass @@ -261,7 +274,7 @@ class EpsImageFile(ImageFile.ImageFile): self.info[k[:8]] = k[9:] else: self.info[k] = "" - elif s[0] == '%': + elif s[0] == "%": # handle non-DSC Postscript comments that some # tools mistakenly put in the Comments section pass @@ -269,7 +282,7 @@ class EpsImageFile(ImageFile.ImageFile): raise IOError("bad EPS header") s_raw = fp.readline() - s = s_raw.strip('\r\n') + s = s_raw.strip("\r\n") if s and s[:1] != "%": break @@ -296,7 +309,7 @@ class EpsImageFile(ImageFile.ImageFile): self._size = int(x), int(y) return - s = fp.readline().strip('\r\n') + s = fp.readline().strip("\r\n") if not s: break @@ -309,7 +322,7 @@ class EpsImageFile(ImageFile.ImageFile): if s[:4] == b"%!PS": # for HEAD without binary preview - fp.seek(0, 2) + fp.seek(0, io.SEEK_END) length = fp.tell() offset = 0 elif i32(s[0:4]) == 0xC6D3D0C5: @@ -343,6 +356,7 @@ class EpsImageFile(ImageFile.ImageFile): # # -------------------------------------------------------------------- + def _save(im, fp, filename, eps=1): """EPS Writer for the Python Imaging Library.""" @@ -365,7 +379,7 @@ def _save(im, fp, filename, eps=1): wrapped_fp = False if fp != sys.stdout: if sys.version_info.major > 2: - fp = io.TextIOWrapper(fp, encoding='latin-1') + fp = io.TextIOWrapper(fp, encoding="latin-1") wrapped_fp = True try: @@ -380,7 +394,7 @@ def _save(im, fp, filename, eps=1): fp.write("%%EndComments\n") fp.write("%%Page: 1 1\n") fp.write("%%ImageData: %d %d " % im.size) - fp.write("%d %d 0 1 1 \"%s\"\n" % operator) + fp.write('%d %d 0 1 1 "%s"\n' % operator) # # image header @@ -395,7 +409,7 @@ def _save(im, fp, filename, eps=1): if hasattr(fp, "flush"): fp.flush() - ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)]) + ImageFile._save(im, base_fp, [("eps", (0, 0) + im.size, 0, None)]) fp.write("\n%%%%EndBinary\n") fp.write("grestore end\n") @@ -405,6 +419,7 @@ def _save(im, fp, filename, eps=1): if wrapped_fp: fp.detach() + # # -------------------------------------------------------------------- diff --git a/server/www/packages/packages-linux/x64/PIL/ExifTags.py 
b/server/www/packages/packages-linux/x64/PIL/ExifTags.py index a8ad26b..47a981e 100644 --- a/server/www/packages/packages-linux/x64/PIL/ExifTags.py +++ b/server/www/packages/packages-linux/x64/PIL/ExifTags.py @@ -18,11 +18,10 @@ # Maps EXIF tags to tag names. TAGS = { - # possibly incomplete - 0x000b: "ProcessingSoftware", - 0x00fe: "NewSubfileType", - 0x00ff: "SubfileType", + 0x000B: "ProcessingSoftware", + 0x00FE: "NewSubfileType", + 0x00FF: "SubfileType", 0x0100: "ImageWidth", 0x0101: "ImageLength", 0x0102: "BitsPerSample", @@ -31,10 +30,10 @@ TAGS = { 0x0107: "Thresholding", 0x0108: "CellWidth", 0x0109: "CellLength", - 0x010a: "FillOrder", - 0x010d: "DocumentName", - 0x010e: "ImageDescription", - 0x010f: "Make", + 0x010A: "FillOrder", + 0x010D: "DocumentName", + 0x010E: "ImageDescription", + 0x010F: "Make", 0x0110: "Model", 0x0111: "StripOffsets", 0x0112: "Orientation", @@ -43,10 +42,10 @@ TAGS = { 0x0117: "StripByteCounts", 0x0118: "MinSampleValue", 0x0119: "MaxSampleValue", - 0x011a: "XResolution", - 0x011b: "YResolution", - 0x011c: "PlanarConfiguration", - 0x011d: "PageName", + 0x011A: "XResolution", + 0x011B: "YResolution", + 0x011C: "PlanarConfiguration", + 0x011D: "PageName", 0x0120: "FreeOffsets", 0x0121: "FreeByteCounts", 0x0122: "GrayResponseUnit", @@ -55,24 +54,24 @@ TAGS = { 0x0125: "T6Options", 0x0128: "ResolutionUnit", 0x0129: "PageNumber", - 0x012d: "TransferFunction", + 0x012D: "TransferFunction", 0x0131: "Software", 0x0132: "DateTime", - 0x013b: "Artist", - 0x013c: "HostComputer", - 0x013d: "Predictor", - 0x013e: "WhitePoint", - 0x013f: "PrimaryChromaticities", + 0x013B: "Artist", + 0x013C: "HostComputer", + 0x013D: "Predictor", + 0x013E: "WhitePoint", + 0x013F: "PrimaryChromaticities", 0x0140: "ColorMap", 0x0141: "HalftoneHints", 0x0142: "TileWidth", 0x0143: "TileLength", 0x0144: "TileOffsets", 0x0145: "TileByteCounts", - 0x014a: "SubIFDs", - 0x014c: "InkSet", - 0x014d: "InkNames", - 0x014e: "NumberOfInks", + 0x014A: "SubIFDs", + 0x014C: "InkSet", + 0x014D: "InkNames", + 0x014E: "NumberOfInks", 0x0150: "DotRange", 0x0151: "TargetPrinter", 0x0152: "ExtraSamples", @@ -83,9 +82,9 @@ TAGS = { 0x0157: "ClipPath", 0x0158: "XClipPathUnits", 0x0159: "YClipPathUnits", - 0x015a: "Indexed", - 0x015b: "JPEGTables", - 0x015f: "OPIProxy", + 0x015A: "Indexed", + 0x015B: "JPEGTables", + 0x015F: "OPIProxy", 0x0200: "JPEGProc", 0x0201: "JpegIFOffset", 0x0202: "JpegIFByteCount", @@ -99,20 +98,20 @@ TAGS = { 0x0212: "YCbCrSubSampling", 0x0213: "YCbCrPositioning", 0x0214: "ReferenceBlackWhite", - 0x02bc: "XMLPacket", + 0x02BC: "XMLPacket", 0x1000: "RelatedImageFileFormat", 0x1001: "RelatedImageWidth", 0x1002: "RelatedImageLength", 0x4746: "Rating", 0x4749: "RatingPercent", - 0x800d: "ImageID", - 0x828d: "CFARepeatPatternDim", - 0x828e: "CFAPattern", - 0x828f: "BatteryLevel", + 0x800D: "ImageID", + 0x828D: "CFARepeatPatternDim", + 0x828E: "CFAPattern", + 0x828F: "BatteryLevel", 0x8298: "Copyright", - 0x829a: "ExposureTime", - 0x829d: "FNumber", - 0x83bb: "IPTCNAA", + 0x829A: "ExposureTime", + 0x829D: "FNumber", + 0x83BB: "IPTCNAA", 0x8649: "ImageResources", 0x8769: "ExifOffset", 0x8773: "InterColorProfile", @@ -122,8 +121,8 @@ TAGS = { 0x8827: "ISOSpeedRatings", 0x8828: "OECF", 0x8829: "Interlace", - 0x882a: "TimeZoneOffset", - 0x882b: "SelfTimerMode", + 0x882A: "TimeZoneOffset", + 0x882B: "SelfTimerMode", 0x9000: "ExifVersion", 0x9003: "DateTimeOriginal", 0x9004: "DateTimeDigitized", @@ -138,142 +137,142 @@ TAGS = { 0x9207: "MeteringMode", 0x9208: "LightSource", 0x9209: "Flash", - 0x920a: 
"FocalLength", - 0x920b: "FlashEnergy", - 0x920c: "SpatialFrequencyResponse", - 0x920d: "Noise", + 0x920A: "FocalLength", + 0x920B: "FlashEnergy", + 0x920C: "SpatialFrequencyResponse", + 0x920D: "Noise", 0x9211: "ImageNumber", 0x9212: "SecurityClassification", 0x9213: "ImageHistory", 0x9214: "SubjectLocation", 0x9215: "ExposureIndex", 0x9216: "TIFF/EPStandardID", - 0x927c: "MakerNote", + 0x927C: "MakerNote", 0x9286: "UserComment", 0x9290: "SubsecTime", 0x9291: "SubsecTimeOriginal", 0x9292: "SubsecTimeDigitized", - 0x9c9b: "XPTitle", - 0x9c9c: "XPComment", - 0x9c9d: "XPAuthor", - 0x9c9e: "XPKeywords", - 0x9c9f: "XPSubject", - 0xa000: "FlashPixVersion", - 0xa001: "ColorSpace", - 0xa002: "ExifImageWidth", - 0xa003: "ExifImageHeight", - 0xa004: "RelatedSoundFile", - 0xa005: "ExifInteroperabilityOffset", - 0xa20b: "FlashEnergy", - 0xa20c: "SpatialFrequencyResponse", - 0xa20e: "FocalPlaneXResolution", - 0xa20f: "FocalPlaneYResolution", - 0xa210: "FocalPlaneResolutionUnit", - 0xa214: "SubjectLocation", - 0xa215: "ExposureIndex", - 0xa217: "SensingMethod", - 0xa300: "FileSource", - 0xa301: "SceneType", - 0xa302: "CFAPattern", - 0xa401: "CustomRendered", - 0xa402: "ExposureMode", - 0xa403: "WhiteBalance", - 0xa404: "DigitalZoomRatio", - 0xa405: "FocalLengthIn35mmFilm", - 0xa406: "SceneCaptureType", - 0xa407: "GainControl", - 0xa408: "Contrast", - 0xa409: "Saturation", - 0xa40a: "Sharpness", - 0xa40b: "DeviceSettingDescription", - 0xa40c: "SubjectDistanceRange", - 0xa420: "ImageUniqueID", - 0xa430: "CameraOwnerName", - 0xa431: "BodySerialNumber", - 0xa432: "LensSpecification", - 0xa433: "LensMake", - 0xa434: "LensModel", - 0xa435: "LensSerialNumber", - 0xa500: "Gamma", - 0xc4a5: "PrintImageMatching", - 0xc612: "DNGVersion", - 0xc613: "DNGBackwardVersion", - 0xc614: "UniqueCameraModel", - 0xc615: "LocalizedCameraModel", - 0xc616: "CFAPlaneColor", - 0xc617: "CFALayout", - 0xc618: "LinearizationTable", - 0xc619: "BlackLevelRepeatDim", - 0xc61a: "BlackLevel", - 0xc61b: "BlackLevelDeltaH", - 0xc61c: "BlackLevelDeltaV", - 0xc61d: "WhiteLevel", - 0xc61e: "DefaultScale", - 0xc61f: "DefaultCropOrigin", - 0xc620: "DefaultCropSize", - 0xc621: "ColorMatrix1", - 0xc622: "ColorMatrix2", - 0xc623: "CameraCalibration1", - 0xc624: "CameraCalibration2", - 0xc625: "ReductionMatrix1", - 0xc626: "ReductionMatrix2", - 0xc627: "AnalogBalance", - 0xc628: "AsShotNeutral", - 0xc629: "AsShotWhiteXY", - 0xc62a: "BaselineExposure", - 0xc62b: "BaselineNoise", - 0xc62c: "BaselineSharpness", - 0xc62d: "BayerGreenSplit", - 0xc62e: "LinearResponseLimit", - 0xc62f: "CameraSerialNumber", - 0xc630: "LensInfo", - 0xc631: "ChromaBlurRadius", - 0xc632: "AntiAliasStrength", - 0xc633: "ShadowScale", - 0xc634: "DNGPrivateData", - 0xc635: "MakerNoteSafety", - 0xc65a: "CalibrationIlluminant1", - 0xc65b: "CalibrationIlluminant2", - 0xc65c: "BestQualityScale", - 0xc65d: "RawDataUniqueID", - 0xc68b: "OriginalRawFileName", - 0xc68c: "OriginalRawFileData", - 0xc68d: "ActiveArea", - 0xc68e: "MaskedAreas", - 0xc68f: "AsShotICCProfile", - 0xc690: "AsShotPreProfileMatrix", - 0xc691: "CurrentICCProfile", - 0xc692: "CurrentPreProfileMatrix", - 0xc6bf: "ColorimetricReference", - 0xc6f3: "CameraCalibrationSignature", - 0xc6f4: "ProfileCalibrationSignature", - 0xc6f6: "AsShotProfileName", - 0xc6f7: "NoiseReductionApplied", - 0xc6f8: "ProfileName", - 0xc6f9: "ProfileHueSatMapDims", - 0xc6fa: "ProfileHueSatMapData1", - 0xc6fb: "ProfileHueSatMapData2", - 0xc6fc: "ProfileToneCurve", - 0xc6fd: "ProfileEmbedPolicy", - 0xc6fe: "ProfileCopyright", - 0xc714: 
"ForwardMatrix1", - 0xc715: "ForwardMatrix2", - 0xc716: "PreviewApplicationName", - 0xc717: "PreviewApplicationVersion", - 0xc718: "PreviewSettingsName", - 0xc719: "PreviewSettingsDigest", - 0xc71a: "PreviewColorSpace", - 0xc71b: "PreviewDateTime", - 0xc71c: "RawImageDigest", - 0xc71d: "OriginalRawFileDigest", - 0xc71e: "SubTileBlockSize", - 0xc71f: "RowInterleaveFactor", - 0xc725: "ProfileLookTableDims", - 0xc726: "ProfileLookTableData", - 0xc740: "OpcodeList1", - 0xc741: "OpcodeList2", - 0xc74e: "OpcodeList3", - 0xc761: "NoiseProfile" + 0x9C9B: "XPTitle", + 0x9C9C: "XPComment", + 0x9C9D: "XPAuthor", + 0x9C9E: "XPKeywords", + 0x9C9F: "XPSubject", + 0xA000: "FlashPixVersion", + 0xA001: "ColorSpace", + 0xA002: "ExifImageWidth", + 0xA003: "ExifImageHeight", + 0xA004: "RelatedSoundFile", + 0xA005: "ExifInteroperabilityOffset", + 0xA20B: "FlashEnergy", + 0xA20C: "SpatialFrequencyResponse", + 0xA20E: "FocalPlaneXResolution", + 0xA20F: "FocalPlaneYResolution", + 0xA210: "FocalPlaneResolutionUnit", + 0xA214: "SubjectLocation", + 0xA215: "ExposureIndex", + 0xA217: "SensingMethod", + 0xA300: "FileSource", + 0xA301: "SceneType", + 0xA302: "CFAPattern", + 0xA401: "CustomRendered", + 0xA402: "ExposureMode", + 0xA403: "WhiteBalance", + 0xA404: "DigitalZoomRatio", + 0xA405: "FocalLengthIn35mmFilm", + 0xA406: "SceneCaptureType", + 0xA407: "GainControl", + 0xA408: "Contrast", + 0xA409: "Saturation", + 0xA40A: "Sharpness", + 0xA40B: "DeviceSettingDescription", + 0xA40C: "SubjectDistanceRange", + 0xA420: "ImageUniqueID", + 0xA430: "CameraOwnerName", + 0xA431: "BodySerialNumber", + 0xA432: "LensSpecification", + 0xA433: "LensMake", + 0xA434: "LensModel", + 0xA435: "LensSerialNumber", + 0xA500: "Gamma", + 0xC4A5: "PrintImageMatching", + 0xC612: "DNGVersion", + 0xC613: "DNGBackwardVersion", + 0xC614: "UniqueCameraModel", + 0xC615: "LocalizedCameraModel", + 0xC616: "CFAPlaneColor", + 0xC617: "CFALayout", + 0xC618: "LinearizationTable", + 0xC619: "BlackLevelRepeatDim", + 0xC61A: "BlackLevel", + 0xC61B: "BlackLevelDeltaH", + 0xC61C: "BlackLevelDeltaV", + 0xC61D: "WhiteLevel", + 0xC61E: "DefaultScale", + 0xC61F: "DefaultCropOrigin", + 0xC620: "DefaultCropSize", + 0xC621: "ColorMatrix1", + 0xC622: "ColorMatrix2", + 0xC623: "CameraCalibration1", + 0xC624: "CameraCalibration2", + 0xC625: "ReductionMatrix1", + 0xC626: "ReductionMatrix2", + 0xC627: "AnalogBalance", + 0xC628: "AsShotNeutral", + 0xC629: "AsShotWhiteXY", + 0xC62A: "BaselineExposure", + 0xC62B: "BaselineNoise", + 0xC62C: "BaselineSharpness", + 0xC62D: "BayerGreenSplit", + 0xC62E: "LinearResponseLimit", + 0xC62F: "CameraSerialNumber", + 0xC630: "LensInfo", + 0xC631: "ChromaBlurRadius", + 0xC632: "AntiAliasStrength", + 0xC633: "ShadowScale", + 0xC634: "DNGPrivateData", + 0xC635: "MakerNoteSafety", + 0xC65A: "CalibrationIlluminant1", + 0xC65B: "CalibrationIlluminant2", + 0xC65C: "BestQualityScale", + 0xC65D: "RawDataUniqueID", + 0xC68B: "OriginalRawFileName", + 0xC68C: "OriginalRawFileData", + 0xC68D: "ActiveArea", + 0xC68E: "MaskedAreas", + 0xC68F: "AsShotICCProfile", + 0xC690: "AsShotPreProfileMatrix", + 0xC691: "CurrentICCProfile", + 0xC692: "CurrentPreProfileMatrix", + 0xC6BF: "ColorimetricReference", + 0xC6F3: "CameraCalibrationSignature", + 0xC6F4: "ProfileCalibrationSignature", + 0xC6F6: "AsShotProfileName", + 0xC6F7: "NoiseReductionApplied", + 0xC6F8: "ProfileName", + 0xC6F9: "ProfileHueSatMapDims", + 0xC6FA: "ProfileHueSatMapData1", + 0xC6FB: "ProfileHueSatMapData2", + 0xC6FC: "ProfileToneCurve", + 0xC6FD: "ProfileEmbedPolicy", + 0xC6FE: 
"ProfileCopyright", + 0xC714: "ForwardMatrix1", + 0xC715: "ForwardMatrix2", + 0xC716: "PreviewApplicationName", + 0xC717: "PreviewApplicationVersion", + 0xC718: "PreviewSettingsName", + 0xC719: "PreviewSettingsDigest", + 0xC71A: "PreviewColorSpace", + 0xC71B: "PreviewDateTime", + 0xC71C: "RawImageDigest", + 0xC71D: "OriginalRawFileDigest", + 0xC71E: "SubTileBlockSize", + 0xC71F: "RowInterleaveFactor", + 0xC725: "ProfileLookTableDims", + 0xC726: "ProfileLookTableData", + 0xC740: "OpcodeList1", + 0xC741: "OpcodeList2", + 0xC74E: "OpcodeList3", + 0xC761: "NoiseProfile", } ## diff --git a/server/www/packages/packages-linux/x64/PIL/FitsStubImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/FitsStubImagePlugin.py index 63c195c..7e6d35e 100644 --- a/server/www/packages/packages-linux/x64/PIL/FitsStubImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/FitsStubImagePlugin.py @@ -23,6 +23,7 @@ def register_handler(handler): global _handler _handler = handler + # -------------------------------------------------------------------- # Image adapter diff --git a/server/www/packages/packages-linux/x64/PIL/FliImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/FliImagePlugin.py index c78c8c6..82015e2 100644 --- a/server/www/packages/packages-linux/x64/PIL/FliImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/FliImagePlugin.py @@ -19,12 +19,15 @@ from . import Image, ImageFile, ImagePalette from ._binary import i8, i16le as i16, i32le as i32, o8 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.2" # # decoder + def _accept(prefix): return len(prefix) >= 6 and i16(prefix[4:6]) in [0xAF11, 0xAF12] @@ -33,6 +36,7 @@ def _accept(prefix): # Image plugin for the FLI/FLC animation format. Use the seek # method to load individual frames. 
+ class FliImageFile(ImageFile.ImageFile): format = "FLI" @@ -44,9 +48,11 @@ class FliImageFile(ImageFile.ImageFile): # HEAD s = self.fp.read(128) magic = i16(s[4:6]) - if not (magic in [0xAF11, 0xAF12] and - i16(s[14:16]) in [0, 3] and # flags - s[20:22] == b"\x00\x00"): # reserved + if not ( + magic in [0xAF11, 0xAF12] + and i16(s[14:16]) in [0, 3] # flags + and s[20:22] == b"\x00\x00" # reserved + ): raise SyntaxError("not an FLI/FLC file") # frames @@ -82,7 +88,7 @@ class FliImageFile(ImageFile.ImageFile): elif i16(s[4:6]) == 4: self._palette(palette, 0) - palette = [o8(r)+o8(g)+o8(b) for (r, g, b) in palette] + palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette] self.palette = ImagePalette.raw("RGB", b"".join(palette)) # set things up to decode first frame @@ -104,8 +110,8 @@ class FliImageFile(ImageFile.ImageFile): s = self.fp.read(n * 3) for n in range(0, len(s), 3): r = i8(s[n]) << shift - g = i8(s[n+1]) << shift - b = i8(s[n+2]) << shift + g = i8(s[n + 1]) << shift + b = i8(s[n + 2]) << shift palette[i] = (r, g, b) i += 1 @@ -131,6 +137,9 @@ class FliImageFile(ImageFile.ImageFile): self.__frame = -1 self.__fp.seek(self.__rewind) self.__offset = 128 + else: + # ensure that the previous frame was loaded + self.load() if frame != self.__frame + 1: raise ValueError("cannot seek to frame %d" % frame) @@ -147,13 +156,22 @@ class FliImageFile(ImageFile.ImageFile): framesize = i32(s) self.decodermaxblock = framesize - self.tile = [("fli", (0, 0)+self.size, self.__offset, None)] + self.tile = [("fli", (0, 0) + self.size, self.__offset, None)] self.__offset += framesize def tell(self): return self.__frame + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + # # registry diff --git a/server/www/packages/packages-linux/x64/PIL/FontFile.py b/server/www/packages/packages-linux/x64/PIL/FontFile.py index 305e8af..e57c2f3 100644 --- a/server/www/packages/packages-linux/x64/PIL/FontFile.py +++ b/server/www/packages/packages-linux/x64/PIL/FontFile.py @@ -17,6 +17,7 @@ from __future__ import print_function import os + from . import Image, _binary WIDTH = 800 @@ -33,6 +34,7 @@ def puti16(fp, values): ## # Base class for raster font file handlers. + class FontFile(object): bitmap = None @@ -46,7 +48,7 @@ class FontFile(object): return self.glyph[ix] def compile(self): - "Create metrics and bitmap" + """Create metrics and bitmap""" if self.bitmap: return @@ -61,7 +63,7 @@ class FontFile(object): w = w + (src[2] - src[0]) if w > WIDTH: lines += 1 - w = (src[2] - src[0]) + w = src[2] - src[0] maxwidth = max(maxwidth, w) xsize = maxwidth @@ -93,7 +95,7 @@ class FontFile(object): self.metrics[i] = d, dst, s def save(self, filename): - "Save font" + """Save font""" self.compile() @@ -103,7 +105,7 @@ class FontFile(object): # font metrics with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp: fp.write(b"PILfont\n") - fp.write((";;;;;;%d;\n" % self.ysize).encode('ascii')) # HACK!!! + fp.write((";;;;;;%d;\n" % self.ysize).encode("ascii")) # HACK!!! fp.write(b"DATA\n") for id in range(256): m = self.metrics[id] diff --git a/server/www/packages/packages-linux/x64/PIL/FpxImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/FpxImagePlugin.py index 9f284fd..15ebe0e 100644 --- a/server/www/packages/packages-linux/x64/PIL/FpxImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/FpxImagePlugin.py @@ -17,32 +17,35 @@ from __future__ import print_function -from . 
import Image, ImageFile -from ._binary import i32le as i32, i8 - import olefile +from . import Image, ImageFile +from ._binary import i8, i32le as i32 + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" # we map from colour field tuples to (mode, rawmode) descriptors MODES = { # opacity - (0x00007ffe): ("A", "L"), + (0x00007FFE): ("A", "L"), # monochrome (0x00010000,): ("L", "L"), - (0x00018000, 0x00017ffe): ("RGBA", "LA"), + (0x00018000, 0x00017FFE): ("RGBA", "LA"), # photo YCC (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"), - (0x00028000, 0x00028001, 0x00028002, 0x00027ffe): ("RGBA", "YCCA;P"), + (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"), # standard RGB (NIFRGB) (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"), - (0x00038000, 0x00038001, 0x00038002, 0x00037ffe): ("RGBA", "RGBA"), + (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"), } # # -------------------------------------------------------------------- + def _accept(prefix): return prefix[:8] == olefile.MAGIC @@ -50,6 +53,7 @@ def _accept(prefix): ## # Image plugin for the FlashPix images. + class FpxImageFile(ImageFile.ImageFile): format = "FPX" @@ -74,10 +78,9 @@ class FpxImageFile(ImageFile.ImageFile): # # get the Image Contents Property Set - prop = self.ole.getproperties([ - "Data Object Store %06d" % index, - "\005Image Contents" - ]) + prop = self.ole.getproperties( + ["Data Object Store %06d" % index, "\005Image Contents"] + ) # size (highest resolution) @@ -103,7 +106,7 @@ class FpxImageFile(ImageFile.ImageFile): colors = [] for i in range(i32(s, 4)): # note: for now, we ignore the "uncalibrated" flag - colors.append(i32(s, 8+i*4) & 0x7fffffff) + colors.append(i32(s, 8 + i * 4) & 0x7FFFFFFF) self.mode, self.rawmode = MODES[tuple(colors)] @@ -123,7 +126,7 @@ class FpxImageFile(ImageFile.ImageFile): stream = [ "Data Object Store %06d" % index, "Resolution %04d" % subimage, - "Subimage 0000 Header" + "Subimage 0000 Header", ] fp = self.ole.openstream(stream) @@ -155,17 +158,29 @@ class FpxImageFile(ImageFile.ImageFile): for i in range(0, len(s), length): - compression = i32(s, i+8) + compression = i32(s, i + 8) if compression == 0: - self.tile.append(("raw", (x, y, x+xtile, y+ytile), - i32(s, i) + 28, (self.rawmode))) + self.tile.append( + ( + "raw", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (self.rawmode), + ) + ) elif compression == 1: # FIXME: the fill decoder is not implemented - self.tile.append(("fill", (x, y, x+xtile, y+ytile), - i32(s, i) + 28, (self.rawmode, s[12:16]))) + self.tile.append( + ( + "fill", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (self.rawmode, s[12:16]), + ) + ) elif compression == 2: @@ -187,8 +202,14 @@ class FpxImageFile(ImageFile.ImageFile): # The image is stored as defined by rawmode jpegmode = rawmode - self.tile.append(("jpeg", (x, y, x+xtile, y+ytile), - i32(s, i) + 28, (rawmode, jpegmode))) + self.tile.append( + ( + "jpeg", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (rawmode, jpegmode), + ) + ) # FIXME: jpeg tables are tile dependent; the prefix # data must be placed in the tile descriptor itself! 
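Note: every tile reflow in this FPX hunk keeps ImageFile's four-field tile descriptor, (decoder, bounding box, byte offset, decoder params); only the line wrapping changes. A shape sketch with illustrative values, not data from a real FPX stream:

    tile = [
        (
            "raw",            # decoder name registered with the core
            (0, 0, 64, 64),   # left, upper, right, lower
            28,               # byte offset of the tile data in the file
            ("RGB",),         # decoder-specific params (rawmode here)
        )
    ]
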
@@ -211,11 +232,11 @@ class FpxImageFile(ImageFile.ImageFile): def load(self): if not self.fp: - self.fp = self.ole.openstream(self.stream[:2] + - ["Subimage 0000 Data"]) + self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"]) return ImageFile.ImageFile.load(self) + # # -------------------------------------------------------------------- diff --git a/server/www/packages/packages-linux/x64/PIL/FtexImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/FtexImagePlugin.py index 08ce0e0..06f4a72 100644 --- a/server/www/packages/packages-linux/x64/PIL/FtexImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/FtexImagePlugin.py @@ -20,7 +20,14 @@ has the following structure: {format_directory} {data} Where: -{header} = { u32:magic, u32:version, u32:width, u32:height, u32:mipmap_count, u32:format_count } +{header} = { + u32:magic, + u32:version, + u32:width, + u32:height, + u32:mipmap_count, + u32:format_count +} * The "magic" number is "FTEX". * "width" and "height" are the dimensions of the texture. @@ -46,8 +53,8 @@ Note: All data is stored in little-Endian (Intel) byte order. import struct from io import BytesIO -from . import Image, ImageFile +from . import Image, ImageFile MAGIC = b"FTEX" FORMAT_DXT1 = 0 @@ -59,8 +66,8 @@ class FtexImageFile(ImageFile.ImageFile): format_description = "Texture File Format (IW2:EOC)" def _open(self): - magic = struct.unpack("= 8 and \ - i32(prefix[:4]) >= 20 and i32(prefix[4:8]) in (1, 2) + return len(prefix) >= 8 and i32(prefix[:4]) >= 20 and i32(prefix[4:8]) in (1, 2) ## # Image plugin for the GIMP brush format. + class GbrImageFile(ImageFile.ImageFile): format = "GBR" @@ -55,24 +55,23 @@ class GbrImageFile(ImageFile.ImageFile): if width <= 0 or height <= 0: raise SyntaxError("not a GIMP brush") if color_depth not in (1, 4): - raise SyntaxError( - "Unsupported GIMP brush color depth: %s" % color_depth) + raise SyntaxError("Unsupported GIMP brush color depth: %s" % color_depth) if version == 1: - comment_length = header_size-20 + comment_length = header_size - 20 else: - comment_length = header_size-28 + comment_length = header_size - 28 magic_number = self.fp.read(4) - if magic_number != b'GIMP': + if magic_number != b"GIMP": raise SyntaxError("not a GIMP brush, bad magic number") - self.info['spacing'] = i32(self.fp.read(4)) + self.info["spacing"] = i32(self.fp.read(4)) comment = self.fp.read(comment_length)[:-1] if color_depth == 1: self.mode = "L" else: - self.mode = 'RGBA' + self.mode = "RGBA" self._size = width, height @@ -88,6 +87,7 @@ class GbrImageFile(ImageFile.ImageFile): self.im = Image.core.new(self.mode, self.size) self.frombytes(self.fp.read(self._data_size)) + # # registry diff --git a/server/www/packages/packages-linux/x64/PIL/GdImageFile.py b/server/www/packages/packages-linux/x64/PIL/GdImageFile.py index 1361542..2d49235 100644 --- a/server/www/packages/packages-linux/x64/PIL/GdImageFile.py +++ b/server/www/packages/packages-linux/x64/PIL/GdImageFile.py @@ -26,6 +26,8 @@ from . import ImageFile, ImagePalette from ._binary import i8, i16be as i16, i32be as i32 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" @@ -35,6 +37,7 @@ __version__ = "0.1" # this plugin, you have to import the GdImageFile module and # use the GdImageFile.open function. 
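Note: as the comment above says, this handler is deliberately left unregistered, so Image.open() never dispatches to it. A usage sketch (the .gd file name is hypothetical):

    from PIL import GdImageFile

    with open("chart.gd", "rb") as fp:  # hypothetical file
        im = GdImageFile.open(fp)
        print(im.format, im.size, im.mode)
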
+ class GdImageFile(ImageFile.ImageFile): format = "GD" @@ -55,14 +58,17 @@ class GdImageFile(ImageFile.ImageFile): trueColorOffset = 2 if trueColor else 0 # transparency index - tindex = i32(s[7+trueColorOffset:7+trueColorOffset+4]) + tindex = i32(s[7 + trueColorOffset : 7 + trueColorOffset + 4]) if tindex < 256: self.info["transparency"] = tindex - self.palette = ImagePalette.raw("XBGR", s[7+trueColorOffset+4:7+trueColorOffset+4+256*4]) + self.palette = ImagePalette.raw( + "XBGR", s[7 + trueColorOffset + 4 : 7 + trueColorOffset + 4 + 256 * 4] + ) - self.tile = [("raw", (0, 0)+self.size, 7+trueColorOffset+4+256*4, - ("L", 0, 1))] + self.tile = [ + ("raw", (0, 0) + self.size, 7 + trueColorOffset + 4 + 256 * 4, ("L", 0, 1)) + ] def open(fp, mode="r"): diff --git a/server/www/packages/packages-linux/x64/PIL/GifImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/GifImagePlugin.py index 107c015..9d8e96f 100644 --- a/server/www/packages/packages-linux/x64/PIL/GifImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/GifImagePlugin.py @@ -24,17 +24,20 @@ # See the README file for information on usage and redistribution. # -from . import Image, ImageFile, ImagePalette, ImageChops, ImageSequence -from ._binary import i8, i16le as i16, o8, o16le as o16 - import itertools +from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence +from ._binary import i8, i16le as i16, o8, o16le as o16 + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.9" # -------------------------------------------------------------------- # Identify/read GIF files + def _accept(prefix): return prefix[:6] in [b"GIF87a", b"GIF89a"] @@ -43,6 +46,7 @@ def _accept(prefix): # Image plugin for GIF images. This plugin supports both GIF87 and # GIF89 images. 
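Note: frames in this plugin are exposed through the seek()/tell() protocol, and ImageSequence (now imported at the top of the module) wraps that into an iterator. A usage sketch (file name hypothetical):

    from PIL import Image, ImageSequence

    im = Image.open("animation.gif")
    for frame in ImageSequence.Iterator(im):
        print(frame.tell(), frame.info.get("duration"))
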
+ class GifImageFile(ImageFile.ImageFile): format = "GIF" @@ -76,7 +80,7 @@ class GifImageFile(ImageFile.ImageFile): # check if palette contains colour indices p = self.fp.read(3 << bits) for i in range(0, len(p), 3): - if not (i//3 == i8(p[i]) == i8(p[i+1]) == i8(p[i+2])): + if not (i // 3 == i8(p[i]) == i8(p[i + 1]) == i8(p[i + 2])): p = ImagePalette.raw("RGB", p) self.global_palette = self.palette = p break @@ -120,6 +124,8 @@ class GifImageFile(ImageFile.ImageFile): if not self._seek_check(frame): return if frame < self.__frame: + if frame != 0: + self.im = None self._seek(0) last_frame = self.__frame @@ -164,6 +170,7 @@ class GifImageFile(ImageFile.ImageFile): self.im.paste(self.dispose, self.dispose_extent) from copy import copy + self.palette = copy(self.global_palette) info = {} @@ -201,7 +208,13 @@ class GifImageFile(ImageFile.ImageFile): # # comment extension # - info["comment"] = block + while block: + if "comment" in info: + info["comment"] += block + else: + info["comment"] = block + block = self.data() + continue elif i8(s) == 255: # # application extension @@ -223,6 +236,8 @@ class GifImageFile(ImageFile.ImageFile): # extent x0, y0 = i16(s[0:]), i16(s[2:]) x1, y1 = x0 + i16(s[4:]), y0 + i16(s[6:]) + if x1 > self.size[0] or y1 > self.size[1]: + self._size = max(x1, self.size[0]), max(y1, self.size[1]) self.dispose_extent = x0, y0, x1, y1 flags = i8(s[8]) @@ -230,16 +245,14 @@ class GifImageFile(ImageFile.ImageFile): if flags & 128: bits = (flags & 7) + 1 - self.palette =\ - ImagePalette.raw("RGB", self.fp.read(3 << bits)) + self.palette = ImagePalette.raw("RGB", self.fp.read(3 << bits)) # image data bits = i8(self.fp.read(1)) self.__offset = self.fp.tell() - self.tile = [("gif", - (x0, y0, x1, y1), - self.__offset, - (bits, interlace))] + self.tile = [ + ("gif", (x0, y0, x1, y1), self.__offset, (bits, interlace)) + ] break else: @@ -252,8 +265,8 @@ class GifImageFile(ImageFile.ImageFile): self.dispose = None elif self.disposal_method == 2: # replace with background colour - self.dispose = Image.core.fill("P", self.size, - self.info["background"]) + Image._decompression_bomb_check(self.size) + self.dispose = Image.core.fill("P", self.size, self.info["background"]) else: # replace with previous contents if self.im: @@ -291,20 +304,25 @@ class GifImageFile(ImageFile.ImageFile): # we do this by pasting the updated area onto the previous # frame which we then use as the current image content updated = self._crop(self.im, self.dispose_extent) - self._prev_im.paste(updated, self.dispose_extent, - updated.convert('RGBA')) + self._prev_im.paste(updated, self.dispose_extent, updated.convert("RGBA")) self.im = self._prev_im self._prev_im = self.im.copy() + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + # -------------------------------------------------------------------- # Write GIF files -RAWMODE = { - "1": "L", - "L": "L", - "P": "P" -} +RAWMODE = {"1": "L", "L": "L", "P": "P"} def _normalize_mode(im, initial_call=False): @@ -355,19 +373,23 @@ def _normalize_palette(im, palette, info): if isinstance(palette, (bytes, bytearray, list)): source_palette = bytearray(palette[:768]) if isinstance(palette, ImagePalette.ImagePalette): - source_palette = bytearray(itertools.chain.from_iterable( - zip(palette.palette[:256], - palette.palette[256:512], - palette.palette[512:768]))) + source_palette = bytearray( + itertools.chain.from_iterable( + zip( + palette.palette[:256], + palette.palette[256:512], 
+ palette.palette[512:768], + ) + ) + ) if im.mode == "P": if not source_palette: source_palette = im.im.getpalette("RGB")[:768] else: # L-mode if not source_palette: - source_palette = bytearray(i//3 for i in range(768)) - im.palette = ImagePalette.ImagePalette("RGB", - palette=source_palette) + source_palette = bytearray(i // 3 for i in range(768)) + im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette) used_palette_colors = _get_optimize(im, info) if used_palette_colors is not None: @@ -379,6 +401,8 @@ def _normalize_palette(im, palette, info): def _write_single_frame(im, fp, palette): im_out = _normalize_mode(im, True) + for k, v in im_out.info.items(): + im.encoderinfo.setdefault(k, v) im_out = _normalize_palette(im_out, palette, im.encoderinfo) for s in _get_global_header(im_out, im.encoderinfo): @@ -391,29 +415,31 @@ def _write_single_frame(im, fp, palette): _write_local_header(fp, im, (0, 0), flags) im_out.encoderconfig = (8, get_interlace(im)) - ImageFile._save(im_out, fp, [("gif", (0, 0)+im.size, 0, - RAWMODE[im_out.mode])]) + ImageFile._save(im_out, fp, [("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])]) fp.write(b"\0") # end of image data def _write_multiple_frames(im, fp, palette): - duration = im.encoderinfo.get("duration", None) - disposal = im.encoderinfo.get('disposal', None) + duration = im.encoderinfo.get("duration", im.info.get("duration")) + disposal = im.encoderinfo.get("disposal", im.info.get("disposal")) im_frames = [] frame_count = 0 - for imSequence in itertools.chain([im], - im.encoderinfo.get("append_images", [])): + background_im = None + for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])): for im_frame in ImageSequence.Iterator(imSequence): # a copy is required here since seek can still mutate the image im_frame = _normalize_mode(im_frame.copy()) + if frame_count == 0: + for k, v in im_frame.info.items(): + im.encoderinfo.setdefault(k, v) im_frame = _normalize_palette(im_frame, palette, im.encoderinfo) encoderinfo = im.encoderinfo.copy() if isinstance(duration, (list, tuple)): - encoderinfo['duration'] = duration[frame_count] + encoderinfo["duration"] = duration[frame_count] if isinstance(disposal, (list, tuple)): encoderinfo["disposal"] = disposal[frame_count] frame_count += 1 @@ -421,45 +447,54 @@ def _write_multiple_frames(im, fp, palette): if im_frames: # delta frame previous = im_frames[-1] - if _get_palette_bytes(im_frame) == \ - _get_palette_bytes(previous['im']): - delta = ImageChops.subtract_modulo(im_frame, - previous['im']) + if encoderinfo.get("disposal") == 2: + if background_im is None: + background = _get_background( + im, + im.encoderinfo.get("background", im.info.get("background")), + ) + background_im = Image.new("P", im_frame.size, background) + background_im.putpalette(im_frames[0]["im"].palette) + base_im = background_im + else: + base_im = previous["im"] + if _get_palette_bytes(im_frame) == _get_palette_bytes(base_im): + delta = ImageChops.subtract_modulo(im_frame, base_im) else: delta = ImageChops.subtract_modulo( - im_frame.convert('RGB'), previous['im'].convert('RGB')) + im_frame.convert("RGB"), base_im.convert("RGB") + ) bbox = delta.getbbox() if not bbox: # This frame is identical to the previous frame if duration: - previous['encoderinfo']['duration'] += \ - encoderinfo['duration'] + previous["encoderinfo"]["duration"] += encoderinfo["duration"] continue else: bbox = None - im_frames.append({ - 'im': im_frame, - 'bbox': bbox, - 'encoderinfo': encoderinfo - }) + im_frames.append({"im": 
im_frame, "bbox": bbox, "encoderinfo": encoderinfo}) if len(im_frames) > 1: for frame_data in im_frames: - im_frame = frame_data['im'] - if not frame_data['bbox']: + im_frame = frame_data["im"] + if not frame_data["bbox"]: # global header - for s in _get_global_header(im_frame, - frame_data['encoderinfo']): + for s in _get_global_header(im_frame, frame_data["encoderinfo"]): fp.write(s) offset = (0, 0) else: # compress difference - frame_data['encoderinfo']['include_color_table'] = True + frame_data["encoderinfo"]["include_color_table"] = True - im_frame = im_frame.crop(frame_data['bbox']) - offset = frame_data['bbox'][:2] - _write_frame_data(fp, im_frame, offset, frame_data['encoderinfo']) + im_frame = im_frame.crop(frame_data["bbox"]) + offset = frame_data["bbox"][:2] + _write_frame_data(fp, im_frame, offset, frame_data["encoderinfo"]) return True + elif "duration" in im.encoderinfo and isinstance( + im.encoderinfo["duration"], (list, tuple) + ): + # Since multiple frames will not be written, add together the frame durations + im.encoderinfo["duration"] = sum(im.encoderinfo["duration"]) def _save_all(im, fp, filename): @@ -467,12 +502,10 @@ def _save_all(im, fp, filename): def _save(im, fp, filename, save_all=False): - for k, v in im.info.items(): - im.encoderinfo.setdefault(k, v) # header - try: - palette = im.encoderinfo["palette"] - except KeyError: + if "palette" in im.encoderinfo or "palette" in im.info: + palette = im.encoderinfo.get("palette", im.info.get("palette")) + else: palette = None im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True) @@ -519,7 +552,7 @@ def _write_local_header(fp, im, offset, flags): else: duration = 0 - disposal = int(im.encoderinfo.get('disposal', 0)) + disposal = int(im.encoderinfo.get("disposal", 0)) if transparent_color_exists or duration != 0 or disposal: packed_flag = 1 if transparent_color_exists else 0 @@ -527,48 +560,53 @@ def _write_local_header(fp, im, offset, flags): if not transparent_color_exists: transparency = 0 - fp.write(b"!" + - o8(249) + # extension intro - o8(4) + # length - o8(packed_flag) + # packed fields - o16(duration) + # duration - o8(transparency) + # transparency index - o8(0)) + fp.write( + b"!" + + o8(249) # extension intro + + o8(4) # length + + o8(packed_flag) # packed fields + + o16(duration) # duration + + o8(transparency) # transparency index + + o8(0) + ) - if "comment" in im.encoderinfo and \ - 1 <= len(im.encoderinfo["comment"]) <= 255: - fp.write(b"!" + - o8(254) + # extension intro - o8(len(im.encoderinfo["comment"])) + - im.encoderinfo["comment"] + - o8(0)) + if "comment" in im.encoderinfo and 1 <= len(im.encoderinfo["comment"]): + fp.write(b"!" + o8(254)) # extension intro + for i in range(0, len(im.encoderinfo["comment"]), 255): + subblock = im.encoderinfo["comment"][i : i + 255] + fp.write(o8(len(subblock)) + subblock) + fp.write(o8(0)) if "loop" in im.encoderinfo: number_of_loops = im.encoderinfo["loop"] - fp.write(b"!" + - o8(255) + # extension intro - o8(11) + - b"NETSCAPE2.0" + - o8(3) + - o8(1) + - o16(number_of_loops) + # number of loops - o8(0)) - include_color_table = im.encoderinfo.get('include_color_table') + fp.write( + b"!" 
+ + o8(255) # extension intro + + o8(11) + + b"NETSCAPE2.0" + + o8(3) + + o8(1) + + o16(number_of_loops) # number of loops + + o8(0) + ) + include_color_table = im.encoderinfo.get("include_color_table") if include_color_table: palette_bytes = _get_palette_bytes(im) color_table_size = _get_color_table_size(palette_bytes) if color_table_size: - flags = flags | 128 # local color table flag + flags = flags | 128 # local color table flag flags = flags | color_table_size - fp.write(b"," + - o16(offset[0]) + # offset - o16(offset[1]) + - o16(im.size[0]) + # size - o16(im.size[1]) + - o8(flags)) # flags + fp.write( + b"," + + o16(offset[0]) # offset + + o16(offset[1]) + + o16(im.size[0]) # size + + o16(im.size[1]) + + o8(flags) # flags + ) if include_color_table and color_table_size: fp.write(_get_header_palette(palette_bytes)) - fp.write(o8(8)) # bits + fp.write(o8(8)) # bits def _save_netpbm(im, fp, filename): @@ -582,21 +620,23 @@ def _save_netpbm(im, fp, filename): import os from subprocess import Popen, check_call, PIPE, CalledProcessError - file = im._dump() - with open(filename, 'wb') as f: + tempfile = im._dump() + + with open(filename, "wb") as f: if im.mode != "RGB": - with open(os.devnull, 'wb') as devnull: - check_call(["ppmtogif", file], stdout=f, stderr=devnull) + with open(os.devnull, "wb") as devnull: + check_call(["ppmtogif", tempfile], stdout=f, stderr=devnull) else: # Pipe ppmquant output into ppmtogif - # "ppmquant 256 %s | ppmtogif > %s" % (file, filename) - quant_cmd = ["ppmquant", "256", file] + # "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename) + quant_cmd = ["ppmquant", "256", tempfile] togif_cmd = ["ppmtogif"] - with open(os.devnull, 'wb') as devnull: + with open(os.devnull, "wb") as devnull: quant_proc = Popen(quant_cmd, stdout=PIPE, stderr=devnull) - togif_proc = Popen(togif_cmd, stdin=quant_proc.stdout, - stdout=f, stderr=devnull) + togif_proc = Popen( + togif_cmd, stdin=quant_proc.stdout, stdout=f, stderr=devnull + ) # Allow ppmquant to receive SIGPIPE if ppmtogif exits quant_proc.stdout.close() @@ -610,7 +650,7 @@ def _save_netpbm(im, fp, filename): raise CalledProcessError(retcode, togif_cmd) try: - os.unlink(file) + os.unlink(tempfile) except OSError: pass @@ -642,7 +682,7 @@ def _get_optimize(im, info): # * If we have a 'large' image, the palette is in the noise. 
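Note: the GIF screen descriptor encodes an n-colour global table as log2(n) - 1, which is what the reworked _get_color_table_size below computes. A quick check of the arithmetic (the body mirrors the new implementation):

    import math

    def color_table_size(palette_bytes):
        if not palette_bytes:
            return 0
        elif len(palette_bytes) < 9:
            return 1
        return int(math.ceil(math.log(len(palette_bytes) // 3, 2))) - 1

    print(color_table_size(b"\x00" * 768))  # 256 colours -> 7
    print(color_table_size(b"\x00" * 48))   # 16 colours  -> 3
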
# create the new palette if not every color is used - optimise = _FORCE_OPTIMIZE or im.mode == 'L' + optimise = _FORCE_OPTIMIZE or im.mode == "L" if optimise or im.width * im.height < 512 * 512: # check which colors are used used_palette_colors = [] for i, count in enumerate(im.histogram()): if count: used_palette_colors.append(i) - if optimise or (len(used_palette_colors) <= 128 and - max(used_palette_colors) > len(used_palette_colors)): + if optimise or ( + len(used_palette_colors) <= 128 + and max(used_palette_colors) > len(used_palette_colors) + ): return used_palette_colors def _get_color_table_size(palette_bytes): # calculate the palette size for the header import math - color_table_size = int(math.ceil(math.log(len(palette_bytes)//3, 2)))-1 - if color_table_size < 0: - color_table_size = 0 - return color_table_size + + if not palette_bytes: + return 0 + elif len(palette_bytes) < 9: + return 1 + else: + return int(math.ceil(math.log(len(palette_bytes) // 3, 2))) - 1 def _get_header_palette(palette_bytes): @@ -676,7 +721,7 @@ def _get_header_palette(palette_bytes): # add the missing amount of bytes # the palette has to be 2<<n in size - actual_target_size_diff = (2 << color_table_size) - len(palette_bytes)//3 + actual_target_size_diff = (2 << color_table_size) - len(palette_bytes) // 3 if actual_target_size_diff > 0: palette_bytes += o8(0) * 3 * actual_target_size_diff return palette_bytes @@ -692,6 +737,18 @@ def _get_palette_bytes(im): return im.palette.palette +def _get_background(im, infoBackground): + background = 0 + if infoBackground: + background = infoBackground + if isinstance(background, tuple): + # WebPImagePlugin stores an RGBA value in info["background"] + # So it must be converted to the same format as GifImagePlugin's + # info["background"] - a global color table index + background = im.palette.getcolor(background) + return background + + def _get_global_header(im, info): """Return a list of strings representing a GIF header""" @@ -701,9 +758,9 @@ def _get_global_header(im, info): version = b"87a" for extensionKey in ["transparency", "duration", "loop", "comment"]: if info and extensionKey in info: - if ((extensionKey == "duration" and info[extensionKey] == 0) or - (extensionKey == "comment" and - not (1 <= len(info[extensionKey]) <= 255))): + if (extensionKey == "duration" and info[extensionKey] == 0) or ( + extensionKey == "comment" and not (1 <= len(info[extensionKey]) <= 255) + ): continue version = b"89a" break @@ -711,24 +768,23 @@ if im.info.get("version") == b"89a": version = b"89a" + background = _get_background(im, info.get("background")) + palette_bytes = _get_palette_bytes(im) color_table_size = _get_color_table_size(palette_bytes) - background = info["background"] if "background" in info else 0 - return [ - b"GIF"+version + # signature + version - o16(im.size[0]) + # canvas width - o16(im.size[1]), # canvas height - + b"GIF" # signature + + version # version + + o16(im.size[0]) # canvas width + + o16(im.size[1]), # canvas height # Logical Screen Descriptor # size of global color table + global color table flag - o8(color_table_size + 128), # packed fields + o8(color_table_size + 128), # packed fields # background + reserved/aspect o8(background) + o8(0), - # Global Color Table - _get_header_palette(palette_bytes) + _get_header_palette(palette_bytes), ] @@ -739,13 +795,15 @@ def _write_frame_data(fp, im_frame, offset, params): # local image header _write_local_header(fp, im_frame, offset, 0) - ImageFile._save(im_frame, fp, [("gif", (0, 0)+im_frame.size, 0, - RAWMODE[im_frame.mode])]) + ImageFile._save( + im_frame, fp, [("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])] + ) fp.write(b"\0") # end
of image data finally: del im_frame.encoderinfo + # -------------------------------------------------------------------- # Legacy GIF utilities @@ -794,6 +852,7 @@ def getdata(im, offset=(0, 0), **params): :returns: List of Bytes containing gif encoded frame data """ + class Collector(object): data = [] diff --git a/server/www/packages/packages-linux/x64/PIL/GimpGradientFile.py b/server/www/packages/packages-linux/x64/PIL/GimpGradientFile.py index 10593da..f48e7f7 100644 --- a/server/www/packages/packages-linux/x64/PIL/GimpGradientFile.py +++ b/server/www/packages/packages-linux/x64/PIL/GimpGradientFile.py @@ -13,7 +13,8 @@ # See the README file for information on usage and redistribution. # -from math import pi, log, sin, sqrt +from math import log, pi, sin, sqrt + from ._binary import o8 # -------------------------------------------------------------------- @@ -72,7 +73,7 @@ class GradientFile(object): for i in range(entries): - x = i / float(entries-1) + x = i / float(entries - 1) while x1 < x: ix += 1 @@ -100,8 +101,8 @@ class GradientFile(object): ## # File handler for GIMP's gradient format. -class GimpGradientFile(GradientFile): +class GimpGradientFile(GradientFile): def __init__(self, fp): if fp.readline()[:13] != b"GIMP Gradient": diff --git a/server/www/packages/packages-linux/x64/PIL/GimpPaletteFile.py b/server/www/packages/packages-linux/x64/PIL/GimpPaletteFile.py index 6eef6a2..2994bbe 100644 --- a/server/www/packages/packages-linux/x64/PIL/GimpPaletteFile.py +++ b/server/www/packages/packages-linux/x64/PIL/GimpPaletteFile.py @@ -15,31 +15,30 @@ # import re -from ._binary import o8 +from ._binary import o8 ## # File handler for GIMP's palette format. + class GimpPaletteFile(object): rawmode = "RGB" def __init__(self, fp): - self.palette = [o8(i)*3 for i in range(256)] + self.palette = [o8(i) * 3 for i in range(256)] if fp.readline()[:12] != b"GIMP Palette": raise SyntaxError("not a GIMP palette file") - i = 0 - - while i <= 255: + for i in range(256): s = fp.readline() - if not s: break + # skip fields and comment lines if re.match(br"\w+:|#", s): continue @@ -50,10 +49,7 @@ class GimpPaletteFile(object): if len(v) != 3: raise ValueError("bad palette entry") - if 0 <= i <= 255: - self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2]) - - i += 1 + self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2]) self.palette = b"".join(self.palette) diff --git a/server/www/packages/packages-linux/x64/PIL/GribStubImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/GribStubImagePlugin.py index 243ea2a..8a24a98 100644 --- a/server/www/packages/packages-linux/x64/PIL/GribStubImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/GribStubImagePlugin.py @@ -28,6 +28,7 @@ def register_handler(handler): # -------------------------------------------------------------------- # Image adapter + def _accept(prefix): return prefix[0:4] == b"GRIB" and i8(prefix[7]) == 1 diff --git a/server/www/packages/packages-linux/x64/PIL/Hdf5StubImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/Hdf5StubImagePlugin.py index 8783f80..a3ea12f 100644 --- a/server/www/packages/packages-linux/x64/PIL/Hdf5StubImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/Hdf5StubImagePlugin.py @@ -27,6 +27,7 @@ def register_handler(handler): # -------------------------------------------------------------------- # Image adapter + def _accept(prefix): return prefix[:8] == b"\x89HDF\r\n\x1a\n" diff --git a/server/www/packages/packages-linux/x64/PIL/IcnsImagePlugin.py 
b/server/www/packages/packages-linux/x64/PIL/IcnsImagePlugin.py index 21236d4..75ea18b 100644 --- a/server/www/packages/packages-linux/x64/PIL/IcnsImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/IcnsImagePlugin.py @@ -15,8 +15,6 @@ # See the README file for information on usage and redistribution. # -from PIL import Image, ImageFile, PngImagePlugin -from PIL._binary import i8 import io import os import shutil @@ -24,7 +22,10 @@ import struct import sys import tempfile -enable_jpeg2k = hasattr(Image.core, 'jp2klib_version') +from PIL import Image, ImageFile, PngImagePlugin +from PIL._binary import i8 + +enable_jpeg2k = hasattr(Image.core, "jp2klib_version") if enable_jpeg2k: from PIL import Jpeg2KImagePlugin @@ -32,7 +33,7 @@ HEADERSIZE = 8 def nextheader(fobj): - return struct.unpack('>4sI', fobj.read(HEADERSIZE)) + return struct.unpack(">4sI", fobj.read(HEADERSIZE)) def read_32t(fobj, start_length, size): @@ -40,8 +41,8 @@ def read_32t(fobj, start_length, size): (start, length) = start_length fobj.seek(start) sig = fobj.read(4) - if sig != b'\x00\x00\x00\x00': - raise SyntaxError('Unknown signature, expecting 0x00000000') + if sig != b"\x00\x00\x00\x00": + raise SyntaxError("Unknown signature, expecting 0x00000000") return read_32(fobj, (start + 4, length - 4), size) @@ -81,12 +82,8 @@ def read_32(fobj, start_length, size): if bytesleft <= 0: break if bytesleft != 0: - raise SyntaxError( - "Error reading channel [%r left]" % bytesleft - ) - band = Image.frombuffer( - "L", pixel_size, b"".join(data), "raw", "L", 0, 1 - ) + raise SyntaxError("Error reading channel [%r left]" % bytesleft) + band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1) im.im.putband(band.im, band_ix) return {"RGB": im} @@ -97,9 +94,7 @@ def read_mk(fobj, start_length, size): fobj.seek(start) pixel_size = (size[0] * size[2], size[1] * size[2]) sizesq = pixel_size[0] * pixel_size[1] - band = Image.frombuffer( - "L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1 - ) + band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1) return {"A": band} @@ -107,73 +102,58 @@ def read_png_or_jpeg2000(fobj, start_length, size): (start, length) = start_length fobj.seek(start) sig = fobj.read(12) - if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a': + if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a": fobj.seek(start) im = PngImagePlugin.PngImageFile(fobj) return {"RGBA": im} - elif sig[:4] == b'\xff\x4f\xff\x51' \ - or sig[:4] == b'\x0d\x0a\x87\x0a' \ - or sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a': + elif ( + sig[:4] == b"\xff\x4f\xff\x51" + or sig[:4] == b"\x0d\x0a\x87\x0a" + or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ): if not enable_jpeg2k: - raise ValueError('Unsupported icon subimage format (rebuild PIL ' - 'with JPEG 2000 support to fix this)') + raise ValueError( + "Unsupported icon subimage format (rebuild PIL " + "with JPEG 2000 support to fix this)" + ) # j2k, jpc or j2c fobj.seek(start) jp2kstream = fobj.read(length) f = io.BytesIO(jp2kstream) im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) - if im.mode != 'RGBA': - im = im.convert('RGBA') + if im.mode != "RGBA": + im = im.convert("RGBA") return {"RGBA": im} else: - raise ValueError('Unsupported icon subimage format') + raise ValueError("Unsupported icon subimage format") class IcnsFile(object): SIZES = { - (512, 512, 2): [ - (b'ic10', read_png_or_jpeg2000), - ], - (512, 512, 1): [ - (b'ic09', read_png_or_jpeg2000), - ], - (256, 256, 2): [ - (b'ic14', read_png_or_jpeg2000), - ], - (256, 256, 1): [ - (b'ic08', 
read_png_or_jpeg2000), - ], - (128, 128, 2): [ - (b'ic13', read_png_or_jpeg2000), - ], + (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)], + (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)], + (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)], + (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)], + (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)], (128, 128, 1): [ - (b'ic07', read_png_or_jpeg2000), - (b'it32', read_32t), - (b't8mk', read_mk), - ], - (64, 64, 1): [ - (b'icp6', read_png_or_jpeg2000), - ], - (32, 32, 2): [ - (b'ic12', read_png_or_jpeg2000), - ], - (48, 48, 1): [ - (b'ih32', read_32), - (b'h8mk', read_mk), + (b"ic07", read_png_or_jpeg2000), + (b"it32", read_32t), + (b"t8mk", read_mk), ], + (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)], + (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)], + (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)], (32, 32, 1): [ - (b'icp5', read_png_or_jpeg2000), - (b'il32', read_32), - (b'l8mk', read_mk), - ], - (16, 16, 2): [ - (b'ic11', read_png_or_jpeg2000), + (b"icp5", read_png_or_jpeg2000), + (b"il32", read_32), + (b"l8mk", read_mk), ], + (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)], (16, 16, 1): [ - (b'icp4', read_png_or_jpeg2000), - (b'is32', read_32), - (b's8mk', read_mk), + (b"icp4", read_png_or_jpeg2000), + (b"is32", read_32), + (b"s8mk", read_mk), ], } @@ -185,17 +165,17 @@ class IcnsFile(object): self.dct = dct = {} self.fobj = fobj sig, filesize = nextheader(fobj) - if sig != b'icns': - raise SyntaxError('not an icns file') + if sig != b"icns": + raise SyntaxError("not an icns file") i = HEADERSIZE while i < filesize: sig, blocksize = nextheader(fobj) if blocksize <= 0: - raise SyntaxError('invalid block header') + raise SyntaxError("invalid block header") i += HEADERSIZE blocksize -= HEADERSIZE dct[sig] = (i, blocksize) - fobj.seek(blocksize, 1) + fobj.seek(blocksize, io.SEEK_CUR) i += blocksize def itersizes(self): @@ -233,7 +213,7 @@ class IcnsFile(object): size = (size[0], size[1], 1) channels = self.dataforsize(size) - im = channels.get('RGBA', None) + im = channels.get("RGBA", None) if im: return im @@ -248,6 +228,7 @@ class IcnsFile(object): ## # Image plugin for Mac OS icons. + class IcnsImageFile(ImageFile.ImageFile): """ PIL image support for Mac OS .icns files. @@ -264,13 +245,13 @@ class IcnsImageFile(ImageFile.ImageFile): def _open(self): self.icns = IcnsFile(self.fp) - self.mode = 'RGBA' - self.info['sizes'] = self.icns.itersizes() + self.mode = "RGBA" + self.info["sizes"] = self.icns.itersizes() self.best_size = self.icns.bestsize() - self.size = (self.best_size[0] * self.best_size[2], - self.best_size[1] * self.best_size[2]) - # Just use this to see if it's loaded or not yet. 
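Note: IcnsFile.__init__ above walks the container as (4-byte type, big-endian u32 length) pairs, each length counting its own 8-byte header. A standalone sketch of the same traversal (function name ours):

    import io
    import struct

    def iter_icns_blocks(fobj):
        sig, filesize = struct.unpack(">4sI", fobj.read(8))
        if sig != b"icns":
            raise SyntaxError("not an icns file")
        read = 8
        while read < filesize:
            blocktype, blocksize = struct.unpack(">4sI", fobj.read(8))
            yield blocktype, blocksize - 8
            fobj.seek(blocksize - 8, io.SEEK_CUR)  # skip the payload
            read += blocksize
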
- self.tile = ('',) + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) @property def size(self): @@ -279,27 +260,33 @@ class IcnsImageFile(ImageFile.ImageFile): @size.setter def size(self, value): info_size = value - if info_size not in self.info['sizes'] and len(info_size) == 2: + if info_size not in self.info["sizes"] and len(info_size) == 2: info_size = (info_size[0], info_size[1], 1) - if info_size not in self.info['sizes'] and len(info_size) == 3 and \ - info_size[2] == 1: - simple_sizes = [(size[0] * size[2], size[1] * size[2]) - for size in self.info['sizes']] + if ( + info_size not in self.info["sizes"] + and len(info_size) == 3 + and info_size[2] == 1 + ): + simple_sizes = [ + (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"] + ] if value in simple_sizes: - info_size = self.info['sizes'][simple_sizes.index(value)] - if info_size not in self.info['sizes']: - raise ValueError( - "This is not one of the allowed sizes of this image") + info_size = self.info["sizes"][simple_sizes.index(value)] + if info_size not in self.info["sizes"]: + raise ValueError("This is not one of the allowed sizes of this image") self._size = value def load(self): if len(self.size) == 3: self.best_size = self.size - self.size = (self.best_size[0] * self.best_size[2], - self.best_size[1] * self.best_size[2]) + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) Image.Image.load(self) - if not self.tile: + if self.im and self.im.size == self.size: + # Already loaded return self.load_prepare() # This is likely NOT the best way to do it, but whatever. @@ -311,9 +298,6 @@ class IcnsImageFile(ImageFile.ImageFile): self.im = im.im self.mode = im.mode self.size = im.size - self.fp = None - self.icns = None - self.tile = () self.load_end() @@ -329,30 +313,30 @@ def _save(im, fp, filename): fp.flush() # create the temporary set of pngs - iconset = tempfile.mkdtemp('.iconset') - provided_images = {im.width: im - for im in im.encoderinfo.get("append_images", [])} + iconset = tempfile.mkdtemp(".iconset") + provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])} last_w = None + second_path = None for w in [16, 32, 128, 256, 512]: - prefix = 'icon_{}x{}'.format(w, w) + prefix = "icon_{}x{}".format(w, w) - first_path = os.path.join(iconset, prefix+'.png') + first_path = os.path.join(iconset, prefix + ".png") if last_w == w: shutil.copyfile(second_path, first_path) else: im_w = provided_images.get(w, im.resize((w, w), Image.LANCZOS)) im_w.save(first_path) - second_path = os.path.join(iconset, prefix+'@2x.png') - im_w2 = provided_images.get(w*2, im.resize((w*2, w*2), Image.LANCZOS)) + second_path = os.path.join(iconset, prefix + "@2x.png") + im_w2 = provided_images.get(w * 2, im.resize((w * 2, w * 2), Image.LANCZOS)) im_w2.save(second_path) - last_w = w*2 + last_w = w * 2 # iconutil -c icns -o {} {} from subprocess import Popen, PIPE, CalledProcessError convert_cmd = ["iconutil", "-c", "icns", "-o", filename, iconset] - with open(os.devnull, 'wb') as devnull: + with open(os.devnull, "wb") as devnull: convert_proc = Popen(convert_cmd, stdout=PIPE, stderr=devnull) convert_proc.stdout.close() @@ -366,29 +350,28 @@ def _save(im, fp, filename): raise CalledProcessError(retcode, convert_cmd) -Image.register_open(IcnsImageFile.format, IcnsImageFile, - lambda x: x[:4] == b'icns') -Image.register_extension(IcnsImageFile.format, '.icns') +Image.register_open(IcnsImageFile.format, 
IcnsImageFile, lambda x: x[:4] == b"icns") +Image.register_extension(IcnsImageFile.format, ".icns") -if sys.platform == 'darwin': +if sys.platform == "darwin": Image.register_save(IcnsImageFile.format, _save) Image.register_mime(IcnsImageFile.format, "image/icns") -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) < 2: print("Syntax: python IcnsImagePlugin.py [file]") sys.exit() - imf = IcnsImageFile(open(sys.argv[1], 'rb')) - for size in imf.info['sizes']: + imf = IcnsImageFile(open(sys.argv[1], "rb")) + for size in imf.info["sizes"]: imf.size = size imf.load() im = imf.im - im.save('out-%s-%s-%s.png' % size) + im.save("out-%s-%s-%s.png" % size) im = Image.open(sys.argv[1]) im.save("out.png") - if sys.platform == 'windows': + if sys.platform == "windows": os.startfile("out.png") diff --git a/server/www/packages/packages-linux/x64/PIL/IcoImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/IcoImagePlugin.py index 589ef3c..148e604 100644 --- a/server/www/packages/packages-linux/x64/PIL/IcoImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/IcoImagePlugin.py @@ -23,12 +23,15 @@ import struct +import warnings from io import BytesIO +from math import ceil, log -from . import Image, ImageFile, BmpImagePlugin, PngImagePlugin +from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin from ._binary import i8, i16le as i16, i32le as i32 -from math import log, ceil +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" # @@ -39,16 +42,20 @@ _MAGIC = b"\0\0\1\0" def _save(im, fp, filename): fp.write(_MAGIC) # (2+2) - sizes = im.encoderinfo.get("sizes", - [(16, 16), (24, 24), (32, 32), (48, 48), - (64, 64), (128, 128), (256, 256)]) + sizes = im.encoderinfo.get( + "sizes", + [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)], + ) width, height = im.size - sizes = filter(lambda x: False if (x[0] > width or x[1] > height or - x[0] > 256 or x[1] > 256) else True, - sizes) + sizes = filter( + lambda x: False + if (x[0] > width or x[1] > height or x[0] > 256 or x[1] > 256) + else True, + sizes, + ) sizes = list(sizes) fp.write(struct.pack("=8bpp) - 'reserved': i8(s[3]), - 'planes': i16(s[4:]), - 'bpp': i16(s[6:]), - 'size': i32(s[8:]), - 'offset': i32(s[12:]) + "width": i8(s[0]), + "height": i8(s[1]), + "nb_color": i8(s[2]), # No. of colors in image (0 if >=8bpp) + "reserved": i8(s[3]), + "planes": i16(s[4:]), + "bpp": i16(s[6:]), + "size": i32(s[8:]), + "offset": i32(s[12:]), } # See Wikipedia - for j in ('width', 'height'): + for j in ("width", "height"): if not icon_header[j]: icon_header[j] = 256 # See Wikipedia notes about color depth. 
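Note: the color_depth fallback computed just below resolves to the bpp field when set, otherwise to the bit count implied by the palette size, otherwise 256. A worked example of that expression (helper name ours):

    from math import ceil, log

    def color_depth(bpp, nb_color):
        return bpp or (nb_color != 0 and ceil(log(nb_color, 2))) or 256

    print(color_depth(32, 0))  # 32  (bpp wins)
    print(color_depth(0, 16))  # 4   (16-colour icon)
    print(color_depth(0, 0))   # 256 (neither field set)
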
# We need this just to differ images with equal sizes - icon_header['color_depth'] = (icon_header['bpp'] or - (icon_header['nb_color'] != 0 and - ceil(log(icon_header['nb_color'], - 2))) or 256) + icon_header["color_depth"] = ( + icon_header["bpp"] + or ( + icon_header["nb_color"] != 0 + and ceil(log(icon_header["nb_color"], 2)) + ) + or 256 + ) - icon_header['dim'] = (icon_header['width'], icon_header['height']) - icon_header['square'] = (icon_header['width'] * - icon_header['height']) + icon_header["dim"] = (icon_header["width"], icon_header["height"]) + icon_header["square"] = icon_header["width"] * icon_header["height"] self.entry.append(icon_header) - self.entry = sorted(self.entry, key=lambda x: x['color_depth']) + self.entry = sorted(self.entry, key=lambda x: x["color_depth"]) # ICO images are usually squares # self.entry = sorted(self.entry, key=lambda x: x['width']) - self.entry = sorted(self.entry, key=lambda x: x['square']) + self.entry = sorted(self.entry, key=lambda x: x["square"]) self.entry.reverse() def sizes(self): """ Get a list of all available icon sizes and color depths. """ - return {(h['width'], h['height']) for h in self.entry} + return {(h["width"], h["height"]) for h in self.entry} + + def getentryindex(self, size, bpp=False): + for (i, h) in enumerate(self.entry): + if size == h["dim"] and (bpp is False or bpp == h["color_depth"]): + return i + return 0 def getimage(self, size, bpp=False): """ Get an image from the icon """ - for (i, h) in enumerate(self.entry): - if size == h['dim'] and (bpp is False or bpp == h['color_depth']): - return self.frame(i) - return self.frame(0) + return self.frame(self.getentryindex(size, bpp)) def frame(self, idx): """ @@ -157,9 +170,9 @@ class IcoFile(object): header = self.entry[idx] - self.buf.seek(header['offset']) + self.buf.seek(header["offset"]) data = self.buf.read(8) - self.buf.seek(header['offset']) + self.buf.seek(header["offset"]) if data[:8] == PngImagePlugin._MAGIC: # png frame @@ -167,6 +180,7 @@ class IcoFile(object): else: # XOR + AND mask bmp frame im = BmpImagePlugin.DibImageFile(self.buf) + Image._decompression_bomb_check(im.size) # change tile dimension to only encompass XOR image im._size = (im.size[0], int(im.size[1] / 2)) @@ -194,11 +208,11 @@ class IcoFile(object): # convert to an 8bpp grayscale image mask = Image.frombuffer( - 'L', # 8bpp - im.size, # (w, h) - alpha_bytes, # source chars - 'raw', # raw decoder - ('L', 0, -1) # 8bpp inverted, unpadded, reversed + "L", # 8bpp + im.size, # (w, h) + alpha_bytes, # source chars + "raw", # raw decoder + ("L", 0, -1), # 8bpp inverted, unpadded, reversed ) else: # get AND image from end of bitmap @@ -210,8 +224,7 @@ class IcoFile(object): # the total mask data is # padded row size * height / bits per char - and_mask_offset = o + int(im.size[0] * im.size[1] * - (bpp / 8.0)) + and_mask_offset = o + int(im.size[0] * im.size[1] * (bpp / 8.0)) total_bytes = int((w * im.size[1]) / 8) self.buf.seek(and_mask_offset) @@ -219,17 +232,17 @@ class IcoFile(object): # convert raw data to image mask = Image.frombuffer( - '1', # 1 bpp - im.size, # (w, h) - mask_data, # source chars - 'raw', # raw decoder - ('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed + "1", # 1 bpp + im.size, # (w, h) + mask_data, # source chars + "raw", # raw decoder + ("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed ) # now we have two images, im is XOR image and mask is AND image # apply mask image as alpha channel - im = im.convert('RGBA') + im = im.convert("RGBA") im.putalpha(mask) 
return im @@ -238,6 +251,7 @@ class IcoFile(object): ## # Image plugin for Windows Icon files. + class IcoImageFile(ImageFile.ImageFile): """ PIL read-only image support for Microsoft Windows .ico files. @@ -250,17 +264,21 @@ class IcoImageFile(ImageFile.ImageFile): Handles classic, XP and Vista icon formats. + When saving, PNG compression is used. Support for this was only added in + Windows Vista. + This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis . https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki """ + format = "ICO" format_description = "Windows Icon" def _open(self): self.ico = IcoFile(self.fp) - self.info['sizes'] = self.ico.sizes() - self.size = self.ico.entry[0]['dim'] + self.info["sizes"] = self.ico.sizes() + self.size = self.ico.entry[0]["dim"] self.load() @property @@ -269,23 +287,35 @@ class IcoImageFile(ImageFile.ImageFile): @size.setter def size(self, value): - if value not in self.info['sizes']: - raise ValueError( - "This is not one of the allowed sizes of this image") + if value not in self.info["sizes"]: + raise ValueError("This is not one of the allowed sizes of this image") self._size = value def load(self): + if self.im and self.im.size == self.size: + # Already loaded + return im = self.ico.getimage(self.size) # if tile is PNG, it won't really be loaded yet im.load() self.im = im.im self.mode = im.mode - self.size = im.size + if im.size != self.size: + warnings.warn("Image was not the expected size") + + index = self.ico.getentryindex(self.size) + sizes = list(self.info["sizes"]) + sizes[index] = im.size + self.info["sizes"] = set(sizes) + + self.size = im.size def load_seek(self): # Flag the ImageFile.Parser so that it # just does all the decode at the end. pass + + # # -------------------------------------------------------------------- @@ -293,3 +323,5 @@ class IcoImageFile(ImageFile.ImageFile): Image.register_open(IcoImageFile.format, IcoImageFile, _accept) Image.register_save(IcoImageFile.format, _save) Image.register_extension(IcoImageFile.format, ".ico") + +Image.register_mime(IcoImageFile.format, "image/x-icon") diff --git a/server/www/packages/packages-linux/x64/PIL/ImImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/ImImagePlugin.py index 2896bb4..77127fa 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/ImImagePlugin.py @@ -27,9 +27,12 @@ import re + from . import Image, ImageFile, ImagePalette from ._binary import i8 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.7" @@ -46,8 +49,17 @@ SCALE = "Scale (x,y)" SIZE = "Image size (x*y)" MODE = "Image type" -TAGS = {COMMENT: 0, DATE: 0, EQUIPMENT: 0, FRAMES: 0, LUT: 0, NAME: 0, - SCALE: 0, SIZE: 0, MODE: 0} +TAGS = { + COMMENT: 0, + DATE: 0, + EQUIPMENT: 0, + FRAMES: 0, + LUT: 0, + NAME: 0, + SCALE: 0, + SIZE: 0, + MODE: 0, +} OPEN = { # ifunc93/p3cfunc formats @@ -69,6 +81,7 @@ OPEN = { "RYB3 image": ("RGB", "RYB;T"), # extensions "LA image": ("LA", "LA;L"), + "PA image": ("LA", "PA;L"), "RGBA image": ("RGBA", "RGBA;L"), "RGBX image": ("RGBX", "RGBX;L"), "CMYK image": ("CMYK", "CMYK;L"), @@ -105,6 +118,7 @@ def number(s): ## # Image plugin for the IFUNC IM file format. 
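Note: for context on the header parsing below, an IM file opens with a plain-text header of "Key: value\r\n" lines, NUL-padded to byte 511 and closed by a 0x1A marker before the raster data; the _save hunk further down writes exactly this layout. A tiny sketch (values illustrative):

    header = b"Image type: L image\r\nImage size (x*y): 8*8\r\n"
    header += b"\x00" * (511 - len(header)) + b"\x1a"  # pad, then end marker
    for line in header.rstrip(b"\x00\x1a").split(b"\r\n"):
        if b":" in line:
            key, _, value = line.partition(b":")
            print(key.decode("latin-1"), "=", value.strip().decode("latin-1"))
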
+ class ImImageFile(ImageFile.ImageFile): format = "IM" @@ -137,7 +151,7 @@ class ImImageFile(ImageFile.ImageFile): if s == b"\r": continue - if not s or s == b'\0' or s == b'\x1A': + if not s or s == b"\0" or s == b"\x1A": break # FIXME: this may read whole file if not a text file @@ -146,14 +160,14 @@ class ImImageFile(ImageFile.ImageFile): if len(s) > 100: raise SyntaxError("not an IM file") - if s[-2:] == b'\r\n': + if s[-2:] == b"\r\n": s = s[:-2] - elif s[-1:] == b'\n': + elif s[-1:] == b"\n": s = s[:-1] try: m = split.match(s) - except re.error as v: + except re.error: raise SyntaxError("not an IM file") if m: @@ -162,8 +176,8 @@ class ImImageFile(ImageFile.ImageFile): # Don't know if this is the correct encoding, # but a decent guess (I guess) - k = k.decode('latin-1', 'replace') - v = v.decode('latin-1', 'replace') + k = k.decode("latin-1", "replace") + v = v.decode("latin-1", "replace") # Convert value as appropriate if k in [FRAMES, SCALE, SIZE]: @@ -189,8 +203,9 @@ class ImImageFile(ImageFile.ImageFile): else: - raise SyntaxError("Syntax error in IM header: " + - s.decode('ascii', 'replace')) + raise SyntaxError( + "Syntax error in IM header: " + s.decode("ascii", "replace") + ) if not n: raise SyntaxError("Not an IM file") @@ -200,7 +215,7 @@ class ImImageFile(ImageFile.ImageFile): self.mode = self.info[MODE] # Skip forward to start of image data - while s and s[0:1] != b'\x1A': + while s and s[0:1] != b"\x1A": s = self.fp.read(1) if not s: raise SyntaxError("File truncated") @@ -211,20 +226,21 @@ class ImImageFile(ImageFile.ImageFile): greyscale = 1 # greyscale palette linear = 1 # linear greyscale palette for i in range(256): - if palette[i] == palette[i+256] == palette[i+512]: + if palette[i] == palette[i + 256] == palette[i + 512]: if i8(palette[i]) != i: linear = 0 else: greyscale = 0 - if self.mode == "L" or self.mode == "LA": + if self.mode in ["L", "LA", "P", "PA"]: if greyscale: if not linear: self.lut = [i8(c) for c in palette[:256]] else: - if self.mode == "L": + if self.mode in ["L", "P"]: self.mode = self.rawmode = "P" - elif self.mode == "LA": - self.mode = self.rawmode = "PA" + elif self.mode in ["LA", "PA"]: + self.mode = "PA" + self.rawmode = "PA;L" self.palette = ImagePalette.raw("RGB;L", palette) elif self.mode == "RGB": if not greyscale or not linear: @@ -243,8 +259,7 @@ class ImImageFile(ImageFile.ImageFile): # use bit decoder (if necessary) bits = int(self.rawmode[2:]) if bits not in [8, 16, 32]: - self.tile = [("bit", (0, 0)+self.size, offs, - (bits, 8, 3, 0, -1))] + self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))] return except ValueError: pass @@ -253,13 +268,14 @@ class ImImageFile(ImageFile.ImageFile): # Old LabEye/3PC files. 
Would be very surprised if anyone # ever stumbled upon such a file ;-) size = self.size[0] * self.size[1] - self.tile = [("raw", (0, 0)+self.size, offs, ("G", 0, -1)), - ("raw", (0, 0)+self.size, offs+size, ("R", 0, -1)), - ("raw", (0, 0)+self.size, offs+2*size, ("B", 0, -1))] + self.tile = [ + ("raw", (0, 0) + self.size, offs, ("G", 0, -1)), + ("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)), + ("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)), + ] else: # LabEye/IFUNC files - self.tile = [("raw", (0, 0)+self.size, offs, - (self.rawmode, 0, -1))] + self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] @property def n_frames(self): @@ -285,11 +301,21 @@ class ImImageFile(ImageFile.ImageFile): self.fp = self.__fp - self.tile = [("raw", (0, 0)+self.size, offs, (self.rawmode, 0, -1))] + self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] def tell(self): return self.frame + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + # # -------------------------------------------------------------------- # Save IM files @@ -311,7 +337,7 @@ SAVE = { "RGBA": ("RGBA", "RGBA;L"), "RGBX": ("RGBX", "RGBX;L"), "CMYK": ("CMYK", "CMYK;L"), - "YCbCr": ("YCC", "YCbCr;L") + "YCbCr": ("YCC", "YCbCr;L"), } @@ -324,17 +350,18 @@ def _save(im, fp, filename): frames = im.encoderinfo.get("frames", 1) - fp.write(("Image type: %s image\r\n" % image_type).encode('ascii')) + fp.write(("Image type: %s image\r\n" % image_type).encode("ascii")) if filename: - fp.write(("Name: %s\r\n" % filename).encode('ascii')) - fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii')) - fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii')) - if im.mode == "P": + fp.write(("Name: %s\r\n" % filename).encode("ascii")) + fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii")) + fp.write(("File size (no of images): %d\r\n" % frames).encode("ascii")) + if im.mode in ["P", "PA"]: fp.write(b"Lut: 1\r\n") - fp.write(b"\000" * (511-fp.tell()) + b"\032") - if im.mode == "P": + fp.write(b"\000" * (511 - fp.tell()) + b"\032") + if im.mode in ["P", "PA"]: fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes - ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, -1))]) + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))]) + # # -------------------------------------------------------------------- diff --git a/server/www/packages/packages-linux/x64/PIL/Image.py b/server/www/packages/packages-linux/x64/PIL/Image.py index edea312..0cdfcc9 100644 --- a/server/www/packages/packages-linux/x64/PIL/Image.py +++ b/server/www/packages/packages-linux/x64/PIL/Image.py @@ -24,15 +24,41 @@ # See the README file for information on usage and redistribution. # -# VERSION is deprecated and will be removed in Pillow 6.0.0. -# PILLOW_VERSION is deprecated and will be removed after that. -# Use __version__ instead. -from . import VERSION, PILLOW_VERSION, __version__, _plugins -from ._util import py3 - +import atexit +import io import logging -import warnings import math +import numbers +import os +import struct +import sys +import warnings + +# VERSION was removed in Pillow 6.0.0. +# PILLOW_VERSION is deprecated and will be removed in Pillow 7.0.0. +# Use __version__ instead. +from . 
import PILLOW_VERSION, ImageMode, TiffTags, __version__, _plugins +from ._binary import i8, i32le +from ._util import deferred_error, isPath, isStringType, py3 + +try: + import builtins +except ImportError: + import __builtin__ + + builtins = __builtin__ + + +try: + # Python 3 + from collections.abc import Callable, MutableMapping +except ImportError: + # Python 2.7 + from collections import Callable, MutableMapping + + +# Silence warning +assert PILLOW_VERSION logger = logging.getLogger(__name__) @@ -62,13 +88,13 @@ try: # Also note that Image.core is not a publicly documented interface, # and should be considered private and subject to change. from . import _imaging as core - if __version__ != getattr(core, 'PILLOW_VERSION', None): - raise ImportError("The _imaging extension was built for another " - "version of Pillow or PIL:\n" - "Core version: %s\n" - "Pillow version: %s" % - (getattr(core, 'PILLOW_VERSION', None), - __version__)) + + if __version__ != getattr(core, "PILLOW_VERSION", None): + raise ImportError( + "The _imaging extension was built for another version of Pillow or PIL:\n" + "Core version: %s\n" + "Pillow version: %s" % (getattr(core, "PILLOW_VERSION", None), __version__) + ) except ImportError as v: core = _imaging_not_installed() @@ -78,10 +104,9 @@ except ImportError as v: # the right version (windows only). Print a warning, if # possible. warnings.warn( - "The _imaging extension was built for another version " - "of Python.", - RuntimeWarning - ) + "The _imaging extension was built for another version of Python.", + RuntimeWarning, + ) elif str(v).startswith("The _imaging extension"): warnings.warn(str(v), RuntimeWarning) elif "Symbol not found: _PyUnicodeUCS2_" in str(v): @@ -90,60 +115,36 @@ except ImportError as v: warnings.warn( "The _imaging extension was built for Python with UCS2 support; " "recompile Pillow or build Python --without-wide-unicode. ", - RuntimeWarning - ) + RuntimeWarning, + ) elif "Symbol not found: _PyUnicodeUCS4_" in str(v): # should match _PyUnicodeUCS4_FromString and # _PyUnicodeUCS4_AsLatin1String warnings.warn( "The _imaging extension was built for Python with UCS4 support; " "recompile Pillow or build Python --with-wide-unicode. ", - RuntimeWarning - ) + RuntimeWarning, + ) # Fail here anyway. Don't let people run with a mostly broken Pillow. # see docs/porting.rst raise -try: - import builtins -except ImportError: - import __builtin__ - builtins = __builtin__ - -from . import ImageMode -from ._binary import i8 -from ._util import isPath, isStringType, deferred_error - -import os -import sys -import io -import struct -import atexit - -# type stuff -import numbers -try: - # Python 3 - from collections.abc import Callable -except ImportError: - # Python 2.7 - from collections import Callable - # works everywhere, win for pypy, not cpython -USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info') +USE_CFFI_ACCESS = hasattr(sys, "pypy_version_info") try: import cffi - HAS_CFFI = True except ImportError: - HAS_CFFI = False + cffi = None try: from pathlib import Path + HAS_PATHLIB = True except ImportError: try: from pathlib2 import Path + HAS_PATHLIB = True except ImportError: HAS_PATHLIB = False @@ -164,7 +165,7 @@ def isImageType(t): # -# Constants (also defined in _imagingmodule.c!) 
+# Constants

NONE = 0

@@ -177,14 +178,14 @@ ROTATE_270 = 4
 TRANSPOSE = 5
 TRANSVERSE = 6

-# transforms
+# transforms (also defined in Imaging.h)
 AFFINE = 0
 EXTENT = 1
 PERSPECTIVE = 2
 QUAD = 3
 MESH = 4

-# resampling filters
+# resampling filters (also defined in Imaging.h)
 NEAREST = NONE = 0
 BOX = 4
 BILINEAR = LINEAR = 2

@@ -212,7 +213,7 @@ NORMAL = 0
 SEQUENCE = 1
 CONTAINER = 2

-if hasattr(core, 'DEFAULT_STRATEGY'):
+if hasattr(core, "DEFAULT_STRATEGY"):
     DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
     FILTERED = core.FILTERED
     HUFFMAN_ONLY = core.HUFFMAN_ONLY

@@ -238,13 +239,12 @@ ENCODERS = {}
 _MODEINFO = {
     # NOTE: this table will be removed in future versions.  use
     # getmode* functions or ImageMode descriptors instead.
-
     # official modes
     "1": ("L", "L", ("1",)),
     "L": ("L", "L", ("L",)),
     "I": ("L", "I", ("I",)),
     "F": ("L", "F", ("F",)),
-    "P": ("RGB", "L", ("P",)),
+    "P": ("P", "L", ("P",)),
     "RGB": ("RGB", "L", ("R", "G", "B")),
     "RGBX": ("RGB", "L", ("R", "G", "B", "X")),
     "RGBA": ("RGB", "L", ("R", "G", "B", "A")),
@@ -252,46 +252,44 @@ _MODEINFO = {
     "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
     "LAB": ("RGB", "L", ("L", "A", "B")),
     "HSV": ("RGB", "L", ("H", "S", "V")),
-
     # Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
     # BGR;24.  Use these modes only if you know exactly what you're
     # doing...
-
 }

-if sys.byteorder == 'little':
-    _ENDIAN = '<'
+if sys.byteorder == "little":
+    _ENDIAN = "<"
 else:
-    _ENDIAN = '>'
+    _ENDIAN = ">"

 _MODE_CONV = {
     # official modes
-    "1": ('|b1', None),  # Bits need to be extended to bytes
-    "L": ('|u1', None),
-    "LA": ('|u1', 2),
-    "I": (_ENDIAN + 'i4', None),
-    "F": (_ENDIAN + 'f4', None),
-    "P": ('|u1', None),
-    "RGB": ('|u1', 3),
-    "RGBX": ('|u1', 4),
-    "RGBA": ('|u1', 4),
-    "CMYK": ('|u1', 4),
-    "YCbCr": ('|u1', 3),
-    "LAB": ('|u1', 3),  # UNDONE - unsigned |u1i1i1
-    "HSV": ('|u1', 3),
+    "1": ("|b1", None),  # Bits need to be extended to bytes
+    "L": ("|u1", None),
+    "LA": ("|u1", 2),
+    "I": (_ENDIAN + "i4", None),
+    "F": (_ENDIAN + "f4", None),
+    "P": ("|u1", None),
+    "RGB": ("|u1", 3),
+    "RGBX": ("|u1", 4),
+    "RGBA": ("|u1", 4),
+    "CMYK": ("|u1", 4),
+    "YCbCr": ("|u1", 3),
+    "LAB": ("|u1", 3),  # UNDONE - unsigned |u1i1i1
+    "HSV": ("|u1", 3),
     # I;16 == I;16L, and I;32 == I;32L
-    "I;16": ('<u2', None),
-    "I;16B": ('>u2', None),
-    "I;16L": ('<u2', None),
-    "I;16S": ('<i2', None),
-    "I;16BS": ('>i2', None),
-    "I;16LS": ('<i2', None),
-    "I;32": ('<u4', None),
-    "I;32B": ('>u4', None),
-    "I;32L": ('<u4', None),
-    "I;32S": ('<i4', None),
-    "I;32BS": ('>i4', None),
-    "I;32LS": ('<i4', None),
+    "I;16": ("<u2", None),
+    "I;16B": (">u2", None),
+    "I;16L": ("<u2", None),
+    "I;16S": ("<i2", None),
+    "I;16BS": (">i2", None),
+    "I;16LS": ("<i2", None),
+    "I;32": ("<u4", None),
+    "I;32B": (">u4", None),
+    "I;32L": ("<u4", None),
+    "I;32S": ("<i4", None),
+    "I;32BS": (">i4", None),
+    "I;32LS": ("<i4", None),
 }

@@
-    def __del__(self):
-        if (hasattr(self, 'fp') and hasattr(self, '_exclusive_fp')
-           and self.fp and self._exclusive_fp):
-            self.fp.close()
-        self.fp = None
+    if sys.version_info.major >= 3:
+
+        def __del__(self):
+            self.__exit__()

     def _copy(self):
         self.load()

@@ -619,9 +644,9 @@ class Image(object):

     def _dump(self, file=None, format=None, **options):
         import tempfile

-        suffix = ''
+        suffix = ""
         if format:
-            suffix = '.'+format
+            suffix = "." 
+ format if not file: f, filename = tempfile.mkstemp(suffix) @@ -641,35 +666,38 @@ class Image(object): return filename def __eq__(self, other): - return (isinstance(other, Image) and - self.__class__.__name__ == other.__class__.__name__ and - self.mode == other.mode and - self.size == other.size and - self.info == other.info and - self.category == other.category and - self.readonly == other.readonly and - self.getpalette() == other.getpalette() and - self.tobytes() == other.tobytes()) + return ( + self.__class__ is other.__class__ + and self.mode == other.mode + and self.size == other.size + and self.info == other.info + and self.category == other.category + and self.readonly == other.readonly + and self.getpalette() == other.getpalette() + and self.tobytes() == other.tobytes() + ) def __ne__(self, other): - eq = (self == other) + eq = self == other return not eq def __repr__(self): return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % ( - self.__class__.__module__, self.__class__.__name__, - self.mode, self.size[0], self.size[1], - id(self) - ) + self.__class__.__module__, + self.__class__.__name__, + self.mode, + self.size[0], + self.size[1], + id(self), + ) def _repr_png_(self): """ iPython display hook support :returns: png version of the image as bytes """ - from io import BytesIO - b = BytesIO() - self.save(b, 'PNG') + b = io.BytesIO() + self.save(b, "PNG") return b.getvalue() @property @@ -677,24 +705,19 @@ class Image(object): # numpy array interface support new = {} shape, typestr = _conv_type_shape(self) - new['shape'] = shape - new['typestr'] = typestr - new['version'] = 3 - if self.mode == '1': + new["shape"] = shape + new["typestr"] = typestr + new["version"] = 3 + if self.mode == "1": # Binary images need to be extended from bits to bytes # See: https://github.com/python-pillow/Pillow/issues/350 - new['data'] = self.tobytes('raw', 'L') + new["data"] = self.tobytes("raw", "L") else: - new['data'] = self.tobytes() + new["data"] = self.tobytes() return new def __getstate__(self): - return [ - self.info, - self.mode, - self.size, - self.getpalette(), - self.tobytes()] + return [self.info, self.mode, self.size, self.getpalette(), self.tobytes()] def __setstate__(self, state): Image.__init__(self) @@ -704,7 +727,7 @@ class Image(object): self.mode = mode self._size = size self.im = core.new(mode, size) - if mode in ("L", "P") and palette: + if mode in ("L", "LA", "P", "PA") and palette: self.putpalette(palette) self.frombytes(data) @@ -752,8 +775,9 @@ class Image(object): return b"".join(data) def tostring(self, *args, **kw): - raise NotImplementedError("tostring() has been removed. " - "Please call tobytes() instead.") + raise NotImplementedError( + "tostring() has been removed. Please call tobytes() instead." 
+ ) def tobitmap(self, name="image"): """ @@ -770,11 +794,15 @@ class Image(object): if self.mode != "1": raise ValueError("not a bitmap") data = self.tobytes("xbm") - return b"".join([ - ("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'), - ("#define %s_height %d\n" % (name, self.size[1])).encode('ascii'), - ("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};" - ]) + return b"".join( + [ + ("#define %s_width %d\n" % (name, self.size[0])).encode("ascii"), + ("#define %s_height %d\n" % (name, self.size[1])).encode("ascii"), + ("static char %s_bits[] = {\n" % name).encode("ascii"), + data, + b"};", + ] + ) def frombytes(self, data, decoder_name="raw", *args): """ @@ -803,8 +831,9 @@ class Image(object): raise ValueError("cannot decode image data") def fromstring(self, *args, **kw): - raise NotImplementedError("fromstring() has been removed. " - "Please call frombytes() instead.") + raise NotImplementedError( + "fromstring() has been removed. Please call frombytes() instead." + ) def load(self): """ @@ -813,8 +842,10 @@ class Image(object): Image class automatically loads an opened image when it is accessed for the first time. - This method will close the file associated with the image. See - :ref:`file-handling` for more information. + If the file associated with the image was opened by Pillow, then this + method will close it. The exception to this is if the image has + multiple frames, in which case the file will be left open for seek + operations. See :ref:`file-handling` for more information. :returns: An image access object. :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess` @@ -833,10 +864,11 @@ class Image(object): self.palette.mode = "RGBA" if self.im: - if HAS_CFFI and USE_CFFI_ACCESS: + if cffi and USE_CFFI_ACCESS: if self.pyaccess: return self.pyaccess from . import PyAccess + self.pyaccess = PyAccess.new(self, self.readonly) if self.pyaccess: return self.pyaccess @@ -853,8 +885,7 @@ class Image(object): """ pass - def convert(self, mode=None, matrix=None, dither=None, - palette=WEB, colors=256): + def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256): """ Returns a converted copy of this image. For the "P" mode, this method translates pixels through the palette. If mode is @@ -865,7 +896,7 @@ class Image(object): "L", "RGB" and "CMYK." The **matrix** argument only supports "L" and "RGB". - When translating a color image to black and white (mode "L"), + When translating a color image to greyscale (mode "L"), the library uses the ITU-R 601-2 luma transform:: L = R * 299/1000 + G * 587/1000 + B * 114/1000 @@ -873,9 +904,9 @@ class Image(object): The default method of converting a greyscale ("L") or "RGB" image into a bilevel (mode "1") image uses Floyd-Steinberg dither to approximate the original image luminosity levels. If - dither is NONE, all non-zero values are set to 255 (white). To - use other thresholds, use the :py:meth:`~PIL.Image.Image.point` - method. + dither is NONE, all values larger than 128 are set to 255 (white), + all other values to 0 (black). To use other thresholds, use the + :py:meth:`~PIL.Image.Image.point` method. 
When converting from "RGBA" to "P" without a **matrix** argument, this passes the operation to :py:meth:`~PIL.Image.Image.quantize`, @@ -907,7 +938,7 @@ class Image(object): if not mode or (mode == self.mode and not matrix): return self.copy() - has_transparency = self.info.get('transparency') is not None + has_transparency = self.info.get("transparency") is not None if matrix: # matrix conversion if mode not in ("L", "RGB"): @@ -915,19 +946,24 @@ class Image(object): im = self.im.convert_matrix(mode, matrix) new = self._new(im) if has_transparency and self.im.bands == 3: - transparency = new.info['transparency'] + transparency = new.info["transparency"] def convert_transparency(m, v): - v = m[0]*v[0] + m[1]*v[1] + m[2]*v[2] + m[3]*0.5 + v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5 return max(0, min(255, int(v))) + if mode == "L": transparency = convert_transparency(matrix, transparency) elif len(mode) == 3: - transparency = tuple([ - convert_transparency(matrix[i*4:i*4+4], transparency) - for i in range(0, len(transparency)) - ]) - new.info['transparency'] = transparency + transparency = tuple( + [ + convert_transparency( + matrix[i * 4 : i * 4 + 4], transparency + ) + for i in range(0, len(transparency)) + ] + ) + new.info["transparency"] = transparency return new if mode == "P" and self.mode == "RGBA": @@ -937,45 +973,48 @@ class Image(object): delete_trns = False # transparency handling if has_transparency: - if self.mode in ('L', 'RGB') and mode == 'RGBA': + if self.mode in ("1", "L", "I", "RGB") and mode == "RGBA": # Use transparent conversion to promote from transparent # color to an alpha channel. - new_im = self._new(self.im.convert_transparent( - mode, self.info['transparency'])) - del(new_im.info['transparency']) + new_im = self._new( + self.im.convert_transparent(mode, self.info["transparency"]) + ) + del new_im.info["transparency"] return new_im - elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'): - t = self.info['transparency'] + elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"): + t = self.info["transparency"] if isinstance(t, bytes): # Dragons. This can't be represented by a single color - warnings.warn('Palette images with Transparency ' + - ' expressed in bytes should be converted ' + - 'to RGBA images') + warnings.warn( + "Palette images with Transparency expressed in bytes should be " + "converted to RGBA images" + ) delete_trns = True else: # get the new transparency color. # use existing conversions trns_im = Image()._new(core.new(self.mode, (1, 1))) - if self.mode == 'P': + if self.mode == "P": trns_im.putpalette(self.palette) if isinstance(t, tuple): try: t = trns_im.palette.getcolor(t) - except: - raise ValueError("Couldn't allocate a palette " - "color for transparency") + except Exception: + raise ValueError( + "Couldn't allocate a palette color for transparency" + ) trns_im.putpixel((0, 0), t) - if mode in ('L', 'RGB'): + if mode in ("L", "RGB"): trns_im = trns_im.convert(mode) else: # can't just retrieve the palette number, got to do it # after quantization. 
- trns_im = trns_im.convert('RGB') + trns_im = trns_im.convert("RGB") trns = trns_im.getpixel((0, 0)) - elif self.mode == 'P' and mode == 'RGBA': - t = self.info['transparency'] + elif self.mode == "P" and mode == "RGBA": + t = self.info["transparency"] delete_trns = True if isinstance(t, bytes): @@ -983,27 +1022,26 @@ class Image(object): elif isinstance(t, int): self.im.putpalettealpha(t, 0) else: - raise ValueError("Transparency for P mode should" + - " be bytes or int") + raise ValueError("Transparency for P mode should be bytes or int") if mode == "P" and palette == ADAPTIVE: im = self.im.quantize(colors) new = self._new(im) from . import ImagePalette + new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB")) if delete_trns: # This could possibly happen if we requantize to fewer colors. # The transparency would be totally off in that case. - del(new.info['transparency']) + del new.info["transparency"] if trns is not None: try: - new.info['transparency'] = new.palette.getcolor(trns) - except: + new.info["transparency"] = new.palette.getcolor(trns) + except Exception: # if we can't make a transparent color, don't leave the old # transparency hanging around to mess us up. - del(new.info['transparency']) - warnings.warn("Couldn't allocate palette entry " + - "for transparency") + del new.info["transparency"] + warnings.warn("Couldn't allocate palette entry for transparency") return new # colorspace conversion @@ -1023,20 +1061,19 @@ class Image(object): new_im = self._new(im) if delete_trns: # crash fail if we leave a bytes transparency in an rgb/l mode. - del(new_im.info['transparency']) + del new_im.info["transparency"] if trns is not None: - if new_im.mode == 'P': + if new_im.mode == "P": try: - new_im.info['transparency'] = new_im.palette.getcolor(trns) - except: - del(new_im.info['transparency']) - warnings.warn("Couldn't allocate palette entry " + - "for transparency") + new_im.info["transparency"] = new_im.palette.getcolor(trns) + except Exception: + del new_im.info["transparency"] + warnings.warn("Couldn't allocate palette entry for transparency") else: - new_im.info['transparency'] = trns + new_im.info["transparency"] = trns return new_im - def quantize(self, colors=256, method=None, kmeans=0, palette=None): + def quantize(self, colors=256, method=None, kmeans=0, palette=None, dither=1): """ Convert the image to 'P' mode with the specified number of colors. @@ -1047,7 +1084,12 @@ class Image(object): 2 = fast octree 3 = libimagequant :param kmeans: Integer - :param palette: Quantize to the palette of given :py:class:`PIL.Image.Image`. + :param palette: Quantize to the palette of given + :py:class:`PIL.Image.Image`. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are NONE or FLOYDSTEINBERG (default). + Default: 1 (legacy setting) :returns: A new image """ @@ -1057,14 +1099,15 @@ class Image(object): if method is None: # defaults: method = 0 - if self.mode == 'RGBA': + if self.mode == "RGBA": method = 2 - if self.mode == 'RGBA' and method not in (2, 3): + if self.mode == "RGBA" and method not in (2, 3): # Caller specified an invalid mode. 
raise ValueError( - 'Fast Octree (method == 2) and libimagequant (method == 3) ' + - 'are the only valid methods for quantizing RGBA images') + "Fast Octree (method == 2) and libimagequant (method == 3) " + "are the only valid methods for quantizing RGBA images" + ) if palette: # use palette from reference image @@ -1074,11 +1117,18 @@ class Image(object): if self.mode != "RGB" and self.mode != "L": raise ValueError( "only RGB or L mode images can be quantized to a palette" - ) - im = self.im.convert("P", 1, palette.im) + ) + im = self.im.convert("P", dither, palette.im) return self._new(im) - return self._new(self.im.quantize(colors, method, kmeans)) + im = self._new(self.im.quantize(colors, method, kmeans)) + + from . import ImagePalette + + mode = im.im.getpalettemode() + im.palette = ImagePalette.ImagePalette(mode, im.im.getpalette(mode, mode)) + + return im def copy(self): """ @@ -1173,8 +1223,9 @@ class Image(object): if isinstance(filter, Callable): filter = filter() if not hasattr(filter, "filter"): - raise TypeError("filter argument should be ImageFilter.Filter " + - "instance or class") + raise TypeError( + "filter argument should be ImageFilter.Filter instance or class" + ) multiband = isinstance(filter, ImageFilter.MultibandFilter) if self.im.bands == 1 or multiband: @@ -1273,6 +1324,12 @@ class Image(object): return tuple(extrema) return self.im.getextrema() + def getexif(self): + if self._exif is None: + self._exif = Exif() + self._exif.load(self.info.get("exif")) + return self._exif + def getim(self): """ Returns a capsule that points to the internal image memory. @@ -1344,6 +1401,7 @@ class Image(object): bi-level image (mode "1") or a greyscale image ("L"). :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. :returns: A list containing pixel counts. """ self.load() @@ -1356,9 +1414,36 @@ class Image(object): return self.im.histogram(extrema) return self.im.histogram() + def entropy(self, mask=None, extrema=None): + """ + Calculates and returns the entropy for the image. + + A bilevel image (mode "1") is treated as a greyscale ("L") + image by this method. + + If a mask is provided, the method employs the histogram for + those parts of the image where the mask image is non-zero. + The mask image must have the same size as the image, and be + either a bi-level image (mode "1") or a greyscale image ("L"). + + :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. + :returns: A float value representing the image entropy + """ + self.load() + if mask: + mask.load() + return self.im.entropy((0, 0), mask.im) + if self.mode in ("I", "F"): + if extrema is None: + extrema = self.getextrema() + return self.im.entropy(extrema) + return self.im.entropy() + def offset(self, xoffset, yoffset=None): - raise NotImplementedError("offset() has been removed. " - "Please call ImageChops.offset() instead.") + raise NotImplementedError( + "offset() has been removed. Please call ImageChops.offset() instead." + ) def paste(self, im, box=None, mask=None): """ @@ -1416,13 +1501,12 @@ class Image(object): size = mask.size else: # FIXME: use self.size here? - raise ValueError( - "cannot determine region size; use 4-item box" - ) - box += (box[0]+size[0], box[1]+size[1]) + raise ValueError("cannot determine region size; use 4-item box") + box += (box[0] + size[0], box[1] + size[1]) if isStringType(im): from . 
import ImageColor + im = ImageColor.getcolor(im, self.mode) elif isImageType(im): @@ -1541,7 +1625,7 @@ class Image(object): self._ensure_mutable() - if self.mode not in ("LA", "RGBA"): + if self.mode not in ("LA", "PA", "RGBA"): # attempt to promote self to a matching alpha mode try: mode = getmodebase(self.mode) + "A" @@ -1550,7 +1634,7 @@ class Image(object): except (AttributeError, ValueError): # do things the hard way im = self.im.convert(mode) - if im.mode not in ("LA", "RGBA"): + if im.mode not in ("LA", "PA", "RGBA"): raise ValueError # sanity check self.im = im self.pyaccess = None @@ -1558,7 +1642,7 @@ class Image(object): except (KeyError, ValueError): raise ValueError("illegal image mode") - if self.mode == "LA": + if self.mode in ("LA", "PA"): band = 1 else: band = 3 @@ -1601,10 +1685,10 @@ class Image(object): def putpalette(self, data, rawmode="RGB"): """ - Attaches a palette to this image. The image must be a "P" or - "L" image, and the palette sequence must contain 768 integer - values, where each group of three values represent the red, - green, and blue values for the corresponding pixel + Attaches a palette to this image. The image must be a "P", + "PA", "L" or "LA" image, and the palette sequence must contain + 768 integer values, where each group of three values represent + the red, green, and blue values for the corresponding pixel index. Instead of an integer sequence, you can use an 8-bit string. @@ -1613,7 +1697,7 @@ class Image(object): """ from . import ImagePalette - if self.mode not in ("L", "P"): + if self.mode not in ("L", "LA", "P", "PA"): raise ValueError("illegal image mode") self.load() if isinstance(data, ImagePalette.ImagePalette): @@ -1625,7 +1709,7 @@ class Image(object): else: data = "".join(chr(x) for x in data) palette = ImagePalette.raw(rawmode, data) - self.mode = "P" + self.mode = "PA" if "A" in self.mode else "P" self.palette = palette self.palette.mode = "RGB" self.load() # install new palette @@ -1634,7 +1718,8 @@ class Image(object): """ Modifies the pixel at the given position. The color is given as a single numerical value for single-band images, and a tuple for - multi-band images. + multi-band images. In addition to this, RGB and RGBA tuples are + accepted for P images. Note that this method is relatively slow. For more extensive changes, use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` @@ -1657,6 +1742,14 @@ class Image(object): if self.pyaccess: return self.pyaccess.putpixel(xy, value) + + if ( + self.mode == "P" + and isinstance(value, (list, tuple)) + and len(value) in [3, 4] + ): + # RGB or RGBA value for a P image + value = self.palette.getcolor(value) return self.im.putpixel(xy, value) def remap_palette(self, dest_map, source_palette=None): @@ -1664,7 +1757,7 @@ class Image(object): Rewrites the image to reorder the palette. :param dest_map: A list of indexes into the original palette. - e.g. [1,0] would swap a two item palette, and list(range(255)) + e.g. [1,0] would swap a two item palette, and list(range(256)) is the identity transform. :param source_palette: Bytes or None. :returns: An :py:class:`~PIL.Image.Image` object. 
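[Note on the putpalette()/putpixel() hunks above: with this vendored Pillow upgrade, a "P" image accepts an RGB or RGBA tuple directly and resolves it through ImagePalette.getcolor(). A minimal sketch of the resulting behavior, assuming the vendored Pillow 6.x matches upstream; illustrative only, not part of the patch:

    from PIL import Image

    im = Image.new("P", (8, 8))
    im.putpixel((0, 0), (255, 0, 0))   # tuple resolved via im.palette.getcolor()
    index = im.getpixel((0, 0))        # reads back the allocated palette index
]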
@@ -1679,16 +1772,16 @@ class Image(object): if self.mode == "P": real_source_palette = self.im.getpalette("RGB")[:768] else: # L-mode - real_source_palette = bytearray(i//3 for i in range(768)) + real_source_palette = bytearray(i // 3 for i in range(768)) else: real_source_palette = source_palette palette_bytes = b"" - new_positions = [0]*256 + new_positions = [0] * 256 # pick only the used colors from the palette for i, oldPosition in enumerate(dest_map): - palette_bytes += real_source_palette[oldPosition*3:oldPosition*3+3] + palette_bytes += real_source_palette[oldPosition * 3 : oldPosition * 3 + 3] new_positions[oldPosition] = i # replace the palette color id of all pixel with the new id @@ -1712,26 +1805,25 @@ class Image(object): mapping_palette = bytearray(new_positions) m_im = self.copy() - m_im.mode = 'P' + m_im.mode = "P" - m_im.palette = ImagePalette.ImagePalette("RGB", - palette=mapping_palette*3, - size=768) + m_im.palette = ImagePalette.ImagePalette( + "RGB", palette=mapping_palette * 3, size=768 + ) # possibly set palette dirty, then # m_im.putpalette(mapping_palette, 'L') # converts to 'P' # or just force it. # UNDONE -- this is part of the general issue with palettes m_im.im.putpalette(*m_im.palette.getdata()) - m_im = m_im.convert('L') + m_im = m_im.convert("L") # Internally, we require 768 bytes for a palette. - new_palette_bytes = (palette_bytes + - (768 - len(palette_bytes)) * b'\x00') + new_palette_bytes = palette_bytes + (768 - len(palette_bytes)) * b"\x00" m_im.putpalette(new_palette_bytes) - m_im.palette = ImagePalette.ImagePalette("RGB", - palette=palette_bytes, - size=len(palette_bytes)) + m_im.palette = ImagePalette.ImagePalette( + "RGB", palette=palette_bytes, size=len(palette_bytes) + ) return m_im @@ -1755,10 +1847,23 @@ class Image(object): :returns: An :py:class:`~PIL.Image.Image` object. """ - if resample not in ( - NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING, - ): - raise ValueError("unknown resampling filter") + if resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING): + message = "Unknown resampling filter ({}).".format(resample) + + filters = [ + "{} ({})".format(filter[1], filter[0]) + for filter in ( + (NEAREST, "Image.NEAREST"), + (LANCZOS, "Image.LANCZOS"), + (BILINEAR, "Image.BILINEAR"), + (BICUBIC, "Image.BICUBIC"), + (BOX, "Image.BOX"), + (HAMMING, "Image.HAMMING"), + ) + ] + raise ValueError( + message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1] + ) size = tuple(size) @@ -1773,18 +1878,24 @@ class Image(object): if self.mode in ("1", "P"): resample = NEAREST - if self.mode == 'LA': - return self.convert('La').resize(size, resample, box).convert('LA') - - if self.mode == 'RGBA': - return self.convert('RGBa').resize(size, resample, box).convert('RGBA') + if self.mode in ["LA", "RGBA"]: + im = self.convert(self.mode[:-1] + "a") + im = im.resize(size, resample, box) + return im.convert(self.mode) self.load() return self._new(self.im.resize(size, resample, box)) - def rotate(self, angle, resample=NEAREST, expand=0, center=None, - translate=None, fillcolor=None): + def rotate( + self, + angle, + resample=NEAREST, + expand=0, + center=None, + translate=None, + fillcolor=None, + ): """ Returns a rotated copy of this image. This method returns a copy of this image, rotated the given number of degrees counter @@ -1849,22 +1960,28 @@ class Image(object): else: post_trans = translate if center is None: - rotn_center = (w / 2.0, h / 2.0) # FIXME These should be rounded to ints? + # FIXME These should be rounded to ints? 
+ rotn_center = (w / 2.0, h / 2.0) else: rotn_center = center - angle = - math.radians(angle) + angle = -math.radians(angle) matrix = [ - round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, - round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0 + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, ] def transform(x, y, matrix): (a, b, c, d, e, f) = matrix - return a*x + b*y + c, d*x + e*y + f + return a * x + b * y + c, d * x + e * y + f - matrix[2], matrix[5] = transform(-rotn_center[0] - post_trans[0], - -rotn_center[1] - post_trans[1], matrix) + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) matrix[2] += rotn_center[0] matrix[5] += rotn_center[1] @@ -1882,9 +1999,7 @@ class Image(object): # We multiply a translation matrix from the right. Because of its # special form, this is the same as taking the image of the # translation vector as new translation vector. - matrix[2], matrix[5] = transform(-(nw - w) / 2.0, - -(nh - h) / 2.0, - matrix) + matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix) w, h = nw, nh return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor) @@ -1932,9 +2047,9 @@ class Image(object): filename = fp.name # may mutate self! - self.load() + self._ensure_mutable() - save_all = params.pop('save_all', False) + save_all = params.pop("save_all", False) self.encoderinfo = params self.encoderconfig = () @@ -1948,7 +2063,7 @@ class Image(object): try: format = EXTENSION[ext] except KeyError: - raise ValueError('unknown file extension: {}'.format(ext)) + raise ValueError("unknown file extension: {}".format(ext)) if format.upper() not in SAVE: init() @@ -1958,11 +2073,11 @@ class Image(object): save_handler = SAVE[format.upper()] if open_fp: - if params.get('append', False): - fp = builtins.open(filename, "r+b") - else: + if params.get("append", False): # Open also for reading ("+"), because TIFF save_all # writer needs to go back and edit the written data. + fp = builtins.open(filename, "r+b") + else: fp = builtins.open(filename, "w+b") try: @@ -1979,9 +2094,6 @@ class Image(object): **EOFError** exception. When a sequence file is opened, the library automatically seeks to frame 0. - Note that in the current version of the library, most sequence - formats only allows you to seek to the next frame. - See :py:meth:`~PIL.Image.Image.tell`. :param frame: Frame number, starting at 0. @@ -1998,15 +2110,15 @@ class Image(object): Displays this image. This method is mainly intended for debugging purposes. - On Unix platforms, this method saves the image to a temporary - PPM file, and calls either the **xv** utility or the **display** - utility, depending on which one can be found. + The image is first saved to a temporary file. By default, it will be in + PNG format. - On macOS, this method saves the image to a temporary BMP file, and - opens it with the native Preview application. + On Unix, the image is then opened using the **display**, **eog** or + **xv** utility, depending on which one can be found. - On Windows, it saves the image to a temporary BMP file, and uses - the standard BMP display utility to show it (usually Paint). + On macOS, the image is opened with the native Preview application. + + On Windows, the image is opened with the standard PNG display utility. :param title: Optional title to use for the image window, where possible. 
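[The rotate() hunks above only reflow the matrix math; the logic is unchanged. For reference, a standalone sketch of that math, rotation about an arbitrary center without the expand/translate handling (illustrative only; rotation_matrix is not a Pillow API):

    import math

    def rotation_matrix(angle_deg, center):
        # Pillow's AFFINE transform maps each *output* pixel (x, y) back to a
        # source position (a*x + b*y + c, d*x + e*y + f), hence the negated angle.
        angle = -math.radians(angle_deg)
        matrix = [
            round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
            round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0,
        ]

        def transform(x, y, m):
            (a, b, c, d, e, f) = m
            return a * x + b * y + c, d * x + e * y + f

        # translate the center to the origin, rotate, then translate back
        matrix[2], matrix[5] = transform(-center[0], -center[1], matrix)
        matrix[2] += center[0]
        matrix[5] += center[1]
        return matrix
]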
@@ -2053,8 +2165,7 @@ class Image(object): try: channel = self.getbands().index(channel) except ValueError: - raise ValueError( - 'The image has no channel "{}"'.format(channel)) + raise ValueError('The image has no channel "{}"'.format(channel)) return self._new(self.im.getband(channel)) @@ -2104,19 +2215,21 @@ class Image(object): self.draft(None, size) - im = self.resize(size, resample) + if self.size != size: + im = self.resize(size, resample) - self.im = im.im - self.mode = im.mode - self._size = size + self.im = im.im + self._size = size + self.mode = self.im.mode self.readonly = 0 self.pyaccess = None # FIXME: the different transform methods need further explanation # instead of bloating the method docs, add a separate chapter. - def transform(self, size, method, data=None, resample=NEAREST, - fill=1, fillcolor=None): + def transform( + self, size, method, data=None, resample=NEAREST, fill=1, fillcolor=None + ): """ Transforms this image. This method creates a new image with the given size, and the same mode as the original, and copies data @@ -2154,18 +2267,24 @@ class Image(object): :param fill: If **method** is an :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of the arguments passed to it. Otherwise, it is unused. - :param fillcolor: Optional fill color for the area outside the transform - in the output image. + :param fillcolor: Optional fill color for the area outside the + transform in the output image. :returns: An :py:class:`~PIL.Image.Image` object. """ - if self.mode == 'LA': - return self.convert('La').transform( - size, method, data, resample, fill, fillcolor).convert('LA') + if self.mode == "LA": + return ( + self.convert("La") + .transform(size, method, data, resample, fill, fillcolor) + .convert("LA") + ) - if self.mode == 'RGBA': - return self.convert('RGBa').transform( - size, method, data, resample, fill, fillcolor).convert('RGBA') + if self.mode == "RGBA": + return ( + self.convert("RGBa") + .transform(size, method, data, resample, fill, fillcolor) + .convert("RGBA") + ) if isinstance(method, ImageTransformHandler): return method.transform(size, self, resample=resample, fill=fill) @@ -2181,16 +2300,15 @@ class Image(object): if method == MESH: # list of quads for box, quad in data: - im.__transformer(box, self, QUAD, quad, resample, - fillcolor is None) + im.__transformer(box, self, QUAD, quad, resample, fillcolor is None) else: - im.__transformer((0, 0)+size, self, method, data, - resample, fillcolor is None) + im.__transformer( + (0, 0) + size, self, method, data, resample, fillcolor is None + ) return im - def __transformer(self, box, image, method, data, - resample=NEAREST, fill=1): + def __transformer(self, box, image, method, data, resample=NEAREST, fill=1): w = box[2] - box[0] h = box[3] - box[1] @@ -2218,16 +2336,41 @@ class Image(object): x0, y0 = nw As = 1.0 / w At = 1.0 / h - data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At, - (se[0]-sw[0]-ne[0]+x0)*As*At, - y0, (ne[1]-y0)*As, (sw[1]-y0)*At, - (se[1]-sw[1]-ne[1]+y0)*As*At) + data = ( + x0, + (ne[0] - x0) * As, + (sw[0] - x0) * At, + (se[0] - sw[0] - ne[0] + x0) * As * At, + y0, + (ne[1] - y0) * As, + (sw[1] - y0) * At, + (se[1] - sw[1] - ne[1] + y0) * As * At, + ) else: raise ValueError("unknown transformation method") if resample not in (NEAREST, BILINEAR, BICUBIC): - raise ValueError("unknown resampling filter") + if resample in (BOX, HAMMING, LANCZOS): + message = { + BOX: "Image.BOX", + HAMMING: "Image.HAMMING", + LANCZOS: "Image.LANCZOS/Image.ANTIALIAS", + }[resample] + " ({}) cannot be 
used.".format(resample) + else: + message = "Unknown resampling filter ({}).".format(resample) + + filters = [ + "{} ({})".format(filter[1], filter[0]) + for filter in ( + (NEAREST, "Image.NEAREST"), + (BILINEAR, "Image.BILINEAR"), + (BICUBIC, "Image.BICUBIC"), + ) + ] + raise ValueError( + message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1] + ) image.load() @@ -2264,6 +2407,7 @@ class Image(object): def toqimage(self): """Returns a QImage copy of this image""" from . import ImageQt + if not ImageQt.qt_is_installed: raise ImportError("Qt bindings are not installed") return ImageQt.toqimage(self) @@ -2271,6 +2415,7 @@ class Image(object): def toqpixmap(self): """Returns a QPixmap copy of this image""" from . import ImageQt + if not ImageQt.qt_is_installed: raise ImportError("Qt bindings are not installed") return ImageQt.toqpixmap(self) @@ -2279,6 +2424,7 @@ class Image(object): # -------------------------------------------------------------------- # Abstract handlers. + class ImagePointHandler(object): # used as a mixin by point transforms (for use with im.point) pass @@ -2295,6 +2441,7 @@ class ImageTransformHandler(object): # # Debugging + def _wedge(): """Create greyscale wedge (for debugging only)""" @@ -2345,9 +2492,17 @@ def new(mode, size, color=0): # css3-style specifier from . import ImageColor + color = ImageColor.getcolor(color, mode) - return Image()._new(core.fill(mode, size, color)) + im = Image() + if mode == "P" and isinstance(color, (list, tuple)) and len(color) in [3, 4]: + # RGB or RGBA value for a P image + from . import ImagePalette + + im.palette = ImagePalette.ImagePalette() + color = im.palette.getcolor(color) + return im._new(core.fill(mode, size, color)) def frombytes(mode, size, data, decoder_name="raw", *args): @@ -2389,8 +2544,9 @@ def frombytes(mode, size, data, decoder_name="raw", *args): def fromstring(*args, **kw): - raise NotImplementedError("fromstring() has been removed. " + - "Please call frombytes() instead.") + raise NotImplementedError( + "fromstring() has been removed. Please call frombytes() instead." + ) def frombuffer(mode, size, data, decoder_name="raw", *args): @@ -2437,17 +2593,16 @@ def frombuffer(mode, size, data, decoder_name="raw", *args): if decoder_name == "raw": if args == (): warnings.warn( - "the frombuffer defaults may change in a future release; " + "the frombuffer defaults will change in Pillow 7.0.0; " "for portability, change the call to read:\n" " frombuffer(mode, size, data, 'raw', mode, 0, 1)", - RuntimeWarning, stacklevel=2 + RuntimeWarning, + stacklevel=2, ) args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6 if args[0] in _MAPMODES: im = new(mode, (1, 1)) - im = im._new( - core.map_buffer(data, size, decoder_name, None, 0, args) - ) + im = im._new(core.map_buffer(data, size, decoder_name, None, 0, args)) im.readonly = 1 return im @@ -2481,12 +2636,12 @@ def fromarray(obj, mode=None): .. 
versionadded:: 1.1.6 """ arr = obj.__array_interface__ - shape = arr['shape'] + shape = arr["shape"] ndim = len(shape) - strides = arr.get('strides', None) + strides = arr.get("strides", None) if mode is None: try: - typekey = (1, 1) + shape[2:], arr['typestr'] + typekey = (1, 1) + shape[2:], arr["typestr"] mode, rawmode = _fromarray_typemap[typekey] except KeyError: raise TypeError("Cannot handle this data type") @@ -2503,7 +2658,7 @@ def fromarray(obj, mode=None): size = shape[1], shape[0] if strides is not None: - if hasattr(obj, 'tobytes'): + if hasattr(obj, "tobytes"): obj = obj.tobytes() else: obj = obj.tostring() @@ -2514,6 +2669,7 @@ def fromarray(obj, mode=None): def fromqimage(im): """Creates an image instance from a QImage image""" from . import ImageQt + if not ImageQt.qt_is_installed: raise ImportError("Qt bindings are not installed") return ImageQt.fromqimage(im) @@ -2522,6 +2678,7 @@ def fromqimage(im): def fromqpixmap(im): """Creates an image instance from a QPixmap image""" from . import ImageQt + if not ImageQt.qt_is_installed: raise ImportError("Qt bindings are not installed") return ImageQt.fromqpixmap(im) @@ -2548,7 +2705,7 @@ _fromarray_typemap = { ((1, 1, 2), "|u1"): ("LA", "LA"), ((1, 1, 3), "|u1"): ("RGB", "RGB"), ((1, 1, 4), "|u1"): ("RGBA", "RGBA"), - } +} # shortcuts _fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I") @@ -2564,15 +2721,15 @@ def _decompression_bomb_check(size): if pixels > 2 * MAX_IMAGE_PIXELS: raise DecompressionBombError( "Image size (%d pixels) exceeds limit of %d pixels, " - "could be decompression bomb DOS attack." % - (pixels, 2 * MAX_IMAGE_PIXELS)) + "could be decompression bomb DOS attack." % (pixels, 2 * MAX_IMAGE_PIXELS) + ) if pixels > MAX_IMAGE_PIXELS: warnings.warn( "Image size (%d pixels) exceeds limit of %d pixels, " - "could be decompression bomb DOS attack." % - (pixels, MAX_IMAGE_PIXELS), - DecompressionBombWarning) + "could be decompression bomb DOS attack." % (pixels, MAX_IMAGE_PIXELS), + DecompressionBombWarning, + ) def open(fp, mode="r"): @@ -2600,10 +2757,10 @@ def open(fp, mode="r"): exclusive_fp = False filename = "" - if isPath(fp): - filename = fp - elif HAS_PATHLIB and isinstance(fp, Path): + if HAS_PATHLIB and isinstance(fp, Path): filename = str(fp.resolve()) + elif isPath(fp): + filename = fp if filename: fp = builtins.open(filename, "rb") @@ -2620,6 +2777,7 @@ def open(fp, mode="r"): preinit() accept_warnings = [] + def _open_core(fp, filename, prefix): for i in ID: try: @@ -2637,6 +2795,10 @@ def open(fp, mode="r"): # opening failures that are entirely expected. # logger.debug("", exc_info=True) continue + except BaseException: + if exclusive_fp: + fp.close() + raise return None im = _open_core(fp, filename, prefix) @@ -2653,8 +2815,8 @@ def open(fp, mode="r"): fp.close() for message in accept_warnings: warnings.warn(message) - raise IOError("cannot identify image file %r" - % (filename if filename else fp)) + raise IOError("cannot identify image file %r" % (filename if filename else fp)) + # # Image processing. @@ -2758,6 +2920,7 @@ def merge(mode, bands): # -------------------------------------------------------------------- # Plugin registry + def register_open(id, factory, accept=None): """ Register an image file plugin. This function should not be used @@ -2871,6 +3034,7 @@ def register_encoder(name, encoder): # -------------------------------------------------------------------- # Simple display support. User code may override this. 
+ def _show(image, **options): # override me, as necessary _showxv(image, **options) @@ -2878,12 +3042,14 @@ def _show(image, **options): def _showxv(image, title=None, **options): from . import ImageShow + ImageShow.show(image, title, **options) # -------------------------------------------------------------------- # Effects + def effect_mandelbrot(size, extent, quality): """ Generate a Mandelbrot set covering the given extent. @@ -2929,14 +3095,15 @@ def radial_gradient(mode): # -------------------------------------------------------------------- # Resources + def _apply_env_variables(env=None): if env is None: env = os.environ for var_name, setter in [ - ('PILLOW_ALIGNMENT', core.set_alignment), - ('PILLOW_BLOCK_SIZE', core.set_block_size), - ('PILLOW_BLOCKS_MAX', core.set_blocks_max), + ("PILLOW_ALIGNMENT", core.set_alignment), + ("PILLOW_BLOCK_SIZE", core.set_block_size), + ("PILLOW_BLOCKS_MAX", core.set_blocks_max), ]: if var_name not in env: continue @@ -2944,22 +3111,233 @@ def _apply_env_variables(env=None): var = env[var_name].lower() units = 1 - for postfix, mul in [('k', 1024), ('m', 1024*1024)]: + for postfix, mul in [("k", 1024), ("m", 1024 * 1024)]: if var.endswith(postfix): units = mul - var = var[:-len(postfix)] + var = var[: -len(postfix)] try: var = int(var) * units except ValueError: - warnings.warn("{0} is not int".format(var_name)) + warnings.warn("{} is not int".format(var_name)) continue try: setter(var) except ValueError as e: - warnings.warn("{0}: {1}".format(var_name, e)) + warnings.warn("{}: {}".format(var_name, e)) _apply_env_variables() atexit.register(core.clear_cache) + + +class Exif(MutableMapping): + endian = "<" + + def __init__(self): + self._data = {} + self._ifds = {} + self._info = None + self._loaded_exif = None + + def _fixup(self, value): + try: + if len(value) == 1 and not isinstance(value, dict): + return value[0] + except Exception: + pass + return value + + def _fixup_dict(self, src_dict): + # Helper function for _getexif() + # returns a dict with any single item tuples/lists as individual values + return {k: self._fixup(v) for k, v in src_dict.items()} + + def _get_ifd_dict(self, tag): + try: + # an offset pointer to the location of the nested embedded IFD. + # It should be a long, but may be corrupted. + self.fp.seek(self[tag]) + except (KeyError, TypeError): + pass + else: + from . import TiffImagePlugin + + info = TiffImagePlugin.ImageFileDirectory_v1(self.head) + info.load(self.fp) + return self._fixup_dict(info) + + def load(self, data): + # Extract EXIF information. This is highly experimental, + # and is likely to be replaced with something better in a future + # version. + + # The EXIF record consists of a TIFF file embedded in a JPEG + # application marker (!). + if data == self._loaded_exif: + return + self._loaded_exif = data + self._data.clear() + self._ifds.clear() + self._info = None + if not data: + return + + self.fp = io.BytesIO(data[6:]) + self.head = self.fp.read(8) + # process dictionary + from . import TiffImagePlugin + + self._info = TiffImagePlugin.ImageFileDirectory_v1(self.head) + self.endian = self._info._endian + self.fp.seek(self._info.next) + self._info.load(self.fp) + + # get EXIF extension + ifd = self._get_ifd_dict(0x8769) + if ifd: + self._data.update(ifd) + self._ifds[0x8769] = ifd + + def tobytes(self, offset=0): + from . 
import TiffImagePlugin
+
+        if self.endian == "<":
+            head = b"II\x2A\x00\x08\x00\x00\x00"
+        else:
+            head = b"MM\x00\x2A\x00\x00\x00\x08"
+        ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head)
+        for tag, value in self.items():
+            ifd[tag] = value
+        return b"Exif\x00\x00" + head + ifd.tobytes(offset)
+
+    def get_ifd(self, tag):
+        if tag not in self._ifds and tag in self:
+            if tag in [0x8825, 0xA005]:
+                # gpsinfo, interop
+                self._ifds[tag] = self._get_ifd_dict(tag)
+            elif tag == 0x927C:  # makernote
+                from .TiffImagePlugin import ImageFileDirectory_v2
+
+                if self[0x927C][:8] == b"FUJIFILM":
+                    exif_data = self[0x927C]
+                    ifd_offset = i32le(exif_data[8:12])
+                    ifd_data = exif_data[ifd_offset:]
+
+                    makernote = {}
+                    for i in range(0, struct.unpack("<H", ifd_data[:2])[0]):
+                        ifd_tag, typ, count, data = struct.unpack(
+                            "<HHL4s", ifd_data[i * 12 + 2 : (i + 1) * 12 + 2]
+                        )
+                        try:
+                            unit_size, handler = ImageFileDirectory_v2._load_dispatch[
+                                typ
+                            ]
+                        except KeyError:
+                            continue
+                        size = count * unit_size
+                        if size > 4:
+                            offset, = struct.unpack("<L", data)
+                            data = ifd_data[offset - 12 : offset + size - 12]
+                        else:
+                            data = data[:size]
+
+                        if len(data) != size:
+                            warnings.warn(
+                                "Possibly corrupt EXIF MakerNote data.  "
+                                "Expecting to read %d bytes but only got %d."
+                                " Skipping tag %s" % (size, len(data), ifd_tag)
+                            )
+                            continue
+
+                        if not data:
+                            continue
+
+                        makernote[ifd_tag] = handler(
+                            ImageFileDirectory_v2(), data, False
+                        )
+                    self._ifds[0x927C] = dict(self._fixup_dict(makernote))
+                elif self.get(0x010F) == "Nintendo":
+                    ifd_data = self[0x927C][8:]
+                    for i in range(0, struct.unpack(">H", ifd_data[:2])[0]):
+                        ifd_tag, typ, count, data = struct.unpack(
+                            ">HHL4s", ifd_data[i * 12 + 2 : (i + 1) * 12 + 2]
+                        )
+                        if ifd_tag == 0x1101:
+                            # CameraInfo
+                            offset, = struct.unpack(">L", data)
+                            self.fp.seek(offset)
+
+                            camerainfo = {"ModelID": self.fp.read(4)}
+
+                            self.fp.read(4)
+                            # Seconds since 2000
+                            camerainfo["TimeStamp"] = i32le(self.fp.read(12))
+
+                            self.fp.read(4)
+                            camerainfo["InternalSerialNumber"] = self.fp.read(4)
+
+                            self.fp.read(12)
+                            parallax = self.fp.read(4)
+                            handler = ImageFileDirectory_v2._load_dispatch[
+                                TiffTags.FLOAT
+                            ][1]
+                            camerainfo["Parallax"] = handler(
+                                ImageFileDirectory_v2(), parallax, False
+                            )
+
+                            self.fp.read(4)
+                            camerainfo["Category"] = self.fp.read(2)
+
+                            makernote = {0x1101: dict(self._fixup_dict(camerainfo))}
+                            self._ifds[0x927C] = makernote
+        return self._ifds.get(tag, {})
+
+    def __str__(self):
+        if self._info is not None:
+            # Load all keys into self._data
+            for tag in self._info.keys():
+                self[tag]
+
+        return str(self._data)
+
+    def __len__(self):
+        keys = set(self._data)
+        if self._info is not None:
+            keys.update(self._info)
+        return len(keys)
+
+    def __getitem__(self, tag):
+        if self._info is not None and tag not in self._data and tag in self._info:
+            self._data[tag] = self._fixup(self._info[tag])
+            if tag == 0x8825:
+                self._data[tag] = self.get_ifd(tag)
+            del self._info[tag]
+        return self._data[tag]
+
+    def __contains__(self, tag):
+        return tag in self._data or (self._info is not None and tag in self._info)
+
+    if not py3:
+
+        def has_key(self, tag):
+            return tag in self
+
+    def __setitem__(self, tag, value):
+        if self._info is not None and tag in self._info:
+            del self._info[tag]
+        self._data[tag] = value
+
+    def __delitem__(self, tag):
+        if self._info is not None and tag in self._info:
+            del self._info[tag]
+        del self._data[tag]
+
+    def __iter__(self):
+        keys = set(self._data)
+        if self._info is not None:
+            keys.update(self._info)
+        return iter(keys)
diff --git a/server/www/packages/packages-linux/x64/PIL/ImageChops.py b/server/www/packages/packages-linux/x64/PIL/ImageChops.py
index 8901673..b1f71b5 100644
--- a/server/www/packages/packages-linux/x64/PIL/ImageChops.py
+++ b/server/www/packages/packages-linux/x64/PIL/ImageChops.py
@@ -54,7 +54,7 @@ def invert(image):
 def lighter(image1, image2):
     """
     Compares the two images, pixel by pixel, and returns a new image containing
-    the lighter values.
+    the lighter values. At least one of the images must have mode "1".

     .. code-block:: python

@@ -70,8 +70,8 @@ def lighter(image1, image2):
 def darker(image1, image2):
     """
-    Compares the two images, pixel by pixel, and returns a new image
-    containing the darker values.
+    Compares the two images, pixel by pixel, and returns a new image containing
+    the darker values. 
At least one of the images must have mode "1". .. code-block:: python @@ -88,7 +88,7 @@ def darker(image1, image2): def difference(image1, image2): """ Returns the absolute value of the pixel-by-pixel difference between the two - images. + images. At least one of the images must have mode "1". .. code-block:: python @@ -107,7 +107,8 @@ def multiply(image1, image2): Superimposes two images on top of each other. If you multiply an image with a solid black image, the result is black. If - you multiply with a solid white image, the image is unaffected. + you multiply with a solid white image, the image is unaffected. At least + one of the images must have mode "1". .. code-block:: python @@ -123,7 +124,8 @@ def multiply(image1, image2): def screen(image1, image2): """ - Superimposes two inverted images on top of each other. + Superimposes two inverted images on top of each other. At least one of the + images must have mode "1". .. code-block:: python @@ -141,6 +143,7 @@ def add(image1, image2, scale=1.0, offset=0): """ Adds two images, dividing the result by scale and adding the offset. If omitted, scale defaults to 1.0, and offset to 0.0. + At least one of the images must have mode "1". .. code-block:: python @@ -156,8 +159,9 @@ def add(image1, image2, scale=1.0, offset=0): def subtract(image1, image2, scale=1.0, offset=0): """ - Subtracts two images, dividing the result by scale and adding the - offset. If omitted, scale defaults to 1.0, and offset to 0.0. + Subtracts two images, dividing the result by scale and adding the offset. + If omitted, scale defaults to 1.0, and offset to 0.0. At least one of the + images must have mode "1". .. code-block:: python @@ -172,7 +176,8 @@ def subtract(image1, image2, scale=1.0, offset=0): def add_modulo(image1, image2): - """Add two images, without clipping the result. + """Add two images, without clipping the result. At least one of the images + must have mode "1". .. code-block:: python @@ -187,7 +192,8 @@ def add_modulo(image1, image2): def subtract_modulo(image1, image2): - """Subtract two images, without clipping the result. + """Subtract two images, without clipping the result. At least one of the + images must have mode "1". .. code-block:: python @@ -202,7 +208,8 @@ def subtract_modulo(image1, image2): def logical_and(image1, image2): - """Logical AND between two images. + """Logical AND between two images. At least one of the images must have + mode "1". .. code-block:: python @@ -217,7 +224,8 @@ def logical_and(image1, image2): def logical_or(image1, image2): - """Logical OR between two images. + """Logical OR between two images. At least one of the images must have + mode "1". .. code-block:: python @@ -232,7 +240,8 @@ def logical_or(image1, image2): def logical_xor(image1, image2): - """Logical XOR between two images. + """Logical XOR between two images. At least one of the images must have + mode "1". .. code-block:: python diff --git a/server/www/packages/packages-linux/x64/PIL/ImageCms.py b/server/www/packages/packages-linux/x64/PIL/ImageCms.py index 4b6281f..ed4eefc 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageCms.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageCms.py @@ -16,17 +16,20 @@ # below for the original description. from __future__ import print_function + import sys from PIL import Image +from PIL._util import isStringType + try: from PIL import _imagingcms except ImportError as ex: # Allow error import for doc purposes, but error out when accessing # anything in core. 
- from _util import deferred_error + from ._util import deferred_error + _imagingcms = deferred_error(ex) -from PIL._util import isStringType DESCRIPTION = """ pyCMS @@ -132,7 +135,7 @@ FLAGS = { "SOFTPROOFING": 16384, # Do softproofing "PRESERVEBLACK": 32768, # Black preservation "NODEFAULTRESOURCEDEF": 16777216, # CRD special - "GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints + "GRIDPOINTS": lambda n: ((n) & 0xFF) << 16, # Gridpoints } _MAX_FLAG = 0 @@ -148,8 +151,8 @@ for flag in FLAGS.values(): ## # Profile. -class ImageCmsProfile(object): +class ImageCmsProfile(object): def __init__(self, profile): """ :param profile: Either a string representing a filename, @@ -197,22 +200,31 @@ class ImageCmsTransform(Image.ImagePointHandler): Will return the output profile in the output.info['icc_profile']. """ - def __init__(self, input, output, input_mode, output_mode, - intent=INTENT_PERCEPTUAL, proof=None, - proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0): + def __init__( + self, + input, + output, + input_mode, + output_mode, + intent=INTENT_PERCEPTUAL, + proof=None, + proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=0, + ): if proof is None: self.transform = core.buildTransform( - input.profile, output.profile, - input_mode, output_mode, - intent, - flags + input.profile, output.profile, input_mode, output_mode, intent, flags ) else: self.transform = core.buildProofTransform( - input.profile, output.profile, proof.profile, - input_mode, output_mode, - intent, proof_intent, - flags + input.profile, + output.profile, + proof.profile, + input_mode, + output_mode, + intent, + proof_intent, + flags, ) # Note: inputMode and outputMode are for pyCMS compatibility only self.input_mode = self.inputMode = input_mode @@ -228,7 +240,7 @@ class ImageCmsTransform(Image.ImagePointHandler): if imOut is None: imOut = Image.new(self.output_mode, im.size, None) self.transform.apply(im.im.id, imOut.im.id) - imOut.info['icc_profile'] = self.output_profile.tobytes() + imOut.info["icc_profile"] = self.output_profile.tobytes() return imOut def apply_in_place(self, im): @@ -236,7 +248,7 @@ class ImageCmsTransform(Image.ImagePointHandler): if im.mode != self.output_mode: raise ValueError("mode mismatch") # wrong output mode self.transform.apply(im.im.id, im.im.id) - im.info['icc_profile'] = self.output_profile.tobytes() + im.info["icc_profile"] = self.output_profile.tobytes() return im @@ -247,6 +259,7 @@ def get_display_profile(handle=None): if sys.platform == "win32": from PIL import ImageWin + if isinstance(handle, ImageWin.HDC): profile = core.get_display_profile_win32(handle, 1) else: @@ -265,22 +278,30 @@ def get_display_profile(handle=None): # pyCMS compatible layer # --------------------------------------------------------------------. + class PyCMSError(Exception): """ (pyCMS) Exception class. This is used for all errors in the pyCMS API. """ + pass def profileToProfile( - im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL, - outputMode=None, inPlace=0, flags=0): + im, + inputProfile, + outputProfile, + renderingIntent=INTENT_PERCEPTUAL, + outputMode=None, + inPlace=False, + flags=0, +): """ (pyCMS) Applies an ICC transformation to a given image, mapping from inputProfile to outputProfile. If the input or output profiles specified are not valid filenames, a - PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode, + PyCMSError will be raised. If inPlace is True and outputMode != im.mode, a PyCMSError will be raised. 
If an error occurs during application of the profiles, a PyCMSError will be raised. If outputMode is not a mode supported by the outputProfile (or by pyCMS), a PyCMSError will be @@ -317,9 +338,9 @@ def profileToProfile( MUST be the same mode as the input, or omitted completely. If omitted, the outputMode will be the same as the mode of the input image (im.mode) - :param inPlace: Boolean (1 = True, None or 0 = False). If True, the - original image is modified in-place, and None is returned. If False - (default), a new Image object is returned with the transform applied. + :param inPlace: Boolean. If True, the original image is modified in-place, + and None is returned. If False (default), a new Image object is + returned with the transform applied. :param flags: Integer (0-...) specifying additional flags :returns: Either None or a new PIL image object, depending on value of inPlace @@ -333,8 +354,7 @@ def profileToProfile( raise PyCMSError("renderingIntent must be an integer between 0 and 3") if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): - raise PyCMSError( - "flags must be an integer between 0 and %s" + _MAX_FLAG) + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) try: if not isinstance(inputProfile, ImageCmsProfile): @@ -342,8 +362,12 @@ def profileToProfile( if not isinstance(outputProfile, ImageCmsProfile): outputProfile = ImageCmsProfile(outputProfile) transform = ImageCmsTransform( - inputProfile, outputProfile, im.mode, outputMode, - renderingIntent, flags=flags + inputProfile, + outputProfile, + im.mode, + outputMode, + renderingIntent, + flags=flags, ) if inPlace: transform.apply_in_place(im) @@ -379,8 +403,13 @@ def getOpenProfile(profileFilename): def buildTransform( - inputProfile, outputProfile, inMode, outMode, - renderingIntent=INTENT_PERCEPTUAL, flags=0): + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent=INTENT_PERCEPTUAL, + flags=0, +): """ (pyCMS) Builds an ICC transform mapping from the inputProfile to the outputProfile. 
Use applyTransform to apply the transform to a given @@ -440,8 +469,7 @@ def buildTransform( raise PyCMSError("renderingIntent must be an integer between 0 and 3") if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): - raise PyCMSError( - "flags must be an integer between 0 and %s" + _MAX_FLAG) + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) try: if not isinstance(inputProfile, ImageCmsProfile): @@ -449,17 +477,22 @@ def buildTransform( if not isinstance(outputProfile, ImageCmsProfile): outputProfile = ImageCmsProfile(outputProfile) return ImageCmsTransform( - inputProfile, outputProfile, inMode, outMode, - renderingIntent, flags=flags) + inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags + ) except (IOError, TypeError, ValueError) as v: raise PyCMSError(v) def buildProofTransform( - inputProfile, outputProfile, proofProfile, inMode, outMode, - renderingIntent=INTENT_PERCEPTUAL, - proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, - flags=FLAGS["SOFTPROOFING"]): + inputProfile, + outputProfile, + proofProfile, + inMode, + outMode, + renderingIntent=INTENT_PERCEPTUAL, + proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=FLAGS["SOFTPROOFING"], +): """ (pyCMS) Builds an ICC transform mapping from the inputProfile to the outputProfile, but tries to simulate the result that would be @@ -538,8 +571,7 @@ def buildProofTransform( raise PyCMSError("renderingIntent must be an integer between 0 and 3") if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): - raise PyCMSError( - "flags must be an integer between 0 and %s" + _MAX_FLAG) + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) try: if not isinstance(inputProfile, ImageCmsProfile): @@ -549,8 +581,15 @@ def buildProofTransform( if not isinstance(proofProfile, ImageCmsProfile): proofProfile = ImageCmsProfile(proofProfile) return ImageCmsTransform( - inputProfile, outputProfile, inMode, outMode, renderingIntent, - proofProfile, proofRenderingIntent, flags) + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent, + proofProfile, + proofRenderingIntent, + flags, + ) except (IOError, TypeError, ValueError) as v: raise PyCMSError(v) @@ -559,16 +598,16 @@ buildTransformFromOpenProfiles = buildTransform buildProofTransformFromOpenProfiles = buildProofTransform -def applyTransform(im, transform, inPlace=0): +def applyTransform(im, transform, inPlace=False): """ (pyCMS) Applies a transform to a given image. If im.mode != transform.inMode, a PyCMSError is raised. - If inPlace == TRUE and transform.inMode != transform.outMode, a + If inPlace is True and transform.inMode != transform.outMode, a PyCMSError is raised. - If im.mode, transfer.inMode, or transfer.outMode is not supported by + If im.mode, transform.inMode, or transform.outMode is not supported by pyCMSdll or the profiles you used for the transform, a PyCMSError is raised. @@ -581,7 +620,7 @@ def applyTransform(im, transform, inPlace=0): considerable calculation time if doing the same conversion multiple times. If you want to modify im in-place instead of receiving a new image as - the return value, set inPlace to TRUE. This can only be done if + the return value, set inPlace to True. This can only be done if transform.inMode and transform.outMode are the same, because we can't change the mode in-place (the buffer sizes for some modes are different). 
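Reviewer note: as the docstring here stresses, a transform should be built once and applied repeatedly rather than rebuilt per image. A sketch under the same LittleCMS assumption. Incidentally, the reformatted range check still reads `"flags must be an integer between 0 and %s" + _MAX_FLAG`, so if it ever fired it would raise TypeError (str + int) instead of PyCMSError; that is a pre-existing upstream quirk this patch only re-wraps.

```python
from PIL import Image, ImageCms

srgb = ImageCms.createProfile("sRGB")
lab = ImageCms.createProfile("LAB")
rgb_to_lab = ImageCms.buildTransform(srgb, lab, "RGB", "LAB")  # build once

for color in ((255, 0, 0), (0, 255, 0)):
    im = Image.new("RGB", (8, 8), color)
    lab_im = ImageCms.applyTransform(im, rgb_to_lab, inPlace=False)  # reuse
```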
The default behavior is to return a new Image object of @@ -590,10 +629,9 @@ def applyTransform(im, transform, inPlace=0): :param im: A PIL Image object, and im.mode must be the same as the inMode supported by the transform. :param transform: A valid CmsTransform class object - :param inPlace: Bool (1 == True, 0 or None == False). If True, im is - modified in place and None is returned, if False, a new Image object - with the transform applied is returned (and im is not changed). The - default is False. + :param inPlace: Bool. If True, im is modified in place and None is + returned, if False, a new Image object with the transform applied is + returned (and im is not changed). The default is False. :returns: Either None, or a new PIL Image object, depending on the value of inPlace. The profile will be returned in the image's info['icc_profile']. @@ -642,15 +680,16 @@ def createProfile(colorSpace, colorTemp=-1): if colorSpace not in ["LAB", "XYZ", "sRGB"]: raise PyCMSError( "Color space not supported for on-the-fly profile creation (%s)" - % colorSpace) + % colorSpace + ) if colorSpace == "LAB": try: colorTemp = float(colorTemp) - except: + except (TypeError, ValueError): raise PyCMSError( - "Color temperature must be numeric, \"%s\" not valid" - % colorTemp) + 'Color temperature must be numeric, "%s" not valid' % colorTemp + ) try: return core.createProfile(colorSpace, colorTemp) @@ -687,11 +726,11 @@ def getProfileName(profile): # // name was "%s - %s" (model, manufacturer) || Description , # // but if the Model and Manufacturer were the same or the model # // was long, Just the model, in 1.x - model = profile.profile.product_model - manufacturer = profile.profile.product_manufacturer + model = profile.profile.model + manufacturer = profile.profile.manufacturer if not (model or manufacturer): - return profile.profile.product_description + "\n" + return (profile.profile.profile_description or "") + "\n" if not manufacturer or len(model) > 30: return model + "\n" return "%s - %s\n" % (model, manufacturer) @@ -727,9 +766,9 @@ def getProfileInfo(profile): # add an extra newline to preserve pyCMS compatibility # Python, not C. the white point bits weren't working well, # so skipping. 
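Reviewer note: the `getProfileName`/`getProfileInfo` hunks around here (and the `getProfileCopyright`/`Manufacturer`/`Model`/`Description` hunks that follow) move off the deprecated `product_*` attributes onto `model`, `manufacturer`, `copyright` and `profile_description`, guarding each with `or ""` because the new attributes may be `None`. A hedged sketch; a profile created on the fly typically carries only a description, so the other accessors return just the compatibility newline:

```python
from PIL import ImageCms

prof = ImageCms.ImageCmsProfile(ImageCms.createProfile("sRGB"))
print(ImageCms.getProfileDescription(prof).strip())
print(repr(ImageCms.getProfileManufacturer(prof)))  # "\n" if no manufacturer tag
```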
- # // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint - description = profile.profile.product_description - cpright = profile.profile.product_copyright + # info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint + description = profile.profile.profile_description + cpright = profile.profile.copyright arr = [] for elt in (description, cpright): if elt: @@ -763,7 +802,7 @@ def getProfileCopyright(profile): # add an extra newline to preserve pyCMS compatibility if not isinstance(profile, ImageCmsProfile): profile = ImageCmsProfile(profile) - return profile.profile.product_copyright + "\n" + return (profile.profile.copyright or "") + "\n" except (AttributeError, IOError, TypeError, ValueError) as v: raise PyCMSError(v) @@ -791,7 +830,7 @@ def getProfileManufacturer(profile): # add an extra newline to preserve pyCMS compatibility if not isinstance(profile, ImageCmsProfile): profile = ImageCmsProfile(profile) - return profile.profile.product_manufacturer + "\n" + return (profile.profile.manufacturer or "") + "\n" except (AttributeError, IOError, TypeError, ValueError) as v: raise PyCMSError(v) @@ -820,7 +859,7 @@ def getProfileModel(profile): # add an extra newline to preserve pyCMS compatibility if not isinstance(profile, ImageCmsProfile): profile = ImageCmsProfile(profile) - return profile.profile.product_model + "\n" + return (profile.profile.model or "") + "\n" except (AttributeError, IOError, TypeError, ValueError) as v: raise PyCMSError(v) @@ -849,7 +888,7 @@ def getProfileDescription(profile): # add an extra newline to preserve pyCMS compatibility if not isinstance(profile, ImageCmsProfile): profile = ImageCmsProfile(profile) - return profile.profile.product_description + "\n" + return (profile.profile.profile_description or "") + "\n" except (AttributeError, IOError, TypeError, ValueError) as v: raise PyCMSError(v) @@ -949,7 +988,4 @@ def versions(): (pyCMS) Fetches versions. """ - return ( - VERSION, core.littlecms_version, - sys.version.split()[0], Image.VERSION - ) + return (VERSION, core.littlecms_version, sys.version.split()[0], Image.__version__) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageColor.py b/server/www/packages/packages-linux/x64/PIL/ImageColor.py index 08c00fd..692d7d2 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageColor.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageColor.py @@ -17,9 +17,10 @@ # See the README file for information on usage and redistribution. # -from . import Image import re +from . 
import Image + def getrgb(color): """ @@ -41,89 +42,77 @@ def getrgb(color): return rgb # check for known string formats - if re.match('#[a-f0-9]{3}$', color): - return ( - int(color[1]*2, 16), - int(color[2]*2, 16), - int(color[3]*2, 16), - ) + if re.match("#[a-f0-9]{3}$", color): + return (int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16)) - if re.match('#[a-f0-9]{4}$', color): + if re.match("#[a-f0-9]{4}$", color): return ( - int(color[1]*2, 16), - int(color[2]*2, 16), - int(color[3]*2, 16), - int(color[4]*2, 16), - ) + int(color[1] * 2, 16), + int(color[2] * 2, 16), + int(color[3] * 2, 16), + int(color[4] * 2, 16), + ) - if re.match('#[a-f0-9]{6}$', color): - return ( - int(color[1:3], 16), - int(color[3:5], 16), - int(color[5:7], 16), - ) + if re.match("#[a-f0-9]{6}$", color): + return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)) - if re.match('#[a-f0-9]{8}$', color): + if re.match("#[a-f0-9]{8}$", color): return ( int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16), int(color[7:9], 16), - ) + ) m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) if m: - return ( - int(m.group(1)), - int(m.group(2)), - int(m.group(3)) - ) + return (int(m.group(1)), int(m.group(2)), int(m.group(3))) m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color) if m: return ( int((int(m.group(1)) * 255) / 100.0 + 0.5), int((int(m.group(2)) * 255) / 100.0 + 0.5), - int((int(m.group(3)) * 255) / 100.0 + 0.5) - ) + int((int(m.group(3)) * 255) / 100.0 + 0.5), + ) - m = re.match(r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color) + m = re.match( + r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) if m: from colorsys import hls_to_rgb + rgb = hls_to_rgb( float(m.group(1)) / 360.0, float(m.group(3)) / 100.0, float(m.group(2)) / 100.0, - ) + ) return ( int(rgb[0] * 255 + 0.5), int(rgb[1] * 255 + 0.5), - int(rgb[2] * 255 + 0.5) - ) + int(rgb[2] * 255 + 0.5), + ) - m = re.match(r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color) + m = re.match( + r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) if m: from colorsys import hsv_to_rgb + rgb = hsv_to_rgb( float(m.group(1)) / 360.0, float(m.group(2)) / 100.0, float(m.group(3)) / 100.0, - ) + ) return ( int(rgb[0] * 255 + 0.5), int(rgb[1] * 255 + 0.5), - int(rgb[2] * 255 + 0.5) - ) + int(rgb[2] * 255 + 0.5), + ) - m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", - color) + m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) if m: - return ( - int(m.group(1)), - int(m.group(2)), - int(m.group(3)), - int(m.group(4)) - ) + return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))) raise ValueError("unknown color specifier: %r" % color) @@ -145,11 +134,11 @@ def getcolor(color, mode): if Image.getmodebase(mode) == "L": r, g, b = color - color = (r*299 + g*587 + b*114)//1000 - if mode[-1] == 'A': + color = (r * 299 + g * 587 + b * 114) // 1000 + if mode[-1] == "A": return (color, alpha) else: - if mode[-1] == 'A': + if mode[-1] == "A": return color + (alpha,) return color diff --git a/server/www/packages/packages-linux/x64/PIL/ImageDraw.py b/server/www/packages/packages-linux/x64/PIL/ImageDraw.py index 6a70def..ed3383f 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageDraw.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageDraw.py @@ -36,6 +36,7 @@ import numbers from . 
import Image, ImageColor from ._util import isStringType + """ A simple 2D drawing interface for PIL images.

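Reviewer note: the ImageColor hunks above are mechanical Black reformatting of `getrgb`'s string parsers; behavior is unchanged. A quick self-check of the formats it accepts:

```python
from PIL import ImageColor

assert ImageColor.getrgb("#f00") == (255, 0, 0)
assert ImageColor.getrgb("rgb(255, 0, 0)") == (255, 0, 0)
assert ImageColor.getrgb("hsl(0, 100%, 50%)") == (255, 0, 0)

# getcolor() folds to the target mode; "L" uses the 299/587/114 luma weights
assert ImageColor.getcolor("red", "L") == 76
```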
@@ -45,7 +46,6 @@ directly. class ImageDraw(object): - def __init__(self, im, mode=None): """ Create a drawing instance. @@ -76,9 +76,9 @@ class ImageDraw(object): self.draw = Image.core.draw(self.im, blend) self.mode = mode if mode in ("I", "F"): - self.ink = self.draw.draw_ink(1, mode) + self.ink = self.draw.draw_ink(1) else: - self.ink = self.draw.draw_ink(-1, mode) + self.ink = self.draw.draw_ink(-1) if mode in ("1", "P", "I", "F"): # FIXME: fix Fill2 to properly support matte for I+F images self.fontmode = "1" @@ -95,6 +95,7 @@ class ImageDraw(object): if not self.font: # FIXME: should add a font repository from . import ImageFont + self.font = ImageFont.load_default() return self.font @@ -110,13 +111,13 @@ class ImageDraw(object): ink = ImageColor.getcolor(ink, self.mode) if self.palette and not isinstance(ink, numbers.Number): ink = self.palette.getcolor(ink) - ink = self.draw.draw_ink(ink, self.mode) + ink = self.draw.draw_ink(ink) if fill is not None: if isStringType(fill): fill = ImageColor.getcolor(fill, self.mode) if self.palette and not isinstance(fill, numbers.Number): fill = self.palette.getcolor(fill) - fill = self.draw.draw_ink(fill, self.mode) + fill = self.draw.draw_ink(fill) return ink, fill def arc(self, xy, start, end, fill=None, width=0): @@ -156,13 +157,12 @@ class ImageDraw(object): if ink is not None: self.draw.draw_lines(xy, ink, width) if joint == "curve" and width > 4: - for i in range(1, len(xy)-1): + for i in range(1, len(xy) - 1): point = xy[i] angles = [ - math.degrees(math.atan2( - end[0] - start[0], start[1] - end[1] - )) % 360 - for start, end in ((xy[i-1], point), (point, xy[i+1])) + math.degrees(math.atan2(end[0] - start[0], start[1] - end[1])) + % 360 + for start, end in ((xy[i - 1], point), (point, xy[i + 1])) ] if angles[0] == angles[1]: # This is a straight line, so no joint is required @@ -171,21 +171,23 @@ class ImageDraw(object): def coord_at_angle(coord, angle): x, y = coord angle -= 90 - distance = width/2 - 1 - return tuple([ - p + - (math.floor(p_d) if p_d > 0 else math.ceil(p_d)) - for p, p_d in - ((x, distance * math.cos(math.radians(angle))), - (y, distance * math.sin(math.radians(angle)))) - ]) - flipped = ((angles[1] > angles[0] and - angles[1] - 180 > angles[0]) or - (angles[1] < angles[0] and - angles[1] + 180 > angles[0])) + distance = width / 2 - 1 + return tuple( + [ + p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d)) + for p, p_d in ( + (x, distance * math.cos(math.radians(angle))), + (y, distance * math.sin(math.radians(angle))), + ) + ] + ) + + flipped = ( + angles[1] > angles[0] and angles[1] - 180 > angles[0] + ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0]) coords = [ - (point[0] - width/2 + 1, point[1] - width/2 + 1), - (point[0] + width/2 - 1, point[1] + width/2 - 1) + (point[0] - width / 2 + 1, point[1] - width / 2 + 1), + (point[0] + width / 2 - 1, point[1] + width / 2 - 1), ] if flipped: start, end = (angles[1] + 90, angles[0] + 90) @@ -197,15 +199,15 @@ class ImageDraw(object): # Cover potential gaps between the line and the joint if flipped: gapCoords = [ - coord_at_angle(point, angles[0]+90), + coord_at_angle(point, angles[0] + 90), point, - coord_at_angle(point, angles[1]+90) + coord_at_angle(point, angles[1] + 90), ] else: gapCoords = [ - coord_at_angle(point, angles[0]-90), + coord_at_angle(point, angles[0] - 90), point, - coord_at_angle(point, angles[1]-90) + coord_at_angle(point, angles[1] - 90), ] self.line(gapCoords, fill, width=3) @@ -259,36 +261,126 @@ class ImageDraw(object): return 
text.split(split_character) - def text(self, xy, text, fill=None, font=None, anchor=None, - *args, **kwargs): + def text( + self, + xy, + text, + fill=None, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + stroke_fill=None, + *args, + **kwargs + ): if self._multiline_check(text): - return self.multiline_text(xy, text, fill, font, anchor, - *args, **kwargs) - ink, fill = self._getink(fill) + return self.multiline_text( + xy, + text, + fill, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + stroke_fill, + ) + if font is None: font = self.getfont() - if ink is None: - ink = fill - if ink is not None: + + def getink(fill): + ink, fill = self._getink(fill) + if ink is None: + return fill + return ink + + def draw_text(ink, stroke_width=0, stroke_offset=None): + coord = xy try: - mask, offset = font.getmask2(text, self.fontmode, - *args, **kwargs) - xy = xy[0] + offset[0], xy[1] + offset[1] + mask, offset = font.getmask2( + text, + self.fontmode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + *args, + **kwargs + ) + coord = coord[0] + offset[0], coord[1] + offset[1] except AttributeError: try: - mask = font.getmask(text, self.fontmode, *args, **kwargs) + mask = font.getmask( + text, + self.fontmode, + direction, + features, + language, + stroke_width, + *args, + **kwargs + ) except TypeError: mask = font.getmask(text) - self.draw.draw_bitmap(xy, mask, ink) + if stroke_offset: + coord = coord[0] + stroke_offset[0], coord[1] + stroke_offset[1] + self.draw.draw_bitmap(coord, mask, ink) - def multiline_text(self, xy, text, fill=None, font=None, anchor=None, - spacing=4, align="left", direction=None, features=None): + ink = getink(fill) + if ink is not None: + stroke_ink = None + if stroke_width: + stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink + + if stroke_ink is not None: + # Draw stroked text + draw_text(stroke_ink, stroke_width) + + # Draw normal text + draw_text(ink, 0, (stroke_width, stroke_width)) + else: + # Only draw normal text + draw_text(ink) + + def multiline_text( + self, + xy, + text, + fill=None, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + stroke_fill=None, + ): widths = [] max_width = 0 lines = self._multiline_split(text) - line_spacing = self.textsize('A', font=font)[1] + spacing + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) for line in lines: - line_width, line_height = self.textsize(line, font) + line_width, line_height = self.textsize( + line, + font, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + ) widths.append(line_width) max_width = max(max_width, line_width) left, top = xy @@ -298,35 +390,65 @@ class ImageDraw(object): elif align == "center": left += (max_width - widths[idx]) / 2.0 elif align == "right": - left += (max_width - widths[idx]) + left += max_width - widths[idx] else: raise ValueError('align must be "left", "center" or "right"') - self.text((left, top), line, fill, font, anchor, - direction=direction, features=features) + self.text( + (left, top), + line, + fill, + font, + anchor, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + stroke_fill=stroke_fill, + ) top += line_spacing left = xy[0] - def textsize(self, text, font=None, spacing=4, 
direction=None, - features=None): + def textsize( + self, + text, + font=None, + spacing=4, + direction=None, + features=None, + language=None, + stroke_width=0, + ): """Get the size of a given string, in pixels.""" if self._multiline_check(text): - return self.multiline_textsize(text, font, spacing, - direction, features) + return self.multiline_textsize( + text, font, spacing, direction, features, language, stroke_width + ) if font is None: font = self.getfont() - return font.getsize(text, direction, features) + return font.getsize(text, direction, features, language, stroke_width) - def multiline_textsize(self, text, font=None, spacing=4, direction=None, - features=None): + def multiline_textsize( + self, + text, + font=None, + spacing=4, + direction=None, + features=None, + language=None, + stroke_width=0, + ): max_width = 0 lines = self._multiline_split(text) - line_spacing = self.textsize('A', font=font)[1] + spacing + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) for line in lines: - line_width, line_height = self.textsize(line, font, spacing, - direction, features) + line_width, line_height = self.textsize( + line, font, spacing, direction, features, language, stroke_width + ) max_width = max(max_width, line_width) - return max_width, len(lines)*line_spacing - spacing + return max_width, len(lines) * line_spacing - spacing def Draw(im, mode=None): @@ -391,8 +513,8 @@ def floodfill(image, xy, value, border=None, thresh=0): pixel. :param thresh: Optional threshold value which specifies a maximum tolerable difference of a pixel value from the 'background' in - order for it to be replaced. Useful for filling regions of non- - homogeneous, but similar, colors. + order for it to be replaced. Useful for filling regions of + non-homogeneous, but similar, colors. """ # based on an implementation by Eric S. Raymond # amended by yo1995 @20180806 @@ -406,13 +528,16 @@ def floodfill(image, xy, value, border=None, thresh=0): except (ValueError, IndexError): return # seed point outside image edge = {(x, y)} - full_edge = set() # use a set to keep record of current and previous edge pixels to reduce memory consumption + # use a set to keep record of current and previous edge pixels + # to reduce memory consumption + full_edge = set() while edge: new_edge = set() for (x, y) in edge: # 4 adjacent method - for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)): - if (s, t) in full_edge: - continue # if already processed, skip + for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)): + # If already processed, or if a coordinate is negative, skip + if (s, t) in full_edge or s < 0 or t < 0: + continue try: p = pixel[s, t] except (ValueError, IndexError): @@ -435,6 +560,6 @@ def _color_diff(color1, color2): Uses 1-norm distance to calculate difference between two values. 
""" if isinstance(color2, tuple): - return sum([abs(color1[i]-color2[i]) for i in range(0, len(color2))]) + return sum([abs(color1[i] - color2[i]) for i in range(0, len(color2))]) else: - return abs(color1-color2) + return abs(color1 - color2) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageDraw2.py b/server/www/packages/packages-linux/x64/PIL/ImageDraw2.py index f7902b0..324d869 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageDraw2.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageDraw2.py @@ -38,7 +38,6 @@ class Font(object): class Draw(object): - def __init__(self, image, size=None, color=None): if not hasattr(image, "im"): image = Image.new(image, size, color) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageEnhance.py b/server/www/packages/packages-linux/x64/PIL/ImageEnhance.py index 1b78bfd..534eb4f 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageEnhance.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageEnhance.py @@ -22,7 +22,6 @@ from . import Image, ImageFilter, ImageStat class _Enhance(object): - def enhance(self, factor): """ Returns an enhanced image. @@ -45,14 +44,14 @@ class Color(_Enhance): factor of 0.0 gives a black and white image. A factor of 1.0 gives the original image. """ + def __init__(self, image): self.image = image - self.intermediate_mode = 'L' - if 'A' in image.getbands(): - self.intermediate_mode = 'LA' + self.intermediate_mode = "L" + if "A" in image.getbands(): + self.intermediate_mode = "LA" - self.degenerate = image.convert( - self.intermediate_mode).convert(image.mode) + self.degenerate = image.convert(self.intermediate_mode).convert(image.mode) class Contrast(_Enhance): @@ -62,13 +61,14 @@ class Contrast(_Enhance): to the contrast control on a TV set. An enhancement factor of 0.0 gives a solid grey image. A factor of 1.0 gives the original image. """ + def __init__(self, image): self.image = image mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5) self.degenerate = Image.new("L", image.size, mean).convert(image.mode) - if 'A' in image.getbands(): - self.degenerate.putalpha(image.getchannel('A')) + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) class Brightness(_Enhance): @@ -78,12 +78,13 @@ class Brightness(_Enhance): enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the original image. """ + def __init__(self, image): self.image = image self.degenerate = Image.new(image.mode, image.size, 0) - if 'A' in image.getbands(): - self.degenerate.putalpha(image.getchannel('A')) + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) class Sharpness(_Enhance): @@ -93,9 +94,10 @@ class Sharpness(_Enhance): enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the original image, and a factor of 2.0 gives a sharpened image. """ + def __init__(self, image): self.image = image self.degenerate = image.filter(ImageFilter.SMOOTH) - if 'A' in image.getbands(): - self.degenerate.putalpha(image.getchannel('A')) + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageFile.py b/server/www/packages/packages-linux/x64/PIL/ImageFile.py index 915557a..836e631 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageFile.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageFile.py @@ -27,15 +27,16 @@ # See the README file for information on usage and redistribution. # +import io +import struct +import sys + from . 
import Image from ._util import isPath -import io -import sys -import struct MAXBLOCK = 65536 -SAFEBLOCK = 1024*1024 +SAFEBLOCK = 1024 * 1024 LOAD_TRUNCATED_IMAGES = False @@ -44,7 +45,7 @@ ERRORS = { -2: "decoding error", -3: "unknown error", -8: "bad configuration", - -9: "out of memory error" + -9: "out of memory error", } @@ -62,6 +63,7 @@ def raise_ioerror(error): # -------------------------------------------------------------------- # Helpers + def _tilesort(t): # sort on offset return t[2] @@ -71,6 +73,7 @@ def _tilesort(t): # -------------------------------------------------------------------- # ImageFile base class + class ImageFile(Image.Image): "Base class for image file format handlers." @@ -79,6 +82,8 @@ class ImageFile(Image.Image): self._min_frame = 0 + self.custom_mimetype = None + self.tile = None self.readonly = 1 # until we know better @@ -99,11 +104,13 @@ class ImageFile(Image.Image): try: self._open() - except (IndexError, # end of data - TypeError, # end of data (ord) - KeyError, # unsupported mode - EOFError, # got header but not the first frame - struct.error) as v: + except ( + IndexError, # end of data + TypeError, # end of data (ord) + KeyError, # unsupported mode + EOFError, # got header but not the first frame + struct.error, + ) as v: # close the file only if we have opened it this constructor if self._exclusive_fp: self.fp.close() @@ -113,17 +120,18 @@ class ImageFile(Image.Image): raise SyntaxError("not identified by this driver") def draft(self, mode, size): - "Set draft mode" + """Set draft mode""" pass def get_format_mimetype(self): - if self.format is None: - return - return Image.MIME.get(self.format.upper()) + if self.custom_mimetype: + return self.custom_mimetype + if self.format is not None: + return Image.MIME.get(self.format.upper()) def verify(self): - "Check file integrity" + """Check file integrity""" # raise exception if something's wrong. must be called # directly after open, and closes file when finished. @@ -132,7 +140,7 @@ class ImageFile(Image.Image): self.fp = None def load(self): - "Load image data based on tile list" + """Load image data based on tile list""" pixel = Image.Image.load(self) @@ -144,7 +152,7 @@ class ImageFile(Image.Image): self.map = None use_mmap = self.filename and len(self.tile) == 1 # As of pypy 2.1.0, memory mapping was failing here. - use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info') + use_mmap = use_mmap and not hasattr(sys, "pypy_version_info") readonly = 0 @@ -165,9 +173,12 @@ class ImageFile(Image.Image): if use_mmap: # try memory mapping decoder_name, extents, offset, args = self.tile[0] - if decoder_name == "raw" and len(args) >= 3 and \ - args[0] == self.mode and \ - args[0] in Image._MAPMODES: + if ( + decoder_name == "raw" + and len(args) >= 3 + and args[0] == self.mode + and args[0] in Image._MAPMODES + ): try: if hasattr(Image.core, "map"): # use built-in mapper WIN32 only @@ -175,16 +186,18 @@ class ImageFile(Image.Image): self.map.seek(offset) self.im = self.map.readimage( self.mode, self.size, args[1], args[2] - ) + ) else: # use mmap, if possible import mmap + with open(self.filename, "r") as fp: - self.map = mmap.mmap(fp.fileno(), 0, - access=mmap.ACCESS_READ) + self.map = mmap.mmap( + fp.fileno(), 0, access=mmap.ACCESS_READ + ) self.im = Image.core.map_buffer( - self.map, self.size, decoder_name, extents, - offset, args) + self.map, self.size, decoder_name, extents, offset, args + ) readonly = 1 # After trashing self.im, # we might need to reload the palette data. 
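Reviewer note: stepping back to the ImageDraw changes above, `text()`, `multiline_text()` and `textsize()` now accept `language`, `stroke_width` and `stroke_fill`; the stroke is rendered first and the fill overprinted at a `(stroke_width, stroke_width)` offset. A sketch assuming a TrueType font is available at the hypothetical path shown:

```python
from PIL import Image, ImageDraw, ImageFont

im = Image.new("RGB", (240, 80), "white")
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("DejaVuSans.ttf", 32)  # hypothetical font path

# the stroke is drawn first, then the fill on top at a (2, 2) offset
draw.text((10, 10), "Hello", font=font, fill="yellow",
          stroke_width=2, stroke_fill="black")
```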
@@ -206,8 +219,9 @@ class ImageFile(Image.Image): prefix = b"" for decoder_name, extents, offset, args in self.tile: - decoder = Image._getdecoder(self.mode, decoder_name, - args, self.decoderconfig) + decoder = Image._getdecoder( + self.mode, decoder_name, args, self.decoderconfig + ) try: seek(offset) decoder.setimage(self.im, extents) @@ -230,10 +244,10 @@ class ImageFile(Image.Image): if LOAD_TRUNCATED_IMAGES: break else: - self.tile = [] - raise IOError("image file is truncated " - "(%d bytes not processed)" % - len(b)) + raise IOError( + "image file is truncated " + "(%d bytes not processed)" % len(b) + ) b = b + s n, err_code = decoder.decode(b) @@ -261,8 +275,7 @@ class ImageFile(Image.Image): def load_prepare(self): # create image memory if necessary - if not self.im or\ - self.im.mode != self.mode or self.im.size != self.size: + if not self.im or self.im.mode != self.mode or self.im.size != self.size: self.im = Image.core.new(self.mode, self.size) # create palette (optional) if self.mode == "P": @@ -281,11 +294,15 @@ class ImageFile(Image.Image): # pass def _seek_check(self, frame): - if (frame < self._min_frame or + if ( + frame < self._min_frame # Only check upper limit on frames if additional seek operations # are not required to do so - (not (hasattr(self, "_n_frames") and self._n_frames is None) and - frame >= self.n_frames+self._min_frame)): + or ( + not (hasattr(self, "_n_frames") and self._n_frames is None) + and frame >= self.n_frames + self._min_frame + ) + ): raise EOFError("attempt to seek outside sequence") return self.tell() != frame @@ -300,9 +317,7 @@ class StubImageFile(ImageFile): """ def _open(self): - raise NotImplementedError( - "StubImageFile subclass must implement _open" - ) + raise NotImplementedError("StubImageFile subclass must implement _open") def load(self): loader = self._load() @@ -315,10 +330,8 @@ class StubImageFile(ImageFile): self.__dict__ = image.__dict__ def _load(self): - "(Hook) Find actual image loader." - raise NotImplementedError( - "StubImageFile subclass must implement _load" - ) + """(Hook) Find actual image loader.""" + raise NotImplementedError("StubImageFile subclass must implement _load") class Parser(object): @@ -326,6 +339,7 @@ class Parser(object): Incremental image parser. This class implements the standard feed/close consumer interface. 
""" + incremental = None image = None data = None @@ -410,15 +424,13 @@ class Parser(object): im.load_prepare() d, e, o, a = im.tile[0] im.tile = [] - self.decoder = Image._getdecoder( - im.mode, d, a, im.decoderconfig - ) + self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig) self.decoder.setimage(im.im, e) # calculate decoder offset self.offset = o if self.offset <= len(self.data): - self.data = self.data[self.offset:] + self.data = self.data[self.offset :] self.offset = 0 self.image = im @@ -460,6 +472,7 @@ class Parser(object): # -------------------------------------------------------------------- + def _save(im, fp, tile, bufsize=0): """Helper to save image based on tile list @@ -489,7 +502,7 @@ def _save(im, fp, tile, bufsize=0): for e, b, o, a in tile: e = Image._getencoder(im.mode, e, a, im.encoderconfig) if o > 0: - fp.seek(o, 0) + fp.seek(o) e.setimage(im.im, b) if e.pushes_fd: e.setfd(fp) @@ -508,7 +521,7 @@ def _save(im, fp, tile, bufsize=0): for e, b, o, a in tile: e = Image._getencoder(im.mode, e, a, im.encoderconfig) if o > 0: - fp.seek(o, 0) + fp.seek(o) e.setimage(im.im, b) if e.pushes_fd: e.setfd(fp) @@ -554,8 +567,7 @@ class PyCodecState(object): self.yoff = 0 def extents(self): - return (self.xoff, self.yoff, - self.xoff+self.xsize, self.yoff+self.ysize) + return (self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize) class PyDecoder(object): @@ -593,8 +605,6 @@ class PyDecoder(object): Override to perform the decoding process. :param buffer: A bytes object with the data to be decoded. - If `handles_eof` is set, then `buffer` will be empty and `self.fd` - will be set. :returns: A tuple of (bytes consumed, errcode). If finished with decoding return <0 for the bytes consumed. Err codes are from `ERRORS` @@ -647,8 +657,10 @@ class PyDecoder(object): if self.state.xsize <= 0 or self.state.ysize <= 0: raise ValueError("Size cannot be negative") - if (self.state.xsize + self.state.xoff > self.im.size[0] or - self.state.ysize + self.state.yoff > self.im.size[1]): + if ( + self.state.xsize + self.state.xoff > self.im.size[0] + or self.state.ysize + self.state.yoff > self.im.size[1] + ): raise ValueError("Tile cannot extend outside image") def set_as_raw(self, data, rawmode=None): @@ -663,7 +675,7 @@ class PyDecoder(object): if not rawmode: rawmode = self.mode - d = Image._getdecoder(self.mode, 'raw', (rawmode)) + d = Image._getdecoder(self.mode, "raw", (rawmode)) d.setimage(self.im, self.state.extents()) s = d.decode(data) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageFilter.py b/server/www/packages/packages-linux/x64/PIL/ImageFilter.py index de99e64..fa4162b 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageFilter.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageFilter.py @@ -57,12 +57,13 @@ class Kernel(BuiltinFilter): :param offset: Offset. If given, this value is added to the result, after it has been divided by the scale factor. """ + name = "Kernel" def __init__(self, size, kernel, scale=None, offset=0): if scale is None: # default scale is sum of kernel - scale = functools.reduce(lambda a, b: a+b, kernel) + scale = functools.reduce(lambda a, b: a + b, kernel) if size[0] * size[1] != len(kernel): raise ValueError("not enough coefficients in kernel") self.filterargs = size, scale, offset, kernel @@ -78,6 +79,7 @@ class RankFilter(Filter): ``size * size / 2`` for a median filter, ``size * size - 1`` for a max filter, etc. 
""" + name = "Rank" def __init__(self, size, rank): @@ -87,7 +89,7 @@ class RankFilter(Filter): def filter(self, image): if image.mode == "P": raise ValueError("cannot filter palette images") - image = image.expand(self.size//2, self.size//2) + image = image.expand(self.size // 2, self.size // 2) return image.rankfilter(self.size, self.rank) @@ -98,11 +100,12 @@ class MedianFilter(RankFilter): :param size: The kernel size, in pixels. """ + name = "Median" def __init__(self, size=3): self.size = size - self.rank = size*size//2 + self.rank = size * size // 2 class MinFilter(RankFilter): @@ -112,6 +115,7 @@ class MinFilter(RankFilter): :param size: The kernel size, in pixels. """ + name = "Min" def __init__(self, size=3): @@ -126,11 +130,12 @@ class MaxFilter(RankFilter): :param size: The kernel size, in pixels. """ + name = "Max" def __init__(self, size=3): self.size = size - self.rank = size*size-1 + self.rank = size * size - 1 class ModeFilter(Filter): @@ -141,6 +146,7 @@ class ModeFilter(Filter): :param size: The kernel size, in pixels. """ + name = "Mode" def __init__(self, size=3): @@ -155,6 +161,7 @@ class GaussianBlur(MultibandFilter): :param radius: Blur radius. """ + name = "GaussianBlur" def __init__(self, radius=2): @@ -175,6 +182,7 @@ class BoxBlur(MultibandFilter): returns an identical image. Radius 1 takes 1 pixel in each direction, i.e. 9 pixels in total. """ + name = "BoxBlur" def __init__(self, radius): @@ -197,7 +205,8 @@ class UnsharpMask(MultibandFilter): .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking - """ + """ # noqa: E501 + name = "UnsharpMask" def __init__(self, radius=2, percent=150, threshold=3): @@ -211,96 +220,116 @@ class UnsharpMask(MultibandFilter): class BLUR(BuiltinFilter): name = "Blur" + # fmt: off filterargs = (5, 5), 16, 0, ( - 1, 1, 1, 1, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 1, 1, 1, 1 - ) + 1, 1, 1, 1, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 1, 1, 1, 1, + ) + # fmt: on class CONTOUR(BuiltinFilter): name = "Contour" + # fmt: off filterargs = (3, 3), 1, 255, ( -1, -1, -1, -1, 8, -1, - -1, -1, -1 - ) + -1, -1, -1, + ) + # fmt: on class DETAIL(BuiltinFilter): name = "Detail" + # fmt: off filterargs = (3, 3), 6, 0, ( - 0, -1, 0, + 0, -1, 0, -1, 10, -1, - 0, -1, 0 - ) + 0, -1, 0, + ) + # fmt: on class EDGE_ENHANCE(BuiltinFilter): name = "Edge-enhance" + # fmt: off filterargs = (3, 3), 2, 0, ( -1, -1, -1, -1, 10, -1, - -1, -1, -1 - ) + -1, -1, -1, + ) + # fmt: on class EDGE_ENHANCE_MORE(BuiltinFilter): name = "Edge-enhance More" + # fmt: off filterargs = (3, 3), 1, 0, ( -1, -1, -1, -1, 9, -1, - -1, -1, -1 - ) + -1, -1, -1, + ) + # fmt: on class EMBOSS(BuiltinFilter): name = "Emboss" + # fmt: off filterargs = (3, 3), 1, 128, ( - -1, 0, 0, - 0, 1, 0, - 0, 0, 0 - ) + -1, 0, 0, + 0, 1, 0, + 0, 0, 0, + ) + # fmt: on class FIND_EDGES(BuiltinFilter): name = "Find Edges" + # fmt: off filterargs = (3, 3), 1, 0, ( -1, -1, -1, -1, 8, -1, - -1, -1, -1 - ) + -1, -1, -1, + ) + # fmt: on class SHARPEN(BuiltinFilter): name = "Sharpen" + # fmt: off filterargs = (3, 3), 16, 0, ( -2, -2, -2, -2, 32, -2, - -2, -2, -2 - ) + -2, -2, -2, + ) + # fmt: on class SMOOTH(BuiltinFilter): name = "Smooth" + # fmt: off filterargs = (3, 3), 13, 0, ( - 1, 1, 1, - 1, 5, 1, - 1, 1, 1 - ) + 1, 1, 1, + 1, 5, 1, + 1, 1, 1, + ) + # fmt: on class SMOOTH_MORE(BuiltinFilter): name = "Smooth More" + # fmt: off filterargs = (5, 5), 100, 0, ( - 1, 1, 1, 1, 1, - 1, 5, 5, 5, 1, - 1, 5, 44, 5, 1, - 1, 5, 5, 5, 1, - 
1, 1, 1, 1, 1 - ) + 1, 1, 1, 1, 1, + 1, 5, 5, 5, 1, + 1, 5, 44, 5, 1, + 1, 5, 5, 5, 1, + 1, 1, 1, 1, 1, + ) + # fmt: on class Color3DLUT(MultibandFilter): @@ -327,6 +356,7 @@ class Color3DLUT(MultibandFilter): than ``channels`` channels. Default is ``None``, which means that mode wouldn't be changed. """ + name = "Color 3D LUT" def __init__(self, size, table, channels=3, target_mode=None, **kwargs): @@ -338,7 +368,7 @@ class Color3DLUT(MultibandFilter): # Hidden flag `_copy_table=False` could be used to avoid extra copying # of the table if the table is specially made for the constructor. - copy_table = kwargs.get('_copy_table', True) + copy_table = kwargs.get("_copy_table", True) items = size[0] * size[1] * size[2] wrong_size = False @@ -346,8 +376,11 @@ class Color3DLUT(MultibandFilter): if copy_table: table = table.copy() - if table.shape in [(items * channels,), (items, channels), - (size[2], size[1], size[0], channels)]: + if table.shape in [ + (items * channels,), + (items, channels), + (size[2], size[1], size[0], channels), + ]: table = table.reshape(items * channels) else: wrong_size = True @@ -363,7 +396,8 @@ class Color3DLUT(MultibandFilter): if len(pixel) != channels: raise ValueError( "The elements of the table should " - "have a length of {}.".format(channels)) + "have a length of {}.".format(channels) + ) table.extend(pixel) if wrong_size or len(table) != items * channels: @@ -371,7 +405,9 @@ class Color3DLUT(MultibandFilter): "The table should have either channels * size**3 float items " "or size**3 items of channels-sized tuples with floats. " "Table should be: {}x{}x{}x{}. Actual length: {}".format( - channels, size[0], size[1], size[2], len(table))) + channels, size[0], size[1], size[2], len(table) + ) + ) self.table = table @staticmethod @@ -379,8 +415,9 @@ class Color3DLUT(MultibandFilter): try: _, _, _ = size except ValueError: - raise ValueError("Size should be either an integer or " - "a tuple of three integers.") + raise ValueError( + "Size should be either an integer or a tuple of three integers." + ) except TypeError: size = (size, size, size) size = [int(x) for x in size] @@ -411,15 +448,20 @@ class Color3DLUT(MultibandFilter): for b in range(size3D): for g in range(size2D): for r in range(size1D): - table[idx_out:idx_out + channels] = callback( - r / (size1D-1), g / (size2D-1), b / (size3D-1)) + table[idx_out : idx_out + channels] = callback( + r / (size1D - 1), g / (size2D - 1), b / (size3D - 1) + ) idx_out += channels - return cls((size1D, size2D, size3D), table, channels=channels, - target_mode=target_mode, _copy_table=False) + return cls( + (size1D, size2D, size3D), + table, + channels=channels, + target_mode=target_mode, + _copy_table=False, + ) - def transform(self, callback, with_normals=False, channels=None, - target_mode=None): + def transform(self, callback, with_normals=False, channels=None, target_mode=None): """Transforms the table values using provided callback and returns a new LUT with altered values. 
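Reviewer note: the ImageFilter hunks are again mostly Black formatting, plus `# fmt: off` guards so the convolution kernels keep their grid layout; `Color3DLUT.generate()` and `transform()` keep their semantics. A small sketch exercising a built-in kernel filter and an identity LUT built with `generate()`:

```python
from PIL import Image, ImageFilter

im = Image.new("RGB", (16, 16), (200, 40, 40))
smoothed = im.filter(ImageFilter.SMOOTH)         # 3x3 kernel shown above
blurred = im.filter(ImageFilter.GaussianBlur(2))

# identity table: generate() calls back with r, g, b in [0, 1]
identity = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
same = im.filter(identity)  # near-exact round trip through the LUT
```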
@@ -450,24 +492,31 @@ class Color3DLUT(MultibandFilter): for b in range(size3D): for g in range(size2D): for r in range(size1D): - values = self.table[idx_in:idx_in + ch_in] + values = self.table[idx_in : idx_in + ch_in] if with_normals: - values = callback(r / (size1D-1), g / (size2D-1), - b / (size3D-1), *values) + values = callback( + r / (size1D - 1), + g / (size2D - 1), + b / (size3D - 1), + *values + ) else: values = callback(*values) - table[idx_out:idx_out + ch_out] = values + table[idx_out : idx_out + ch_out] = values idx_in += ch_in idx_out += ch_out - return type(self)(self.size, table, channels=ch_out, - target_mode=target_mode or self.mode, - _copy_table=False) + return type(self)( + self.size, + table, + channels=ch_out, + target_mode=target_mode or self.mode, + _copy_table=False, + ) def __repr__(self): r = [ - "{} from {}".format(self.__class__.__name__, - self.table.__class__.__name__), + "{} from {}".format(self.__class__.__name__, self.table.__class__.__name__), "size={:d}x{:d}x{:d}".format(*self.size), "channels={:d}".format(self.channels), ] @@ -479,5 +528,11 @@ class Color3DLUT(MultibandFilter): from . import Image return image.color_lut_3d( - self.mode or image.mode, Image.LINEAR, self.channels, - self.size[0], self.size[1], self.size[2], self.table) + self.mode or image.mode, + Image.LINEAR, + self.channels, + self.size[0], + self.size[1], + self.size[2], + self.table, + ) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageFont.py b/server/www/packages/packages-linux/x64/PIL/ImageFont.py index 5384a72..5cce9af 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageFont.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageFont.py @@ -25,11 +25,12 @@ # See the README file for information on usage and redistribution. # -from . import Image -from ._util import isDirectory, isPath, py3 import os import sys +from . import Image +from ._util import isDirectory, isPath, py3 + LAYOUT_BASIC = 0 LAYOUT_RAQM = 1 @@ -72,7 +73,7 @@ class ImageFont(object): try: fullname = os.path.splitext(filename)[0] + ext image = Image.open(fullname) - except: + except Exception: pass else: if image and image.mode in ("1", "L"): @@ -98,7 +99,7 @@ class ImageFont(object): self.info.append(s) # read PILfont metrics - data = file.read(256*20) + data = file.read(256 * 20) # check image if image.mode not in ("1", "L"): @@ -109,9 +110,33 @@ class ImageFont(object): self.font = Image.core.font(image.im, data) def getsize(self, text, *args, **kwargs): + """ + Returns width and height (in pixels) of given text. + + :param text: Text to measure. + + :return: (width, height) + """ return self.font.getsize(text) def getmask(self, text, mode="", *args, **kwargs): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. + """ return self.font.getmask(text, mode) @@ -119,11 +144,11 @@ class ImageFont(object): # Wrapper for FreeType fonts. Application code should use the # truetype factory function to create font objects. 
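Reviewer note: the FreeTypeFont rework below threads `language` and `stroke_width` through `getsize()`/`getmask2()` (each dimension grows by `2 * stroke_width`) and adds the variation-font API (`get_variation_names`, `set_variation_by_name`, `get_variation_axes`, `set_variation_by_axes`), which requires FreeType 2.9.1 or newer. A sketch with a hypothetical variable-font path, assuming the font exposes a "Bold" named instance:

```python
from PIL import ImageFont

font = ImageFont.truetype("SomeVariableFont.ttf", 24)  # hypothetical path

w, h = font.getsize("Hi", stroke_width=2)  # 4 px larger each way than unstroked

try:
    print(font.get_variation_names())   # e.g. [b'Regular', b'Bold', ...]
    font.set_variation_by_name("Bold")
except (NotImplementedError, IOError, ValueError):
    pass  # old FreeType, not a variation font, or no such named style
```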
+ class FreeTypeFont(object): "FreeType font wrapper (requires _imagingft service)" - def __init__(self, font=None, size=10, index=0, encoding="", - layout_engine=None): + def __init__(self, font=None, size=10, index=0, encoding="", layout_engine=None): # FIXME: use service provider instead self.path = font @@ -135,60 +160,317 @@ class FreeTypeFont(object): layout_engine = LAYOUT_BASIC if core.HAVE_RAQM: layout_engine = LAYOUT_RAQM - if layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM: + elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM: layout_engine = LAYOUT_BASIC self.layout_engine = layout_engine - if isPath(font): - self.font = core.getfont(font, size, index, encoding, - layout_engine=layout_engine) - else: - self.font_bytes = font.read() + def load_from_bytes(f): + self.font_bytes = f.read() self.font = core.getfont( - "", size, index, encoding, self.font_bytes, layout_engine) + "", size, index, encoding, self.font_bytes, layout_engine + ) + + if isPath(font): + if sys.platform == "win32": + font_bytes_path = font if isinstance(font, bytes) else font.encode() + try: + font_bytes_path.decode("ascii") + except UnicodeDecodeError: + # FreeType cannot load fonts with non-ASCII characters on Windows + # So load it into memory first + with open(font, "rb") as f: + load_from_bytes(f) + return + self.font = core.getfont( + font, size, index, encoding, layout_engine=layout_engine + ) + else: + load_from_bytes(font) def _multiline_split(self, text): split_character = "\n" if isinstance(text, str) else b"\n" return text.split(split_character) def getname(self): + """ + :return: A tuple of the font family (e.g. Helvetica) and the font style + (e.g. Bold) + """ return self.font.family, self.font.style def getmetrics(self): + """ + :return: A tuple of the font ascent (the distance from the baseline to + the highest outline point) and descent (the distance from the + baseline to the lowest outline point, a negative value) + """ return self.font.ascent, self.font.descent - def getsize(self, text, direction=None, features=None): - size, offset = self.font.getsize(text, direction, features) - return (size[0] + offset[0], size[1] + offset[1]) + def getsize( + self, text, direction=None, features=None, language=None, stroke_width=0 + ): + """ + Returns width and height (in pixels) of given text if rendered in font with + provided direction, features, and language. - def getsize_multiline(self, text, direction=None, - spacing=4, features=None): + :param text: Text to measure. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. 
versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :return: (width, height) + """ + size, offset = self.font.getsize(text, direction, features, language) + return ( + size[0] + stroke_width * 2 + offset[0], + size[1] + stroke_width * 2 + offset[1], + ) + + def getsize_multiline( + self, + text, + direction=None, + spacing=4, + features=None, + language=None, + stroke_width=0, + ): + """ + Returns width and height (in pixels) of given text if rendered in font + with provided direction, features, and language, while respecting + newline characters. + + :param text: Text to measure. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param spacing: The vertical gap between lines, defaulting to 4 pixels. + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :return: (width, height) + """ max_width = 0 lines = self._multiline_split(text) - line_spacing = self.getsize('A')[1] + spacing + line_spacing = self.getsize("A", stroke_width=stroke_width)[1] + spacing for line in lines: - line_width, line_height = self.getsize(line, direction, features) + line_width, line_height = self.getsize( + line, direction, features, language, stroke_width + ) max_width = max(max_width, line_width) - return max_width, len(lines)*line_spacing - spacing + return max_width, len(lines) * line_spacing - spacing def getoffset(self, text): + """ + Returns the offset of given text. This is the gap between the + starting coordinate and the first marking. Note that this gap is + included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`. + + :param text: Text to measure. + + :return: A tuple of the x and y offset + """ return self.font.getsize(text)[1] - def getmask(self, text, mode="", direction=None, features=None): - return self.getmask2(text, mode, direction=direction, - features=features)[0] + def getmask( + self, + text, + mode="", + direction=None, + features=None, + language=None, + stroke_width=0, + ): + """ + Create a bitmap for the text. - def getmask2(self, text, mode="", fill=Image.core.fill, direction=None, - features=None, *args, **kwargs): - size, offset = self.font.getsize(text, direction, features) + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. 
versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. + """ + return self.getmask2( + text, + mode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + )[0] + + def getmask2( + self, + text, + mode="", + fill=Image.core.fill, + direction=None, + features=None, + language=None, + stroke_width=0, + *args, + **kwargs + ): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. 
versionadded:: 6.2.0 + + :return: A tuple of an internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module, and the text offset, the + gap between the starting coordinate and the first marking + """ + size, offset = self.font.getsize(text, direction, features, language) + size = size[0] + stroke_width * 2, size[1] + stroke_width * 2 im = fill("L", size, 0) - self.font.render(text, im.id, mode == "1", direction, features) + self.font.render( + text, im.id, mode == "1", direction, features, language, stroke_width + ) return im, offset - def font_variant(self, font=None, size=None, index=None, encoding=None, - layout_engine=None): + def font_variant( + self, font=None, size=None, index=None, encoding=None, layout_engine=None + ): """ Create a copy of this FreeTypeFont object, using any specified arguments to override the settings. @@ -203,9 +485,62 @@ class FreeTypeFont(object): size=self.size if size is None else size, index=self.index if index is None else index, encoding=self.encoding if encoding is None else encoding, - layout_engine=self.layout_engine if layout_engine is None else layout_engine + layout_engine=layout_engine or self.layout_engine, ) + def get_variation_names(self): + """ + :returns: A list of the named styles in a variation font. + :exception IOError: If the font is not a variation font. + """ + try: + names = self.font.getvarnames() + except AttributeError: + raise NotImplementedError("FreeType 2.9.1 or greater is required") + return [name.replace(b"\x00", b"") for name in names] + + def set_variation_by_name(self, name): + """ + :param name: The name of the style. + :exception IOError: If the font is not a variation font. + """ + names = self.get_variation_names() + if not isinstance(name, bytes): + name = name.encode() + index = names.index(name) + + if index == getattr(self, "_last_variation_index", None): + # When the same name is set twice in a row, + # there is an 'unknown freetype error' + # https://savannah.nongnu.org/bugs/?56186 + return + self._last_variation_index = index + + self.font.setvarname(index) + + def get_variation_axes(self): + """ + :returns: A list of the axes in a variation font. + :exception IOError: If the font is not a variation font. + """ + try: + axes = self.font.getvaraxes() + except AttributeError: + raise NotImplementedError("FreeType 2.9.1 or greater is required") + for axis in axes: + axis["name"] = axis["name"].replace(b"\x00", b"") + return axes + + def set_variation_by_axes(self, axes): + """ + :param axes: A list of values for each axis. + :exception IOError: If the font is not a variation font. + """ + try: + self.font.setvaraxes(axes) + except AttributeError: + raise NotImplementedError("FreeType 2.9.1 or greater is required") + class TransposedFont(object): "Wrapper for writing rotated or mirrored text" @@ -250,35 +585,62 @@ def load(filename): return f -def truetype(font=None, size=10, index=0, encoding="", - layout_engine=None): +def truetype(font=None, size=10, index=0, encoding="", layout_engine=None): """ Load a TrueType or OpenType font from a file or file-like object, and create a font object. This function loads a font object from the given file or file-like object, and creates a font object for a font of the given size. + Pillow uses FreeType to open font files. If you are opening many fonts + simultaneously on Windows, be aware that Windows limits the number of files + that can be open in C at once to 512. 
If you approach that limit, an + ``OSError`` may be thrown, reporting that FreeType "cannot open resource". + This function requires the _imagingft service. :param font: A filename or file-like object containing a TrueType font. - Under Windows, if the file is not found in this filename, - the loader also looks in Windows :file:`fonts/` directory. + If the file is not found in this filename, the loader may also + search in other directories, such as the :file:`fonts/` + directory on Windows or :file:`/Library/Fonts/`, + :file:`/System/Library/Fonts/` and :file:`~/Library/Fonts/` on + macOS. + :param size: The requested size, in points. :param index: Which font face to load (default is first available face). - :param encoding: Which font encoding to use (default is Unicode). Common - encodings are "unic" (Unicode), "symb" (Microsoft - Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert), - and "armn" (Apple Roman). See the FreeType documentation - for more information. + :param encoding: Which font encoding to use (default is Unicode). Possible + encodings include (see the FreeType documentation for more + information): + + * "unic" (Unicode) + * "symb" (Microsoft Symbol) + * "ADOB" (Adobe Standard) + * "ADBE" (Adobe Expert) + * "ADBC" (Adobe Custom) + * "armn" (Apple Roman) + * "sjis" (Shift JIS) + * "gb " (PRC) + * "big5" + * "wans" (Extended Wansung) + * "joha" (Johab) + * "lat1" (Latin-1) + + This specifies the character set to use. It does not alter the + encoding of any text provided in subsequent operations. :param layout_engine: Which layout engine to use, if available: `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`. :return: A font object. :exception IOError: If the file could not be read. """ - try: + def freetype(font): return FreeTypeFont(font, size, index, encoding, layout_engine) + + try: + return freetype(font) except IOError: + if not isPath(font): + raise ttf_filename = os.path.basename(font) dirs = [] @@ -289,17 +651,19 @@ def truetype(font=None, size=10, index=0, encoding="", windir = os.environ.get("WINDIR") if windir: dirs.append(os.path.join(windir, "fonts")) - elif sys.platform in ('linux', 'linux2'): + elif sys.platform in ("linux", "linux2"): lindirs = os.environ.get("XDG_DATA_DIRS", "") if not lindirs: # According to the freedesktop spec, XDG_DATA_DIRS should # default to /usr/share - lindirs = '/usr/share' - dirs += [os.path.join(lindir, "fonts") - for lindir in lindirs.split(":")] - elif sys.platform == 'darwin': - dirs += ['/Library/Fonts', '/System/Library/Fonts', - os.path.expanduser('~/Library/Fonts')] + lindirs = "/usr/share" + dirs += [os.path.join(lindir, "fonts") for lindir in lindirs.split(":")] + elif sys.platform == "darwin": + dirs += [ + "/Library/Fonts", + "/System/Library/Fonts", + os.path.expanduser("~/Library/Fonts"), + ] ext = os.path.splitext(ttf_filename)[1] first_font_with_a_different_extension = None @@ -307,21 +671,15 @@ def truetype(font=None, size=10, index=0, encoding="", for walkroot, walkdir, walkfilenames in os.walk(directory): for walkfilename in walkfilenames: if ext and walkfilename == ttf_filename: + return freetype(os.path.join(walkroot, walkfilename)) + elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: fontpath = os.path.join(walkroot, walkfilename) - return FreeTypeFont(fontpath, size, index, - encoding, layout_engine) - elif (not ext and - os.path.splitext(walkfilename)[0] == ttf_filename): - fontpath = os.path.join(walkroot, walkfilename) - if os.path.splitext(fontpath)[1] == '.ttf': - return 
FreeTypeFont(fontpath, size, index, - encoding, layout_engine) - if not ext \ - and first_font_with_a_different_extension is None: + if os.path.splitext(fontpath)[1] == ".ttf": + return freetype(fontpath) + if not ext and first_font_with_a_different_extension is None: first_font_with_a_different_extension = fontpath if first_font_with_a_different_extension: - return FreeTypeFont(first_font_with_a_different_extension, size, - index, encoding, layout_engine) + return freetype(first_font_with_a_different_extension) raise @@ -357,10 +715,13 @@ def load_default(): """ from io import BytesIO import base64 + f = ImageFont() f._load_pilfont_data( # courB08 - BytesIO(base64.b64decode(b''' + BytesIO( + base64.b64decode( + b""" UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA @@ -452,7 +813,13 @@ AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA//// +QAGAAIAzgAKANUAEw== -''')), Image.open(BytesIO(base64.b64decode(b''' +""" + ) + ), + Image.open( + BytesIO( + base64.b64decode( + b""" iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9 M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g @@ -476,5 +843,9 @@ evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v// Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR w7IkEbzhVQAAAABJRU5ErkJggg== -''')))) +""" + ) + ) + ), + ) return f diff --git a/server/www/packages/packages-linux/x64/PIL/ImageGrab.py b/server/www/packages/packages-linux/x64/PIL/ImageGrab.py index 712b02c..9b44135 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageGrab.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageGrab.py @@ -15,11 +15,9 @@ # See the README file for information on usage and redistribution. # -from . import Image - import sys -if sys.platform not in ["win32", "darwin"]: - raise ImportError("ImageGrab is macOS and Windows only") + +from . 
import Image if sys.platform == "win32": grabber = Image.core.grabscreen @@ -27,38 +25,51 @@ elif sys.platform == "darwin": import os import tempfile import subprocess +else: + raise ImportError("ImageGrab is macOS and Windows only") -def grab(bbox=None): +def grab(bbox=None, include_layered_windows=False, all_screens=False): if sys.platform == "darwin": - fh, filepath = tempfile.mkstemp('.png') + fh, filepath = tempfile.mkstemp(".png") os.close(fh) - subprocess.call(['screencapture', '-x', filepath]) + subprocess.call(["screencapture", "-x", filepath]) im = Image.open(filepath) im.load() os.unlink(filepath) + if bbox: + im = im.crop(bbox) else: - size, data = grabber() + offset, size, data = grabber(include_layered_windows, all_screens) im = Image.frombytes( - "RGB", size, data, + "RGB", + size, + data, # RGB, 32-bit line padding, origin lower left corner - "raw", "BGR", (size[0]*3 + 3) & -4, -1 - ) - if bbox: - im = im.crop(bbox) + "raw", + "BGR", + (size[0] * 3 + 3) & -4, + -1, + ) + if bbox: + x0, y0 = offset + left, top, right, bottom = bbox + im = im.crop((left - x0, top - y0, right - x0, bottom - y0)) return im def grabclipboard(): if sys.platform == "darwin": - fh, filepath = tempfile.mkstemp('.jpg') + fh, filepath = tempfile.mkstemp(".jpg") os.close(fh) commands = [ - "set theFile to (open for access POSIX file \""+filepath+"\" with write permission)", + 'set theFile to (open for access POSIX file "' + + filepath + + '" with write permission)', "try", - "write (the clipboard as JPEG picture) to theFile", + " write (the clipboard as JPEG picture) to theFile", "end try", - "close access theFile" + "close access theFile", ] script = ["osascript"] for command in commands: @@ -76,5 +87,6 @@ def grabclipboard(): if isinstance(data, bytes): from . 
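A sketch of the reworked grab() above (coordinates hypothetical): the new flags are Windows-only pass-throughs to the C grabber, and bbox is now interpreted in virtual-screen coordinates shifted by the returned offset.

from PIL import ImageGrab

# Windows: capture every monitor; negative coordinates are valid when a
# monitor sits to the left of (or above) the primary display.
im = ImageGrab.grab(bbox=(-1920, 0, 0, 1080), all_screens=True)
im.save("second_monitor.png")
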
import BmpImagePlugin import io + return BmpImagePlugin.DibImageFile(io.BytesIO(data)) return data diff --git a/server/www/packages/packages-linux/x64/PIL/ImageMath.py b/server/www/packages/packages-linux/x64/PIL/ImageMath.py index d985877..392151c 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageMath.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageMath.py @@ -22,13 +22,14 @@ try: import builtins except ImportError: import __builtin__ + builtins = __builtin__ VERBOSE = 0 def _isconstant(v): - return isinstance(v, int) or isinstance(v, float) + return isinstance(v, (int, float)) class _Operand(object): @@ -61,7 +62,7 @@ class _Operand(object): out = Image.new(mode or im1.mode, im1.size, None) im1.load() try: - op = getattr(_imagingmath, op+"_"+im1.mode) + op = getattr(_imagingmath, op + "_" + im1.mode) except AttributeError: raise TypeError("bad operand type for '%s'" % op) _imagingmath.unop(op, out.im.id, im1.im.id) @@ -78,8 +79,7 @@ class _Operand(object): raise ValueError("mode mismatch") if im1.size != im2.size: # crop both arguments to a common size - size = (min(im1.size[0], im2.size[0]), - min(im1.size[1], im2.size[1])) + size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1])) if im1.size != size: im1 = im1.crop((0, 0) + size) if im2.size != size: @@ -90,7 +90,7 @@ class _Operand(object): im1.load() im2.load() try: - op = getattr(_imagingmath, op+"_"+im1.mode) + op = getattr(_imagingmath, op + "_" + im1.mode) except AttributeError: raise TypeError("bad operand type for '%s'" % op) _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageMode.py b/server/www/packages/packages-linux/x64/PIL/ImageMode.py index 2b3377a..596be7b 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageMode.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageMode.py @@ -37,20 +37,28 @@ def getmode(mode): # initialize mode cache from . import Image + modes = {} # core modes for m, (basemode, basetype, bands) in Image._MODEINFO.items(): modes[m] = ModeDescriptor(m, bands, basemode, basetype) # extra experimental modes - modes["RGBa"] = ModeDescriptor("RGBa", - ("R", "G", "B", "a"), "RGB", "L") + modes["RGBa"] = ModeDescriptor("RGBa", ("R", "G", "B", "a"), "RGB", "L") modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L") modes["La"] = ModeDescriptor("La", ("L", "a"), "L", "L") modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L") # mapping modes - modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L") - modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L") - modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L") + for i16mode in ( + "I;16", + "I;16S", + "I;16L", + "I;16LS", + "I;16B", + "I;16BS", + "I;16N", + "I;16NS", + ): + modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L") # set global mode cache atomically _modes = modes return _modes[mode] diff --git a/server/www/packages/packages-linux/x64/PIL/ImageMorph.py b/server/www/packages/packages-linux/x64/PIL/ImageMorph.py index 54ceb79..6119923 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageMorph.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageMorph.py @@ -7,11 +7,25 @@ from __future__ import print_function -from . import Image, _imagingmorph import re +from . 
import Image, _imagingmorph + LUT_SIZE = 1 << 9 +# fmt: off +ROTATION_MATRIX = [ + 6, 3, 0, + 7, 4, 1, + 8, 5, 2, +] +MIRROR_MATRIX = [ + 2, 1, 0, + 5, 4, 3, + 8, 7, 6, +] +# fmt: on + class LutBuilder(object): """A class for building a MorphLut from a descriptive language @@ -48,6 +62,7 @@ class LutBuilder(object): lut = lb.build_lut() """ + def __init__(self, patterns=None, op_name=None): if patterns is not None: self.patterns = patterns @@ -56,20 +71,19 @@ class LutBuilder(object): self.lut = None if op_name is not None: known_patterns = { - 'corner': ['1:(... ... ...)->0', - '4:(00. 01. ...)->1'], - 'dilation4': ['4:(... .0. .1.)->1'], - 'dilation8': ['4:(... .0. .1.)->1', - '4:(... .0. ..1)->1'], - 'erosion4': ['4:(... .1. .0.)->0'], - 'erosion8': ['4:(... .1. .0.)->0', - '4:(... .1. ..0)->0'], - 'edge': ['1:(... ... ...)->0', - '4:(.0. .1. ...)->1', - '4:(01. .1. ...)->1'] + "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"], + "dilation4": ["4:(... .0. .1.)->1"], + "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"], + "erosion4": ["4:(... .1. .0.)->0"], + "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"], + "edge": [ + "1:(... ... ...)->0", + "4:(.0. .1. ...)->1", + "4:(01. .1. ...)->1", + ], } if op_name not in known_patterns: - raise Exception('Unknown pattern '+op_name+'!') + raise Exception("Unknown pattern " + op_name + "!") self.patterns = known_patterns[op_name] @@ -88,8 +102,8 @@ class LutBuilder(object): """string_permute takes a pattern and a permutation and returns the string permuted according to the permutation list. """ - assert(len(permutation) == 9) - return ''.join(pattern[p] for p in permutation) + assert len(permutation) == 9 + return "".join(pattern[p] for p in permutation) def _pattern_permute(self, basic_pattern, options, basic_result): """pattern_permute takes a basic pattern and its result and clones @@ -98,32 +112,25 @@ class LutBuilder(object): patterns = [(basic_pattern, basic_result)] # rotations - if '4' in options: + if "4" in options: res = patterns[-1][1] for i in range(4): patterns.append( - (self._string_permute(patterns[-1][0], [6, 3, 0, - 7, 4, 1, - 8, 5, 2]), res)) + (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res) + ) # mirror - if 'M' in options: + if "M" in options: n = len(patterns) for pattern, res in patterns[0:n]: - patterns.append( - (self._string_permute(pattern, [2, 1, 0, - 5, 4, 3, - 8, 7, 6]), res)) + patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res)) # negate - if 'N' in options: + if "N" in options: n = len(patterns) for pattern, res in patterns[0:n]: # Swap 0 and 1 - pattern = (pattern - .replace('0', 'Z') - .replace('1', '0') - .replace('Z', '1')) - res = 1-int(res) + pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1") + res = 1 - int(res) patterns.append((pattern, res)) return patterns @@ -138,22 +145,21 @@ class LutBuilder(object): # Parse and create symmetries of the patterns strings for p in self.patterns: - m = re.search( - r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', '')) + m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", "")) if not m: - raise Exception('Syntax error in pattern "'+p+'"') + raise Exception('Syntax error in pattern "' + p + '"') options = m.group(1) pattern = m.group(2) result = int(m.group(3)) # Get rid of spaces - pattern = pattern.replace(' ', '').replace('\n', '') + pattern = pattern.replace(" ", "").replace("\n", "") patterns += self._pattern_permute(pattern, options, result) # compile the patterns into regular 
expressions for speed for i, pattern in enumerate(patterns): - p = pattern[0].replace('.', 'X').replace('X', '[01]') + p = pattern[0].replace(".", "X").replace("X", "[01]") p = re.compile(p) patterns[i] = (p, pattern[1]) @@ -163,7 +169,7 @@ class LutBuilder(object): for i in range(LUT_SIZE): # Build the bit pattern bitpattern = bin(i)[2:] - bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1] + bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1] for p, r in patterns: if p.match(bitpattern): @@ -175,10 +181,7 @@ class LutBuilder(object): class MorphOp(object): """A class for binary morphological operators""" - def __init__(self, - lut=None, - op_name=None, - patterns=None): + def __init__(self, lut=None, op_name=None, patterns=None): """Create a binary morphological operator""" self.lut = lut if op_name is not None: @@ -192,13 +195,12 @@ class MorphOp(object): Returns a tuple of the number of changed pixels and the morphed image""" if self.lut is None: - raise Exception('No operator loaded') + raise Exception("No operator loaded") - if image.mode != 'L': - raise Exception('Image must be binary, meaning it must use mode L') + if image.mode != "L": + raise Exception("Image must be binary, meaning it must use mode L") outimage = Image.new(image.mode, image.size, None) - count = _imagingmorph.apply( - bytes(self.lut), image.im.id, outimage.im.id) + count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id) return count, outimage def match(self, image): @@ -208,10 +210,10 @@ class MorphOp(object): Returns a list of tuples of (x,y) coordinates of all matching pixels. See :ref:`coordinate-system`.""" if self.lut is None: - raise Exception('No operator loaded') + raise Exception("No operator loaded") - if image.mode != 'L': - raise Exception('Image must be binary, meaning it must use mode L') + if image.mode != "L": + raise Exception("Image must be binary, meaning it must use mode L") return _imagingmorph.match(bytes(self.lut), image.im.id) def get_on_pixels(self, image): @@ -220,24 +222,24 @@ class MorphOp(object): Returns a list of tuples of (x,y) coordinates of all matching pixels. See :ref:`coordinate-system`.""" - if image.mode != 'L': - raise Exception('Image must be binary, meaning it must use mode L') + if image.mode != "L": + raise Exception("Image must be binary, meaning it must use mode L") return _imagingmorph.get_on_pixels(image.im.id) def load_lut(self, filename): """Load an operator from an mrl file""" - with open(filename, 'rb') as f: + with open(filename, "rb") as f: self.lut = bytearray(f.read()) if len(self.lut) != LUT_SIZE: self.lut = None - raise Exception('Wrong size operator file!') + raise Exception("Wrong size operator file!") def save_lut(self, filename): """Save an operator to an mrl file""" if self.lut is None: - raise Exception('No operator loaded') - with open(filename, 'wb') as f: + raise Exception("No operator loaded") + with open(filename, "wb") as f: f.write(self.lut) def set_lut(self, lut): diff --git a/server/www/packages/packages-linux/x64/PIL/ImageOps.py b/server/www/packages/packages-linux/x64/PIL/ImageOps.py index 9f516ba..5052cb7 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageOps.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageOps.py @@ -17,16 +17,16 @@ # See the README file for information on usage and redistribution. # +import functools +import operator + from . 
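For reference, the MorphOp interface reformatted above expects a binary mode "L" image; a minimal sketch, with a hypothetical input file.

from PIL import Image, ImageMorph

im = Image.open("mask.png").convert("L")      # binary image, pixels 0 or 255
op = ImageMorph.MorphOp(op_name="dilation8")  # built-in 8-connected dilation LUT
changed, dilated = op.apply(im)               # -> (changed pixel count, new image)
print(changed)
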
import Image from ._util import isStringType -import operator -import functools -import warnings - # # helpers + def _border(border): if isinstance(border, tuple): if len(border) == 2: @@ -41,6 +41,7 @@ def _border(border): def _color(color, mode): if isStringType(color): from . import ImageColor + color = ImageColor.getcolor(color, mode) return color @@ -56,6 +57,7 @@ def _lut(image, lut): else: raise IOError("not supported for this image mode") + # # actions @@ -76,7 +78,7 @@ def autocontrast(image, cutoff=0, ignore=None): histogram = image.histogram() lut = [] for layer in range(0, len(histogram), 256): - h = histogram[layer:layer+256] + h = histogram[layer : layer + 256] if ignore is not None: # get rid of outliers try: @@ -136,8 +138,7 @@ def autocontrast(image, cutoff=0, ignore=None): return _lut(image, lut) -def colorize(image, black, white, mid=None, blackpoint=0, - whitepoint=255, midpoint=127): +def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127): """ Colorize grayscale image. This function calculates a color wedge which maps all black pixels in @@ -277,9 +278,7 @@ def crop(image, border=0): :return: An image. """ left, top, right, bottom = _border(border) - return image.crop( - (left, top, image.size[0]-right, image.size[1]-bottom) - ) + return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) def scale(image, factor, resample=Image.NEAREST): @@ -299,8 +298,7 @@ def scale(image, factor, resample=Image.NEAREST): elif factor <= 0: raise ValueError("the factor must be greater than 0") else: - size = (int(round(factor * image.width)), - int(round(factor * image.height))) + size = (int(round(factor * image.width)), int(round(factor * image.height))) return image.resize(size, resample) @@ -315,9 +313,7 @@ def deform(image, deformer, resample=Image.BILINEAR): in the PIL.Image.transform function. :return: An image. """ - return image.transform( - image.size, Image.MESH, deformer.getmesh(image), resample - ) + return image.transform(image.size, Image.MESH, deformer.getmesh(image), resample) def equalize(image, mask=None): @@ -336,7 +332,7 @@ def equalize(image, mask=None): h = image.histogram(mask) lut = [] for b in range(0, len(h), 256): - histo = [_f for _f in h[b:b+256] if _f] + histo = [_f for _f in h[b : b + 256] if _f] if len(histo) <= 1: lut.extend(list(range(256))) else: @@ -347,7 +343,7 @@ def equalize(image, mask=None): n = step // 2 for i in range(256): lut.append(n // step) - n = n + h[i+b] + n = n + h[i + b] return _lut(image, lut) @@ -380,9 +376,10 @@ def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)): (width, height) tuple. :param method: What resampling method to use. Default is :py:attr:`PIL.Image.NEAREST`. - :param bleed: Remove a border around the outside of the image (from all + :param bleed: Remove a border around the outside of the image from all four edges. The value is a decimal percentage (use 0.01 for one percent). The default value is 0 (no border). + Cannot be greater than or equal to 0.5. :param centering: Control the cropping position. Use (0.5, 0.5) for center cropping (e.g. if cropping the width, take 50% off of the left side, and therefore 50% off the right side). 
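The fit() rewrite in the next hunk clamps bleed to [0.0, 0.5), keeps the crop box in floats, and hands it to resize(box=...) instead of cropping first. A usage sketch (filename hypothetical):

from PIL import Image, ImageOps

im = Image.open("photo.jpg")
# Trim 2% from each edge, then center-crop to square and resize to 256x256.
thumb = ImageOps.fit(im, (256, 256), method=Image.BICUBIC,
                     bleed=0.02, centering=(0.5, 0.5))
thumb.save("thumb.jpg")
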
@@ -400,66 +397,56 @@ def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)): # kevin@cazabon.com # http://www.cazabon.com - # ensure inputs are valid - if not isinstance(centering, list): - centering = [centering[0], centering[1]] + # ensure centering is mutable + centering = list(centering) - if centering[0] > 1.0 or centering[0] < 0.0: - centering[0] = 0.50 - if centering[1] > 1.0 or centering[1] < 0.0: - centering[1] = 0.50 + if not 0.0 <= centering[0] <= 1.0: + centering[0] = 0.5 + if not 0.0 <= centering[1] <= 1.0: + centering[1] = 0.5 - if bleed > 0.49999 or bleed < 0.0: + if not 0.0 <= bleed < 0.5: bleed = 0.0 # calculate the area to use for resizing and cropping, subtracting # the 'bleed' around the edges # number of pixels to trim off on Top and Bottom, Left and Right - bleedPixels = ( - int((float(bleed) * float(image.size[0])) + 0.5), - int((float(bleed) * float(image.size[1])) + 0.5) - ) + bleed_pixels = (bleed * image.size[0], bleed * image.size[1]) - liveArea = (0, 0, image.size[0], image.size[1]) - if bleed > 0.0: - liveArea = ( - bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1, - image.size[1] - bleedPixels[1] - 1 - ) + live_size = ( + image.size[0] - bleed_pixels[0] * 2, + image.size[1] - bleed_pixels[1] * 2, + ) - liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1]) - - # calculate the aspect ratio of the liveArea - liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1]) + # calculate the aspect ratio of the live_size + live_size_ratio = float(live_size[0]) / live_size[1] # calculate the aspect ratio of the output image - aspectRatio = float(size[0]) / float(size[1]) + output_ratio = float(size[0]) / size[1] # figure out if the sides or top/bottom will be cropped off - if liveAreaAspectRatio >= aspectRatio: - # liveArea is wider than what's needed, crop the sides - cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5) - cropHeight = liveSize[1] + if live_size_ratio == output_ratio: + # live_size is already the needed ratio + crop_width = live_size[0] + crop_height = live_size[1] + elif live_size_ratio >= output_ratio: + # live_size is wider than what's needed, crop the sides + crop_width = output_ratio * live_size[1] + crop_height = live_size[1] else: - # liveArea is taller than what's needed, crop the top and bottom - cropWidth = liveSize[0] - cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5) + # live_size is taller than what's needed, crop the top and bottom + crop_width = live_size[0] + crop_height = live_size[0] / output_ratio # make the crop - leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0])) - if leftSide < 0: - leftSide = 0 - topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1])) - if topSide < 0: - topSide = 0 + crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0] + crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1] - out = image.crop( - (leftSide, topSide, leftSide + cropWidth, topSide + cropHeight) - ) + crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height) # resize the image and return it - return out.resize(size, method) + return image.resize(size, method, box=crop) def flip(image): @@ -491,7 +478,7 @@ def invert(image): """ lut = [] for i in range(256): - lut.append(255-i) + lut.append(255 - i) return _lut(image, lut) @@ -514,7 +501,7 @@ def posterize(image, bits): :return: An image. 
""" lut = [] - mask = ~(2**(8-bits)-1) + mask = ~(2 ** (8 - bits) - 1) for i in range(256): lut.append(i & mask) return _lut(image, lut) @@ -533,100 +520,32 @@ def solarize(image, threshold=128): if i < threshold: lut.append(i) else: - lut.append(255-i) + lut.append(255 - i) return _lut(image, lut) -# -------------------------------------------------------------------- -# PIL USM components, from Kevin Cazabon. - -def gaussian_blur(im, radius=None): - """ PIL_usm.gblur(im, [radius])""" - - warnings.warn( - 'PIL.ImageOps.gaussian_blur is deprecated. ' - 'Use PIL.ImageFilter.GaussianBlur instead. ' - 'This function will be removed in a future version.', - DeprecationWarning - ) - - if radius is None: - radius = 5.0 - - im.load() - - return im.im.gaussian_blur(radius) - - -def gblur(im, radius=None): - """ PIL_usm.gblur(im, [radius])""" - - warnings.warn( - 'PIL.ImageOps.gblur is deprecated. ' - 'Use PIL.ImageFilter.GaussianBlur instead. ' - 'This function will be removed in a future version.', - DeprecationWarning - ) - - return gaussian_blur(im, radius) - - -def unsharp_mask(im, radius=None, percent=None, threshold=None): - """ PIL_usm.usm(im, [radius, percent, threshold])""" - - warnings.warn( - 'PIL.ImageOps.unsharp_mask is deprecated. ' - 'Use PIL.ImageFilter.UnsharpMask instead. ' - 'This function will be removed in a future version.', - DeprecationWarning - ) - - if radius is None: - radius = 5.0 - if percent is None: - percent = 150 - if threshold is None: - threshold = 3 - - im.load() - - return im.im.unsharp_mask(radius, percent, threshold) - - -def usm(im, radius=None, percent=None, threshold=None): - """ PIL_usm.usm(im, [radius, percent, threshold])""" - - warnings.warn( - 'PIL.ImageOps.usm is deprecated. ' - 'Use PIL.ImageFilter.UnsharpMask instead. ' - 'This function will be removed in a future version.', - DeprecationWarning - ) - - return unsharp_mask(im, radius, percent, threshold) - - -def box_blur(image, radius): +def exif_transpose(image): """ - Blur the image by setting each pixel to the average value of the pixels - in a square box extending radius pixels in each direction. - Supports float radius of arbitrary size. Uses an optimized implementation - which runs in linear time relative to the size of the image - for any radius value. + If an image has an EXIF Orientation tag, return a new image that is + transposed accordingly. Otherwise, return a copy of the image. - :param image: The image to blur. - :param radius: Size of the box in one direction. Radius 0 does not blur, - returns an identical image. Radius 1 takes 1 pixel - in each direction, i.e. 9 pixels in total. + :param image: The image to transpose. :return: An image. """ - warnings.warn( - 'PIL.ImageOps.box_blur is deprecated. ' - 'Use PIL.ImageFilter.BoxBlur instead. 
' - 'This function will be removed in a future version.', - DeprecationWarning - ) - - image.load() - - return image._new(image.im.box_blur(radius)) + exif = image.getexif() + orientation = exif.get(0x0112) + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + transposed_image = image.transpose(method) + del exif[0x0112] + transposed_image.info["exif"] = exif.tobytes() + return transposed_image + return image.copy() diff --git a/server/www/packages/packages-linux/x64/PIL/ImagePalette.py b/server/www/packages/packages-linux/x64/PIL/ImagePalette.py index 81e99ab..2d4f5cb 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImagePalette.py +++ b/server/www/packages/packages-linux/x64/PIL/ImagePalette.py @@ -17,7 +17,8 @@ # import array -from . import ImageColor, GimpPaletteFile, GimpGradientFile, PaletteFile + +from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile class ImagePalette(object): @@ -38,11 +39,12 @@ class ImagePalette(object): def __init__(self, mode="RGB", palette=None, size=0): self.mode = mode self.rawmode = None # if set, palette contains raw data - self.palette = palette or bytearray(range(256))*len(self.mode) + self.palette = palette or bytearray(range(256)) * len(self.mode) self.colors = {} self.dirty = None - if ((size == 0 and len(self.mode)*256 != len(self.palette)) or - (size != 0 and size != len(self.palette))): + if (size == 0 and len(self.mode) * 256 != len(self.palette)) or ( + size != 0 and size != len(self.palette) + ): raise ValueError("wrong palette size") def copy(self): @@ -78,7 +80,7 @@ class ImagePalette(object): if isinstance(self.palette, bytes): return self.palette arr = array.array("B", self.palette) - if hasattr(arr, 'tobytes'): + if hasattr(arr, "tobytes"): return arr.tobytes() return arr.tostring() @@ -104,8 +106,8 @@ class ImagePalette(object): raise ValueError("cannot allocate more than 256 colors") self.colors[color] = index self.palette[index] = color[0] - self.palette[index+256] = color[1] - self.palette[index+512] = color[2] + self.palette[index + 256] = color[1] + self.palette[index + 512] = color[2] self.dirty = 1 return index else: @@ -124,7 +126,7 @@ class ImagePalette(object): fp.write("# Mode: %s\n" % self.mode) for i in range(256): fp.write("%d" % i) - for j in range(i*len(self.mode), (i+1)*len(self.mode)): + for j in range(i * len(self.mode), (i + 1) * len(self.mode)): try: fp.write(" %d" % self.palette[j]) except IndexError: @@ -136,6 +138,7 @@ class ImagePalette(object): # -------------------------------------------------------------------- # Internal + def raw(rawmode, data): palette = ImagePalette() palette.rawmode = rawmode @@ -147,11 +150,12 @@ def raw(rawmode, data): # -------------------------------------------------------------------- # Factories + def make_linear_lut(black, white): lut = [] if black == 0: for i in range(256): - lut.append(white*i//255) + lut.append(white * i // 255) else: raise NotImplementedError # FIXME return lut @@ -172,8 +176,9 @@ def negative(mode="RGB"): def random(mode="RGB"): from random import randint + palette = [] - for i in range(256*len(mode)): + for i in range(256 * len(mode)): palette.append(randint(0, 255)) return ImagePalette(mode, palette) @@ -199,7 +204,7 @@ def load(filename): for paletteHandler in [ GimpPaletteFile.GimpPaletteFile, GimpGradientFile.GimpGradientFile, - PaletteFile.PaletteFile + 
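The new ImageOps.exif_transpose() added above replaces the removed blur/USM shims as the helper most callers reach for when normalising camera images; a minimal sketch (filename hypothetical):

from PIL import Image, ImageOps

im = Image.open("camera_photo.jpg")
# If EXIF tag 0x0112 (Orientation) is set, returns a transposed copy with
# the tag removed from the copy's EXIF bytes; otherwise returns a plain copy.
upright = ImageOps.exif_transpose(im)
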
PaletteFile.PaletteFile, ]: try: fp.seek(0) diff --git a/server/www/packages/packages-linux/x64/PIL/ImagePath.py b/server/www/packages/packages-linux/x64/PIL/ImagePath.py index 8cbfec0..3d3538c 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImagePath.py +++ b/server/www/packages/packages-linux/x64/PIL/ImagePath.py @@ -16,5 +16,4 @@ from . import Image - Path = Image.core.path diff --git a/server/www/packages/packages-linux/x64/PIL/ImageQt.py b/server/www/packages/packages-linux/x64/PIL/ImageQt.py index e602613..2edb0a1 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageQt.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageQt.py @@ -16,34 +16,40 @@ # See the README file for information on usage and redistribution. # +import sys +import warnings +from io import BytesIO + from . import Image from ._util import isPath, py3 -from io import BytesIO -import sys -qt_versions = [ - ['5', 'PyQt5'], - ['side2', 'PySide2'], - ['4', 'PyQt4'], - ['side', 'PySide'] -] +qt_versions = [["5", "PyQt5"], ["side2", "PySide2"], ["4", "PyQt4"], ["side", "PySide"]] + +WARNING_TEXT = ( + "Support for EOL {} is deprecated and will be removed in a future version. " + "Please upgrade to PyQt5 or PySide2." +) + # If a version has already been imported, attempt it first -qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, - reverse=True) +qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) for qt_version, qt_module in qt_versions: try: - if qt_module == 'PyQt5': + if qt_module == "PyQt5": from PyQt5.QtGui import QImage, qRgba, QPixmap from PyQt5.QtCore import QBuffer, QIODevice - elif qt_module == 'PySide2': + elif qt_module == "PySide2": from PySide2.QtGui import QImage, qRgba, QPixmap from PySide2.QtCore import QBuffer, QIODevice - elif qt_module == 'PyQt4': + elif qt_module == "PyQt4": from PyQt4.QtGui import QImage, qRgba, QPixmap from PyQt4.QtCore import QBuffer, QIODevice - elif qt_module == 'PySide': + + warnings.warn(WARNING_TEXT.format(qt_module), DeprecationWarning) + elif qt_module == "PySide": from PySide.QtGui import QImage, qRgba, QPixmap from PySide.QtCore import QBuffer, QIODevice + + warnings.warn(WARNING_TEXT.format(qt_module), DeprecationWarning) except (ImportError, RuntimeError): continue qt_is_installed = True @@ -57,7 +63,7 @@ def rgb(r, g, b, a=255): """(Internal) Turns an RGB color into a Qt compatible color integer.""" # use qRgb to pack the colors, and then turn the resulting long # into a negative integer with the same bitpattern. 
- return (qRgba(r, g, b, a) & 0xffffffff) + return qRgba(r, g, b, a) & 0xFFFFFFFF def fromqimage(im): @@ -67,12 +73,12 @@ def fromqimage(im): """ buffer = QBuffer() buffer.open(QIODevice.ReadWrite) - # preserve alha channel with png + # preserve alpha channel with png # otherwise ppm is more friendly with Image.open if im.hasAlphaChannel(): - im.save(buffer, 'png') + im.save(buffer, "png") else: - im.save(buffer, 'ppm') + im.save(buffer, "ppm") b = BytesIO() try: @@ -105,11 +111,7 @@ def align8to32(bytes, width, mode): converts each scanline of data from 8 bit to 32 bit aligned """ - bits_per_pixel = { - '1': 1, - 'L': 8, - 'P': 8, - }[mode] + bits_per_pixel = {"1": 1, "L": 8, "P": 8}[mode] # calculate bytes per line and the extra padding if needed bits_per_line = bits_per_pixel * width @@ -124,10 +126,12 @@ def align8to32(bytes, width, mode): new_data = [] for i in range(len(bytes) // bytes_per_line): - new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] - + b'\x00' * extra_padding) + new_data.append( + bytes[i * bytes_per_line : (i + 1) * bytes_per_line] + + b"\x00" * extra_padding + ) - return b''.join(new_data) + return b"".join(new_data) def _toqclass_helper(im): @@ -140,7 +144,7 @@ def _toqclass_helper(im): if py3: im = str(im.toUtf8(), "utf-8") else: - im = unicode(im.toUtf8(), "utf-8") + im = unicode(im.toUtf8(), "utf-8") # noqa: F821 if isPath(im): im = Image.open(im) @@ -156,7 +160,7 @@ def _toqclass_helper(im): colortable = [] palette = im.getpalette() for i in range(0, len(palette), 3): - colortable.append(rgb(*palette[i:i+3])) + colortable.append(rgb(*palette[i : i + 3])) elif im.mode == "RGB": data = im.tobytes("raw", "BGRX") format = QImage.Format_RGB32 @@ -172,33 +176,35 @@ def _toqclass_helper(im): raise ValueError("unsupported image mode %r" % im.mode) __data = data or align8to32(im.tobytes(), im.size[0], im.mode) - return { - 'data': __data, 'im': im, 'format': format, 'colortable': colortable - } + return {"data": __data, "im": im, "format": format, "colortable": colortable} if qt_is_installed: - class ImageQt(QImage): + class ImageQt(QImage): def __init__(self, im): """ An PIL image wrapper for Qt. This is a subclass of PyQt's QImage class. - :param im: A PIL Image object, or a file name (given either as Python - string or a PyQt string object). + :param im: A PIL Image object, or a file name (given either as + Python string or a PyQt string object). """ im_data = _toqclass_helper(im) # must keep a reference, or Qt will crash! # All QImage constructors that take data operate on an existing # buffer, so this buffer has to hang on for the life of the image. 
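For context, the ImageQt wrapper being reformatted here is used like this; it requires one of the supported Qt bindings, and the filename is hypothetical.

from PIL import Image
from PIL.ImageQt import ImageQt

im = Image.open("icon.png")
qim = ImageQt(im)  # a QImage subclass; it keeps im's pixel buffer alive
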
# Fixes https://github.com/python-pillow/Pillow/issues/1370 - self.__data = im_data['data'] - QImage.__init__(self, - self.__data, im_data['im'].size[0], - im_data['im'].size[1], im_data['format']) - if im_data['colortable']: - self.setColorTable(im_data['colortable']) + self.__data = im_data["data"] + QImage.__init__( + self, + self.__data, + im_data["im"].size[0], + im_data["im"].size[1], + im_data["format"], + ) + if im_data["colortable"]: + self.setColorTable(im_data["colortable"]) def toqimage(im): @@ -211,8 +217,8 @@ def toqpixmap(im): # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1]) # result.loadFromData(im_data['data']) # Fix some strange bug that causes - if im.mode == 'RGB': - im = im.convert('RGBA') + if im.mode == "RGB": + im = im.convert("RGBA") qimage = toqimage(im) return QPixmap.fromImage(qimage) diff --git a/server/www/packages/packages-linux/x64/PIL/ImageSequence.py b/server/www/packages/packages-linux/x64/PIL/ImageSequence.py index 1fc6e5d..f9be92d 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageSequence.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageSequence.py @@ -32,7 +32,7 @@ class Iterator(object): if not hasattr(im, "seek"): raise AttributeError("im must have seek method") self.im = im - self.position = 0 + self.position = getattr(self.im, "_min_frame", 0) def __getitem__(self, ix): try: @@ -54,3 +54,25 @@ class Iterator(object): def next(self): return self.__next__() + + +def all_frames(im, func=None): + """ + Applies a given function to all frames in an image or a list of images. + The frames are returned as a list of separate images. + + :param im: An image, or a list of images. + :param func: The function to apply to all of the image frames. + :returns: A list of images. + """ + if not isinstance(im, list): + im = [im] + + ims = [] + for imSequence in im: + current = imSequence.tell() + + ims += [im_frame.copy() for im_frame in Iterator(imSequence)] + + imSequence.seek(current) + return [func(im) for im in ims] if func else ims diff --git a/server/www/packages/packages-linux/x64/PIL/ImageShow.py b/server/www/packages/packages-linux/x64/PIL/ImageShow.py index b50d613..ca622c5 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageShow.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageShow.py @@ -14,9 +14,12 @@ from __future__ import print_function -from PIL import Image import os +import subprocess import sys +import tempfile + +from PIL import Image if sys.version_info.major >= 3: from shlex import quote @@ -61,16 +64,12 @@ class Viewer(object): def show(self, image, **options): # save temporary image to disk - if image.mode[:4] == "I;16": - # @PIL88 @PIL101 - # "I;16" isn't an 'official' mode, but we still want to - # provide a simple way to show 16-bit images. - base = "L" - # FIXME: auto-contrast if max() > 255? 
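The new ImageSequence.all_frames() helper above applies a function across every frame and restores the source image's frame position afterwards; a sketch with a hypothetical GIF:

from PIL import Image, ImageSequence

im = Image.open("animation.gif")
frames = ImageSequence.all_frames(im, lambda f: f.rotate(90))
frames[0].save("rotated.gif", save_all=True, append_images=frames[1:])
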
- else: + if not ( + image.mode in ("1", "RGBA") or (self.format == "PNG" and image.mode == "LA") + ): base = Image.getmodebase(image.mode) - if base != image.mode and image.mode != "1" and image.mode != "RGBA": - image = image.convert(base) + if image.mode != base: + image = image.convert(base) return self.show_image(image, **options) @@ -99,18 +98,22 @@ class Viewer(object): os.system(self.get_command(file, **options)) return 1 + # -------------------------------------------------------------------- if sys.platform == "win32": class WindowsViewer(Viewer): - format = "BMP" + format = "PNG" + options = {"compress_level": 1} def get_command(self, file, **options): - return ('start "Pillow" /WAIT "%s" ' - '&& ping -n 2 127.0.0.1 >NUL ' - '&& del /f "%s"' % (file, file)) + return ( + 'start "Pillow" /WAIT "%s" ' + "&& ping -n 2 127.0.0.1 >NUL " + '&& del /f "%s"' % (file, file) + ) register(WindowsViewer) @@ -118,16 +121,33 @@ elif sys.platform == "darwin": class MacViewer(Viewer): format = "PNG" - options = {'compress_level': 1} + options = {"compress_level": 1} def get_command(self, file, **options): # on darwin open returns immediately resulting in the temp # file removal while app is opening - command = "open -a /Applications/Preview.app" - command = "(%s %s; sleep 20; rm -f %s)&" % (command, quote(file), - quote(file)) + command = "open -a Preview.app" + command = "(%s %s; sleep 20; rm -f %s)&" % ( + command, + quote(file), + quote(file), + ) return command + def show_file(self, file, **options): + """Display given file""" + fd, path = tempfile.mkstemp() + with os.fdopen(fd, "w") as f: + f.write(file) + with open(path, "r") as f: + subprocess.Popen( + ["im=$(cat); open -a Preview.app $im; sleep 20; rm -f $im"], + shell=True, + stdin=f, + ) + os.remove(path) + return 1 + register(MacViewer) else: @@ -146,13 +166,23 @@ else: class UnixViewer(Viewer): format = "PNG" - options = {'compress_level': 1} + options = {"compress_level": 1} + + def get_command(self, file, **options): + command = self.get_command_ex(file, **options)[0] + return "(%s %s; rm -f %s)&" % (command, quote(file), quote(file)) def show_file(self, file, **options): - command, executable = self.get_command_ex(file, **options) - command = "(%s %s; rm -f %s)&" % (command, quote(file), - quote(file)) - os.system(command) + """Display given file""" + fd, path = tempfile.mkstemp() + with os.fdopen(fd, "w") as f: + f.write(file) + with open(path, "r") as f: + command = self.get_command_ex(file, **options)[0] + subprocess.Popen( + ["im=$(cat);" + command + " $im; rm -f $im"], shell=True, stdin=f + ) + os.remove(path) return 1 # implementations diff --git a/server/www/packages/packages-linux/x64/PIL/ImageStat.py b/server/www/packages/packages-linux/x64/PIL/ImageStat.py index d4b38d8..9ba16fd 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageStat.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageStat.py @@ -21,13 +21,12 @@ # See the README file for information on usage and redistribution. 
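The ImageStat changes in the hunks that follow are formatting and docstring-style cleanups only; for reference, typical usage of the Stat class (input filename hypothetical):

from PIL import Image, ImageStat

im = Image.open("photo.jpg").convert("RGB")
stat = ImageStat.Stat(im)
print(stat.mean)    # per-band mean, e.g. [r, g, b]
print(stat.stddev)  # per-band standard deviation
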
# +import functools import math import operator -import functools class Stat(object): - def __init__(self, image_or_list, mask=None): try: if mask: @@ -41,7 +40,7 @@ class Stat(object): self.bands = list(range(len(self.h) // 256)) def __getattr__(self, id): - "Calculate missing attribute" + """Calculate missing attribute""" if id[:4] == "_get": raise AttributeError(id) # calculate missing attribute @@ -50,7 +49,7 @@ class Stat(object): return v def _getextrema(self): - "Get min/max values for each band in the image" + """Get min/max values for each band in the image""" def minmax(histogram): n = 255 @@ -67,15 +66,15 @@ class Stat(object): return v def _getcount(self): - "Get total number of pixels in each layer" + """Get total number of pixels in each layer""" v = [] for i in range(0, len(self.h), 256): - v.append(functools.reduce(operator.add, self.h[i:i+256])) + v.append(functools.reduce(operator.add, self.h[i : i + 256])) return v def _getsum(self): - "Get sum of all pixels in each layer" + """Get sum of all pixels in each layer""" v = [] for i in range(0, len(self.h), 256): @@ -86,7 +85,7 @@ class Stat(object): return v def _getsum2(self): - "Get squared sum of all pixels in each layer" + """Get squared sum of all pixels in each layer""" v = [] for i in range(0, len(self.h), 256): @@ -97,7 +96,7 @@ class Stat(object): return v def _getmean(self): - "Get average pixel level for each layer" + """Get average pixel level for each layer""" v = [] for i in self.bands: @@ -105,22 +104,22 @@ class Stat(object): return v def _getmedian(self): - "Get median pixel level for each layer" + """Get median pixel level for each layer""" v = [] for i in self.bands: s = 0 - half = self.count[i]//2 + half = self.count[i] // 2 b = i * 256 for j in range(256): - s = s + self.h[b+j] + s = s + self.h[b + j] if s > half: break v.append(j) return v def _getrms(self): - "Get RMS for each layer" + """Get RMS for each layer""" v = [] for i in self.bands: @@ -128,16 +127,16 @@ class Stat(object): return v def _getvar(self): - "Get variance for each layer" + """Get variance for each layer""" v = [] for i in self.bands: n = self.count[i] - v.append((self.sum2[i]-(self.sum[i]**2.0)/n)/n) + v.append((self.sum2[i] - (self.sum[i] ** 2.0) / n) / n) return v def _getstddev(self): - "Get standard deviation for each layer" + """Get standard deviation for each layer""" v = [] for i in self.bands: diff --git a/server/www/packages/packages-linux/x64/PIL/ImageTk.py b/server/www/packages/packages-linux/x64/PIL/ImageTk.py index c56f556..fd48000 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageTk.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageTk.py @@ -26,15 +26,15 @@ # import sys +from io import BytesIO + +from . import Image if sys.version_info.major > 2: import tkinter else: import Tkinter as tkinter -from . import Image -from io import BytesIO - # -------------------------------------------------------------------- # Check for Tkinter interface hooks @@ -67,6 +67,7 @@ def _get_image_from_kw(kw): # -------------------------------------------------------------------- # PhotoImage + class PhotoImage(object): """ A Tkinter-compatible photo image. This can be used @@ -124,7 +125,7 @@ class PhotoImage(object): self.__photo.name = None try: self.__photo.tk.call("image", "delete", name) - except: + except Exception: pass # ignore internal errors def __str__(self): @@ -183,17 +184,18 @@ class PhotoImage(object): # activate Tkinter hook try: from . 
import _imagingtk + try: - if hasattr(tk, 'interp'): + if hasattr(tk, "interp"): # Required for PyPy, which always has CFFI installed from cffi import FFI + ffi = FFI() # PyPy is using an FFI CDATA element # (Pdb) self.tk.interp # - _imagingtk.tkinit( - int(ffi.cast("uintptr_t", tk.interp)), 1) + _imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1) else: _imagingtk.tkinit(tk.interpaddr(), 1) except AttributeError: @@ -202,6 +204,7 @@ class PhotoImage(object): except (ImportError, AttributeError, tkinter.TclError): raise # configuration problem; cannot attach to Tkinter + # -------------------------------------------------------------------- # BitmapImage @@ -244,7 +247,7 @@ class BitmapImage(object): self.__photo.name = None try: self.__photo.tk.call("image", "delete", name) - except: + except Exception: pass # ignore internal errors def width(self): @@ -275,10 +278,13 @@ class BitmapImage(object): def getimage(photo): - """ This function is unimplemented """ - """Copies the contents of a PhotoImage to a PIL image memory.""" - photo.tk.call("PyImagingPhotoGet", photo) + im = Image.new("RGBA", (photo.width(), photo.height())) + block = im.im + + photo.tk.call("PyImagingPhotoGet", photo, block.id) + + return im def _show(image, title): @@ -290,8 +296,7 @@ def _show(image, title): self.image = BitmapImage(im, foreground="white", master=master) else: self.image = PhotoImage(im, master=master) - tkinter.Label.__init__(self, master, image=self.image, - bg="black", bd=0) + tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0) if not tkinter._default_root: raise IOError("tkinter not initialized") diff --git a/server/www/packages/packages-linux/x64/PIL/ImageTransform.py b/server/www/packages/packages-linux/x64/PIL/ImageTransform.py index c3f6af8..77791ab 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageTransform.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageTransform.py @@ -46,6 +46,7 @@ class AffineTransform(Transform): :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows from an affine transform matrix. """ + method = Image.AFFINE @@ -67,6 +68,7 @@ class ExtentTransform(Transform): :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the input image's coordinate system. See :ref:`coordinate-system`. """ + method = Image.EXTENT @@ -83,6 +85,7 @@ class QuadTransform(Transform): upper left, lower left, lower right, and upper right corner of the source quadrilateral. """ + method = Image.QUAD @@ -95,4 +98,5 @@ class MeshTransform(Transform): :param data: A list of (bbox, quad) tuples. """ + method = Image.MESH diff --git a/server/www/packages/packages-linux/x64/PIL/ImageWin.py b/server/www/packages/packages-linux/x64/PIL/ImageWin.py index 9b86270..ed2c18e 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImageWin.py +++ b/server/www/packages/packages-linux/x64/PIL/ImageWin.py @@ -26,6 +26,7 @@ class HDC(object): :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` methods. """ + def __init__(self, dc): self.dc = dc @@ -39,6 +40,7 @@ class HWND(object): :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` methods, instead of a DC. 
""" + def __init__(self, wnd): self.wnd = wnd @@ -190,7 +192,7 @@ class Window(object): def __init__(self, title="PIL", width=None, height=None): self.hwnd = Image.core.createwindow( title, self.__dispatcher, width or 0, height or 0 - ) + ) def __dispatcher(self, action, *args): return getattr(self, "ui_handle_" + action)(*args) diff --git a/server/www/packages/packages-linux/x64/PIL/ImtImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/ImtImagePlugin.py index 5a6623c..a9e991f 100644 --- a/server/www/packages/packages-linux/x64/PIL/ImtImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/ImtImagePlugin.py @@ -19,6 +19,8 @@ import re from . import Image, ImageFile +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.2" @@ -31,6 +33,7 @@ field = re.compile(br"([a-z]*) ([^ \r\n]*)") ## # Image plugin for IM Tools images. + class ImtImageFile(ImageFile.ImageFile): format = "IMT" @@ -53,12 +56,12 @@ class ImtImageFile(ImageFile.ImageFile): if not s: break - if s == b'\x0C': + if s == b"\x0C": # image data begins - self.tile = [("raw", (0, 0)+self.size, - self.fp.tell(), - (self.mode, 0, 1))] + self.tile = [ + ("raw", (0, 0) + self.size, self.fp.tell(), (self.mode, 0, 1)) + ] break diff --git a/server/www/packages/packages-linux/x64/PIL/IptcImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/IptcImagePlugin.py index b63e1ab..aedf2e4 100644 --- a/server/www/packages/packages-linux/x64/PIL/IptcImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/IptcImagePlugin.py @@ -17,17 +17,17 @@ from __future__ import print_function -from . import Image, ImageFile -from ._binary import i8, i16be as i16, i32be as i32, o8 import os import tempfile +from . import Image, ImageFile +from ._binary import i8, i16be as i16, i32be as i32, o8 + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.3" -COMPRESSION = { - 1: "raw", - 5: "jpeg" -} +COMPRESSION = {1: "raw", 5: "jpeg"} PAD = o8(0) * 4 @@ -35,13 +35,14 @@ PAD = o8(0) * 4 # # Helpers + def i(c): return i32((PAD + c)[-4:]) def dump(c): for i in c: - print("%02x" % i8(i), end=' ') + print("%02x" % i8(i), end=" ") print() @@ -49,6 +50,7 @@ def dump(c): # Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields # from TIFF and JPEG files, use the getiptcinfo function. 
+ class IptcImageFile(ImageFile.ImageFile): format = "IPTC" @@ -77,7 +79,7 @@ class IptcImageFile(ImageFile.ImageFile): elif size == 128: size = 0 elif size > 128: - size = i(self.fp.read(size-128)) + size = i(self.fp.read(size - 128)) else: size = i16(s[3:]) @@ -107,7 +109,7 @@ class IptcImageFile(ImageFile.ImageFile): layers = i8(self.info[(3, 60)][0]) component = i8(self.info[(3, 60)][1]) if (3, 65) in self.info: - id = i8(self.info[(3, 65)][0])-1 + id = i8(self.info[(3, 65)][0]) - 1 else: id = 0 if layers == 1 and not component: @@ -128,8 +130,9 @@ class IptcImageFile(ImageFile.ImageFile): # tile if tag == (8, 10): - self.tile = [("iptc", (compression, offset), - (0, 0, self.size[0], self.size[1]))] + self.tile = [ + ("iptc", (compression, offset), (0, 0, self.size[0], self.size[1])) + ] def load(self): @@ -196,35 +199,9 @@ def getiptcinfo(im): elif isinstance(im, JpegImagePlugin.JpegImageFile): # extract the IPTC/NAA resource - try: - app = im.app["APP13"] - if app[:14] == b"Photoshop 3.0\x00": - app = app[14:] - # parse the image resource block - offset = 0 - while app[offset:offset+4] == b"8BIM": - offset += 4 - # resource code - code = i16(app, offset) - offset += 2 - # resource name (usually empty) - name_len = i8(app[offset]) - # name = app[offset+1:offset+1+name_len] - offset = 1 + offset + name_len - if offset & 1: - offset += 1 - # resource data block - size = i32(app, offset) - offset += 4 - if code == 0x0404: - # 0x0404 contains IPTC/NAA data - data = app[offset:offset+size] - break - offset = offset + size - if offset & 1: - offset += 1 - except (AttributeError, KeyError): - pass + photoshop = im.info.get("photoshop") + if photoshop: + data = photoshop.get(0x0404) elif isinstance(im, TiffImagePlugin.TiffImageFile): # get raw data from the IPTC/NAA tag (PhotoShop tags the data @@ -240,6 +217,7 @@ def getiptcinfo(im): # create an IptcImagePlugin object without initializing it class FakeImage(object): pass + im = FakeImage() im.__class__ = IptcImageFile diff --git a/server/www/packages/packages-linux/x64/PIL/Jpeg2KImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/Jpeg2KImagePlugin.py index 7659b6b..37f1117 100644 --- a/server/www/packages/packages-linux/x64/PIL/Jpeg2KImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/Jpeg2KImagePlugin.py @@ -12,11 +12,14 @@ # # See the README file for information on usage and redistribution. # -from . import Image, ImageFile -import struct -import os import io +import os +import struct +from . import Image, ImageFile + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. 
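The getiptcinfo() rewrite above drops the hand-rolled APP13 parsing in favour of the "photoshop" dict that JpegImagePlugin already exposes via im.info; the calling convention is unchanged (filename hypothetical):

from PIL import Image, IptcImagePlugin

im = Image.open("news_photo.jpg")
iptc = IptcImagePlugin.getiptcinfo(im)
if iptc:
    # Keys are (record, dataset) tuples; (2, 120) is the caption/abstract.
    print(iptc.get((2, 120)))
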
__version__ = "0.1" @@ -25,30 +28,29 @@ def _parse_codestream(fp): count from the SIZ marker segment, returning a PIL (size, mode) tuple.""" hdr = fp.read(2) - lsiz = struct.unpack('>H', hdr)[0] + lsiz = struct.unpack(">H", hdr)[0] siz = hdr + fp.read(lsiz - 2) - lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \ - xtosiz, ytosiz, csiz \ - = struct.unpack_from('>HHIIIIIIIIH', siz) - ssiz = [None]*csiz - xrsiz = [None]*csiz - yrsiz = [None]*csiz + lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from( + ">HHIIIIIIIIH", siz + ) + ssiz = [None] * csiz + xrsiz = [None] * csiz + yrsiz = [None] * csiz for i in range(csiz): - ssiz[i], xrsiz[i], yrsiz[i] \ - = struct.unpack_from('>BBB', siz, 36 + 3 * i) + ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i) size = (xsiz - xosiz, ysiz - yosiz) if csiz == 1: - if (yrsiz[0] & 0x7f) > 8: - mode = 'I;16' + if (yrsiz[0] & 0x7F) > 8: + mode = "I;16" else: - mode = 'L' + mode = "L" elif csiz == 2: - mode = 'LA' + mode = "LA" elif csiz == 3: - mode = 'RGB' + mode = "RGB" elif csiz == 4: - mode = 'RGBA' + mode = "RGBA" else: mode = None @@ -57,29 +59,34 @@ def _parse_codestream(fp): def _parse_jp2_header(fp): """Parse the JP2 header box to extract size, component count and - color space information, returning a PIL (size, mode) tuple.""" + color space information, returning a (size, mode, mimetype) tuple.""" # Find the JP2 header box header = None + mimetype = None while True: - lbox, tbox = struct.unpack('>I4s', fp.read(8)) + lbox, tbox = struct.unpack(">I4s", fp.read(8)) if lbox == 1: - lbox = struct.unpack('>Q', fp.read(8))[0] + lbox = struct.unpack(">Q", fp.read(8))[0] hlen = 16 else: hlen = 8 if lbox < hlen: - raise SyntaxError('Invalid JP2 header length') + raise SyntaxError("Invalid JP2 header length") - if tbox == b'jp2h': + if tbox == b"jp2h": header = fp.read(lbox - hlen) break + elif tbox == b"ftyp": + if fp.read(4) == b"jpx ": + mimetype = "image/jpx" + fp.seek(lbox - hlen - 4, os.SEEK_CUR) else: fp.seek(lbox - hlen, os.SEEK_CUR) if header is None: - raise SyntaxError('could not find JP2 header') + raise SyntaxError("could not find JP2 header") size = None mode = None @@ -88,64 +95,64 @@ def _parse_jp2_header(fp): hio = io.BytesIO(header) while True: - lbox, tbox = struct.unpack('>I4s', hio.read(8)) + lbox, tbox = struct.unpack(">I4s", hio.read(8)) if lbox == 1: - lbox = struct.unpack('>Q', hio.read(8))[0] + lbox = struct.unpack(">Q", hio.read(8))[0] hlen = 16 else: hlen = 8 content = hio.read(lbox - hlen) - if tbox == b'ihdr': - height, width, nc, bpc, c, unkc, ipr \ - = struct.unpack('>IIHBBBB', content) + if tbox == b"ihdr": + height, width, nc, bpc, c, unkc, ipr = struct.unpack(">IIHBBBB", content) size = (width, height) if unkc: - if nc == 1 and (bpc & 0x7f) > 8: - mode = 'I;16' + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" elif nc == 1: - mode = 'L' + mode = "L" elif nc == 2: - mode = 'LA' + mode = "LA" elif nc == 3: - mode = 'RGB' + mode = "RGB" elif nc == 4: - mode = 'RGBA' + mode = "RGBA" break - elif tbox == b'colr': - meth, prec, approx = struct.unpack_from('>BBB', content) + elif tbox == b"colr": + meth, prec, approx = struct.unpack_from(">BBB", content) if meth == 1: - cs = struct.unpack_from('>I', content, 3)[0] - if cs == 16: # sRGB - if nc == 1 and (bpc & 0x7f) > 8: - mode = 'I;16' + cs = struct.unpack_from(">I", content, 3)[0] + if cs == 16: # sRGB + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" elif nc == 1: - mode = 'L' + mode = "L" elif nc == 3: - mode = 'RGB' + mode = 
"RGB" elif nc == 4: - mode = 'RGBA' + mode = "RGBA" break elif cs == 17: # grayscale - if nc == 1 and (bpc & 0x7f) > 8: - mode = 'I;16' + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" elif nc == 1: - mode = 'L' + mode = "L" elif nc == 2: - mode = 'LA' + mode = "LA" break elif cs == 18: # sYCC if nc == 3: - mode = 'RGB' + mode = "RGB" elif nc == 4: - mode = 'RGBA' + mode = "RGBA" break if size is None or mode is None: raise SyntaxError("Malformed jp2 header") - return (size, mode) + return (size, mode, mimetype) + ## # Image plugin for JPEG2000 images. @@ -157,20 +164,21 @@ class Jpeg2KImageFile(ImageFile.ImageFile): def _open(self): sig = self.fp.read(4) - if sig == b'\xff\x4f\xff\x51': + if sig == b"\xff\x4f\xff\x51": self.codec = "j2k" self._size, self.mode = _parse_codestream(self.fp) else: sig = sig + self.fp.read(8) - if sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a': + if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a": self.codec = "jp2" - self._size, self.mode = _parse_jp2_header(self.fp) + header = _parse_jp2_header(self.fp) + self._size, self.mode, self.custom_mimetype = header else: - raise SyntaxError('not a JPEG 2000 file') + raise SyntaxError("not a JPEG 2000 file") if self.size is None or self.mode is None: - raise SyntaxError('unable to determine size/mode') + raise SyntaxError("unable to determine size/mode") self.reduce = 0 self.layers = 0 @@ -181,25 +189,33 @@ class Jpeg2KImageFile(ImageFile.ImageFile): try: fd = self.fp.fileno() length = os.fstat(fd).st_size - except: + except Exception: fd = -1 try: pos = self.fp.tell() - self.fp.seek(0, 2) + self.fp.seek(0, io.SEEK_END) length = self.fp.tell() - self.fp.seek(pos, 0) - except: + self.fp.seek(pos) + except Exception: length = -1 - self.tile = [('jpeg2k', (0, 0) + self.size, 0, - (self.codec, self.reduce, self.layers, fd, length))] + self.tile = [ + ( + "jpeg2k", + (0, 0) + self.size, + 0, + (self.codec, self.reduce, self.layers, fd, length), + ) + ] def load(self): if self.reduce: power = 1 << self.reduce adjust = power >> 1 - self._size = (int((self.size[0] + adjust) / power), - int((self.size[1] + adjust) / power)) + self._size = ( + int((self.size[0] + adjust) / power), + int((self.size[1] + adjust) / power), + ) if self.tile: # Update the reduce and layers settings @@ -211,39 +227,53 @@ class Jpeg2KImageFile(ImageFile.ImageFile): def _accept(prefix): - return (prefix[:4] == b'\xff\x4f\xff\x51' or - prefix[:12] == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a') + return ( + prefix[:4] == b"\xff\x4f\xff\x51" + or prefix[:12] == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ) # ------------------------------------------------------------ # Save support + def _save(im, fp, filename): - if filename.endswith('.j2k'): - kind = 'j2k' + if filename.endswith(".j2k"): + kind = "j2k" else: - kind = 'jp2' + kind = "jp2" # Get the keyword arguments info = im.encoderinfo - offset = info.get('offset', None) - tile_offset = info.get('tile_offset', None) - tile_size = info.get('tile_size', None) - quality_mode = info.get('quality_mode', 'rates') - quality_layers = info.get('quality_layers', None) - num_resolutions = info.get('num_resolutions', 0) - cblk_size = info.get('codeblock_size', None) - precinct_size = info.get('precinct_size', None) - irreversible = info.get('irreversible', False) - progression = info.get('progression', 'LRCP') - cinema_mode = info.get('cinema_mode', 'no') + offset = info.get("offset", None) + tile_offset = info.get("tile_offset", None) + tile_size = info.get("tile_size", None) + quality_mode = info.get("quality_mode", 
"rates") + quality_layers = info.get("quality_layers", None) + if quality_layers is not None and not ( + isinstance(quality_layers, (list, tuple)) + and all( + [ + isinstance(quality_layer, (int, float)) + for quality_layer in quality_layers + ] + ) + ): + raise ValueError("quality_layers must be a sequence of numbers") + + num_resolutions = info.get("num_resolutions", 0) + cblk_size = info.get("codeblock_size", None) + precinct_size = info.get("precinct_size", None) + irreversible = info.get("irreversible", False) + progression = info.get("progression", "LRCP") + cinema_mode = info.get("cinema_mode", "no") fd = -1 if hasattr(fp, "fileno"): try: fd = fp.fileno() - except: + except Exception: fd = -1 im.encoderconfig = ( @@ -258,10 +288,11 @@ def _save(im, fp, filename): irreversible, progression, cinema_mode, - fd + fd, ) - ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)]) + ImageFile._save(im, fp, [("jpeg2k", (0, 0) + im.size, 0, kind)]) + # ------------------------------------------------------------ # Registry stuff @@ -270,8 +301,8 @@ def _save(im, fp, filename): Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept) Image.register_save(Jpeg2KImageFile.format, _save) -Image.register_extensions(Jpeg2KImageFile.format, - [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]) +Image.register_extensions( + Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"] +) -Image.register_mime(Jpeg2KImageFile.format, 'image/jp2') -Image.register_mime(Jpeg2KImageFile.format, 'image/jpx') +Image.register_mime(Jpeg2KImageFile.format, "image/jp2") diff --git a/server/www/packages/packages-linux/x64/PIL/JpegImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/JpegImagePlugin.py index f206818..020b952 100644 --- a/server/www/packages/packages-linux/x64/PIL/JpegImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/JpegImagePlugin.py @@ -35,22 +35,26 @@ from __future__ import print_function import array -import struct import io +import struct import warnings -from . import Image, ImageFile, TiffImagePlugin -from ._binary import i8, o8, i16be as i16 -from .JpegPresets import presets -from ._util import isStringType +from . import Image, ImageFile, TiffImagePlugin +from ._binary import i8, i16be as i16, i32be as i32, o8 +from ._util import isStringType +from .JpegPresets import presets + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.6" # # Parser + def Skip(self, marker): - n = i16(self.fp.read(2))-2 + n = i16(self.fp.read(2)) - 2 ImageFile._safe_read(self.fp, n) @@ -59,7 +63,7 @@ def APP(self, marker): # Application marker. Store these in the APP dictionary. # Also look for well-known application markers. 
- n = i16(self.fp.read(2))-2 + n = i16(self.fp.read(2)) - 2 s = ImageFile._safe_read(self.fp, n) app = "APP%d" % (marker & 15) @@ -75,7 +79,7 @@ def APP(self, marker): try: jfif_unit = i8(s[7]) jfif_density = i16(s, 8), i16(s, 10) - except: + except Exception: pass else: if jfif_unit == 1: @@ -84,7 +88,7 @@ def APP(self, marker): self.info["jfif_density"] = jfif_density elif marker == 0xFFE1 and s[:5] == b"Exif\0": if "exif" not in self.info: - # extract Exif information (incomplete) + # extract EXIF information (incomplete) self.info["exif"] = s # FIXME: value will change elif marker == 0xFFE2 and s[:5] == b"FPXR\0": # extract FlashPix information (incomplete) @@ -102,12 +106,45 @@ def APP(self, marker): # reassemble the profile, rather than assuming that the APP2 # markers appear in the correct sequence. self.icclist.append(s) + elif marker == 0xFFED: + if s[:14] == b"Photoshop 3.0\x00": + blocks = s[14:] + # parse the image resource block + offset = 0 + photoshop = {} + while blocks[offset : offset + 4] == b"8BIM": + offset += 4 + # resource code + code = i16(blocks, offset) + offset += 2 + # resource name (usually empty) + name_len = i8(blocks[offset]) + # name = blocks[offset+1:offset+1+name_len] + offset = 1 + offset + name_len + if offset & 1: + offset += 1 + # resource data block + size = i32(blocks, offset) + offset += 4 + data = blocks[offset : offset + size] + if code == 0x03ED: # ResolutionInfo + data = { + "XResolution": i32(data[:4]) / 65536, + "DisplayedUnitsX": i16(data[4:8]), + "YResolution": i32(data[8:12]) / 65536, + "DisplayedUnitsY": i16(data[12:]), + } + photoshop[code] = data + offset = offset + size + if offset & 1: + offset += 1 + self.info["photoshop"] = photoshop elif marker == 0xFFEE and s[:5] == b"Adobe": self.info["adobe"] = i16(s, 5) # extract Adobe custom properties try: adobe_transform = i8(s[1]) - except: + except Exception: pass else: self.info["adobe_transform"] = adobe_transform @@ -121,19 +158,19 @@ def APP(self, marker): # If DPI isn't in JPEG header, fetch from EXIF if "dpi" not in self.info and "exif" in self.info: try: - exif = self._getexif() + exif = self.getexif() resolution_unit = exif[0x0128] x_resolution = exif[0x011A] try: - dpi = x_resolution[0] / x_resolution[1] + dpi = float(x_resolution[0]) / x_resolution[1] except TypeError: dpi = x_resolution if resolution_unit == 3: # cm # 1 dpcm = 2.54 dpi dpi *= 2.54 - self.info["dpi"] = dpi, dpi + self.info["dpi"] = int(dpi + 0.5), int(dpi + 0.5) except (KeyError, SyntaxError, ZeroDivisionError): - # SyntaxError for invalid/unreadable exif + # SyntaxError for invalid/unreadable EXIF # KeyError for dpi not included # ZeroDivisionError for invalid dpi rational value self.info["dpi"] = 72, 72 @@ -142,7 +179,7 @@ def APP(self, marker): def COM(self, marker): # # Comment marker. Store these in the APP dictionary. - n = i16(self.fp.read(2))-2 + n = i16(self.fp.read(2)) - 2 s = ImageFile._safe_read(self.fp, n) self.app["COM"] = s # compatibility @@ -157,7 +194,7 @@ def SOF(self, marker): # mode. Note that this could be made a bit brighter, by # looking for JFIF and Adobe APP markers. 
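Note: the new APP13 branch walks Photoshop "8BIM" image-resource blocks, where both the Pascal-style resource name and the data block are padded to even offsets. The same walk in isolation (Python 3; blob is the payload after the "Photoshop 3.0\x00" signature):

    import struct

    def iter_8bim(blob):
        offset = 0
        while blob[offset:offset + 4] == b"8BIM":
            offset += 4
            code = struct.unpack_from(">H", blob, offset)[0]
            offset += 2
            name_len = blob[offset]            # Pascal-style name, usually empty
            offset += 1 + name_len
            if offset & 1:                     # name padded to an even boundary
                offset += 1
            size = struct.unpack_from(">I", blob, offset)[0]
            offset += 4
            yield code, blob[offset:offset + size]
            offset += size
            if offset & 1:                     # data block padded as well
                offset += 1

    # e.g. photoshop = {code: data for code, data in iter_8bim(app13_payload)}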
- n = i16(self.fp.read(2))-2 + n = i16(self.fp.read(2)) - 2 s = ImageFile._safe_read(self.fp, n) self._size = i16(s[3:]), i16(s[1:]) @@ -192,9 +229,9 @@ def SOF(self, marker): self.icclist = None for i in range(6, len(s), 3): - t = s[i:i+3] + t = s[i : i + 3] # 4-tuples: id, vsamp, hsamp, qtable - self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2]))) + self.layer.append((t[0], i8(t[1]) // 16, i8(t[1]) & 15, i8(t[2]))) def DQT(self, marker): @@ -206,13 +243,13 @@ def DQT(self, marker): # FIXME: The quantization tables can be used to estimate the # compression quality. - n = i16(self.fp.read(2))-2 + n = i16(self.fp.read(2)) - 2 s = ImageFile._safe_read(self.fp, n) while len(s): if len(s) < 65: raise SyntaxError("bad quantization table marker") v = i8(s[0]) - if v//16 == 0: + if v // 16 == 0: self.quantization[v & 15] = array.array("B", s[1:65]) s = s[65:] else: @@ -286,7 +323,7 @@ MARKER = { 0xFFFB: ("JPG11", "Extension 11", None), 0xFFFC: ("JPG12", "Extension 12", None), 0xFFFD: ("JPG13", "Extension 13", None), - 0xFFFE: ("COM", "Comment", COM) + 0xFFFE: ("COM", "Comment", COM), } @@ -297,6 +334,7 @@ def _accept(prefix): ## # Image plugin for JPEG and JFIF images. + class JpegImageFile(ImageFile.ImageFile): format = "JPEG" @@ -340,8 +378,7 @@ class JpegImageFile(ImageFile.ImageFile): rawmode = self.mode if self.mode == "CMYK": rawmode = "CMYK;I" # assume adobe conventions - self.tile = [("jpeg", (0, 0) + self.size, 0, - (rawmode, ""))] + self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))] # self.__offset = self.fp.tell() break s = self.fp.read(1) @@ -389,8 +426,13 @@ class JpegImageFile(ImageFile.ImageFile): for s in [8, 4, 2, 1]: if scale >= s: break - e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1] - self._size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s) + e = ( + e[0], + e[1], + (e[2] - e[0] + s - 1) // s + e[0], + (e[3] - e[1] + s - 1) // s + e[1], + ) + self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s) scale = s self.tile = [(d, e, o, a)] @@ -405,6 +447,7 @@ class JpegImageFile(ImageFile.ImageFile): import subprocess import tempfile import os + f, path = tempfile.mkstemp() os.close(f) if os.path.exists(self.filename): @@ -437,60 +480,14 @@ class JpegImageFile(ImageFile.ImageFile): def _fixup_dict(src_dict): # Helper function for _getexif() # returns a dict with any single item tuples/lists as individual values - def _fixup(value): - try: - if len(value) == 1 and not isinstance(value, dict): - return value[0] - except: - pass - return value - - return {k: _fixup(v) for k, v in src_dict.items()} + exif = Image.Exif() + return exif._fixup_dict(src_dict) def _getexif(self): - # Extract EXIF information. This method is highly experimental, - # and is likely to be replaced with something better in a future - # version. - - # The EXIF record consists of a TIFF file embedded in a JPEG - # application marker (!). - try: - data = self.info["exif"] - except KeyError: + if "exif" not in self.info: return None - file = io.BytesIO(data[6:]) - head = file.read(8) - # process dictionary - info = TiffImagePlugin.ImageFileDirectory_v1(head) - info.load(file) - exif = dict(_fixup_dict(info)) - # get exif extension - try: - # exif field 0x8769 is an offset pointer to the location - # of the nested embedded exif ifd. - # It should be a long, but may be corrupted. 
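Note: the hand-rolled IFD chasing removed here (following the 0x8769 EXIF and 0x8825 GPS offset pointers) now lives inside Image.Exif; _getexif() remains only as a thin compatibility wrapper. Sketch, assuming photo.jpg carries EXIF:

    from PIL import Image

    im = Image.open("photo.jpg")      # hypothetical input file
    exif = im.getexif()               # dict-like Image.Exif instance
    print(exif.get(0x0112))           # Orientation from the base IFD, if present
    legacy = im._getexif()            # now equivalent to dict(im.getexif())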
- file.seek(exif[0x8769]) - except (KeyError, TypeError): - pass - else: - info = TiffImagePlugin.ImageFileDirectory_v1(head) - info.load(file) - exif.update(_fixup_dict(info)) - # get gpsinfo extension - try: - # exif field 0x8825 is an offset pointer to the location - # of the nested embedded gps exif ifd. - # It should be a long, but may be corrupted. - file.seek(exif[0x8825]) - except (KeyError, TypeError): - pass - else: - info = TiffImagePlugin.ImageFileDirectory_v1(head) - info.load(file) - exif[0x8825] = _fixup_dict(info) - - return exif + return dict(self.getexif()) def _getmp(self): @@ -506,13 +503,14 @@ def _getmp(self): return None file_contents = io.BytesIO(data) head = file_contents.read(8) - endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<' + endianness = ">" if head[:4] == b"\x4d\x4d\x00\x2a" else "<" # process dictionary try: info = TiffImagePlugin.ImageFileDirectory_v2(head) + file_contents.seek(info.next) info.load(file_contents) mp = dict(info) - except: + except Exception: raise SyntaxError("malformed MP Index (unreadable directory)") # it's an error not to have a number of images try: @@ -525,37 +523,33 @@ def _getmp(self): rawmpentries = mp[0xB002] for entrynum in range(0, quant): unpackedentry = struct.unpack_from( - '{}LLLHH'.format(endianness), rawmpentries, entrynum * 16) - labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1', - 'EntryNo2') + "{}LLLHH".format(endianness), rawmpentries, entrynum * 16 + ) + labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2") mpentry = dict(zip(labels, unpackedentry)) mpentryattr = { - 'DependentParentImageFlag': bool(mpentry['Attribute'] & - (1 << 31)), - 'DependentChildImageFlag': bool(mpentry['Attribute'] & - (1 << 30)), - 'RepresentativeImageFlag': bool(mpentry['Attribute'] & - (1 << 29)), - 'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27, - 'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24, - 'MPType': mpentry['Attribute'] & 0x00FFFFFF + "DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)), + "DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)), + "RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)), + "Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27, + "ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24, + "MPType": mpentry["Attribute"] & 0x00FFFFFF, } - if mpentryattr['ImageDataFormat'] == 0: - mpentryattr['ImageDataFormat'] = 'JPEG' + if mpentryattr["ImageDataFormat"] == 0: + mpentryattr["ImageDataFormat"] = "JPEG" else: raise SyntaxError("unsupported picture format in MPO") mptypemap = { - 0x000000: 'Undefined', - 0x010001: 'Large Thumbnail (VGA Equivalent)', - 0x010002: 'Large Thumbnail (Full HD Equivalent)', - 0x020001: 'Multi-Frame Image (Panorama)', - 0x020002: 'Multi-Frame Image: (Disparity)', - 0x020003: 'Multi-Frame Image: (Multi-Angle)', - 0x030000: 'Baseline MP Primary Image' + 0x000000: "Undefined", + 0x010001: "Large Thumbnail (VGA Equivalent)", + 0x010002: "Large Thumbnail (Full HD Equivalent)", + 0x020001: "Multi-Frame Image (Panorama)", + 0x020002: "Multi-Frame Image: (Disparity)", + 0x020003: "Multi-Frame Image: (Multi-Angle)", + 0x030000: "Baseline MP Primary Image", } - mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'], - 'Unknown') - mpentry['Attribute'] = mpentryattr + mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown") + mpentry["Attribute"] = mpentryattr mpentries.append(mpentry) mp[0xB002] = mpentries except KeyError: @@ -578,19 +572,24 @@ RAWMODE = { "YCbCr": "YCbCr", } 
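Note: each MP Index entry parsed above is a fixed 16-byte record ("<LLLHH" or ">LLLHH" depending on the TIFF byte order), and the Attribute word packs three flags, a reserved field, the data format and the MP type. Decoding it in isolation:

    import struct

    def decode_mp_attribute(attr):
        return {
            "DependentParentImageFlag": bool(attr & (1 << 31)),
            "DependentChildImageFlag": bool(attr & (1 << 30)),
            "RepresentativeImageFlag": bool(attr & (1 << 29)),
            "Reserved": (attr & (3 << 27)) >> 27,
            "ImageDataFormat": (attr & (7 << 24)) >> 24,   # 0 => JPEG
            "MPType": attr & 0x00FFFFFF,                   # 0x020001 => panorama
        }

    raw_entry = struct.pack(">LLLHH", 0x020001, 0, 0, 0, 0)   # one 16-byte entry
    attribute = struct.unpack_from(">LLLHH", raw_entry)[0]
    assert decode_mp_attribute(attribute)["MPType"] == 0x020001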
-zigzag_index = (0, 1, 5, 6, 14, 15, 27, 28, - 2, 4, 7, 13, 16, 26, 29, 42, - 3, 8, 12, 17, 25, 30, 41, 43, - 9, 11, 18, 24, 31, 40, 44, 53, - 10, 19, 23, 32, 39, 45, 52, 54, - 20, 22, 33, 38, 46, 51, 55, 60, - 21, 34, 37, 47, 50, 56, 59, 61, - 35, 36, 48, 49, 57, 58, 62, 63) +# fmt: off +zigzag_index = ( + 0, 1, 5, 6, 14, 15, 27, 28, + 2, 4, 7, 13, 16, 26, 29, 42, + 3, 8, 12, 17, 25, 30, 41, 43, + 9, 11, 18, 24, 31, 40, 44, 53, + 10, 19, 23, 32, 39, 45, 52, 54, + 20, 22, 33, 38, 46, 51, 55, 60, + 21, 34, 37, 47, 50, 56, 59, 61, + 35, 36, 48, 49, 57, 58, 62, 63, +) -samplings = {(1, 1, 1, 1, 1, 1): 0, - (2, 1, 1, 1, 1, 1): 1, - (2, 2, 1, 1, 1, 1): 2, - } +samplings = { + (1, 1, 1, 1, 1, 1): 0, + (2, 1, 1, 1, 1, 1): 1, + (2, 2, 1, 1, 1, 1): 2, +} +# fmt: on def convert_dict_qtables(qtables): @@ -608,7 +607,7 @@ def get_sampling(im): # NOTE: currently Pillow can't encode JPEG to YCCK format. # If YCCK support is added in the future, subsampling code will have # to be updated (here and in JpegEncode.c) to deal with 4 layers. - if not hasattr(im, 'layers') or im.layers in (1, 4): + if not hasattr(im, "layers") or im.layers in (1, 4): return -1 sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] return samplings.get(sampling, -1) @@ -636,15 +635,15 @@ def _save(im, fp, filename): elif quality in presets: preset = presets[quality] quality = 0 - subsampling = preset.get('subsampling', -1) - qtables = preset.get('quantization') + subsampling = preset.get("subsampling", -1) + qtables = preset.get("quantization") elif not isinstance(quality, int): raise ValueError("Invalid quality setting") else: if subsampling in presets: - subsampling = presets[subsampling].get('subsampling', -1) + subsampling = presets[subsampling].get("subsampling", -1) if isStringType(qtables) and qtables in presets: - qtables = presets[qtables].get('quantization') + qtables = presets[qtables].get("quantization") if subsampling == "4:4:4": subsampling = 0 @@ -658,8 +657,7 @@ def _save(im, fp, filename): subsampling = 2 elif subsampling == "keep": if im.format != "JPEG": - raise ValueError( - "Cannot use 'keep' when original image is not a JPEG") + raise ValueError("Cannot use 'keep' when original image is not a JPEG") subsampling = get_sampling(im) def validate_qtables(qtables): @@ -667,12 +665,15 @@ def _save(im, fp, filename): return qtables if isStringType(qtables): try: - lines = [int(num) for line in qtables.splitlines() - for num in line.split('#', 1)[0].split()] + lines = [ + int(num) + for line in qtables.splitlines() + for num in line.split("#", 1)[0].split() + ] except ValueError: raise ValueError("Invalid quantization table") else: - qtables = [lines[s:s+64] for s in range(0, len(lines), 64)] + qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)] if isinstance(qtables, (tuple, list, dict)): if isinstance(qtables, dict): qtables = convert_dict_qtables(qtables) @@ -684,7 +685,7 @@ def _save(im, fp, filename): try: if len(table) != 64: raise TypeError - table = array.array('B', table) + table = array.array("B", table) except TypeError: raise ValueError("Invalid quantization table") else: @@ -693,8 +694,7 @@ def _save(im, fp, filename): if qtables == "keep": if im.format != "JPEG": - raise ValueError( - "Cannot use 'keep' when original image is not a JPEG") + raise ValueError("Cannot use 'keep' when original image is not a JPEG") qtables = getattr(im, "quantization", None) qtables = validate_qtables(qtables) @@ -712,18 +712,27 @@ def _save(im, fp, filename): i = 1 for marker in markers: size = 
struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker)) - extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) + - o8(len(markers)) + marker) + extra += ( + b"\xFF\xE2" + + size + + b"ICC_PROFILE\0" + + o8(i) + + o8(len(markers)) + + marker + ) i += 1 # "progressive" is the official name, but older documentation # says "progression" # FIXME: issue a warning if the wrong form is used (post-1.1.7) - progressive = (info.get("progressive", False) or - info.get("progression", False)) + progressive = info.get("progressive", False) or info.get("progression", False) optimize = info.get("optimize", False) + exif = info.get("exif", b"") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + # get keyword arguments im.encoderconfig = ( quality, @@ -731,12 +740,13 @@ def _save(im, fp, filename): info.get("smooth", 0), optimize, info.get("streamtype", 0), - dpi[0], dpi[1], + dpi[0], + dpi[1], subsampling, qtables, extra, - info.get("exif", b"") - ) + exif, + ) # if we optimize, libjpeg needs a buffer big enough to hold the whole image # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is @@ -745,7 +755,7 @@ def _save(im, fp, filename): bufsize = 0 if optimize or progressive: # CMYK can be bigger - if im.mode == 'CMYK': + if im.mode == "CMYK": bufsize = 4 * im.size[0] * im.size[1] # keep sets quality to 0, but the actual value may be high. elif quality >= 95 or quality == 0: @@ -753,18 +763,18 @@ def _save(im, fp, filename): else: bufsize = im.size[0] * im.size[1] - # The exif info needs to be written as one block, + APP1, + one spare byte. + # The EXIF info needs to be written as one block, + APP1, + one spare byte. # Ensure that our buffer is big enough. Same with the icc_profile block. - bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5, - len(extra) + 1) + bufsize = max(ImageFile.MAXBLOCK, bufsize, len(exif) + 5, len(extra) + 1) - ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize) + ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize) def _save_cjpeg(im, fp, filename): # ALTERNATIVE: handle JPEGs via the IJG command line utilities. import os import subprocess + tempfile = im._dump() subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) try: @@ -782,13 +792,17 @@ def jpeg_factory(fp=None, filename=None): if mpheader[45057] > 1: # It's actually an MPO from .MpoImagePlugin import MpoImageFile - im = MpoImageFile(fp, filename) + + # Don't reload everything, just convert it. 
+ im = MpoImageFile.adopt(im, mpheader) except (TypeError, IndexError): # It is really a JPEG pass except SyntaxError: - warnings.warn("Image appears to be a malformed MPO file, it will be " - "interpreted as a base JPEG file") + warnings.warn( + "Image appears to be a malformed MPO file, it will be " + "interpreted as a base JPEG file" + ) return im @@ -798,7 +812,6 @@ def jpeg_factory(fp=None, filename=None): Image.register_open(JpegImageFile.format, jpeg_factory, _accept) Image.register_save(JpegImageFile.format, _save) -Image.register_extensions(JpegImageFile.format, - [".jfif", ".jpe", ".jpg", ".jpeg"]) +Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) Image.register_mime(JpegImageFile.format, "image/jpeg") diff --git a/server/www/packages/packages-linux/x64/PIL/JpegPresets.py b/server/www/packages/packages-linux/x64/PIL/JpegPresets.py index 5f01f0d..387844f 100644 --- a/server/www/packages/packages-linux/x64/PIL/JpegPresets.py +++ b/server/www/packages/packages-linux/x64/PIL/JpegPresets.py @@ -62,11 +62,13 @@ The tables format between im.quantization and quantization in presets differ in You can convert the dict format to the preset format with the `JpegImagePlugin.convert_dict_qtables(dict_qtables)` function. -Libjpeg ref.: https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html +Libjpeg ref.: +https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html """ -presets = { +# fmt: off +presets = { # noqa: E128 'web_low': {'subsampling': 2, # "4:2:0" 'quantization': [ [20, 16, 25, 39, 50, 46, 62, 68, @@ -239,3 +241,4 @@ presets = { 15, 12, 12, 12, 12, 12, 12, 12] ]}, } +# fmt: on diff --git a/server/www/packages/packages-linux/x64/PIL/McIdasImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/McIdasImagePlugin.py index 161fb5e..bddd33a 100644 --- a/server/www/packages/packages-linux/x64/PIL/McIdasImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/McIdasImagePlugin.py @@ -17,8 +17,11 @@ # import struct + from . import Image, ImageFile +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.2" @@ -29,6 +32,7 @@ def _accept(s): ## # Image plugin for McIdas area images. + class McIdasImageFile(ImageFile.ImageFile): format = "MCIDAS" @@ -62,7 +66,7 @@ class McIdasImageFile(ImageFile.ImageFile): self._size = w[10], w[9] offset = w[34] + w[15] - stride = w[15] + w[10]*w[11]*w[14] + stride = w[15] + w[10] * w[11] * w[14] self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))] diff --git a/server/www/packages/packages-linux/x64/PIL/MicImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/MicImagePlugin.py index 1dbb6a5..b48905b 100644 --- a/server/www/packages/packages-linux/x64/PIL/MicImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/MicImagePlugin.py @@ -17,10 +17,12 @@ # -from . import Image, TiffImagePlugin - import olefile +from . import Image, TiffImagePlugin + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" @@ -35,6 +37,7 @@ def _accept(prefix): ## # Image plugin for Microsoft's Image Composer file format. 
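Note: the factory above now converts the parsed JpegImageFile in place instead of re-reading the stream; MpoImageFile.adopt() (added further down) relies on rebinding __class__, the same trick the FakeImage shim in getiptcinfo() uses. The pattern in miniature:

    class Base(object):
        pass

    class Extended(Base):
        @staticmethod
        def adopt(instance):
            instance.__class__ = Extended   # same object, upgraded in place
            return instance

    obj = Base()
    assert Extended.adopt(obj) is obj and isinstance(obj, Extended)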
+ class MicImageFile(TiffImagePlugin.TiffImageFile): format = "MIC" @@ -95,9 +98,17 @@ class MicImageFile(TiffImagePlugin.TiffImageFile): self.frame = frame def tell(self): - return self.frame + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + # # -------------------------------------------------------------------- diff --git a/server/www/packages/packages-linux/x64/PIL/MpegImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/MpegImagePlugin.py index 15c7afc..9c662fc 100644 --- a/server/www/packages/packages-linux/x64/PIL/MpegImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/MpegImagePlugin.py @@ -17,14 +17,16 @@ from . import Image, ImageFile from ._binary import i8 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" # # Bitstream parser -class BitStream(object): +class BitStream(object): def __init__(self, fp): self.fp = fp self.bits = 0 @@ -59,6 +61,7 @@ class BitStream(object): # Image plugin for MPEG streams. This plugin can identify a stream, # but it cannot read it. + class MpegImageFile(ImageFile.ImageFile): format = "MPEG" diff --git a/server/www/packages/packages-linux/x64/PIL/MpoImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/MpoImagePlugin.py index a1a8d65..938f2a5 100644 --- a/server/www/packages/packages-linux/x64/PIL/MpoImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/MpoImagePlugin.py @@ -18,8 +18,11 @@ # See the README file for information on usage and redistribution. # -from . import Image, JpegImagePlugin +from . import Image, ImageFile, JpegImagePlugin +from ._binary import i16be as i16 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" @@ -35,6 +38,7 @@ def _save(im, fp, filename): ## # Image plugin for MPO images. + class MpoImageFile(JpegImagePlugin.JpegImageFile): format = "MPO" @@ -44,15 +48,19 @@ class MpoImageFile(JpegImagePlugin.JpegImageFile): def _open(self): self.fp.seek(0) # prep the fp in order to pass the JPEG test JpegImagePlugin.JpegImageFile._open(self) - self.mpinfo = self._getmp() + self._after_jpeg_open() + + def _after_jpeg_open(self, mpheader=None): + self.mpinfo = mpheader if mpheader is not None else self._getmp() self.__framecount = self.mpinfo[0xB001] - self.__mpoffsets = [mpent['DataOffset'] + self.info['mpoffset'] - for mpent in self.mpinfo[0xB002]] + self.__mpoffsets = [ + mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002] + ] self.__mpoffsets[0] = 0 # Note that the following assertion will only be invalid if something # gets broken within JpegImagePlugin. 
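Note: once _after_jpeg_open() has computed __mpoffsets, frames are addressed through the standard seek/tell protocol (n_frames is assumed to be exposed by the plugin as in upstream Pillow). Sketch, assuming a two-frame multi.mpo:

    from PIL import Image

    im = Image.open("multi.mpo")      # hypothetical multi-picture file
    for frame in range(im.n_frames):
        im.seek(frame)                # repositions fp at __mpoffsets[frame]
        print(frame, im.size)         # size may be corrected from EXIF 40962/40963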
assert self.__framecount == len(self.__mpoffsets) - del self.info['mpoffset'] # no longer needed + del self.info["mpoffset"] # no longer needed self.__fp = self.fp # FIXME: hack self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame self.__frame = 0 @@ -76,14 +84,49 @@ class MpoImageFile(JpegImagePlugin.JpegImageFile): return self.fp = self.__fp self.offset = self.__mpoffsets[frame] - self.tile = [ - ("jpeg", (0, 0) + self.size, self.offset, (self.mode, "")) - ] + + self.fp.seek(self.offset + 2) # skip SOI marker + if i16(self.fp.read(2)) == 0xFFE1: # APP1 + n = i16(self.fp.read(2)) - 2 + self.info["exif"] = ImageFile._safe_read(self.fp, n) + + exif = self.getexif() + if 40962 in exif and 40963 in exif: + self._size = (exif[40962], exif[40963]) + elif "exif" in self.info: + del self.info["exif"] + + self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))] self.__frame = frame def tell(self): return self.__frame + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + @staticmethod + def adopt(jpeg_instance, mpheader=None): + """ + Transform the instance of JpegImageFile into + an instance of MpoImageFile. + After the call, the JpegImageFile is extended + to be an MpoImageFile. + + This is essentially useful when opening a JPEG + file that reveals itself as an MPO, to avoid + double call to _open. + """ + jpeg_instance.__class__ = MpoImageFile + jpeg_instance._after_jpeg_open(mpheader) + return jpeg_instance + # --------------------------------------------------------------------- # Registry stuff diff --git a/server/www/packages/packages-linux/x64/PIL/MspImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/MspImagePlugin.py index 74c6817..7315ab6 100644 --- a/server/www/packages/packages-linux/x64/PIL/MspImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/MspImagePlugin.py @@ -23,11 +23,14 @@ # # See also: http://www.fileformat.info/format/mspaint/egff.htm -from . import Image, ImageFile -from ._binary import i16le as i16, o16le as o16, i8 -import struct import io +import struct +from . import Image, ImageFile +from ._binary import i8, i16le as i16, o16le as o16 + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" @@ -43,6 +46,7 @@ def _accept(prefix): # Image plugin for Windows MSP images. This plugin supports both # uncompressed (Windows 1.0). 
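Note: the MSP _open() below validates the 32-byte header by XOR-ing its sixteen little-endian words; the header carries a checksum word chosen so that the whole header XORs to zero. The same test in isolation:

    import struct

    def msp_header_ok(header):              # header: the first 32 bytes of the file
        checksum = 0
        for word in struct.unpack("<16H", header):
            checksum ^= word
        return checksum == 0                # stored checksum word zeroes the XOR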
+ class MspImageFile(ImageFile.ImageFile): format = "MSP" @@ -58,7 +62,7 @@ class MspImageFile(ImageFile.ImageFile): # Header checksum checksum = 0 for i in range(0, 32, 2): - checksum = checksum ^ i16(s[i:i+2]) + checksum = checksum ^ i16(s[i : i + 2]) if checksum != 0: raise SyntaxError("bad MSP checksum") @@ -66,9 +70,9 @@ class MspImageFile(ImageFile.ImageFile): self._size = i16(s[4:]), i16(s[6:]) if s[:4] == b"DanM": - self.tile = [("raw", (0, 0)+self.size, 32, ("1", 0, 1))] + self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))] else: - self.tile = [("MSP", (0, 0)+self.size, 32, None)] + self.tile = [("MSP", (0, 0) + self.size, 32, None)] class MspDecoder(ImageFile.PyDecoder): @@ -111,11 +115,12 @@ class MspDecoder(ImageFile.PyDecoder): def decode(self, buffer): img = io.BytesIO() - blank_line = bytearray((0xff,)*((self.state.xsize+7)//8)) + blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8)) try: self.fd.seek(32) - rowmap = struct.unpack_from("<%dH" % (self.state.ysize), - self.fd.read(self.state.ysize*2)) + rowmap = struct.unpack_from( + "<%dH" % (self.state.ysize), self.fd.read(self.state.ysize * 2) + ) except struct.error: raise IOError("Truncated MSP file in row map") @@ -127,8 +132,8 @@ class MspDecoder(ImageFile.PyDecoder): row = self.fd.read(rowlen) if len(row) != rowlen: raise IOError( - "Truncated MSP file, expected %d bytes on row %s", - (rowlen, x)) + "Truncated MSP file, expected %d bytes on row %s", (rowlen, x) + ) idx = 0 while idx < rowlen: runtype = i8(row[idx]) @@ -139,7 +144,7 @@ class MspDecoder(ImageFile.PyDecoder): idx += 2 else: runcount = runtype - img.write(row[idx:idx+runcount]) + img.write(row[idx : idx + runcount]) idx += runcount except struct.error: @@ -150,7 +155,7 @@ class MspDecoder(ImageFile.PyDecoder): return 0, 0 -Image.register_decoder('MSP', MspDecoder) +Image.register_decoder("MSP", MspDecoder) # @@ -181,7 +186,7 @@ def _save(im, fp, filename): fp.write(o16(h)) # image body - ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 32, ("1", 0, 1))]) + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 32, ("1", 0, 1))]) # diff --git a/server/www/packages/packages-linux/x64/PIL/OleFileIO.py b/server/www/packages/packages-linux/x64/PIL/OleFileIO.py deleted file mode 100644 index b3caa10..0000000 --- a/server/www/packages/packages-linux/x64/PIL/OleFileIO.py +++ /dev/null @@ -1,4 +0,0 @@ -raise ImportError( - 'PIL.OleFileIO is deprecated. Use the olefile Python package ' - 'instead. This module will be removed in a future version.' -) diff --git a/server/www/packages/packages-linux/x64/PIL/PSDraw.py b/server/www/packages/packages-linux/x64/PIL/PSDraw.py index d2ded6f..f37701c 100644 --- a/server/www/packages/packages-linux/x64/PIL/PSDraw.py +++ b/server/www/packages/packages-linux/x64/PIL/PSDraw.py @@ -15,9 +15,10 @@ # See the README file for information on usage and redistribution. # +import sys + from . import EpsImagePlugin from ._util import py3 -import sys ## # Simple Postscript graphics interface. @@ -38,16 +39,18 @@ class PSDraw(object): if not py3 or self.fp == sys.stdout: self.fp.write(to_write) else: - self.fp.write(bytes(to_write, 'UTF-8')) + self.fp.write(bytes(to_write, "UTF-8")) def begin_document(self, id=None): """Set up printing of a document. 
(Write Postscript DSC header.)""" # FIXME: incomplete - self._fp_write("%!PS-Adobe-3.0\n" - "save\n" - "/showpage { } def\n" - "%%EndComments\n" - "%%BeginDocument\n") + self._fp_write( + "%!PS-Adobe-3.0\n" + "save\n" + "/showpage { } def\n" + "%%EndComments\n" + "%%BeginDocument\n" + ) # self._fp_write(ERROR_PS) # debugging! self._fp_write(EDROFF_PS) self._fp_write(VDI_PS) @@ -56,9 +59,7 @@ class PSDraw(object): def end_document(self): """Ends printing. (Write Postscript DSC footer.)""" - self._fp_write("%%EndDocument\n" - "restore showpage\n" - "%%End\n") + self._fp_write("%%EndDocument\nrestore showpage\n%%End\n") if hasattr(self.fp, "flush"): self.fp.flush() @@ -71,8 +72,7 @@ class PSDraw(object): """ if font not in self.isofont: # reencode font - self._fp_write("/PSDraw-%s ISOLatin1Encoding /%s E\n" % - (font, font)) + self._fp_write("/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font, font)) self.isofont[font] = 1 # rough self._fp_write("/F0 %d /PSDraw-%s F\n" % (size, font)) @@ -142,6 +142,7 @@ class PSDraw(object): EpsImagePlugin._save(im, self.fp, None, 0) self._fp_write("\ngrestore\n") + # -------------------------------------------------------------------- # Postscript driver diff --git a/server/www/packages/packages-linux/x64/PIL/PaletteFile.py b/server/www/packages/packages-linux/x64/PIL/PaletteFile.py index 9ed69d6..ab22d5f 100644 --- a/server/www/packages/packages-linux/x64/PIL/PaletteFile.py +++ b/server/www/packages/packages-linux/x64/PIL/PaletteFile.py @@ -15,10 +15,10 @@ from ._binary import o8 - ## # File handler for Teragon-style palette files. + class PaletteFile(object): rawmode = "RGB" diff --git a/server/www/packages/packages-linux/x64/PIL/PalmImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PalmImagePlugin.py index 7d7b165..dd068d7 100644 --- a/server/www/packages/packages-linux/x64/PIL/PalmImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/PalmImagePlugin.py @@ -10,9 +10,12 @@ from . import Image, ImageFile from ._binary import o8, o16be as o16b +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. 
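Note: the PSDraw sequence above emits a minimal DSC document: begin_document() writes the prolog, image() places a picture in a points-based box, end_document() restores state and flushes. A short usage sketch with assumed file names:

    from PIL import Image, PSDraw

    im = Image.open("photo.jpg")                  # hypothetical input
    with open("out.ps", "wb") as fp:              # binary mode; _fp_write encodes
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        ps.image((72, 72, 540, 720), im, 75)      # box in points at 75 dpi
        ps.end_document()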
__version__ = "1.0" -_Palm8BitColormapValues = ( +# fmt: off +_Palm8BitColormapValues = ( # noqa: E131 (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255), (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204), (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204), @@ -77,6 +80,7 @@ _Palm8BitColormapValues = ( (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)) +# fmt: on # so build a prototype image to be used for palette resampling @@ -86,7 +90,7 @@ def build_prototype_image(): palettedata = () for colormapValue in _Palm8BitColormapValues: palettedata += colormapValue - palettedata += (0, 0, 0)*(256 - len(_Palm8BitColormapValues)) + palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues)) image.putpalette(palettedata) return image @@ -98,17 +102,9 @@ Palm8BitColormapImage = build_prototype_image() # # -------------------------------------------------------------------- -_FLAGS = { - "custom-colormap": 0x4000, - "is-compressed": 0x8000, - "has-transparent": 0x2000, - } +_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000} -_COMPRESSION_TYPES = { - "none": 0xFF, - "rle": 0x01, - "scanline": 0x00, - } +_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00} # @@ -117,6 +113,7 @@ _COMPRESSION_TYPES = { ## # (Internal) Image save plugin for the Palm format. + def _save(im, fp, filename): if im.mode == "P": @@ -128,28 +125,24 @@ def _save(im, fp, filename): bpp = 8 version = 1 - elif (im.mode == "L" and - "bpp" in im.encoderinfo and - im.encoderinfo["bpp"] in (1, 2, 4)): + elif im.mode == "L": + if im.encoderinfo.get("bpp") in (1, 2, 4): + # this is 8-bit grayscale, so we shift it to get the high-order bits, + # and invert it because + # Palm does greyscale from white (0) to black (1) + bpp = im.encoderinfo["bpp"] + im = im.point( + lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift) + ) + elif im.info.get("bpp") in (1, 2, 4): + # here we assume that even though the inherent mode is 8-bit grayscale, + # only the lower bpp bits are significant. + # We invert them to match the Palm. + bpp = im.info["bpp"] + im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval)) + else: + raise IOError("cannot write mode %s as Palm" % im.mode) - # this is 8-bit grayscale, so we shift it to get the high-order bits, - # and invert it because - # Palm does greyscale from white (0) to black (1) - bpp = im.encoderinfo["bpp"] - im = im.point( - lambda x, shift=8-bpp, maxval=(1 << bpp)-1: maxval - (x >> shift)) - # we ignore the palette here - im.mode = "P" - rawmode = "P;" + str(bpp) - version = 1 - - elif im.mode == "L" and "bpp" in im.info and im.info["bpp"] in (1, 2, 4): - - # here we assume that even though the inherent mode is 8-bit grayscale, - # only the lower bpp bits are significant. - # We invert them to match the Palm. 
- bpp = im.info["bpp"] - im = im.point(lambda x, maxval=(1 << bpp)-1: maxval - (x & maxval)) # we ignore the palette here im.mode = "P" rawmode = "P;" + str(bpp) @@ -175,7 +168,7 @@ def _save(im, fp, filename): cols = im.size[0] rows = im.size[1] - rowbytes = int((cols + (16//bpp - 1)) / (16 // bpp)) * 2 + rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2 transparent_index = 0 compression_type = _COMPRESSION_TYPES["none"] @@ -199,7 +192,7 @@ def _save(im, fp, filename): fp.write(o16b(offset)) fp.write(o8(transparent_index)) fp.write(o8(compression_type)) - fp.write(o16b(0)) # reserved by Palm + fp.write(o16b(0)) # reserved by Palm # now write colormap if necessary @@ -207,20 +200,21 @@ def _save(im, fp, filename): fp.write(o16b(256)) for i in range(256): fp.write(o8(i)) - if colormapmode == 'RGB': + if colormapmode == "RGB": fp.write( - o8(colormap[3 * i]) + - o8(colormap[3 * i + 1]) + - o8(colormap[3 * i + 2])) - elif colormapmode == 'RGBA': + o8(colormap[3 * i]) + + o8(colormap[3 * i + 1]) + + o8(colormap[3 * i + 2]) + ) + elif colormapmode == "RGBA": fp.write( - o8(colormap[4 * i]) + - o8(colormap[4 * i + 1]) + - o8(colormap[4 * i + 2])) + o8(colormap[4 * i]) + + o8(colormap[4 * i + 1]) + + o8(colormap[4 * i + 2]) + ) # now convert data to raw form - ImageFile._save( - im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, rowbytes, 1))]) + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))]) if hasattr(fp, "flush"): fp.flush() diff --git a/server/www/packages/packages-linux/x64/PIL/PcdImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PcdImagePlugin.py index 87e5792..6f01845 100644 --- a/server/www/packages/packages-linux/x64/PIL/PcdImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/PcdImagePlugin.py @@ -18,6 +18,8 @@ from . import Image, ImageFile from ._binary import i8 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" @@ -26,6 +28,7 @@ __version__ = "0.1" # image from the file; higher resolutions are encoded in a proprietary # encoding. + class PcdImageFile(ImageFile.ImageFile): format = "PCD" @@ -49,7 +52,7 @@ class PcdImageFile(ImageFile.ImageFile): self.mode = "RGB" self._size = 768, 512 # FIXME: not correct for rotated images! - self.tile = [("pcd", (0, 0)+self.size, 96*2048, None)] + self.tile = [("pcd", (0, 0) + self.size, 96 * 2048, None)] def load_end(self): if self.tile_post_rotate: diff --git a/server/www/packages/packages-linux/x64/PIL/PcfFontFile.py b/server/www/packages/packages-linux/x64/PIL/PcfFontFile.py index eba85fe..0741246 100644 --- a/server/www/packages/packages-linux/x64/PIL/PcfFontFile.py +++ b/server/www/packages/packages-linux/x64/PIL/PcfFontFile.py @@ -16,39 +16,42 @@ # See the README file for information on usage and redistribution. # -from . import Image, FontFile -from ._binary import i8, i16le as l16, i32le as l32, i16be as b16, i32be as b32 +import io + +from . 
import FontFile, Image +from ._binary import i8, i16be as b16, i16le as l16, i32be as b32, i32le as l32 # -------------------------------------------------------------------- # declarations PCF_MAGIC = 0x70636601 # "\x01fcp" -PCF_PROPERTIES = (1 << 0) -PCF_ACCELERATORS = (1 << 1) -PCF_METRICS = (1 << 2) -PCF_BITMAPS = (1 << 3) -PCF_INK_METRICS = (1 << 4) -PCF_BDF_ENCODINGS = (1 << 5) -PCF_SWIDTHS = (1 << 6) -PCF_GLYPH_NAMES = (1 << 7) -PCF_BDF_ACCELERATORS = (1 << 8) +PCF_PROPERTIES = 1 << 0 +PCF_ACCELERATORS = 1 << 1 +PCF_METRICS = 1 << 2 +PCF_BITMAPS = 1 << 3 +PCF_INK_METRICS = 1 << 4 +PCF_BDF_ENCODINGS = 1 << 5 +PCF_SWIDTHS = 1 << 6 +PCF_GLYPH_NAMES = 1 << 7 +PCF_BDF_ACCELERATORS = 1 << 8 BYTES_PER_ROW = [ - lambda bits: ((bits+7) >> 3), - lambda bits: ((bits+15) >> 3) & ~1, - lambda bits: ((bits+31) >> 3) & ~3, - lambda bits: ((bits+63) >> 3) & ~7, + lambda bits: ((bits + 7) >> 3), + lambda bits: ((bits + 15) >> 3) & ~1, + lambda bits: ((bits + 31) >> 3) & ~3, + lambda bits: ((bits + 63) >> 3) & ~7, ] def sz(s, o): - return s[o:s.index(b"\0", o)] + return s[o : s.index(b"\0", o)] ## # Font file plugin for the X11 PCF format. + class PcfFontFile(FontFile.FontFile): name = "name" @@ -82,7 +85,7 @@ class PcfFontFile(FontFile.FontFile): ix = encoding[ch] if ix is not None: x, y, l, r, w, a, d, f = metrics[ix] - glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix] + glyph = (w, 0), (l, d - y, x + l, d), (0, 0, x, y), bitmaps[ix] self.glyph[ch] = glyph def _getformat(self, tag): @@ -117,7 +120,7 @@ class PcfFontFile(FontFile.FontFile): for i in range(nprops): p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4)))) if nprops & 3: - fp.seek(4 - (nprops & 3), 1) # pad + fp.seek(4 - (nprops & 3), io.SEEK_CUR) # pad data = fp.read(i32(fp.read(4))) @@ -140,7 +143,7 @@ class PcfFontFile(FontFile.FontFile): append = metrics.append - if (format & 0xff00) == 0x100: + if (format & 0xFF00) == 0x100: # "compressed" metrics for i in range(i16(fp.read(2))): @@ -151,10 +154,7 @@ class PcfFontFile(FontFile.FontFile): descent = i8(fp.read(1)) - 128 xsize = right - left ysize = ascent + descent - append( - (xsize, ysize, left, right, width, - ascent, descent, 0) - ) + append((xsize, ysize, left, right, width, ascent, descent, 0)) else: @@ -168,10 +168,7 @@ class PcfFontFile(FontFile.FontFile): attributes = i16(fp.read(2)) xsize = right - left ysize = ascent + descent - append( - (xsize, ysize, left, right, width, - ascent, descent, attributes) - ) + append((xsize, ysize, left, right, width, ascent, descent, attributes)) return metrics @@ -198,7 +195,7 @@ class PcfFontFile(FontFile.FontFile): bitmapSizes.append(i32(fp.read(4))) # byteorder = format & 4 # non-zero => MSB - bitorder = format & 8 # non-zero => MSB + bitorder = format & 8 # non-zero => MSB padindex = format & 3 bitmapsize = bitmapSizes[padindex] @@ -213,10 +210,8 @@ class PcfFontFile(FontFile.FontFile): for i in range(nbitmaps): x, y, l, r, w, a, d, f = metrics[i] - b, e = offsets[i], offsets[i+1] - bitmaps.append( - Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x)) - ) + b, e = offsets[i], offsets[i + 1] + bitmaps.append(Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x))) return bitmaps @@ -230,7 +225,7 @@ class PcfFontFile(FontFile.FontFile): firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2)) firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2)) - default = i16(fp.read(2)) + i16(fp.read(2)) # default nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1) @@ -238,7 +233,7 @@ class 
PcfFontFile(FontFile.FontFile): encodingOffset = i16(fp.read(2)) if encodingOffset != 0xFFFF: try: - encoding[i+firstCol] = encodingOffset + encoding[i + firstCol] = encodingOffset except IndexError: break # only load ISO-8859-1 glyphs diff --git a/server/www/packages/packages-linux/x64/PIL/PcxImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PcxImagePlugin.py index daa58b3..397af8c 100644 --- a/server/www/packages/packages-linux/x64/PIL/PcxImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/PcxImagePlugin.py @@ -25,12 +25,16 @@ # See the README file for information on usage and redistribution. # +import io import logging + from . import Image, ImageFile, ImagePalette from ._binary import i8, i16le as i16, o8, o16le as o16 logger = logging.getLogger(__name__) +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.6" @@ -41,6 +45,7 @@ def _accept(prefix): ## # Image plugin for Paintbrush images. + class PcxImageFile(ImageFile.ImageFile): format = "PCX" @@ -54,7 +59,7 @@ class PcxImageFile(ImageFile.ImageFile): raise SyntaxError("not a PCX file") # image - bbox = i16(s, 4), i16(s, 6), i16(s, 8)+1, i16(s, 10)+1 + bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1 if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: raise SyntaxError("bad PCX image size") logger.debug("BBox: %s %s %s %s", *bbox) @@ -64,8 +69,13 @@ class PcxImageFile(ImageFile.ImageFile): bits = i8(s[3]) planes = i8(s[65]) stride = i16(s, 66) - logger.debug("PCX version %s, bits %s, planes %s, stride %s", - version, bits, planes, stride) + logger.debug( + "PCX version %s, bits %s, planes %s, stride %s", + version, + bits, + planes, + stride, + ) self.info["dpi"] = i16(s, 12), i16(s, 14) @@ -80,12 +90,12 @@ class PcxImageFile(ImageFile.ImageFile): elif version == 5 and bits == 8 and planes == 1: mode = rawmode = "L" # FIXME: hey, this doesn't work with the incremental loader !!! - self.fp.seek(-769, 2) + self.fp.seek(-769, io.SEEK_END) s = self.fp.read(769) if len(s) == 769 and i8(s[0]) == 12: # check if the palette is linear greyscale for i in range(256): - if s[i*3+1:i*3+4] != o8(i)*3: + if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3: mode = rawmode = "P" break if mode == "P": @@ -100,13 +110,14 @@ class PcxImageFile(ImageFile.ImageFile): raise IOError("unknown PCX mode") self.mode = mode - self._size = bbox[2]-bbox[0], bbox[3]-bbox[1] + self._size = bbox[2] - bbox[0], bbox[3] - bbox[1] bbox = (0, 0) + self.size logger.debug("size: %sx%s", *self.size) self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))] + # -------------------------------------------------------------------- # save PCX files @@ -135,8 +146,12 @@ def _save(im, fp, filename): # Ideally it should be passed in in the state, but the bytes value # gets overwritten. - logger.debug("PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", - im.size[0], bits, stride) + logger.debug( + "PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", + im.size[0], + bits, + stride, + ) # under windows, we could determine the current screen size with # "Image.core.display_mode()[1]", but I think that's overkill... 
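Note: the palette check above reads the 769-byte VGA trailer (a 0x0C marker followed by 256 RGB triplets starting at byte 1) and keeps mode "L" only when triplet i is exactly (i, i, i). The same test in isolation:

    def is_linear_greyscale(trailer):             # trailer: 769 bytes, trailer[0] == 12
        return all(
            trailer[i * 3 + 1 : i * 3 + 4] == bytes([i]) * 3 for i in range(256)
        )

    grey = bytes([12]) + b"".join(bytes([i]) * 3 for i in range(256))
    assert is_linear_greyscale(grey)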
@@ -147,17 +162,30 @@ def _save(im, fp, filename): # PCX header fp.write( - o8(10) + o8(version) + o8(1) + o8(bits) + o16(0) + - o16(0) + o16(im.size[0]-1) + o16(im.size[1]-1) + o16(dpi[0]) + - o16(dpi[1]) + b"\0"*24 + b"\xFF"*24 + b"\0" + o8(planes) + - o16(stride) + o16(1) + o16(screen[0]) + o16(screen[1]) + - b"\0"*54 - ) + o8(10) + + o8(version) + + o8(1) + + o8(bits) + + o16(0) + + o16(0) + + o16(im.size[0] - 1) + + o16(im.size[1] - 1) + + o16(dpi[0]) + + o16(dpi[1]) + + b"\0" * 24 + + b"\xFF" * 24 + + b"\0" + + o8(planes) + + o16(stride) + + o16(1) + + o16(screen[0]) + + o16(screen[1]) + + b"\0" * 54 + ) assert fp.tell() == 128 - ImageFile._save(im, fp, [("pcx", (0, 0)+im.size, 0, - (rawmode, bits*planes))]) + ImageFile._save(im, fp, [("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))]) if im.mode == "P": # colour palette @@ -167,7 +195,8 @@ def _save(im, fp, filename): # greyscale palette fp.write(o8(12)) for i in range(256): - fp.write(o8(i)*3) + fp.write(o8(i) * 3) + # -------------------------------------------------------------------- # registry @@ -177,3 +206,5 @@ Image.register_open(PcxImageFile.format, PcxImageFile, _accept) Image.register_save(PcxImageFile.format, _save) Image.register_extension(PcxImageFile.format, ".pcx") + +Image.register_mime(PcxImageFile.format, "image/x-pcx") diff --git a/server/www/packages/packages-linux/x64/PIL/PdfImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PdfImagePlugin.py index b425027..1fd40f5 100644 --- a/server/www/packages/packages-linux/x64/PIL/PdfImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/PdfImagePlugin.py @@ -20,11 +20,14 @@ # Image plugin for PDF images (output only). ## -from . import Image, ImageFile, ImageSequence, PdfParser import io import os import time +from . import Image, ImageFile, ImageSequence, PdfParser + +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.5" @@ -46,6 +49,7 @@ def _save_all(im, fp, filename): ## # (Internal) Image save plugin for the PDF format. + def _save(im, fp, filename, save_all=False): is_appending = im.encoderinfo.get("append", False) if is_appending: @@ -56,16 +60,16 @@ def _save(im, fp, filename, save_all=False): resolution = im.encoderinfo.get("resolution", 72.0) info = { - "title": None if is_appending else os.path.splitext( - os.path.basename(filename) - )[0], + "title": None + if is_appending + else os.path.splitext(os.path.basename(filename))[0], "author": None, "subject": None, "keywords": None, "creator": None, "producer": None, "creationDate": None if is_appending else time.gmtime(), - "modDate": None if is_appending else time.gmtime() + "modDate": None if is_appending else time.gmtime(), } for k, default in info.items(): v = im.encoderinfo.get(k) if k in im.encoderinfo else default @@ -140,7 +144,7 @@ def _save(im, fp, filename, save_all=False): PdfParser.PdfName("Indexed"), PdfParser.PdfName("DeviceRGB"), 255, - PdfParser.PdfBinary(palette) + PdfParser.PdfBinary(palette), ] procset = "ImageI" # indexed color elif im.mode == "RGB": @@ -164,16 +168,15 @@ def _save(im, fp, filename, save_all=False): # FIXME: the hex encoder doesn't support packed 1-bit # images; do things the hard way... 
data = im.tobytes("raw", "1") - im = Image.new("L", (len(data), 1), None) + im = Image.new("L", im.size) im.putdata(data) - ImageFile._save(im, op, [("hex", (0, 0)+im.size, 0, im.mode)]) + ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)]) elif filter == "DCTDecode": Image.SAVE["JPEG"](im, op, filename) elif filter == "FlateDecode": - ImageFile._save(im, op, [("zip", (0, 0)+im.size, 0, im.mode)]) + ImageFile._save(im, op, [("zip", (0, 0) + im.size, 0, im.mode)]) elif filter == "RunLengthDecode": - ImageFile._save(im, op, - [("packbits", (0, 0)+im.size, 0, im.mode)]) + ImageFile._save(im, op, [("packbits", (0, 0) + im.size, 0, im.mode)]) else: raise ValueError("unsupported PDF filter (%s)" % filter) @@ -182,48 +185,46 @@ def _save(im, fp, filename, save_all=False): width, height = im.size - existing_pdf.write_obj(image_refs[pageNumber], - stream=op.getvalue(), - Type=PdfParser.PdfName("XObject"), - Subtype=PdfParser.PdfName("Image"), - Width=width, # * 72.0 / resolution, - Height=height, # * 72.0 / resolution, - Filter=PdfParser.PdfName(filter), - BitsPerComponent=bits, - DecodeParams=params, - ColorSpace=colorspace) + existing_pdf.write_obj( + image_refs[pageNumber], + stream=op.getvalue(), + Type=PdfParser.PdfName("XObject"), + Subtype=PdfParser.PdfName("Image"), + Width=width, # * 72.0 / resolution, + Height=height, # * 72.0 / resolution, + Filter=PdfParser.PdfName(filter), + BitsPerComponent=bits, + DecodeParams=params, + ColorSpace=colorspace, + ) # # page - existing_pdf.write_page(page_refs[pageNumber], - Resources=PdfParser.PdfDict( - ProcSet=[ - PdfParser.PdfName("PDF"), - PdfParser.PdfName(procset) - ], - XObject=PdfParser.PdfDict( - image=image_refs[pageNumber] - ) - ), - MediaBox=[ - 0, - 0, - int(width * 72.0 / resolution), - int(height * 72.0 / resolution) - ], - Contents=contents_refs[pageNumber]) + existing_pdf.write_page( + page_refs[pageNumber], + Resources=PdfParser.PdfDict( + ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)], + XObject=PdfParser.PdfDict(image=image_refs[pageNumber]), + ), + MediaBox=[ + 0, + 0, + int(width * 72.0 / resolution), + int(height * 72.0 / resolution), + ], + Contents=contents_refs[pageNumber], + ) # # page contents page_contents = PdfParser.make_bytes( - "q %d 0 0 %d 0 0 cm /image Do Q\n" % ( - int(width * 72.0 / resolution), - int(height * 72.0 / resolution))) + "q %d 0 0 %d 0 0 cm /image Do Q\n" + % (int(width * 72.0 / resolution), int(height * 72.0 / resolution)) + ) - existing_pdf.write_obj(contents_refs[pageNumber], - stream=page_contents) + existing_pdf.write_obj(contents_refs[pageNumber], stream=page_contents) pageNumber += 1 @@ -234,6 +235,7 @@ def _save(im, fp, filename, save_all=False): fp.flush() existing_pdf.close() + # # -------------------------------------------------------------------- diff --git a/server/www/packages/packages-linux/x64/PIL/PdfParser.py b/server/www/packages/packages-linux/x64/PIL/PdfParser.py index bcd29db..0ec6bba 100644 --- a/server/www/packages/packages-linux/x64/PIL/PdfParser.py +++ b/server/www/packages/packages-linux/x64/PIL/PdfParser.py @@ -6,6 +6,7 @@ import os import re import time import zlib + from ._util import py3 try: @@ -15,11 +16,15 @@ except ImportError: if py3: # Python 3.x + def make_bytes(s): return s.encode("us-ascii") + + else: # Python 2.x + def make_bytes(s): # pragma: no cover - return s # pragma: no cover + return s # pragma: no cover # see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set @@ -74,8 +79,8 @@ PDFDocEncoding = { def 
decode_text(b): - if b[:len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: - return b[len(codecs.BOM_UTF16_BE):].decode("utf_16_be") + if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: + return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be") elif py3: # Python 3.x return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b) else: # Python 2.x @@ -85,6 +90,7 @@ def decode_text(b): class PdfFormatError(RuntimeError): """An error that probably indicates a syntactic or semantic error in the PDF file structure""" + pass @@ -93,8 +99,9 @@ def check_format_condition(condition, error_message): raise PdfFormatError(error_message) -class IndirectReference(collections.namedtuple("IndirectReferenceTuple", - ["object_id", "generation"])): +class IndirectReference( + collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"]) +): def __str__(self): return "%s %s R" % self @@ -102,9 +109,11 @@ class IndirectReference(collections.namedtuple("IndirectReferenceTuple", return self.__str__().encode("us-ascii") def __eq__(self, other): - return other.__class__ is self.__class__ and \ - other.object_id == self.object_id and \ - other.generation == self.generation + return ( + other.__class__ is self.__class__ + and other.object_id == self.object_id + and other.generation == self.generation + ) def __ne__(self, other): return not (self == other) @@ -120,9 +129,9 @@ class IndirectObjectDef(IndirectReference): class XrefTable: def __init__(self): - self.existing_entries = {} # object ID => (offset, generation) - self.new_entries = {} # object ID => (offset, generation) - self.deleted_entries = {0: 65536} # object ID => generation + self.existing_entries = {} # object ID => (offset, generation) + self.new_entries = {} # object ID => (offset, generation) + self.deleted_entries = {0: 65536} # object ID => generation self.reading_finished = False def __setitem__(self, key, value): @@ -150,26 +159,27 @@ class XrefTable: elif key in self.deleted_entries: generation = self.deleted_entries[key] else: - raise IndexError("object ID " + str(key) + - " cannot be deleted because it doesn't exist") + raise IndexError( + "object ID " + str(key) + " cannot be deleted because it doesn't exist" + ) def __contains__(self, key): return key in self.existing_entries or key in self.new_entries def __len__(self): - return len(set(self.existing_entries.keys()) | - set(self.new_entries.keys()) | - set(self.deleted_entries.keys())) + return len( + set(self.existing_entries.keys()) + | set(self.new_entries.keys()) + | set(self.deleted_entries.keys()) + ) def keys(self): return ( - set(self.existing_entries.keys()) - - set(self.deleted_entries.keys()) + set(self.existing_entries.keys()) - set(self.deleted_entries.keys()) ) | set(self.new_entries.keys()) def write(self, f): - keys = sorted(set(self.new_entries.keys()) | - set(self.deleted_entries.keys())) + keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) deleted_keys = sorted(set(self.deleted_entries.keys())) startxref = f.tell() f.write(b"xref\n") @@ -177,7 +187,7 @@ class XrefTable: # find a contiguous sequence of object IDs prev = None for index, key in enumerate(keys): - if prev is None or prev+1 == key: + if prev is None or prev + 1 == key: prev = key else: contiguous_keys = keys[:index] @@ -186,25 +196,27 @@ class XrefTable: else: contiguous_keys = keys keys = None - f.write(make_bytes("%d %d\n" % - (contiguous_keys[0], len(contiguous_keys)))) + f.write(make_bytes("%d %d\n" % (contiguous_keys[0], len(contiguous_keys)))) for 
         for object_id in contiguous_keys:
             if object_id in self.new_entries:
-                f.write(make_bytes("%010d %05d n \n" %
-                                   self.new_entries[object_id]))
+                f.write(make_bytes("%010d %05d n \n" % self.new_entries[object_id]))
             else:
                 this_deleted_object_id = deleted_keys.pop(0)
-                check_format_condition(object_id == this_deleted_object_id,
-                                       "expected the next deleted object "
-                                       "ID to be %s, instead found %s" %
-                                       (object_id, this_deleted_object_id))
+                check_format_condition(
+                    object_id == this_deleted_object_id,
+                    "expected the next deleted object ID to be %s, instead found %s"
+                    % (object_id, this_deleted_object_id),
+                )
                 try:
                     next_in_linked_list = deleted_keys[0]
                 except IndexError:
                     next_in_linked_list = 0
-                f.write(make_bytes("%010d %05d f \n" %
-                                   (next_in_linked_list,
-                                    self.deleted_entries[object_id])))
+                f.write(
+                    make_bytes(
+                        "%010d %05d f \n"
+                        % (next_in_linked_list, self.deleted_entries[object_id])
+                    )
+                )
         return startxref
 
@@ -221,8 +233,9 @@ class PdfName:
             return self.name.decode("us-ascii")
 
     def __eq__(self, other):
-        return (isinstance(other, PdfName) and other.name == self.name) or \
-            other == self.name
+        return (
+            isinstance(other, PdfName) and other.name == self.name
+        ) or other == self.name
 
     def __hash__(self):
         return hash(self.name)
@@ -269,36 +282,31 @@ class PdfDict(UserDict):
             else:
                 self.__dict__[key] = value
         else:
-            if isinstance(key, str):
-                key = key.encode("us-ascii")
-            self[key] = value
+            self[key.encode("us-ascii")] = value
 
     def __getattr__(self, key):
         try:
-            value = self[key]
+            value = self[key.encode("us-ascii")]
         except KeyError:
-            try:
-                value = self[key.encode("us-ascii")]
-            except KeyError:
-                raise AttributeError(key)
+            raise AttributeError(key)
         if isinstance(value, bytes):
             value = decode_text(value)
         if key.endswith("Date"):
             if value.startswith("D:"):
                 value = value[2:]
 
-            relationship = 'Z'
+            relationship = "Z"
             if len(value) > 17:
                 relationship = value[14]
                 offset = int(value[15:17]) * 60
                 if len(value) > 20:
                     offset += int(value[18:20])
 
-            format = '%Y%m%d%H%M%S'[:len(value) - 2]
-            value = time.strptime(value[:len(format)+2], format)
-            if relationship in ['+', '-']:
+            format = "%Y%m%d%H%M%S"[: len(value) - 2]
+            value = time.strptime(value[: len(format) + 2], format)
+            if relationship in ["+", "-"]:
                 offset *= 60
-                if relationship == '+':
+                if relationship == "+":
                     offset *= -1
                 value = time.gmtime(calendar.timegm(value) + offset)
         return value
@@ -325,9 +333,12 @@ class PdfBinary:
         self.data = data
 
     if py3:  # Python 3.x
+
         def __bytes__(self):
             return make_bytes("<%s>" % "".join("%02X" % b for b in self.data))
+
     else:  # Python 2.x
+
         def __str__(self):
             return "<%s>" % "".join("%02X" % ord(b) for b in self.data)
 
@@ -350,8 +361,8 @@ class PdfStream:
             return zlib.decompress(self.buf, bufsize=int(expected_length))
         else:
             raise NotImplementedError(
-                "stream filter %s unknown/unsupported" %
-                repr(self.dictionary.Filter))
+                "stream filter %s unknown/unsupported" % repr(self.dictionary.Filter)
+            )
 
 
 def pdf_repr(x):
@@ -361,19 +372,19 @@ def pdf_repr(x):
         return b"false"
     elif x is None:
         return b"null"
-    elif (isinstance(x, PdfName) or isinstance(x, PdfDict) or
-          isinstance(x, PdfArray) or isinstance(x, PdfBinary)):
+    elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)):
         return bytes(x)
     elif isinstance(x, int):
         return str(x).encode("us-ascii")
     elif isinstance(x, time.struct_time):
-        return b'(D:'+time.strftime('%Y%m%d%H%M%SZ', x).encode("us-ascii")+b')'
+        return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")"
     elif isinstance(x, dict):
         return bytes(PdfDict(x))
     elif isinstance(x, list):
         return bytes(PdfArray(x))
-    elif ((py3 and isinstance(x, str)) or
-          (not py3 and isinstance(x, unicode))):
+    elif (py3 and isinstance(x, str)) or (
+        not py3 and isinstance(x, unicode)  # noqa: F821
+    ):
         return pdf_repr(encode_text(x))
     elif isinstance(x, bytes):
         # XXX escape more chars? handle binary garbage
@@ -386,17 +397,14 @@ def pdf_repr(x):
 
 
 class PdfParser:
-    """Based on https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf
+    """Based on
+    https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf
     Supports PDF up to 1.4
     """
 
-    def __init__(self, filename=None, f=None,
-                 buf=None, start_offset=0, mode="rb"):
-        # type: (PdfParser, str, file, Union[bytes, bytearray], int, str)
-        # -> None
+    def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"):
         if buf and f:
-            raise RuntimeError(
-                "specify buf or f or filename, but not both buf and f")
+            raise RuntimeError("specify buf or f or filename, but not both buf and f")
         self.filename = filename
         self.buf = buf
         self.f = f
@@ -470,13 +478,13 @@ class PdfParser:
             self.root_ref = self.next_object_id(self.f.tell())
             self.pages_ref = self.next_object_id(0)
             self.rewrite_pages()
-        self.write_obj(self.root_ref,
-                       Type=PdfName(b"Catalog"),
-                       Pages=self.pages_ref)
-        self.write_obj(self.pages_ref,
-                       Type=PdfName(b"Pages"),
-                       Count=len(self.pages),
-                       Kids=self.pages)
+        self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref)
+        self.write_obj(
+            self.pages_ref,
+            Type=PdfName(b"Pages"),
+            Count=len(self.pages),
+            Kids=self.pages,
+        )
         return self.root_ref
 
     def rewrite_pages(self):
@@ -522,8 +530,11 @@ class PdfParser:
         if self.info:
             trailer_dict[b"Info"] = self.info_ref
         self.last_xref_section_offset = start_xref
-        self.f.write(b"trailer\n" + bytes(PdfDict(trailer_dict)) +
-                     make_bytes("\nstartxref\n%d\n%%%%EOF" % start_xref))
+        self.f.write(
+            b"trailer\n"
+            + bytes(PdfDict(trailer_dict))
+            + make_bytes("\nstartxref\n%d\n%%%%EOF" % start_xref)
+        )
 
     def write_page(self, ref, *objs, **dict_obj):
         if isinstance(ref, int):
@@ -585,12 +596,14 @@ class PdfParser:
         else:
             self.info = PdfDict(self.read_indirect(self.info_ref))
         check_format_condition(b"Type" in self.root, "/Type missing in Root")
-        check_format_condition(self.root[b"Type"] == b"Catalog",
-                               "/Type in Root is not /Catalog")
+        check_format_condition(
+            self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog"
+        )
         check_format_condition(b"Pages" in self.root, "/Pages missing in Root")
-        check_format_condition(isinstance(self.root[b"Pages"],
-                                          IndirectReference),
-                               "/Pages in Root is not an indirect reference")
+        check_format_condition(
+            isinstance(self.root[b"Pages"], IndirectReference),
+            "/Pages in Root is not an indirect reference",
+        )
         self.pages_ref = self.root[b"Pages"]
         self.page_tree_root = self.read_indirect(self.pages_ref)
         self.pages = self.linearize_page_tree(self.page_tree_root)
@@ -618,13 +631,34 @@ class PdfParser:
     newline_only = br"[\r\n]+"
     newline = whitespace_optional + newline_only + whitespace_optional
     re_trailer_end = re.compile(
-        whitespace_mandatory + br"trailer" + whitespace_optional +
-        br"\<\<(.*\>\>)" + newline + br"startxref" + newline + br"([0-9]+)" +
-        newline + br"%%EOF" + whitespace_optional + br"$", re.DOTALL)
+        whitespace_mandatory
+        + br"trailer"
+        + whitespace_optional
+        + br"\<\<(.*\>\>)"
+        + newline
+        + br"startxref"
+        + newline
+        + br"([0-9]+)"
+        + newline
+        + br"%%EOF"
+        + whitespace_optional
+        + br"$",
+        re.DOTALL,
+    )
     re_trailer_prev = re.compile(
-        whitespace_optional + br"trailer" + whitespace_optional +
-        br"\<\<(.*?\>\>)" + newline + br"startxref" + newline + br"([0-9]+)" +
-        newline + br"%%EOF" + whitespace_optional, re.DOTALL)
+        whitespace_optional
+        + br"trailer"
+        + whitespace_optional
+        + br"\<\<(.*?\>\>)"
+        + newline
+        + br"startxref"
+        + newline
+        + br"([0-9]+)"
+        + newline
+        + br"%%EOF"
+        + whitespace_optional,
+        re.DOTALL,
+    )
 
     def read_trailer(self):
         search_start_offset = len(self.buf) - 16384
@@ -636,7 +670,7 @@ class PdfParser:
         last_match = m
         while m:
             last_match = m
-            m = self.re_trailer_end.search(self.buf, m.start()+16)
+            m = self.re_trailer_end.search(self.buf, m.start() + 16)
         if not m:
             m = last_match
         trailer_data = m.group(1)
@@ -648,26 +682,29 @@ class PdfParser:
             self.read_prev_trailer(self.trailer_dict[b"Prev"])
 
     def read_prev_trailer(self, xref_section_offset):
-        trailer_offset = self.read_xref_table(
-            xref_section_offset=xref_section_offset)
+        trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset)
         m = self.re_trailer_prev.search(
-            self.buf[trailer_offset:trailer_offset+16384])
+            self.buf[trailer_offset : trailer_offset + 16384]
+        )
         check_format_condition(m, "previous trailer not found")
         trailer_data = m.group(1)
-        check_format_condition(int(m.group(2)) == xref_section_offset,
-                               "xref section offset in previous trailer "
-                               "doesn't match what was expected")
+        check_format_condition(
+            int(m.group(2)) == xref_section_offset,
+            "xref section offset in previous trailer doesn't match what was expected",
+        )
         trailer_dict = self.interpret_trailer(trailer_data)
         if b"Prev" in trailer_dict:
             self.read_prev_trailer(trailer_dict[b"Prev"])
 
     re_whitespace_optional = re.compile(whitespace_optional)
     re_name = re.compile(
-        whitespace_optional + br"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" +
-        delimiter_or_ws + br")")
+        whitespace_optional
+        + br"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?="
+        + delimiter_or_ws
+        + br")"
+    )
     re_dict_start = re.compile(whitespace_optional + br"\<\<")
-    re_dict_end = re.compile(
-        whitespace_optional + br"\>\>" + whitespace_optional)
+    re_dict_end = re.compile(whitespace_optional + br"\>\>" + whitespace_optional)
 
     @classmethod
     def interpret_trailer(cls, trailer_data):
@@ -679,19 +716,21 @@ class PdfParser:
                 m = cls.re_dict_end.match(trailer_data, offset)
                 check_format_condition(
                     m and m.end() == len(trailer_data),
-                    "name not found in trailer, remaining data: " +
-                    repr(trailer_data[offset:]))
+                    "name not found in trailer, remaining data: "
+                    + repr(trailer_data[offset:]),
+                )
                 break
             key = cls.interpret_name(m.group(1))
             value, offset = cls.get_value(trailer_data, m.end())
             trailer[key] = value
         check_format_condition(
             b"Size" in trailer and isinstance(trailer[b"Size"], int),
-            "/Size not in trailer or not an integer")
+            "/Size not in trailer or not an integer",
+        )
         check_format_condition(
-            b"Root" in trailer and
-            isinstance(trailer[b"Root"], IndirectReference),
-            "/Root not in trailer or not an indirect reference")
+            b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference),
+            "/Root not in trailer or not an indirect reference",
+        )
         return trailer
 
     re_hashes_in_name = re.compile(br"([^#]*)(#([0-9a-fA-F]{2}))?")
@@ -701,8 +740,7 @@ class PdfParser:
         name = b""
         for m in cls.re_hashes_in_name.finditer(raw):
             if m.group(3):
-                name += m.group(1) + \
-                    bytearray.fromhex(m.group(3).decode("us-ascii"))
+                name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii"))
             else:
                 name += m.group(1)
         if as_text:
@@ -710,37 +748,54 @@ class PdfParser:
         else:
             return bytes(name)
 
-    re_null = re.compile(
-        whitespace_optional + br"null(?=" + delimiter_or_ws + br")")
-    re_true = re.compile(
-        whitespace_optional + br"true(?=" + delimiter_or_ws + br")")
-    re_false = re.compile(
-        whitespace_optional + br"false(?=" + delimiter_or_ws + br")")
+    re_null = re.compile(whitespace_optional + br"null(?=" + delimiter_or_ws + br")")
+    re_true = re.compile(whitespace_optional + br"true(?=" + delimiter_or_ws + br")")
+    re_false = re.compile(whitespace_optional + br"false(?=" + delimiter_or_ws + br")")
     re_int = re.compile(
-        whitespace_optional + br"([-+]?[0-9]+)(?=" + delimiter_or_ws + br")")
+        whitespace_optional + br"([-+]?[0-9]+)(?=" + delimiter_or_ws + br")"
+    )
     re_real = re.compile(
-        whitespace_optional + br"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" +
-        delimiter_or_ws + br")")
+        whitespace_optional
+        + br"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?="
+        + delimiter_or_ws
+        + br")"
+    )
     re_array_start = re.compile(whitespace_optional + br"\[")
     re_array_end = re.compile(whitespace_optional + br"]")
     re_string_hex = re.compile(
-        whitespace_optional + br"\<(" + whitespace_or_hex + br"*)\>")
+        whitespace_optional + br"\<(" + whitespace_or_hex + br"*)\>"
+    )
     re_string_lit = re.compile(whitespace_optional + br"\(")
     re_indirect_reference = re.compile(
-        whitespace_optional + br"([-+]?[0-9]+)" + whitespace_mandatory +
-        br"([-+]?[0-9]+)" + whitespace_mandatory + br"R(?=" + delimiter_or_ws +
-        br")")
+        whitespace_optional
+        + br"([-+]?[0-9]+)"
+        + whitespace_mandatory
+        + br"([-+]?[0-9]+)"
+        + whitespace_mandatory
+        + br"R(?="
+        + delimiter_or_ws
+        + br")"
+    )
     re_indirect_def_start = re.compile(
-        whitespace_optional + br"([-+]?[0-9]+)" + whitespace_mandatory +
-        br"([-+]?[0-9]+)" + whitespace_mandatory + br"obj(?=" +
-        delimiter_or_ws + br")")
+        whitespace_optional
+        + br"([-+]?[0-9]+)"
+        + whitespace_mandatory
+        + br"([-+]?[0-9]+)"
+        + whitespace_mandatory
+        + br"obj(?="
+        + delimiter_or_ws
+        + br")"
+    )
     re_indirect_def_end = re.compile(
-        whitespace_optional + br"endobj(?=" + delimiter_or_ws + br")")
+        whitespace_optional + br"endobj(?=" + delimiter_or_ws + br")"
+    )
     re_comment = re.compile(
-        br"(" + whitespace_optional + br"%[^\r\n]*" + newline + br")*")
+        br"(" + whitespace_optional + br"%[^\r\n]*" + newline + br")*"
+    )
     re_stream_start = re.compile(whitespace_optional + br"stream\r?\n")
     re_stream_end = re.compile(
-        whitespace_optional + br"endstream(?=" + delimiter_or_ws + br")")
+        whitespace_optional + br"endstream(?=" + delimiter_or_ws + br")"
+    )
 
     @classmethod
     def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1):
@@ -753,32 +808,37 @@ class PdfParser:
         if m:
             check_format_condition(
                 int(m.group(1)) > 0,
-                "indirect object definition: object ID must be greater than 0")
+                "indirect object definition: object ID must be greater than 0",
+            )
             check_format_condition(
                 int(m.group(2)) >= 0,
-                "indirect object definition: generation must be non-negative")
+                "indirect object definition: generation must be non-negative",
+            )
             check_format_condition(
-                expect_indirect is None or expect_indirect ==
-                IndirectReference(int(m.group(1)), int(m.group(2))),
-                "indirect object definition different than expected")
-            object, offset = cls.get_value(
-                data, m.end(), max_nesting=max_nesting-1)
+                expect_indirect is None
+                or expect_indirect
+                == IndirectReference(int(m.group(1)), int(m.group(2))),
+                "indirect object definition different than expected",
+            )
+            object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1)
             if offset is None:
                 return object, None
             m = cls.re_indirect_def_end.match(data, offset)
-            check_format_condition(
-                m, "indirect object definition end not found")
+            check_format_condition(m, "indirect object definition end not found")
             return object, m.end()
         check_format_condition(
-            not expect_indirect, "indirect object definition not found")
+            not expect_indirect, "indirect object definition not found"
+        )
         m = cls.re_indirect_reference.match(data, offset)
         if m:
             check_format_condition(
                 int(m.group(1)) > 0,
-                "indirect object reference: object ID must be greater than 0")
+                "indirect object reference: object ID must be greater than 0",
+            )
             check_format_condition(
                 int(m.group(2)) >= 0,
-                "indirect object reference: generation must be non-negative")
+                "indirect object reference: generation must be non-negative",
+            )
             return IndirectReference(int(m.group(1)), int(m.group(2))), m.end()
         m = cls.re_dict_start.match(data, offset)
         if m:
@@ -786,12 +846,10 @@ class PdfParser:
             result = {}
             m = cls.re_dict_end.match(data, offset)
             while not m:
-                key, offset = cls.get_value(
-                    data, offset, max_nesting=max_nesting-1)
+                key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
                 if offset is None:
                     return result, None
-                value, offset = cls.get_value(
-                    data, offset, max_nesting=max_nesting-1)
+                value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
                 result[key] = value
                 if offset is None:
                     return result, None
@@ -803,9 +861,10 @@ class PdfParser:
                     stream_len = int(result[b"Length"])
                 except (TypeError, KeyError, ValueError):
                     raise PdfFormatError(
-                        "bad or missing Length in stream dict (%r)" %
-                        result.get(b"Length", None))
-                stream_data = data[m.end():m.end() + stream_len]
+                        "bad or missing Length in stream dict (%r)"
+                        % result.get(b"Length", None)
+                    )
+                stream_data = data[m.end() : m.end() + stream_len]
                 m = cls.re_stream_end.match(data, m.end() + stream_len)
                 check_format_condition(m, "stream end not found")
                 offset = m.end()
@@ -819,8 +878,7 @@ class PdfParser:
             result = []
             m = cls.re_array_end.match(data, offset)
             while not m:
-                value, offset = cls.get_value(
-                    data, offset, max_nesting=max_nesting-1)
+                value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
                 result.append(value)
                 if offset is None:
                     return result, None
@@ -848,10 +906,9 @@ class PdfParser:
         m = cls.re_string_hex.match(data, offset)
         if m:
             # filter out whitespace
-            hex_string = bytearray([
-                b for b in m.group(1)
-                if b in b"0123456789abcdefABCDEF"
-            ])
+            hex_string = bytearray(
+                [b for b in m.group(1) if b in b"0123456789abcdefABCDEF"]
+            )
             if len(hex_string) % 2 == 1:
                 # append a 0 if the length is not even - yes, at the end
                 hex_string.append(ord(b"0"))
@@ -860,10 +917,11 @@ class PdfParser:
         if m:
             return cls.get_literal_string(data, m.end())
         # return None, offset  # fallback (only for debugging)
-        raise PdfFormatError(
-            "unrecognized object: " + repr(data[offset:offset+32]))
+        raise PdfFormatError("unrecognized object: " + repr(data[offset : offset + 32]))
 
-    re_lit_str_token = re.compile(br"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))")
+    re_lit_str_token = re.compile(
+        br"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))"
+    )
     escaped_chars = {
         b"n": b"\n",
         b"r": b"\r",
@@ -881,14 +939,14 @@ class PdfParser:
         ord(b"("): b"(",
         ord(b")"): b")",
         ord(b"\\"): b"\\",
-        }
+    }
 
     @classmethod
     def get_literal_string(cls, data, offset):
         nesting_depth = 0
         result = bytearray()
         for m in cls.re_lit_str_token.finditer(data, offset):
-            result.extend(data[offset:m.start()])
+            result.extend(data[offset : m.start()])
             if m.group(1):
                 result.extend(cls.escaped_chars[m.group(1)[1]])
             elif m.group(2):
@@ -908,30 +966,36 @@ class PdfParser:
             offset = m.end()
         raise PdfFormatError("unfinished literal string")
 
-    re_xref_section_start = re.compile(
-        whitespace_optional + br"xref" + newline)
+    re_xref_section_start = re.compile(whitespace_optional + br"xref" + newline)
     re_xref_subsection_start = re.compile(
-        whitespace_optional + br"([0-9]+)" + whitespace_mandatory +
-        br"([0-9]+)" + whitespace_optional + newline_only)
+        whitespace_optional
+        + br"([0-9]+)"
+        + whitespace_mandatory
+        + br"([0-9]+)"
+        + whitespace_optional
+        + newline_only
+    )
     re_xref_entry = re.compile(br"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)")
 
     def read_xref_table(self, xref_section_offset):
         subsection_found = False
         m = self.re_xref_section_start.match(
-            self.buf, xref_section_offset + self.start_offset)
+            self.buf, xref_section_offset + self.start_offset
+        )
         check_format_condition(m, "xref section start not found")
         offset = m.end()
         while True:
             m = self.re_xref_subsection_start.match(self.buf, offset)
             if not m:
                 check_format_condition(
-                    subsection_found, "xref subsection start not found")
+                    subsection_found, "xref subsection start not found"
+                )
                 break
             subsection_found = True
             offset = m.end()
             first_object = int(m.group(1))
             num_objects = int(m.group(2))
-            for i in range(first_object, first_object+num_objects):
+            for i in range(first_object, first_object + num_objects):
                 m = self.re_xref_entry.match(self.buf, offset)
                 check_format_condition(m, "xref entry not found")
                 offset = m.end()
@@ -940,9 +1004,9 @@ class PdfParser:
                 if not is_free:
                     new_entry = (int(m.group(1)), generation)
                     check_format_condition(
-                        i not in self.xref_table or
-                        self.xref_table[i] == new_entry,
-                        "xref entry duplicated (and not identical)")
+                        i not in self.xref_table or self.xref_table[i] == new_entry,
+                        "xref entry duplicated (and not identical)",
+                    )
                     self.xref_table[i] = new_entry
         return offset
 
@@ -952,10 +1016,14 @@ class PdfParser:
             generation == ref[1],
             "expected to find generation %s for object ID %s in xref table, "
             "instead found generation %s at offset %s"
-            % (ref[1], ref[0], generation, offset))
-        value = self.get_value(self.buf, offset + self.start_offset,
-                               expect_indirect=IndirectReference(*ref),
-                               max_nesting=max_nesting)[0]
+            % (ref[1], ref[0], generation, offset),
+        )
+        value = self.get_value(
+            self.buf,
+            offset + self.start_offset,
+            expect_indirect=IndirectReference(*ref),
+            max_nesting=max_nesting,
+        )[0]
         self.cached_objects[ref] = value
         return value
 
@@ -963,7 +1031,8 @@ class PdfParser:
         if node is None:
             node = self.page_tree_root
         check_format_condition(
-            node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages")
+            node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages"
+        )
         pages = []
         for kid in node[b"Kids"]:
             kid_object = self.read_indirect(kid)
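The trailer and xref readers above are what allow Pillow to extend an existing PDF rather than rewrite it. A minimal sketch of that path (file names hypothetical; append=True routes through read_trailer/read_xref_table before any new objects are written):

    from PIL import Image

    # Appends a page as an incremental update: the existing xref table is
    # parsed, and only new objects plus a new trailer go at the end of file.
    Image.open("extra-page.png").save("existing.pdf", append=True)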
diff --git a/server/www/packages/packages-linux/x64/PIL/PixarImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PixarImagePlugin.py
index d07b28d..dc71ca1 100644
--- a/server/www/packages/packages-linux/x64/PIL/PixarImagePlugin.py
+++ b/server/www/packages/packages-linux/x64/PIL/PixarImagePlugin.py
@@ -22,12 +22,15 @@
 
 from . import Image, ImageFile
 from ._binary import i16le as i16
 
+# __version__ is deprecated and will be removed in a future version. Use
+# PIL.__version__ instead.
 __version__ = "0.1"
 
 #
 # helpers
 
+
 def _accept(prefix):
     return prefix[:4] == b"\200\350\000\000"
 
@@ -35,6 +38,7 @@ def _accept(prefix):
 ##
 # Image plugin for PIXAR raster images.
 
+
 class PixarImageFile(ImageFile.ImageFile):
 
     format = "PIXAR"
@@ -60,7 +64,7 @@ class PixarImageFile(ImageFile.ImageFile):
         # FIXME: to be continued...
 
         # create tile descriptor (assuming "dumped")
-        self.tile = [("raw", (0, 0)+self.size, 1024, (self.mode, 0, 1))]
+        self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))]
 
 
 #
diff --git a/server/www/packages/packages-linux/x64/PIL/PngImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PngImagePlugin.py
index 15077fc..be237b3 100644
--- a/server/www/packages/packages-linux/x64/PIL/PngImagePlugin.py
+++ b/server/www/packages/packages-linux/x64/PIL/PngImagePlugin.py
@@ -33,13 +33,15 @@
 
 import logging
 import re
-import zlib
 import struct
+import zlib
 
 from . import Image, ImageFile, ImagePalette
 from ._binary import i8, i16be as i16, i32be as i32, o16be as o16, o32be as o32
 from ._util import py3
 
+# __version__ is deprecated and will be removed in a future version. Use
+# PIL.__version__ instead.
 __version__ = "0.9"
 
 logger = logging.getLogger(__name__)
@@ -52,25 +54,30 @@ _MAGIC = b"\211PNG\r\n\032\n"
 
 _MODES = {
     # supported bits/color combinations, and corresponding modes/rawmodes
-    (1, 0): ("1", "1"),
-    (2, 0): ("L", "L;2"),
-    (4, 0): ("L", "L;4"),
-    (8, 0): ("L", "L"),
+    # Greyscale
+    (1, 0): ("1", "1"),
+    (2, 0): ("L", "L;2"),
+    (4, 0): ("L", "L;4"),
+    (8, 0): ("L", "L"),
     (16, 0): ("I", "I;16B"),
-    (8, 2): ("RGB", "RGB"),
+    # Truecolour
+    (8, 2): ("RGB", "RGB"),
     (16, 2): ("RGB", "RGB;16B"),
-    (1, 3): ("P", "P;1"),
-    (2, 3): ("P", "P;2"),
-    (4, 3): ("P", "P;4"),
-    (8, 3): ("P", "P"),
-    (8, 4): ("LA", "LA"),
+    # Indexed-colour
+    (1, 3): ("P", "P;1"),
+    (2, 3): ("P", "P;2"),
+    (4, 3): ("P", "P;4"),
+    (8, 3): ("P", "P"),
+    # Greyscale with alpha
+    (8, 4): ("LA", "LA"),
     (16, 4): ("RGBA", "LA;16B"),  # LA;16B->LA not yet available
-    (8, 6): ("RGBA", "RGBA"),
+    # Truecolour with alpha
+    (8, 6): ("RGBA", "RGBA"),
     (16, 6): ("RGBA", "RGBA;16B"),
 }
 
 
-_simple_palette = re.compile(b'^\xff*\x00\xff*$')
+_simple_palette = re.compile(b"^\xff*\x00\xff*$")
 
 # Maximum decompressed size for a iTXt or zTXt chunk.
 # Eliminates decompression bombs where compressed chunks can expand 1000x
@@ -88,21 +95,21 @@ def _safe_zlib_decompress(s):
 
 
 def _crc32(data, seed=0):
-    return zlib.crc32(data, seed) & 0xffffffff
+    return zlib.crc32(data, seed) & 0xFFFFFFFF
 
 
 # --------------------------------------------------------------------
 # Support classes.  Suitable for PNG and related formats like MNG etc.
 
-class ChunkStream(object):
 
+class ChunkStream(object):
     def __init__(self, fp):
 
         self.fp = fp
         self.queue = []
 
     def read(self):
-        "Fetch a new chunk. Returns header information."
+        """Fetch a new chunk. Returns header information."""
         cid = None
 
         if self.queue:
@@ -134,13 +141,13 @@ class ChunkStream(object):
             self.queue.append((cid, pos, length))
 
     def call(self, cid, pos, length):
-        "Call the appropriate chunk handler"
+        """Call the appropriate chunk handler"""
 
         logger.debug("STREAM %r %s %s", cid, pos, length)
-        return getattr(self, "chunk_" + cid.decode('ascii'))(pos, length)
+        return getattr(self, "chunk_" + cid.decode("ascii"))(pos, length)
 
     def crc(self, cid, data):
-        "Read and verify checksum"
+        """Read and verify checksum"""
 
         # Skip CRC checks for ancillary chunks if allowed to load truncated
         # images
@@ -153,14 +160,12 @@ class ChunkStream(object):
             crc1 = _crc32(data, _crc32(cid))
             crc2 = i32(self.fp.read(4))
             if crc1 != crc2:
-                raise SyntaxError("broken PNG file (bad header checksum in %r)"
-                                  % cid)
+                raise SyntaxError("broken PNG file (bad header checksum in %r)" % cid)
         except struct.error:
-            raise SyntaxError("broken PNG file (incomplete checksum in %r)"
-                              % cid)
+            raise SyntaxError("broken PNG file (incomplete checksum in %r)" % cid)
 
     def crc_skip(self, cid, data):
-        "Read checksum. Used if the C module is not present"
+        """Read checksum. Used if the C module is not present"""
 
         self.fp.read(4)
 
@@ -191,8 +196,9 @@ class iTXt(str):
     keeping their extra information
 
     """
+
     @staticmethod
-    def __new__(cls, text, lang, tkey):
+    def __new__(cls, text, lang=None, tkey=None):
         """
         :param cls: the class to use when creating the instance
         :param text: value for this key
@@ -246,11 +252,12 @@ class PngInfo(object):
             tkey = tkey.encode("utf-8", "strict")
 
         if zip:
-            self.add(b"iTXt", key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" +
-                     zlib.compress(value))
+            self.add(
+                b"iTXt",
+                key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + zlib.compress(value),
+            )
         else:
-            self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" +
-                     value)
+            self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + value)
 
     def add_text(self, key, value, zip=False):
         """Appends a text chunk.
@@ -267,12 +274,12 @@ class PngInfo(object):
         # The tEXt chunk stores latin-1 text
         if not isinstance(value, bytes):
             try:
-                value = value.encode('latin-1', 'strict')
+                value = value.encode("latin-1", "strict")
             except UnicodeError:
                 return self.add_itxt(key, value, zip=zip)
 
         if not isinstance(key, bytes):
-            key = key.encode('latin-1', 'strict')
+            key = key.encode("latin-1", "strict")
 
         if zip:
             self.add(b"zTXt", key + b"\0\0" + zlib.compress(value))
@@ -283,8 +290,8 @@ class PngInfo(object):
 # --------------------------------------------------------------------
 # PNG image stream (IHDR/IEND)
 
-class PngStream(ChunkStream):
 
+class PngStream(ChunkStream):
     def __init__(self, fp):
 
         ChunkStream.__init__(self, fp)
@@ -296,14 +303,17 @@ class PngStream(ChunkStream):
         self.im_mode = None
         self.im_tile = None
         self.im_palette = None
+        self.im_custom_mimetype = None
 
         self.text_memory = 0
 
     def check_text_memory(self, chunklen):
         self.text_memory += chunklen
         if self.text_memory > MAX_TEXT_MEMORY:
-            raise ValueError("Too much memory used in text chunks: "
-                             "%s>MAX_TEXT_MEMORY" % self.text_memory)
+            raise ValueError(
+                "Too much memory used in text chunks: %s>MAX_TEXT_MEMORY"
+                % self.text_memory
+            )
 
     def chunk_iCCP(self, pos, length):
 
@@ -319,10 +329,11 @@ class PngStream(ChunkStream):
         logger.debug("Compression method %s", i8(s[i]))
         comp_method = i8(s[i])
         if comp_method != 0:
-            raise SyntaxError("Unknown compression method %s in iCCP chunk" %
-                              comp_method)
+            raise SyntaxError(
+                "Unknown compression method %s in iCCP chunk" % comp_method
+            )
         try:
-            icc_profile = _safe_zlib_decompress(s[i+2:])
+            icc_profile = _safe_zlib_decompress(s[i + 2 :])
         except ValueError:
             if ImageFile.LOAD_TRUNCATED_IMAGES:
                 icc_profile = None
@@ -340,7 +351,7 @@ class PngStream(ChunkStream):
         self.im_size = i32(s), i32(s[4:])
         try:
             self.im_mode, self.im_rawmode = _MODES[(i8(s[8]), i8(s[9]))]
-        except:
+        except Exception:
             pass
         if i8(s[12]):
             self.im_info["interlace"] = 1
@@ -351,7 +362,7 @@ class PngStream(ChunkStream):
 
     def chunk_IDAT(self, pos, length):
 
         # image data
-        self.im_tile = [("zip", (0, 0)+self.im_size, pos, self.im_rawmode)]
+        self.im_tile = [("zip", (0, 0) + self.im_size, pos, self.im_rawmode)]
         self.im_idat = length
         raise EOFError
 
@@ -383,7 +394,7 @@ class PngStream(ChunkStream):
                 # otherwise, we have a byte string with one alpha value
                 # for each palette entry
                 self.im_info["transparency"] = s
-        elif self.im_mode == "L":
+        elif self.im_mode in ("1", "L", "I"):
             self.im_info["transparency"] = i16(s)
         elif self.im_mode == "RGB":
             self.im_info["transparency"] = i16(s), i16(s[2:]), i16(s[4:])
@@ -400,8 +411,8 @@ class PngStream(ChunkStream):
 
         # WP x,y, Red x,y, Green x,y Blue x,y
         s = ImageFile._safe_read(self.fp, length)
-        raw_vals = struct.unpack('>%dI' % (len(s) // 4), s)
-        self.im_info['chromaticity'] = tuple(elt/100000.0 for elt in raw_vals)
+        raw_vals = struct.unpack(">%dI" % (len(s) // 4), s)
+        self.im_info["chromaticity"] = tuple(elt / 100000.0 for elt in raw_vals)
         return s
 
     def chunk_sRGB(self, pos, length):
@@ -412,7 +423,7 @@ class PngStream(ChunkStream):
         # 3 absolute colorimetric
 
         s = ImageFile._safe_read(self.fp, length)
-        self.im_info['srgb'] = i8(s)
+        self.im_info["srgb"] = i8(s)
         return s
 
     def chunk_pHYs(self, pos, length):
@@ -440,8 +451,8 @@ class PngStream(ChunkStream):
             v = b""
         if k:
             if py3:
-                k = k.decode('latin-1', 'strict')
-                v = v.decode('latin-1', 'replace')
+                k = k.decode("latin-1", "strict")
+                v = v.decode("latin-1", "replace")
 
             self.im_info[k] = self.im_text[k] = v
             self.check_text_memory(len(v))
@@ -462,8 +473,9 @@ class PngStream(ChunkStream):
         else:
             comp_method = 0
         if comp_method != 0:
-            raise SyntaxError("Unknown compression method %s in zTXt chunk" %
-                              comp_method)
+            raise SyntaxError(
+                "Unknown compression method %s in zTXt chunk" % comp_method
+            )
         try:
             v = _safe_zlib_decompress(v[1:])
         except ValueError:
@@ -476,8 +488,8 @@ class PngStream(ChunkStream):
 
         if k:
             if py3:
-                k = k.decode('latin-1', 'strict')
-                v = v.decode('latin-1', 'replace')
+                k = k.decode("latin-1", "strict")
+                v = v.decode("latin-1", "replace")
 
             self.im_info[k] = self.im_text[k] = v
             self.check_text_memory(len(v))
@@ -526,10 +538,22 @@ class PngStream(ChunkStream):
 
         return s
 
+    def chunk_eXIf(self, pos, length):
+        s = ImageFile._safe_read(self.fp, length)
+        self.im_info["exif"] = b"Exif\x00\x00" + s
+        return s
+
+    # APNG chunks
+    def chunk_acTL(self, pos, length):
+        s = ImageFile._safe_read(self.fp, length)
+        self.im_custom_mimetype = "image/apng"
+        return s
+
 
 # --------------------------------------------------------------------
 # PNG reader
 
+
 def _accept(prefix):
     return prefix[:8] == _MAGIC
 
@@ -537,6 +561,7 @@ def _accept(prefix):
 ##
 # Image plugin for PNG images.
 
+
 class PngImageFile(ImageFile.ImageFile):
 
     format = "PNG"
@@ -579,17 +604,27 @@ class PngImageFile(ImageFile.ImageFile):
         self.mode = self.png.im_mode
         self._size = self.png.im_size
         self.info = self.png.im_info
-        self.text = self.png.im_text  # experimental
+        self._text = None
         self.tile = self.png.im_tile
+        self.custom_mimetype = self.png.im_custom_mimetype
 
         if self.png.im_palette:
             rawmode, data = self.png.im_palette
             self.palette = ImagePalette.raw(rawmode, data)
 
-        self.__idat = length  # used by load_read()
+        self.__prepare_idat = length  # used by load_prepare()
+
+    @property
+    def text(self):
+        # experimental
+        if self._text is None:
+            # iTxt, tEXt and zTXt chunks may appear at the end of the file
+            # So load the file to ensure that they are read
+            self.load()
+        return self._text
 
     def verify(self):
-        "Verify PNG file"
+        """Verify PNG file"""
 
         if self.fp is None:
             raise RuntimeError("verify must be called directly after open")
@@ -600,18 +635,21 @@ class PngImageFile(ImageFile.ImageFile):
         self.png.verify()
         self.png.close()
 
+        if self._exclusive_fp:
+            self.fp.close()
         self.fp = None
 
     def load_prepare(self):
-        "internal: prepare to read PNG file"
+        """internal: prepare to read PNG file"""
 
         if self.info.get("interlace"):
             self.decoderconfig = self.decoderconfig + (1,)
 
+        self.__idat = self.__prepare_idat  # used by load_read()
         ImageFile.ImageFile.load_prepare(self)
 
     def load_read(self, read_bytes):
-        "internal: read more image data"
+        """internal: read more image data"""
 
         while self.__idat == 0:
             # end of chunk, skip forward to next one
@@ -637,30 +675,63 @@ class PngImageFile(ImageFile.ImageFile):
         return self.fp.read(read_bytes)
 
     def load_end(self):
-        "internal: finished reading image data"
+        """internal: finished reading image data"""
+        while True:
+            self.fp.read(4)  # CRC
 
+            try:
+                cid, pos, length = self.png.read()
+            except (struct.error, SyntaxError):
+                break
+
+            if cid == b"IEND":
+                break
+
+            try:
+                self.png.call(cid, pos, length)
+            except UnicodeDecodeError:
+                break
+            except EOFError:
+                ImageFile._safe_read(self.fp, length)
+            except AttributeError:
+                logger.debug("%r %s %s (unknown)", cid, pos, length)
+                ImageFile._safe_read(self.fp, length)
+        self._text = self.png.im_text
         self.png.close()
         self.png = None
 
+    def _getexif(self):
+        if "exif" not in self.info:
+            self.load()
+        if "exif" not in self.info:
+            return None
+        return dict(self.getexif())
+
+    def getexif(self):
+        if "exif" not in self.info:
+            self.load()
+        return ImageFile.ImageFile.getexif(self)
+
 
 # --------------------------------------------------------------------
 # PNG writer
 
 _OUTMODES = {
     # supported PIL modes, and corresponding rawmodes/bits/color combinations
-    "1": ("1", b'\x01\x00'),
-    "L;1": ("L;1", b'\x01\x00'),
-    "L;2": ("L;2", b'\x02\x00'),
-    "L;4": ("L;4", b'\x04\x00'),
-    "L": ("L", b'\x08\x00'),
-    "LA": ("LA", b'\x08\x04'),
-    "I": ("I;16B", b'\x10\x00'),
-    "P;1": ("P;1", b'\x01\x03'),
-    "P;2": ("P;2", b'\x02\x03'),
-    "P;4": ("P;4", b'\x04\x03'),
-    "P": ("P", b'\x08\x03'),
-    "RGB": ("RGB", b'\x08\x02'),
-    "RGBA": ("RGBA", b'\x08\x06'),
+    "1": ("1", b"\x01\x00"),
+    "L;1": ("L;1", b"\x01\x00"),
+    "L;2": ("L;2", b"\x02\x00"),
+    "L;4": ("L;4", b"\x04\x00"),
+    "L": ("L", b"\x08\x00"),
+    "LA": ("LA", b"\x08\x04"),
+    "I": ("I;16B", b"\x10\x00"),
+    "I;16": ("I;16B", b"\x10\x00"),
+    "P;1": ("P;1", b"\x01\x03"),
+    "P;2": ("P;2", b"\x02\x03"),
+    "P;4": ("P;4", b"\x04\x03"),
+    "P": ("P", b"\x08\x03"),
+    "RGB": ("RGB", b"\x08\x02"),
+    "RGBA": ("RGBA", b"\x08\x06"),
 }
 
@@ -701,7 +772,7 @@ def _save(im, fp, filename, chunk=putchunk):
     else:
         # check palette contents
         if im.palette:
-            colors = max(min(len(im.palette.getdata()[1])//3, 256), 2)
+            colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 2)
         else:
             colors = 256
 
@@ -717,10 +788,12 @@ def _save(im, fp, filename, chunk=putchunk):
             mode = "%s;%d" % (mode, bits)
 
     # encoder options
-    im.encoderconfig = (im.encoderinfo.get("optimize", False),
-                        im.encoderinfo.get("compress_level", -1),
-                        im.encoderinfo.get("compress_type", -1),
-                        im.encoderinfo.get("dictionary", b""))
+    im.encoderconfig = (
+        im.encoderinfo.get("optimize", False),
+        im.encoderinfo.get("compress_level", -1),
+        im.encoderinfo.get("compress_type", -1),
+        im.encoderinfo.get("dictionary", b""),
+    )
 
     # get the corresponding PNG mode
     try:
@@ -733,12 +806,16 @@ def _save(im, fp, filename, chunk=putchunk):
 
     fp.write(_MAGIC)
 
-    chunk(fp, b"IHDR",
-          o32(im.size[0]), o32(im.size[1]),  # 0: size
-          mode,  # 8: depth/type
-          b'\0',  # 10: compression
-          b'\0',  # 11: filter category
-          b'\0')  # 12: interlace flag
+    chunk(
+        fp,
+        b"IHDR",
+        o32(im.size[0]),  # 0: size
+        o32(im.size[1]),
+        mode,  # 8: depth/type
+        b"\0",  # 10: compression
+        b"\0",  # 11: filter category
+        b"\0",  # 12: interlace flag
+    )
 
     chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"]
 
@@ -772,23 +849,22 @@ def _save(im, fp, filename, chunk=putchunk):
         palette_byte_number = (2 ** bits) * 3
         palette_bytes = im.im.getpalette("RGB")[:palette_byte_number]
         while len(palette_bytes) < palette_byte_number:
-            palette_bytes += b'\0'
+            palette_bytes += b"\0"
         chunk(fp, b"PLTE", palette_bytes)
 
-    transparency = im.encoderinfo.get('transparency',
-                                      im.info.get('transparency', None))
+    transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None))
 
     if transparency or transparency == 0:
         if im.mode == "P":
             # limit to actual palette size
-            alpha_bytes = 2**bits
+            alpha_bytes = 2 ** bits
             if isinstance(transparency, bytes):
                 chunk(fp, b"tRNS", transparency[:alpha_bytes])
             else:
                 transparency = max(0, min(255, transparency))
-                alpha = b'\xFF' * transparency + b'\0'
+                alpha = b"\xFF" * transparency + b"\0"
                 chunk(fp, b"tRNS", alpha[:alpha_bytes])
-        elif im.mode == "L":
+        elif im.mode in ("1", "L", "I"):
             transparency = max(0, min(65535, transparency))
             chunk(fp, b"tRNS", o16(transparency))
         elif im.mode == "RGB":
@@ -802,15 +878,18 @@ def _save(im, fp, filename, chunk=putchunk):
     else:
         if im.mode == "P" and im.im.getpalettemode() == "RGBA":
             alpha = im.im.getpalette("RGBA", "A")
-            alpha_bytes = 2**bits
+            alpha_bytes = 2 ** bits
             chunk(fp, b"tRNS", alpha[:alpha_bytes])
 
     dpi = im.encoderinfo.get("dpi")
     if dpi:
-        chunk(fp, b"pHYs",
-              o32(int(dpi[0] / 0.0254 + 0.5)),
-              o32(int(dpi[1] / 0.0254 + 0.5)),
-              b'\x01')
+        chunk(
+            fp,
+            b"pHYs",
+            o32(int(dpi[0] / 0.0254 + 0.5)),
+            o32(int(dpi[1] / 0.0254 + 0.5)),
+            b"\x01",
+        )
 
     info = im.encoderinfo.get("pnginfo")
     if info:
@@ -820,8 +899,15 @@ def _save(im, fp, filename, chunk=putchunk):
                 chunks.remove(cid)
                 chunk(fp, cid, data)
 
-    ImageFile._save(im, _idat(fp, chunk),
-                    [("zip", (0, 0)+im.size, 0, rawmode)])
+    exif = im.encoderinfo.get("exif", im.info.get("exif"))
+    if exif:
+        if isinstance(exif, Image.Exif):
+            exif = exif.tobytes(8)
+        if exif.startswith(b"Exif\x00\x00"):
+            exif = exif[6:]
+        chunk(fp, b"eXIf", exif)
+
+    ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)])
 
     chunk(fp, b"IEND", b"")
 
@@ -832,6 +918,7 @@ def _save(im, fp, filename, chunk=putchunk):
 # --------------------------------------------------------------------
 # PNG chunk converter
 
+
 def getchunks(im, **params):
     """Return a list of PNG chunks representing this image."""
 
@@ -866,6 +953,6 @@ def getchunks(im, **params):
 Image.register_open(PngImageFile.format, PngImageFile, _accept)
 Image.register_save(PngImageFile.format, _save)
 
-Image.register_extension(PngImageFile.format, ".png")
+Image.register_extensions(PngImageFile.format, [".png", ".apng"])
 
 Image.register_mime(PngImageFile.format, "image/png")
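Beyond the reformat, two behavioural changes are visible in this file: text chunks are now exposed through a lazy text property (reading it forces a full load() so trailing iTXt/tEXt/zTXt chunks are seen), and an eXIf chunk is emitted when exif data is supplied to save(). A small sketch, assuming a local test.png exists:

    from PIL import Image

    im = Image.open("test.png")
    print(im.text)  # property; triggers load() to pick up trailing text chunks

    # "exif" may be an Image.Exif object or raw bytes; a leading
    # b"Exif\x00\x00" prefix is stripped before the eXIf chunk is written.
    im.save("with-exif.png", exif=im.getexif())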
diff --git a/server/www/packages/packages-linux/x64/PIL/PpmImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PpmImagePlugin.py
index 8002678..c3e9eed 100644
--- a/server/www/packages/packages-linux/x64/PIL/PpmImagePlugin.py
+++ b/server/www/packages/packages-linux/x64/PIL/PpmImagePlugin.py
@@ -17,12 +17,14 @@
 
 from . import Image, ImageFile
 
+# __version__ is deprecated and will be removed in a future version. Use
+# PIL.__version__ instead.
 __version__ = "0.2"
 
 #
 # --------------------------------------------------------------------
 
-b_whitespace = b'\x20\x09\x0a\x0b\x0c\x0d'
+b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d"
 
 MODES = {
     # standard
@@ -34,7 +36,7 @@ MODES = {
     # PIL extensions (for test purposes only)
     b"PyP": "P",
     b"PyRGBA": "RGBA",
-    b"PyCMYK": "CMYK"
+    b"PyCMYK": "CMYK",
 }
 
 
@@ -45,6 +47,7 @@ def _accept(prefix):
 ##
 # Image plugin for PBM, PGM, and PPM images.
 
+
 class PpmImageFile(ImageFile.ImageFile):
 
     format = "PPM"
@@ -55,10 +58,10 @@ class PpmImageFile(ImageFile.ImageFile):
             c = self.fp.read(1)
             if not c or c in b_whitespace:
                 break
-            if c > b'\x79':
+            if c > b"\x79":
                 raise ValueError("Expected ASCII value, found binary")
             s = s + c
-            if (len(s) > 9):
+            if len(s) > 9:
                 raise ValueError("Expected int, got > 9 digits")
         return s
 
@@ -68,7 +71,14 @@ class PpmImageFile(ImageFile.ImageFile):
         s = self.fp.read(1)
         if s != b"P":
             raise SyntaxError("not a PPM file")
-        mode = MODES[self._token(s)]
+        magic_number = self._token(s)
+        mode = MODES[magic_number]
+
+        self.custom_mimetype = {
+            b"P4": "image/x-portable-bitmap",
+            b"P5": "image/x-portable-graymap",
+            b"P6": "image/x-portable-pixmap",
+        }.get(magic_number)
 
         if mode == "1":
             self.mode = "1"
@@ -83,8 +93,7 @@ class PpmImageFile(ImageFile.ImageFile):
                 if s not in b_whitespace:
                     break
                 if s == b"":
-                    raise ValueError(
-                        "File does not extend beyond magic number")
+                    raise ValueError("File does not extend beyond magic number")
                 if s != b"#":
                     break
                 s = self.fp.readline()
@@ -98,32 +107,30 @@ class PpmImageFile(ImageFile.ImageFile):
                 elif ix == 2:
                     # maxgrey
                     if s > 255:
-                        if not mode == 'L':
+                        if not mode == "L":
                             raise ValueError("Too many colors for band: %s" % s)
-                        if s < 2**16:
-                            self.mode = 'I'
-                            rawmode = 'I;16B'
+                        if s < 2 ** 16:
+                            self.mode = "I"
+                            rawmode = "I;16B"
                         else:
-                            self.mode = 'I'
-                            rawmode = 'I;32B'
+                            self.mode = "I"
+                            rawmode = "I;32B"
 
         self._size = xsize, ysize
-        self.tile = [("raw",
-                      (0, 0, xsize, ysize),
-                      self.fp.tell(),
-                      (rawmode, 0, 1))]
+        self.tile = [("raw", (0, 0, xsize, ysize), self.fp.tell(), (rawmode, 0, 1))]
 
 
 #
 # --------------------------------------------------------------------
 
+
 def _save(im, fp, filename):
     if im.mode == "1":
         rawmode, head = "1;I", b"P4"
     elif im.mode == "L":
         rawmode, head = "L", b"P5"
     elif im.mode == "I":
-        if im.getextrema()[1] < 2**16:
+        if im.getextrema()[1] < 2 ** 16:
             rawmode, head = "I;16B", b"P5"
         else:
             rawmode, head = "I;32B", b"P5"
@@ -133,7 +140,7 @@ def _save(im, fp, filename):
         rawmode, head = "RGB", b"P6"
     else:
         raise IOError("cannot write mode %s as PPM" % im.mode)
-    fp.write(head + ("\n%d %d\n" % im.size).encode('ascii'))
+    fp.write(head + ("\n%d %d\n" % im.size).encode("ascii"))
     if head == b"P6":
         fp.write(b"255\n")
     if head == b"P5":
@@ -143,11 +150,12 @@ def _save(im, fp, filename):
             fp.write(b"65535\n")
         elif rawmode == "I;32B":
             fp.write(b"2147483648\n")
-    ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, 1))])
+    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])
 
     # ALTERNATIVE: save via builtin debug function
     # im._dump(filename)
 
+
 #
 # --------------------------------------------------------------------
 
@@ -155,4 +163,6 @@ def _save(im, fp, filename):
 Image.register_open(PpmImageFile.format, PpmImageFile, _accept)
 Image.register_save(PpmImageFile.format, _save)
 
-Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm"])
+Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"])
+
+Image.register_mime(PpmImageFile.format, "image/x-portable-anymap")
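With the hunk above, each PPM flavour now reports its own MIME type and the .pnm extension is accepted. A sketch of the 16-bit branch of _save (file name hypothetical): a mode "I" image whose maximum sample is below 2**16 is written as a P5 file with maxval 65535:

    from PIL import Image

    im = Image.new("I", (4, 4))
    im.save("gray16.pgm")  # head b"P5", maxval 65535, rawmode "I;16B"

    reopened = Image.open("gray16.pgm")
    print(reopened.mode, reopened.custom_mimetype)
    # "I" "image/x-portable-graymap"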
diff --git a/server/www/packages/packages-linux/x64/PIL/PsdImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/PsdImagePlugin.py
index 2d64ecd..f72ad5f 100644
--- a/server/www/packages/packages-linux/x64/PIL/PsdImagePlugin.py
+++ b/server/www/packages/packages-linux/x64/PIL/PsdImagePlugin.py
@@ -16,8 +16,12 @@
 # See the README file for information on usage and redistribution.
 #
 
+# __version__ is deprecated and will be removed in a future version. Use
+# PIL.__version__ instead.
 __version__ = "0.4"
 
+import io
+
 from . import Image, ImageFile, ImagePalette
 from ._binary import i8, i16be as i16, i32be as i32
 
@@ -31,13 +35,14 @@ MODES = {
     (4, 8): ("CMYK", 4),
     (7, 8): ("L", 1),  # FIXME: multilayer
     (8, 8): ("L", 1),  # duotone
-    (9, 8): ("LAB", 3)
+    (9, 8): ("LAB", 3),
 }
 
 
 # --------------------------------------------------------------------.
 # read PSD images
 
+
 def _accept(prefix):
     return prefix[:4] == b"8BPS"
 
@@ -45,10 +50,12 @@ def _accept(prefix):
 ##
 # Image plugin for Photoshop images.
 
+
 class PsdImageFile(ImageFile.ImageFile):
 
     format = "PSD"
     format_description = "Adobe Photoshop"
+    _close_exclusive_fp_after_loading = False
 
     def _open(self):
 
@@ -92,13 +99,13 @@ class PsdImageFile(ImageFile.ImageFile):
             # load resources
             end = self.fp.tell() + size
             while self.fp.tell() < end:
-                signature = read(4)
+                read(4)  # signature
                 id = i16(read(2))
                 name = read(i8(read(1)))
                 if not (len(name) & 1):
                     read(1)  # padding
                 data = read(i32(read(4)))
-                if (len(data) & 1):
+                if len(data) & 1:
                     read(1)  # padding
                 self.resources.append((id, name, data))
                 if id == 1039:  # ICC profile
@@ -123,7 +130,7 @@ class PsdImageFile(ImageFile.ImageFile):
         self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
 
         # keep the file open
-        self._fp = self.fp
+        self.__fp = self.fp
         self.frame = 1
         self._min_frame = 1
 
@@ -141,11 +148,11 @@ class PsdImageFile(ImageFile.ImageFile):
 
         # seek to given layer (1..max)
         try:
-            name, mode, bbox, tile = self.layers[layer-1]
+            name, mode, bbox, tile = self.layers[layer - 1]
             self.mode = mode
             self.tile = tile
             self.frame = layer
-            self.fp = self._fp
+            self.fp = self.__fp
             return name, bbox
         except IndexError:
             raise EOFError("no such layer")
@@ -156,13 +163,21 @@ class PsdImageFile(ImageFile.ImageFile):
 
     def load_prepare(self):
         # create image memory if necessary
-        if not self.im or\
-           self.im.mode != self.mode or self.im.size != self.size:
+        if not self.im or self.im.mode != self.mode or self.im.size != self.size:
             self.im = Image.core.fill(self.mode, self.size, 0)
         # create palette (optional)
         if self.mode == "P":
             Image.Image.load(self)
 
+    def _close__fp(self):
+        try:
+            if self.__fp != self.fp:
+                self.__fp.close()
+        except AttributeError:
+            pass
+        finally:
+            self.__fp = None
+
 
 def _layerinfo(file):
     # read layerinfo block
@@ -207,33 +222,31 @@ def _layerinfo(file):
             mode = None  # unknown
 
         # skip over blend flags and extra information
-        filler = read(12)
+        read(12)  # filler
         name = ""
-        size = i32(read(4))
+        size = i32(read(4))  # length of the extra data field
         combined = 0
         if size:
+            data_end = file.tell() + size
+
             length = i32(read(4))
             if length:
-                mask_y = i32(read(4))
-                mask_x = i32(read(4))
-                mask_h = i32(read(4)) - mask_y
-                mask_w = i32(read(4)) - mask_x
-                file.seek(length - 16, 1)
+                file.seek(length - 16, io.SEEK_CUR)
             combined += length + 4
 
             length = i32(read(4))
             if length:
-                file.seek(length, 1)
+                file.seek(length, io.SEEK_CUR)
             combined += length + 4
 
             length = i8(read(1))
             if length:
                 # Don't know the proper encoding,
                 # Latin-1 should be a good guess
-                name = read(length).decode('latin-1', 'replace')
+                name = read(length).decode("latin-1", "replace")
             combined += length + 1
 
-            file.seek(size - combined, 1)
+            file.seek(data_end)
         layers.append((name, mode, (x0, y0, x1, y1)))
 
     # get tiles
@@ -271,7 +284,7 @@ def _maketile(file, mode, bbox, channels):
             if mode == "CMYK":
                 layer += ";I"
             tile.append(("raw", bbox, offset, layer))
-            offset = offset + xsize*ysize
+            offset = offset + xsize * ysize
 
     elif compression == 1:
         #
@@ -284,11 +297,9 @@ def _maketile(file, mode, bbox, channels):
             layer = mode[channel]
             if mode == "CMYK":
                 layer += ";I"
-            tile.append(
-                ("packbits", bbox, offset, layer)
-                )
+            tile.append(("packbits", bbox, offset, layer))
             for y in range(ysize):
-                offset = offset + i16(bytecount[i:i+2])
+                offset = offset + i16(bytecount[i : i + 2])
                 i += 2
 
     file.seek(offset)
@@ -298,6 +309,7 @@ def _maketile(file, mode, bbox, channels):
 
     return tile
 
+
 # --------------------------------------------------------------------
 # registry
 
diff --git a/server/www/packages/packages-linux/x64/PIL/PyAccess.py b/server/www/packages/packages-linux/x64/PIL/PyAccess.py
index cce2de2..2ab06f9 100644
--- a/server/www/packages/packages-linux/x64/PIL/PyAccess.py
+++ b/server/www/packages/packages-linux/x64/PIL/PyAccess.py
@@ -25,7 +25,6 @@ import sys
 
 from cffi import FFI
 
-
 logger = logging.getLogger(__name__)
 
 
@@ -42,17 +41,18 @@ ffi.cdef(defs)
 
 
 class PyAccess(object):
-
     def __init__(self, img, readonly=False):
         vals = dict(img.im.unsafe_ptrs)
         self.readonly = readonly
-        self.image8 = ffi.cast('unsigned char **', vals['image8'])
-        self.image32 = ffi.cast('int **', vals['image32'])
-        self.image = ffi.cast('unsigned char **', vals['image'])
+        self.image8 = ffi.cast("unsigned char **", vals["image8"])
+        self.image32 = ffi.cast("int **", vals["image32"])
+        self.image = ffi.cast("unsigned char **", vals["image"])
        self.xsize, self.ysize = img.im.size
 
         # Keep pointer to im object to prevent dereferencing.
         self._im = img.im
+        if self._im.mode == "P":
+            self._palette = img.palette
 
         # Debugging is polluting test traces, only useful here
         # when hacking on PyAccess
@@ -73,8 +73,22 @@ class PyAccess(object):
         :param color: The pixel value.
         """
         if self.readonly:
-            raise ValueError('Attempt to putpixel a read only image')
-        (x, y) = self.check_xy(xy)
+            raise ValueError("Attempt to putpixel a read only image")
+        (x, y) = xy
+        if x < 0:
+            x = self.xsize + x
+        if y < 0:
+            y = self.ysize + y
+        (x, y) = self.check_xy((x, y))
+
+        if (
+            self._im.mode == "P"
+            and isinstance(color, (list, tuple))
+            and len(color) in [3, 4]
+        ):
+            # RGB or RGBA value for a P image
+            color = self._palette.getcolor(color)
+
         return self.set_pixel(x, y, color)
 
     def __getitem__(self, xy):
@@ -88,8 +102,12 @@ class PyAccess(object):
         :returns: a pixel value for single band images, a tuple of
          pixel values for multiband images.
""" - - (x, y) = self.check_xy(xy) + (x, y) = xy + if x < 0: + x = self.xsize + x + if y < 0: + y = self.ysize + y + (x, y) = self.check_xy((x, y)) return self.get_pixel(x, y) putpixel = __setitem__ @@ -98,12 +116,13 @@ class PyAccess(object): def check_xy(self, xy): (x, y) = xy if not (0 <= x < self.xsize and 0 <= y < self.ysize): - raise ValueError('pixel location out of range') + raise ValueError("pixel location out of range") return xy class _PyAccess32_2(PyAccess): """ PA, LA, stored in first and last bytes of a 32 bit word """ + def _post_init(self, *args, **kwargs): self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) @@ -139,6 +158,7 @@ class _PyAccess32_3(PyAccess): class _PyAccess32_4(PyAccess): """ RGBA etc, all 4 bytes of a 32 bit word """ + def _post_init(self, *args, **kwargs): self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) @@ -157,6 +177,7 @@ class _PyAccess32_4(PyAccess): class _PyAccess8(PyAccess): """ 1, L, P, 8 bit images stored as uint8 """ + def _post_init(self, *args, **kwargs): self.pixels = self.image8 @@ -174,8 +195,9 @@ class _PyAccess8(PyAccess): class _PyAccessI16_N(PyAccess): """ I;16 access, native bitendian without conversion """ + def _post_init(self, *args, **kwargs): - self.pixels = ffi.cast('unsigned short **', self.image) + self.pixels = ffi.cast("unsigned short **", self.image) def get_pixel(self, x, y): return self.pixels[y][x] @@ -191,8 +213,9 @@ class _PyAccessI16_N(PyAccess): class _PyAccessI16_L(PyAccess): """ I;16L access, with conversion """ + def _post_init(self, *args, **kwargs): - self.pixels = ffi.cast('struct Pixel_I16 **', self.image) + self.pixels = ffi.cast("struct Pixel_I16 **", self.image) def get_pixel(self, x, y): pixel = self.pixels[y][x] @@ -205,14 +228,15 @@ class _PyAccessI16_L(PyAccess): except TypeError: color = min(color[0], 65535) - pixel.l = color & 0xFF + pixel.l = color & 0xFF # noqa: E741 pixel.r = color >> 8 class _PyAccessI16_B(PyAccess): """ I;16B access, with conversion """ + def _post_init(self, *args, **kwargs): - self.pixels = ffi.cast('struct Pixel_I16 **', self.image) + self.pixels = ffi.cast("struct Pixel_I16 **", self.image) def get_pixel(self, x, y): pixel = self.pixels[y][x] @@ -222,15 +246,16 @@ class _PyAccessI16_B(PyAccess): pixel = self.pixels[y][x] try: color = min(color, 65535) - except: + except Exception: color = min(color[0], 65535) - pixel.l = color >> 8 + pixel.l = color >> 8 # noqa: E741 pixel.r = color & 0xFF class _PyAccessI32_N(PyAccess): """ Signed Int32 access, native endian """ + def _post_init(self, *args, **kwargs): self.pixels = self.image32 @@ -243,15 +268,15 @@ class _PyAccessI32_N(PyAccess): class _PyAccessI32_Swap(PyAccess): """ I;32L/B access, with byteswapping conversion """ + def _post_init(self, *args, **kwargs): self.pixels = self.image32 def reverse(self, i): - orig = ffi.new('int *', i) - chars = ffi.cast('unsigned char *', orig) - chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], \ - chars[1], chars[0] - return ffi.cast('int *', chars)[0] + orig = ffi.new("int *", i) + chars = ffi.cast("unsigned char *", orig) + chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0] + return ffi.cast("int *", chars)[0] def get_pixel(self, x, y): return self.reverse(self.pixels[y][x]) @@ -262,8 +287,9 @@ class _PyAccessI32_Swap(PyAccess): class _PyAccessF(PyAccess): """ 32 bit float access """ + def _post_init(self, *args, **kwargs): - self.pixels = ffi.cast('float **', self.image32) + self.pixels = ffi.cast("float **", self.image32) def 
     def get_pixel(self, x, y):
         return self.pixels[y][x]
 
@@ -277,38 +303,39 @@ class _PyAccessF(PyAccess):
             self.pixels[y][x] = color[0]
 
 
-mode_map = {'1': _PyAccess8,
-            'L': _PyAccess8,
-            'P': _PyAccess8,
-            'LA': _PyAccess32_2,
-            'La': _PyAccess32_2,
-            'PA': _PyAccess32_2,
-            'RGB': _PyAccess32_3,
-            'LAB': _PyAccess32_3,
-            'HSV': _PyAccess32_3,
-            'YCbCr': _PyAccess32_3,
-            'RGBA': _PyAccess32_4,
-            'RGBa': _PyAccess32_4,
-            'RGBX': _PyAccess32_4,
-            'CMYK': _PyAccess32_4,
-            'F': _PyAccessF,
-            'I': _PyAccessI32_N,
-            }
+mode_map = {
+    "1": _PyAccess8,
+    "L": _PyAccess8,
+    "P": _PyAccess8,
+    "LA": _PyAccess32_2,
+    "La": _PyAccess32_2,
+    "PA": _PyAccess32_2,
+    "RGB": _PyAccess32_3,
+    "LAB": _PyAccess32_3,
+    "HSV": _PyAccess32_3,
+    "YCbCr": _PyAccess32_3,
+    "RGBA": _PyAccess32_4,
+    "RGBa": _PyAccess32_4,
+    "RGBX": _PyAccess32_4,
+    "CMYK": _PyAccess32_4,
+    "F": _PyAccessF,
+    "I": _PyAccessI32_N,
+}
 
-if sys.byteorder == 'little':
-    mode_map['I;16'] = _PyAccessI16_N
-    mode_map['I;16L'] = _PyAccessI16_N
-    mode_map['I;16B'] = _PyAccessI16_B
+if sys.byteorder == "little":
+    mode_map["I;16"] = _PyAccessI16_N
+    mode_map["I;16L"] = _PyAccessI16_N
+    mode_map["I;16B"] = _PyAccessI16_B
 
-    mode_map['I;32L'] = _PyAccessI32_N
-    mode_map['I;32B'] = _PyAccessI32_Swap
+    mode_map["I;32L"] = _PyAccessI32_N
+    mode_map["I;32B"] = _PyAccessI32_Swap
 else:
-    mode_map['I;16'] = _PyAccessI16_L
-    mode_map['I;16L'] = _PyAccessI16_L
-    mode_map['I;16B'] = _PyAccessI16_N
+    mode_map["I;16"] = _PyAccessI16_L
+    mode_map["I;16L"] = _PyAccessI16_L
+    mode_map["I;16B"] = _PyAccessI16_N
 
-    mode_map['I;32L'] = _PyAccessI32_Swap
-    mode_map['I;32B'] = _PyAccessI32_N
+    mode_map["I;32L"] = _PyAccessI32_Swap
+    mode_map["I;32B"] = _PyAccessI32_N
 
 
 def new(img, readonly=False):
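Two behavioural additions ride along in PyAccess: negative indexes now wrap around the right and bottom edges, and palette images accept RGB or RGBA tuples, resolved through self._palette.getcolor(). A small sketch of the semantics shown in the hunks (PyAccess itself is the CFFI accessor used on PyPy):

    from PIL import Image

    im = Image.new("P", (8, 8))
    px = im.load()

    px[0, 0] = (255, 0, 0)  # tuple mapped through the palette on a P image
    print(px[-1, -1])       # negative index wraps to px[7, 7]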
diff --git a/server/www/packages/packages-linux/x64/PIL/SgiImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/SgiImagePlugin.py
index 88df351..99408fd 100644
--- a/server/www/packages/packages-linux/x64/PIL/SgiImagePlugin.py
+++ b/server/www/packages/packages-linux/x64/PIL/SgiImagePlugin.py
@@ -22,13 +22,15 @@
 #
 
 
-from . import Image, ImageFile
-from ._binary import i8, o8, i16be as i16
-from ._util import py3
-import struct
 import os
+import struct
 
+from . import Image, ImageFile
+from ._binary import i8, i16be as i16, o8
+from ._util import py3
 
+# __version__ is deprecated and will be removed in a future version. Use
+# PIL.__version__ instead.
 __version__ = "0.3"
 
 
@@ -44,7 +46,7 @@ MODES = {
     (1, 3, 3): "RGB",
     (2, 3, 3): "RGB;16B",
     (1, 3, 4): "RGBA",
-    (2, 3, 4): "RGBA;16B"
+    (2, 3, 4): "RGBA;16B",
 }
 
 
@@ -98,6 +100,8 @@ class SgiImageFile(ImageFile.ImageFile):
 
         self._size = xsize, ysize
         self.mode = rawmode.split(";")[0]
+        if self.mode == "RGB":
+            self.custom_mimetype = "image/rgb"
 
         # orientation -1 : scanlines begins at the bottom-left corner
         orientation = -1
@@ -106,19 +110,21 @@ class SgiImageFile(ImageFile.ImageFile):
         if compression == 0:
             pagesize = xsize * ysize * bpc
             if bpc == 2:
-                self.tile = [("SGI16", (0, 0) + self.size,
-                              headlen, (self.mode, 0, orientation))]
+                self.tile = [
+                    ("SGI16", (0, 0) + self.size, headlen, (self.mode, 0, orientation))
+                ]
             else:
                 self.tile = []
                 offset = headlen
                 for layer in self.mode:
                     self.tile.append(
-                        ("raw", (0, 0) + self.size,
-                         offset, (layer, 0, orientation)))
+                        ("raw", (0, 0) + self.size, offset, (layer, 0, orientation))
+                    )
                     offset += pagesize
         elif compression == 1:
-            self.tile = [("sgi_rle", (0, 0) + self.size,
-                          headlen, (rawmode, orientation, bpc))]
+            self.tile = [
+                ("sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc))
+            ]
 
 
 def _save(im, fp, filename):
@@ -157,8 +163,9 @@ def _save(im, fp, filename):
 
     # assert we've got the right number of bands.
     if len(im.getbands()) != z:
-        raise ValueError("incorrect number of bands in SGI write: %s vs %s" %
-                         (z, len(im.getbands())))
+        raise ValueError(
+            "incorrect number of bands in SGI write: %s vs %s" % (z, len(im.getbands()))
+        )
 
     # Minimum Byte value
     pinmin = 0
@@ -167,30 +174,30 @@ def _save(im, fp, filename):
     # Image name (79 characters max, truncated below in write)
     imgName = os.path.splitext(os.path.basename(filename))[0]
     if py3:
-        imgName = imgName.encode('ascii', 'ignore')
+        imgName = imgName.encode("ascii", "ignore")
     # Standard representation of pixel in the file
     colormap = 0
-    fp.write(struct.pack('>h', magicNumber))
+    fp.write(struct.pack(">h", magicNumber))
     fp.write(o8(rle))
     fp.write(o8(bpc))
-    fp.write(struct.pack('>H', dim))
-    fp.write(struct.pack('>H', x))
-    fp.write(struct.pack('>H', y))
-    fp.write(struct.pack('>H', z))
-    fp.write(struct.pack('>l', pinmin))
-    fp.write(struct.pack('>l', pinmax))
-    fp.write(struct.pack('4s', b''))  # dummy
-    fp.write(struct.pack('79s', imgName))  # truncates to 79 chars
-    fp.write(struct.pack('s', b''))  # force null byte after imgname
-    fp.write(struct.pack('>l', colormap))
-    fp.write(struct.pack('404s', b''))  # dummy
+    fp.write(struct.pack(">H", dim))
+    fp.write(struct.pack(">H", x))
+    fp.write(struct.pack(">H", y))
+    fp.write(struct.pack(">H", z))
+    fp.write(struct.pack(">l", pinmin))
+    fp.write(struct.pack(">l", pinmax))
+    fp.write(struct.pack("4s", b""))  # dummy
+    fp.write(struct.pack("79s", imgName))  # truncates to 79 chars
+    fp.write(struct.pack("s", b""))  # force null byte after imgname
+    fp.write(struct.pack(">l", colormap))
+    fp.write(struct.pack("404s", b""))  # dummy
 
-    rawmode = 'L'
+    rawmode = "L"
     if bpc == 2:
-        rawmode = 'L;16B'
+        rawmode = "L;16B"
 
     for channel in im.split():
-        fp.write(channel.tobytes('raw', rawmode, 0, orientation))
+        fp.write(channel.tobytes("raw", rawmode, 0, orientation))
 
     fp.close()
@@ -205,13 +212,15 @@ class SGI16Decoder(ImageFile.PyDecoder):
         self.fd.seek(512)
 
         for band in range(zsize):
-            channel = Image.new('L', (self.state.xsize, self.state.ysize))
-            channel.frombytes(self.fd.read(2 * pagesize), 'raw',
-                              'L;16B', stride, orientation)
+            channel = Image.new("L", (self.state.xsize, self.state.ysize))
+            channel.frombytes(
"L;16B", stride, orientation + ) self.im.putband(channel.im, band) return -1, 0 + # # registry @@ -220,9 +229,7 @@ Image.register_decoder("SGI16", SGI16Decoder) Image.register_open(SgiImageFile.format, SgiImageFile, _accept) Image.register_save(SgiImageFile.format, _save) Image.register_mime(SgiImageFile.format, "image/sgi") -Image.register_mime(SgiImageFile.format, "image/rgb") -Image.register_extensions(SgiImageFile.format, - [".bw", ".rgb", ".rgba", ".sgi"]) +Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"]) # End of file diff --git a/server/www/packages/packages-linux/x64/PIL/SpiderImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/SpiderImagePlugin.py index 3f57952..f1cae4d 100644 --- a/server/www/packages/packages-linux/x64/PIL/SpiderImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/SpiderImagePlugin.py @@ -35,16 +35,17 @@ from __future__ import print_function -from PIL import Image, ImageFile import os import struct import sys +from PIL import Image, ImageFile + def isInt(f): try: i = int(f) - if f-i == 0: + if f - i == 0: return 1 else: return 0 @@ -60,8 +61,9 @@ iforms = [1, 3, -11, -12, -21, -22] # Returns no. of bytes in the header, if it is a valid Spider header, # otherwise returns 0 + def isSpiderHeader(t): - h = (99,) + t # add 1 value so can use spider header index start=1 + h = (99,) + t # add 1 value so can use spider header index start=1 # header values 1,2,5,12,13,22,23 should be integers for i in [1, 2, 5, 12, 13, 22, 23]: if not isInt(h[i]): @@ -71,9 +73,9 @@ def isSpiderHeader(t): if iform not in iforms: return 0 # check other header values - labrec = int(h[13]) # no. records in file header - labbyt = int(h[22]) # total no. of bytes in header - lenbyt = int(h[23]) # record length in bytes + labrec = int(h[13]) # no. records in file header + labbyt = int(h[22]) # total no. 
of bytes in header + lenbyt = int(h[23]) # record length in bytes if labbyt != (labrec * lenbyt): return 0 # looks like a valid header @@ -81,12 +83,12 @@ def isSpiderHeader(t): def isSpiderImage(filename): - with open(filename, 'rb') as fp: - f = fp.read(92) # read 23 * 4 bytes - t = struct.unpack('>23f', f) # try big-endian first + with open(filename, "rb") as fp: + f = fp.read(92) # read 23 * 4 bytes + t = struct.unpack(">23f", f) # try big-endian first hdrlen = isSpiderHeader(t) if hdrlen == 0: - t = struct.unpack('<23f', f) # little-endian + t = struct.unpack("<23f", f) # little-endian hdrlen = isSpiderHeader(t) return hdrlen @@ -104,18 +106,18 @@ class SpiderImageFile(ImageFile.ImageFile): try: self.bigendian = 1 - t = struct.unpack('>27f', f) # try big-endian first + t = struct.unpack(">27f", f) # try big-endian first hdrlen = isSpiderHeader(t) if hdrlen == 0: self.bigendian = 0 - t = struct.unpack('<27f', f) # little-endian + t = struct.unpack("<27f", f) # little-endian hdrlen = isSpiderHeader(t) if hdrlen == 0: raise SyntaxError("not a valid Spider file") except struct.error: raise SyntaxError("not a valid Spider file") - h = (99,) + t # add 1 value : spider header index starts at 1 + h = (99,) + t # add 1 value : spider header index starts at 1 iform = int(h[5]) if iform != 1: raise SyntaxError("not a Spider 2D image") @@ -149,9 +151,7 @@ class SpiderImageFile(ImageFile.ImageFile): self.rawmode = "F;32F" self.mode = "F" - self.tile = [ - ("raw", (0, 0) + self.size, offset, - (self.rawmode, 0, 1))] + self.tile = [("raw", (0, 0) + self.size, offset, (self.rawmode, 0, 1))] self.__fp = self.fp # FIXME: hack @property @@ -184,22 +184,32 @@ class SpiderImageFile(ImageFile.ImageFile): (minimum, maximum) = self.getextrema() m = 1 if maximum != minimum: - m = depth / (maximum-minimum) + m = depth / (maximum - minimum) b = -m * minimum return self.point(lambda i, m=m, b=b: i * m + b).convert("L") # returns a ImageTk.PhotoImage object, after rescaling to 0..255 def tkPhotoImage(self): from PIL import ImageTk + return ImageTk.PhotoImage(self.convert2byte(), palette=256) + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + # -------------------------------------------------------------------- # Image series # given a list of filenames, return a list of images def loadImageSeries(filelist=None): - """create a list of Image.images for use in montage""" + """create a list of :py:class:`~PIL.Image.Image` objects for use in a montage""" if filelist is None or len(filelist) < 1: return @@ -210,11 +220,11 @@ def loadImageSeries(filelist=None): continue try: im = Image.open(img).convert2byte() - except: + except Exception: if not isSpiderImage(img): print(img + " is not a Spider image file") continue - im.info['filename'] = img + im.info["filename"] = img imglist.append(im) return imglist @@ -222,10 +232,11 @@ def loadImageSeries(filelist=None): # -------------------------------------------------------------------- # For saving images in Spider format + def makeSpiderHeader(im): nsam, nrow = im.size lenbyt = nsam * 4 # There are labrec records in the header - labrec = 1024 / lenbyt + labrec = int(1024 / lenbyt) if 1024 % lenbyt != 0: labrec += 1 labbyt = labrec * lenbyt @@ -238,10 +249,10 @@ def makeSpiderHeader(im): return [] # NB these are Fortran indices - hdr[1] = 1.0 # nslice (=1 for an image) - hdr[2] = float(nrow) # number of rows per slice - hdr[5] = 1.0 # iform for 2D image - hdr[12] = float(nsam) # 
number of pixels per line + hdr[1] = 1.0 # nslice (=1 for an image) + hdr[2] = float(nrow) # number of rows per slice + hdr[5] = 1.0 # iform for 2D image + hdr[12] = float(nsam) # number of pixels per line hdr[13] = float(labrec) # number of records in file header hdr[22] = float(labbyt) # total number of bytes in header hdr[23] = float(lenbyt) # record length in bytes @@ -252,13 +263,13 @@ def makeSpiderHeader(im): # pack binary data into a string hdrstr = [] for v in hdr: - hdrstr.append(struct.pack('f', v)) + hdrstr.append(struct.pack("f", v)) return hdrstr def _save(im, fp, filename): if im.mode[0] != "F": - im = im.convert('F') + im = im.convert("F") hdr = makeSpiderHeader(im) if len(hdr) < 256: @@ -268,7 +279,7 @@ def _save(im, fp, filename): fp.writelines(hdr) rawmode = "F;32NF" # 32-bit native floating point - ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, 1))]) + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]) def _save_spider(im, fp, filename): @@ -277,6 +288,7 @@ def _save_spider(im, fp, filename): Image.register_extension(SpiderImageFile.format, ext) _save(im, fp, filename) + # -------------------------------------------------------------------- @@ -299,7 +311,7 @@ if __name__ == "__main__": print("format: " + str(im.format)) print("size: " + str(im.size)) print("mode: " + str(im.mode)) - print("max, min: ", end=' ') + print("max, min: ", end=" ") print(im.getextrema()) if len(sys.argv) > 2: @@ -308,6 +320,7 @@ if __name__ == "__main__": # perform some image operation im = im.transpose(Image.FLIP_LEFT_RIGHT) print( - "saving a flipped version of %s as %s " % - (os.path.basename(filename), outfile)) + "saving a flipped version of %s as %s " + % (os.path.basename(filename), outfile) + ) im.save(outfile, SpiderImageFile.format) diff --git a/server/www/packages/packages-linux/x64/PIL/SunImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/SunImagePlugin.py index 898350e..74fa5f7 100644 --- a/server/www/packages/packages-linux/x64/PIL/SunImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/SunImagePlugin.py @@ -20,16 +20,19 @@ from . import Image, ImageFile, ImagePalette from ._binary import i32be as i32 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.3" def _accept(prefix): - return len(prefix) >= 4 and i32(prefix) == 0x59a66a95 + return len(prefix) >= 4 and i32(prefix) == 0x59A66A95 ## # Image plugin for Sun raster files. 
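# [reviewer note] The 0x59a66a95 -> 0x59A66A95 edit in _accept above is purely
# cosmetic: uppercase hex digits, same 32-bit big-endian magic number. For
# readers unfamiliar with the sniffing convention, Pillow hands each plugin's
# _accept the first bytes of the file and the Sun plugin matches on this magic.
# A self-contained sketch of the same check, assuming a hypothetical file
# "img.ras":
import struct

with open("img.ras", "rb") as f:
    magic, = struct.unpack(">I", f.read(4))  # big-endian unsigned 32-bit word
print(magic == 0x59A66A95)  # True only for a Sun raster file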
+ class SunImageFile(ImageFile.ImageFile): format = "SUN" @@ -54,7 +57,7 @@ class SunImageFile(ImageFile.ImageFile): # HEAD s = self.fp.read(32) - if i32(s) != 0x59a66a95: + if i32(s) != 0x59A66A95: raise SyntaxError("not an SUN raster file") offset = 32 @@ -80,9 +83,9 @@ class SunImageFile(ImageFile.ImageFile): self.mode, rawmode = "RGB", "BGR" elif depth == 32: if file_type == 3: - self.mode, rawmode = 'RGB', 'RGBX' + self.mode, rawmode = "RGB", "RGBX" else: - self.mode, rawmode = 'RGB', 'BGRX' + self.mode, rawmode = "RGB", "BGRX" else: raise SyntaxError("Unsupported Mode/Bit Depth") @@ -94,11 +97,10 @@ class SunImageFile(ImageFile.ImageFile): raise SyntaxError("Unsupported Palette Type") offset = offset + palette_length - self.palette = ImagePalette.raw("RGB;L", - self.fp.read(palette_length)) + self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length)) if self.mode == "L": self.mode = "P" - rawmode = rawmode.replace('L', 'P') + rawmode = rawmode.replace("L", "P") # 16 bit boundaries on stride stride = ((self.size[0] * depth + 15) // 16) * 2 @@ -122,11 +124,12 @@ class SunImageFile(ImageFile.ImageFile): # (https://www.fileformat.info/format/sunraster/egff.htm) if file_type in (0, 1, 3, 4, 5): - self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))] + self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))] elif file_type == 2: - self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)] + self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)] else: - raise SyntaxError('Unsupported Sun Raster file type') + raise SyntaxError("Unsupported Sun Raster file type") + # # registry diff --git a/server/www/packages/packages-linux/x64/PIL/TarIO.py b/server/www/packages/packages-linux/x64/PIL/TarIO.py index 0e949ff..e180b80 100644 --- a/server/www/packages/packages-linux/x64/PIL/TarIO.py +++ b/server/www/packages/packages-linux/x64/PIL/TarIO.py @@ -14,15 +14,17 @@ # See the README file for information on usage and redistribution. # -from . import ContainerIO +import io +import sys +from . import ContainerIO ## # A file object that provides read access to a given member of a TAR # file. -class TarIO(ContainerIO.ContainerIO): +class TarIO(ContainerIO.ContainerIO): def __init__(self, tarfile, file): """ Create file object. @@ -30,16 +32,16 @@ class TarIO(ContainerIO.ContainerIO): :param tarfile: Name of TAR file. :param file: Name of member file. 
""" - fh = open(tarfile, "rb") + self.fh = open(tarfile, "rb") while True: - s = fh.read(512) + s = self.fh.read(512) if len(s) != 512: raise IOError("unexpected end of tar file") - name = s[:100].decode('utf-8') - i = name.find('\0') + name = s[:100].decode("utf-8") + i = name.find("\0") if i == 0: raise IOError("cannot find subfile") if i > 0: @@ -50,7 +52,22 @@ class TarIO(ContainerIO.ContainerIO): if file == name: break - fh.seek((size + 511) & (~511), 1) + self.fh.seek((size + 511) & (~511), io.SEEK_CUR) # Open region - ContainerIO.ContainerIO.__init__(self, fh, fh.tell(), size) + ContainerIO.ContainerIO.__init__(self, self.fh, self.fh.tell(), size) + + # Context manager support + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + if sys.version_info.major >= 3: + + def __del__(self): + self.close() + + def close(self): + self.fh.close() diff --git a/server/www/packages/packages-linux/x64/PIL/TgaImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/TgaImagePlugin.py index c266d50..b1b3513 100644 --- a/server/www/packages/packages-linux/x64/PIL/TgaImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/TgaImagePlugin.py @@ -17,11 +17,13 @@ # +import warnings + from . import Image, ImageFile, ImagePalette from ._binary import i8, i16le as i16, o8, o16le as o16 -import warnings - +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.3" @@ -32,9 +34,9 @@ __version__ = "0.3" MODES = { # map imagetype/depth to rawmode - (1, 8): "P", - (3, 1): "1", - (3, 8): "L", + (1, 8): "P", + (3, 1): "1", + (3, 8): "L", (3, 16): "LA", (2, 16): "BGR;5", (2, 24): "BGR", @@ -45,6 +47,7 @@ MODES = { ## # Image plugin for Targa files. + class TgaImageFile(ImageFile.ImageFile): format = "TGA" @@ -67,9 +70,12 @@ class TgaImageFile(ImageFile.ImageFile): self._size = i16(s[12:]), i16(s[14:]) # validate header fields - if colormaptype not in (0, 1) or\ - self.size[0] <= 0 or self.size[1] <= 0 or\ - depth not in (1, 8, 16, 24, 32): + if ( + colormaptype not in (0, 1) + or self.size[0] <= 0 + or self.size[1] <= 0 + or depth not in (1, 8, 16, 24, 32) + ): raise SyntaxError("not a TGA file") # image mode @@ -110,27 +116,43 @@ class TgaImageFile(ImageFile.ImageFile): start, size, mapdepth = i16(s[3:]), i16(s[5:]), i16(s[7:]) if mapdepth == 16: self.palette = ImagePalette.raw( - "BGR;16", b"\0"*2*start + self.fp.read(2*size)) + "BGR;16", b"\0" * 2 * start + self.fp.read(2 * size) + ) elif mapdepth == 24: self.palette = ImagePalette.raw( - "BGR", b"\0"*3*start + self.fp.read(3*size)) + "BGR", b"\0" * 3 * start + self.fp.read(3 * size) + ) elif mapdepth == 32: self.palette = ImagePalette.raw( - "BGRA", b"\0"*4*start + self.fp.read(4*size)) + "BGRA", b"\0" * 4 * start + self.fp.read(4 * size) + ) # setup tile descriptor try: rawmode = MODES[(imagetype & 7, depth)] if imagetype & 8: # compressed - self.tile = [("tga_rle", (0, 0)+self.size, - self.fp.tell(), (rawmode, orientation, depth))] + self.tile = [ + ( + "tga_rle", + (0, 0) + self.size, + self.fp.tell(), + (rawmode, orientation, depth), + ) + ] else: - self.tile = [("raw", (0, 0)+self.size, - self.fp.tell(), (rawmode, 0, orientation))] + self.tile = [ + ( + "raw", + (0, 0) + self.size, + self.fp.tell(), + (rawmode, 0, orientation), + ) + ] except KeyError: pass # cannot decode + # # -------------------------------------------------------------------- # Write TGA file @@ -156,14 +178,12 @@ def _save(im, fp, filename): if "rle" in im.encoderinfo: rle = 
im.encoderinfo["rle"] else: - compression = im.encoderinfo.get("compression", - im.info.get("compression")) + compression = im.encoderinfo.get("compression", im.info.get("compression")) rle = compression == "tga_rle" if rle: imagetype += 8 - id_section = im.encoderinfo.get("id_section", - im.info.get("id_section", "")) + id_section = im.encoderinfo.get("id_section", im.info.get("id_section", "")) id_len = len(id_section) if id_len > 255: id_len = 255 @@ -180,23 +200,24 @@ def _save(im, fp, filename): else: flags = 0 - orientation = im.encoderinfo.get("orientation", - im.info.get("orientation", -1)) + orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1)) if orientation > 0: flags = flags | 0x20 - fp.write(o8(id_len) + - o8(colormaptype) + - o8(imagetype) + - o16(colormapfirst) + - o16(colormaplength) + - o8(colormapentry) + - o16(0) + - o16(0) + - o16(im.size[0]) + - o16(im.size[1]) + - o8(bits) + - o8(flags)) + fp.write( + o8(id_len) + + o8(colormaptype) + + o8(imagetype) + + o16(colormapfirst) + + o16(colormaplength) + + o8(colormapentry) + + o16(0) + + o16(0) + + o16(im.size[0]) + + o16(im.size[1]) + + o8(bits) + + o8(flags) + ) if id_section: fp.write(id_section) @@ -206,16 +227,17 @@ def _save(im, fp, filename): if rle: ImageFile._save( - im, - fp, - [("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))]) + im, fp, [("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))] + ) else: ImageFile._save( - im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))]) + im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))] + ) # write targa version 2 footer fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." + b"\000") + # # -------------------------------------------------------------------- # Registry @@ -224,4 +246,6 @@ def _save(im, fp, filename): Image.register_open(TgaImageFile.format, TgaImageFile) Image.register_save(TgaImageFile.format, _save) -Image.register_extension(TgaImageFile.format, ".tga") +Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"]) + +Image.register_mime(TgaImageFile.format, "image/x-tga") diff --git a/server/www/packages/packages-linux/x64/PIL/TiffImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/TiffImagePlugin.py index 5059a13..a927cd3 100644 --- a/server/www/packages/packages-linux/x64/PIL/TiffImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/TiffImagePlugin.py @@ -41,20 +41,19 @@ from __future__ import division, print_function -from . import Image, ImageFile, ImagePalette, TiffTags -from ._binary import i8, o8 -from ._util import py3 - -from fractions import Fraction -from numbers import Number, Rational - +import distutils.version import io import itertools import os import struct import sys import warnings +from fractions import Fraction +from numbers import Number, Rational +from . import Image, ImageFile, ImagePalette, TiffTags +from ._binary import i8, o8 +from ._util import py3 from .TiffTags import TYPES try: @@ -65,6 +64,8 @@ except ImportError: from collections import MutableMapping +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "1.3.5" DEBUG = False # Needs to be merged with the new logging approach. 
@@ -96,6 +97,7 @@ X_RESOLUTION = 282 Y_RESOLUTION = 283 PLANAR_CONFIGURATION = 284 RESOLUTION_UNIT = 296 +TRANSFERFUNCTION = 301 SOFTWARE = 305 DATE_TIME = 306 ARTIST = 315 @@ -105,12 +107,14 @@ TILEOFFSETS = 324 EXTRASAMPLES = 338 SAMPLEFORMAT = 339 JPEGTABLES = 347 +REFERENCEBLACKWHITE = 532 COPYRIGHT = 33432 IPTC_NAA_CHUNK = 33723 # newsphoto properties PHOTOSHOP_CHUNK = 34377 # photoshop properties ICCPROFILE = 34675 EXIFIFD = 34665 XMP = 700 +JPEGQUALITY = 65537 # pseudo-tag by libtiff # https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java IMAGEJ_META_DATA_BYTE_COUNTS = 50838 @@ -132,6 +136,9 @@ COMPRESSION_INFO = { 32946: "tiff_deflate", 34676: "tiff_sgilog", 34677: "tiff_sgilog24", + 34925: "lzma", + 50000: "zstd", + 50001: "webp", } COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()} @@ -147,7 +154,6 @@ OPEN_INFO = { (MM, 1, (1,), 1, (1,), ()): ("1", "1"), (II, 1, (1,), 2, (1,), ()): ("1", "1;R"), (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"), - (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"), (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"), (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), @@ -156,7 +162,6 @@ OPEN_INFO = { (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"), (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"), (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"), - (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"), (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"), (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), @@ -165,7 +170,6 @@ OPEN_INFO = { (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"), (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"), (MM, 1, (1,), 2, (4,), ()): ("L", "L;4R"), - (II, 0, (1,), 1, (8,), ()): ("L", "L;I"), (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"), (II, 0, (1,), 2, (8,), ()): ("L", "L;IR"), @@ -174,14 +178,11 @@ OPEN_INFO = { (MM, 1, (1,), 1, (8,), ()): ("L", "L"), (II, 1, (1,), 2, (8,), ()): ("L", "L;R"), (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"), - (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"), - (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"), (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"), (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"), (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"), - (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"), (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"), (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"), @@ -189,10 +190,8 @@ OPEN_INFO = { (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"), (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"), (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"), - (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), - (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), @@ -219,7 +218,6 @@ OPEN_INFO = { (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 - (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"), (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"), (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"), @@ -230,7 +228,6 @@ OPEN_INFO = { (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"), (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"), (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"), - (II, 3, (1,), 1, (1,), ()): ("P", "P;1"), (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"), (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"), @@ -249,19 +246,17 @@ OPEN_INFO = { (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), (II, 3, (1,), 
2, (8,), ()): ("P", "P;R"), (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"), - (II, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), - - # JPEG compressed images handled by LibTiff and auto-converted to RGB + (II, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16L"), + # JPEG compressed images handled by LibTiff and auto-converted to RGBX # Minimal Baseline TIFF requires YCbCr images to have 3 SamplesPerPixel - (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), - (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), - + (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), + (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), } @@ -284,6 +279,10 @@ def _limit_rational(val, max_val): return n_d[::-1] if inv else n_d +def _libtiff_version(): + return Image.core.libtiff_version.split("\n")[0].split("Version ")[1] + + ## # Wrapper for TIFF IFDs. @@ -303,7 +302,7 @@ class IFDRational(Rational): """ - __slots__ = ('_numerator', '_denominator', '_val') + __slots__ = ("_numerator", "_denominator", "_val") def __init__(self, value, denominator=1): """ @@ -327,7 +326,7 @@ class IFDRational(Rational): return if denominator == 0: - self._val = float('nan') + self._val = float("nan") return elif denominator == 1: @@ -368,6 +367,7 @@ class IFDRational(Rational): def _delegate(op): def delegate(self, *args): return getattr(self._val, op)(*args) + return delegate """ a = ['add','radd', 'sub', 'rsub','div', 'rdiv', 'mul', 'rmul', @@ -378,34 +378,34 @@ class IFDRational(Rational): print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a)) """ - __add__ = _delegate('__add__') - __radd__ = _delegate('__radd__') - __sub__ = _delegate('__sub__') - __rsub__ = _delegate('__rsub__') - __div__ = _delegate('__div__') - __rdiv__ = _delegate('__rdiv__') - __mul__ = _delegate('__mul__') - __rmul__ = _delegate('__rmul__') - __truediv__ = _delegate('__truediv__') - __rtruediv__ = _delegate('__rtruediv__') - __floordiv__ = _delegate('__floordiv__') - __rfloordiv__ = _delegate('__rfloordiv__') - __mod__ = _delegate('__mod__') - __rmod__ = _delegate('__rmod__') - __pow__ = _delegate('__pow__') - __rpow__ = _delegate('__rpow__') - __pos__ = _delegate('__pos__') - __neg__ = _delegate('__neg__') - __abs__ = _delegate('__abs__') - __trunc__ = _delegate('__trunc__') - __lt__ = _delegate('__lt__') - __gt__ = _delegate('__gt__') - __le__ = _delegate('__le__') - __ge__ = _delegate('__ge__') - __nonzero__ = _delegate('__nonzero__') - __ceil__ = _delegate('__ceil__') - __floor__ = _delegate('__floor__') - __round__ = _delegate('__round__') + __add__ = _delegate("__add__") + __radd__ = _delegate("__radd__") + __sub__ = _delegate("__sub__") + __rsub__ = _delegate("__rsub__") + __div__ = _delegate("__div__") + __rdiv__ = _delegate("__rdiv__") + __mul__ = _delegate("__mul__") + __rmul__ = _delegate("__rmul__") + __truediv__ = _delegate("__truediv__") + __rtruediv__ = _delegate("__rtruediv__") + __floordiv__ = _delegate("__floordiv__") + __rfloordiv__ = _delegate("__rfloordiv__") + __mod__ = _delegate("__mod__") + __rmod__ = _delegate("__rmod__") + __pow__ = _delegate("__pow__") + __rpow__ = _delegate("__rpow__") + __pos__ = _delegate("__pos__") + 
__neg__ = _delegate("__neg__") + __abs__ = _delegate("__abs__") + __trunc__ = _delegate("__trunc__") + __lt__ = _delegate("__lt__") + __gt__ = _delegate("__gt__") + __le__ = _delegate("__le__") + __ge__ = _delegate("__ge__") + __nonzero__ = _delegate("__nonzero__") + __ceil__ = _delegate("__ceil__") + __floor__ = _delegate("__floor__") + __round__ = _delegate("__round__") class ImageFileDirectory_v2(MutableMapping): @@ -416,7 +416,7 @@ class ImageFileDirectory_v2(MutableMapping): ifd = ImageFileDirectory_v2() ifd[key] = 'Some Data' - ifd.tagtype[key] = 2 + ifd.tagtype[key] = TiffTags.ASCII print(ifd[key]) 'Some Data' @@ -439,6 +439,7 @@ class ImageFileDirectory_v2(MutableMapping): .. versionadded:: 3.0.0 """ + """ Documentation: @@ -498,7 +499,7 @@ class ImageFileDirectory_v2(MutableMapping): self._tags_v1 = {} # will remain empty if legacy_api is false self._tags_v2 = {} # main tag storage self._tagdata = {} - self.tagtype = {} # added 2008-06-05 by Florian Hoech + self.tagtype = {} # added 2008-06-05 by Florian Hoech self._next = None self._offset = None @@ -511,8 +512,7 @@ class ImageFileDirectory_v2(MutableMapping): Returns the complete tag dictionary, with named tags where possible. """ - return dict((TiffTags.lookup(code).name, value) - for code, value in self.items()) + return {TiffTags.lookup(code).name: value for code, value in self.items()} def __len__(self): return len(set(self._tagdata) | set(self._tags_v2)) @@ -525,13 +525,14 @@ class ImageFileDirectory_v2(MutableMapping): self[tag] = handler(self, data, self.legacy_api) # check type val = self._tags_v2[tag] if self.legacy_api and not isinstance(val, (tuple, bytes)): - val = val, + val = (val,) return val def __contains__(self, tag): return tag in self._tags_v2 or tag in self._tagdata if not py3: + def has_key(self, tag): return tag in self @@ -541,7 +542,7 @@ class ImageFileDirectory_v2(MutableMapping): def _setitem(self, tag, value, legacy_api): basetypes = (Number, bytes, str) if not py3: - basetypes += unicode, + basetypes += (unicode,) # noqa: F821 info = TiffTags.lookup(tag) values = [value] if isinstance(value, basetypes) else value @@ -550,30 +551,30 @@ class ImageFileDirectory_v2(MutableMapping): if info.type: self.tagtype[tag] = info.type else: - self.tagtype[tag] = 7 + self.tagtype[tag] = TiffTags.UNDEFINED if all(isinstance(v, IFDRational) for v in values): - self.tagtype[tag] = 5 + self.tagtype[tag] = TiffTags.RATIONAL elif all(isinstance(v, int) for v in values): if all(v < 2 ** 16 for v in values): - self.tagtype[tag] = 3 + self.tagtype[tag] = TiffTags.SHORT else: - self.tagtype[tag] = 4 + self.tagtype[tag] = TiffTags.LONG elif all(isinstance(v, float) for v in values): - self.tagtype[tag] = 12 + self.tagtype[tag] = TiffTags.DOUBLE else: if py3: if all(isinstance(v, str) for v in values): - self.tagtype[tag] = 2 + self.tagtype[tag] = TiffTags.ASCII else: # Never treat data as binary by default on Python 2. 
- self.tagtype[tag] = 2 + self.tagtype[tag] = TiffTags.ASCII - if self.tagtype[tag] == 7 and py3: - values = [value.encode("ascii", 'replace') if isinstance( - value, str) else value] - elif self.tagtype[tag] == 5: - values = [float(v) if isinstance(v, int) else v - for v in values] + if self.tagtype[tag] == TiffTags.UNDEFINED and py3: + values = [ + value.encode("ascii", "replace") if isinstance(value, str) else value + ] + elif self.tagtype[tag] == TiffTags.RATIONAL: + values = [float(v) if isinstance(v, int) else v for v in values] values = tuple(info.cvt_enum(value) for value in values) @@ -584,18 +585,23 @@ class ImageFileDirectory_v2(MutableMapping): # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed. # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple. # Don't mess with the legacy api, since it's frozen. - if (info.length == 1) or \ - (info.length is None and len(values) == 1 and not legacy_api): + if (info.length == 1) or ( + info.length is None and len(values) == 1 and not legacy_api + ): # Don't mess with the legacy api, since it's frozen. - if legacy_api and self.tagtype[tag] in [5, 10]: # rationals - values = values, + if legacy_api and self.tagtype[tag] in [ + TiffTags.RATIONAL, + TiffTags.SIGNED_RATIONAL, + ]: # rationals + values = (values,) try: dest[tag], = values except ValueError: # We've got a builtin tag with 1 expected entry warnings.warn( - "Metadata Warning, tag %s had too many entries: %s, expected 1" % ( - tag, len(values))) + "Metadata Warning, tag %s had too many entries: %s, expected 1" + % (tag, len(values)) + ) dest[tag] = values[0] else: @@ -620,36 +626,51 @@ class ImageFileDirectory_v2(MutableMapping): def _register_loader(idx, size): def decorator(func): from .TiffTags import TYPES + if func.__name__.startswith("load_"): TYPES[idx] = func.__name__[5:].replace("_", " ") - _load_dispatch[idx] = size, func + _load_dispatch[idx] = size, func # noqa: F821 return func + return decorator def _register_writer(idx): def decorator(func): - _write_dispatch[idx] = func + _write_dispatch[idx] = func # noqa: F821 return func + return decorator def _register_basic(idx_fmt_name): from .TiffTags import TYPES + idx, fmt, name = idx_fmt_name TYPES[idx] = name size = struct.calcsize("=" + fmt) - _load_dispatch[idx] = size, lambda self, data, legacy_api=True: ( - self._unpack("{}{}".format(len(data) // size, fmt), data)) - _write_dispatch[idx] = lambda self, *values: ( - b"".join(self._pack(fmt, value) for value in values)) + _load_dispatch[idx] = ( # noqa: F821 + size, + lambda self, data, legacy_api=True: ( + self._unpack("{}{}".format(len(data) // size, fmt), data) + ), + ) + _write_dispatch[idx] = lambda self, *values: ( # noqa: F821 + b"".join(self._pack(fmt, value) for value in values) + ) - list(map(_register_basic, - [(3, "H", "short"), - (4, "L", "long"), - (6, "b", "signed byte"), - (8, "h", "signed short"), - (9, "l", "signed long"), - (11, "f", "float"), - (12, "d", "double")])) + list( + map( + _register_basic, + [ + (TiffTags.SHORT, "H", "short"), + (TiffTags.LONG, "L", "long"), + (TiffTags.SIGNED_BYTE, "b", "signed byte"), + (TiffTags.SIGNED_SHORT, "h", "signed short"), + (TiffTags.SIGNED_LONG, "l", "signed long"), + (TiffTags.FLOAT, "f", "float"), + (TiffTags.DOUBLE, "d", "double"), + ], + ) + ) @_register_loader(1, 1) # Basic type, except for the legacy API. 
def load_byte(self, data, legacy_api=True): @@ -669,21 +690,23 @@ class ImageFileDirectory_v2(MutableMapping): def write_string(self, value): # remerge of https://github.com/python-pillow/Pillow/pull/1416 if sys.version_info.major == 2: - value = value.decode('ascii', 'replace') - return b"" + value.encode('ascii', 'replace') + b"\0" + value = value.decode("ascii", "replace") + return b"" + value.encode("ascii", "replace") + b"\0" @_register_loader(5, 8) def load_rational(self, data, legacy_api=True): vals = self._unpack("{}L".format(len(data) // 4), data) - def combine(a, b): return (a, b) if legacy_api else IFDRational(a, b) - return tuple(combine(num, denom) - for num, denom in zip(vals[::2], vals[1::2])) + def combine(a, b): + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) @_register_writer(5) def write_rational(self, *values): - return b"".join(self._pack("2L", *_limit_rational(frac, 2 ** 31)) - for frac in values) + return b"".join( + self._pack("2L", *_limit_rational(frac, 2 ** 31)) for frac in values + ) @_register_loader(7, 1) def load_undefined(self, data, legacy_api=True): @@ -697,21 +720,24 @@ class ImageFileDirectory_v2(MutableMapping): def load_signed_rational(self, data, legacy_api=True): vals = self._unpack("{}l".format(len(data) // 4), data) - def combine(a, b): return (a, b) if legacy_api else IFDRational(a, b) - return tuple(combine(num, denom) - for num, denom in zip(vals[::2], vals[1::2])) + def combine(a, b): + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) @_register_writer(10) def write_signed_rational(self, *values): - return b"".join(self._pack("2L", *_limit_rational(frac, 2 ** 30)) - for frac in values) + return b"".join( + self._pack("2L", *_limit_rational(frac, 2 ** 30)) for frac in values + ) def _ensure_read(self, fp, size): ret = fp.read(size) if len(ret) != size: - raise IOError("Corrupt EXIF data. " + - "Expecting to read %d bytes but only got %d. " % - (size, len(ret))) + raise IOError( + "Corrupt EXIF data. " + + "Expecting to read %d bytes but only got %d. " % (size, len(ret)) + ) return ret def load(self, fp): @@ -721,13 +747,14 @@ class ImageFileDirectory_v2(MutableMapping): try: for i in range(self._unpack("H", self._ensure_read(fp, 2))[0]): - tag, typ, count, data = self._unpack("HHL4s", - self._ensure_read(fp, 12)) + tag, typ, count, data = self._unpack("HHL4s", self._ensure_read(fp, 12)) if DEBUG: tagname = TiffTags.lookup(tag).name typname = TYPES.get(typ, "unknown") - print("tag: %s (%d) - type: %s (%d)" % - (tagname, tag, typname, typ), end=" ") + print( + "tag: %s (%d) - type: %s (%d)" % (tagname, tag, typname, typ), + end=" ", + ) try: unit_size, handler = self._load_dispatch[typ] @@ -740,8 +767,10 @@ class ImageFileDirectory_v2(MutableMapping): here = fp.tell() offset, = self._unpack("L", data) if DEBUG: - print("Tag Location: %s - Data Location: %s" % - (here, offset), end=" ") + print( + "Tag Location: %s - Data Location: %s" % (here, offset), + end=" ", + ) fp.seek(offset) data = ImageFile._safe_read(fp, size) fp.seek(here) @@ -749,9 +778,11 @@ class ImageFileDirectory_v2(MutableMapping): data = data[:size] if len(data) != size: - warnings.warn("Possibly corrupt EXIF data. " - "Expecting to read %d bytes but only got %d." - " Skipping tag %s" % (size, len(data), tag)) + warnings.warn( + "Possibly corrupt EXIF data. " + "Expecting to read %d bytes but only got %d." 
+ " Skipping tag %s" % (size, len(data), tag) + ) continue if not data: @@ -771,17 +802,12 @@ class ImageFileDirectory_v2(MutableMapping): warnings.warn(str(msg)) return - def save(self, fp): - - if fp.tell() == 0: # skip TIFF header on subsequent pages - # tiff header -- PIL always starts the first IFD at offset 8 - fp.write(self._prefix + self._pack("HL", 42, 8)) - + def tobytes(self, offset=0): # FIXME What about tagdata? - fp.write(self._pack("H", len(self._tags_v2))) + result = self._pack("H", len(self._tags_v2)) entries = [] - offset = fp.tell() + len(self._tags_v2) * 12 + 4 + offset = offset + len(result) + len(self._tags_v2) * 12 + 4 stripoffsets = None # pass 1: convert tags to binary format @@ -797,29 +823,32 @@ class ImageFileDirectory_v2(MutableMapping): if DEBUG: tagname = TiffTags.lookup(tag).name typname = TYPES.get(typ, "unknown") - print("save: %s (%d) - type: %s (%d)" % - (tagname, tag, typname, typ), end=" ") + print( + "save: %s (%d) - type: %s (%d)" % (tagname, tag, typname, typ), + end=" ", + ) if len(data) >= 16: print("- value: " % len(data)) else: print("- value:", values) # count is sum of lengths for string and arbitrary data - count = len(data) if typ in [2, 7] else len(values) + if typ in [TiffTags.BYTE, TiffTags.ASCII, TiffTags.UNDEFINED]: + count = len(data) + else: + count = len(values) # figure out if data fits into the entry if len(data) <= 4: entries.append((tag, typ, count, data.ljust(4, b"\0"), b"")) else: - entries.append((tag, typ, count, self._pack("L", offset), - data)) + entries.append((tag, typ, count, self._pack("L", offset), data)) offset += (len(data) + 1) // 2 * 2 # pad to word # update strip offset data to point beyond auxiliary data if stripoffsets is not None: tag, typ, count, value, data = entries[stripoffsets] if data: - raise NotImplementedError( - "multistrip support not yet implemented") + raise NotImplementedError("multistrip support not yet implemented") value = self._pack("L", self._unpack("L", value)[0] + offset) entries[stripoffsets] = tag, typ, count, value, data @@ -827,18 +856,29 @@ class ImageFileDirectory_v2(MutableMapping): for tag, typ, count, value, data in entries: if DEBUG > 1: print(tag, typ, count, repr(value), repr(data)) - fp.write(self._pack("HHL4s", tag, typ, count, value)) + result += self._pack("HHL4s", tag, typ, count, value) # -- overwrite here for multi-page -- - fp.write(b"\0\0\0\0") # end of entries + result += b"\0\0\0\0" # end of entries # pass 3: write auxiliary data to file for tag, typ, count, value, data in entries: - fp.write(data) + result += data if len(data) & 1: - fp.write(b"\0") + result += b"\0" - return offset + return result + + def save(self, fp): + + if fp.tell() == 0: # skip TIFF header on subsequent pages + # tiff header -- PIL always starts the first IFD at offset 8 + fp.write(self._prefix + self._pack("HL", 42, 8)) + + offset = fp.tell() + result = self.tobytes(offset) + fp.write(result) + return offset + len(result) ImageFileDirectory_v2._load_dispatch = _load_dispatch @@ -858,7 +898,7 @@ class ImageFileDirectory_v1(ImageFileDirectory_v2): ifd = ImageFileDirectory_v1() ifd[key] = 'Some Data' - ifd.tagtype[key] = 2 + ifd.tagtype[key] = TiffTags.ASCII print(ifd[key]) ('Some Data',) @@ -869,6 +909,7 @@ class ImageFileDirectory_v1(ImageFileDirectory_v2): .. 
deprecated:: 3.0.0 """ + def __init__(self, *args, **kwargs): ImageFileDirectory_v2.__init__(self, *args, **kwargs) self._legacy_api = True @@ -933,7 +974,7 @@ class ImageFileDirectory_v1(ImageFileDirectory_v2): self._setitem(tag, handler(self, data, legacy), legacy) val = self._tags_v1[tag] if not isinstance(val, (tuple, bytes)): - val = val, + val = (val,) return val @@ -944,6 +985,7 @@ ImageFileDirectory = ImageFileDirectory_v1 ## # Image plugin for TIFF files. + class TiffImageFile(ImageFile.ImageFile): format = "TIFF" @@ -951,7 +993,7 @@ class TiffImageFile(ImageFile.ImageFile): _close_exclusive_fp_after_loading = False def _open(self): - "Open the first image in a TIFF file" + """Open the first image in a TIFF file""" # Header ifh = self.fp.read(8) @@ -968,7 +1010,6 @@ class TiffImageFile(ImageFile.ImageFile): self.__fp = self.fp self._frame_pos = [] self._n_frames = None - self._is_animated = None if DEBUG: print("*** TiffImageFile._open ***") @@ -982,33 +1023,18 @@ class TiffImageFile(ImageFile.ImageFile): def n_frames(self): if self._n_frames is None: current = self.tell() - try: - while True: - self._seek(self.tell() + 1) - except EOFError: - self._n_frames = self.tell() + 1 + self._seek(len(self._frame_pos)) + while self._n_frames is None: + self._seek(self.tell() + 1) self.seek(current) return self._n_frames @property def is_animated(self): - if self._is_animated is None: - if self._n_frames is not None: - self._is_animated = self._n_frames != 1 - else: - current = self.tell() - - try: - self.seek(1) - self._is_animated = True - except EOFError: - self._is_animated = False - - self.seek(current) return self._is_animated def seek(self, frame): - "Select a given frame as current image" + """Select a given frame as current image""" if not self._seek_check(frame): return self._seek(frame) @@ -1024,9 +1050,10 @@ class TiffImageFile(ImageFile.ImageFile): if not self.__next: raise EOFError("no more images in TIFF file") if DEBUG: - print("Seeking to frame %s, on frame %s, " - "__next %s, location: %s" % - (frame, self.__frame, self.__next, self.fp.tell())) + print( + "Seeking to frame %s, on frame %s, __next %s, location: %s" + % (frame, self.__frame, self.__next, self.fp.tell()) + ) # reset python3 buffered io handle in case fp # was passed to libtiff, invalidating the buffer self.fp.tell() @@ -1036,17 +1063,20 @@ class TiffImageFile(ImageFile.ImageFile): print("Loading tags, location: %s" % self.fp.tell()) self.tag_v2.load(self.fp) self.__next = self.tag_v2.next + if self.__next == 0: + self._n_frames = frame + 1 + if len(self._frame_pos) == 1: + self._is_animated = self.__next != 0 self.__frame += 1 self.fp.seek(self._frame_pos[frame]) self.tag_v2.load(self.fp) - self.__next = self.tag_v2.next # fill the legacy tag/ifd entries self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2) self.__frame = frame self._setup() def tell(self): - "Return the current frame number" + """Return the current frame number""" return self.__frame @property @@ -1056,9 +1086,9 @@ class TiffImageFile(ImageFile.ImageFile): @size.setter def size(self, value): warnings.warn( - 'Setting the size of a TIFF image directly is deprecated, and will' - ' be removed in a future version. Use the resize method instead.', - DeprecationWarning + "Setting the size of a TIFF image directly is deprecated, and will" + " be removed in a future version. 
Use the resize method instead.", + DeprecationWarning, ) self._size = value @@ -1068,9 +1098,23 @@ class TiffImageFile(ImageFile.ImageFile): return super(TiffImageFile, self).load() def load_end(self): + if self._tile_orientation: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(self._tile_orientation) + if method is not None: + self.im = self.im.transpose(method) + self._size = self.im.size + # allow closing if we're on the first frame, there's no next # This is the ImageFile.load path only, libtiff specific below. - if self.__frame == 0 and not self.__next: + if not self._is_animated: self._close_exclusive_fp_after_loading = True def _load_libtiff(self): @@ -1092,7 +1136,7 @@ class TiffImageFile(ImageFile.ImageFile): # (self._compression, (extents tuple), # 0, (rawmode, self._compression, fp)) extents = self.tile[0][1] - args = list(self.tile[0][3]) + [self.tag_v2.offset] + args = list(self.tile[0][3]) # To be nice on memory footprint, if there's a # file descriptor, use that instead of reading @@ -1113,13 +1157,15 @@ class TiffImageFile(ImageFile.ImageFile): if fp: args[2] = fp - decoder = Image._getdecoder(self.mode, 'libtiff', tuple(args), - self.decoderconfig) + decoder = Image._getdecoder( + self.mode, "libtiff", tuple(args), self.decoderconfig + ) try: decoder.setimage(self.im, extents) except ValueError: raise IOError("Couldn't set the image") + close_self_fp = self._exclusive_fp and not self._is_animated if hasattr(self.fp, "getvalue"): # We've got a stringio like thing passed in. Yay for all in memory. # The decoder needs the entire file in one shot, so there's not @@ -1133,27 +1179,31 @@ class TiffImageFile(ImageFile.ImageFile): if DEBUG: print("have getvalue. just sending in a string from getvalue") n, err = decoder.decode(self.fp.getvalue()) - elif hasattr(self.fp, "fileno"): + elif fp: # we've got a actual file on disk, pass in the fp. if DEBUG: print("have fileno, calling fileno version of the decoder.") - self.fp.seek(0) + if not close_self_fp: + self.fp.seek(0) # 4 bytes, otherwise the trace might error out n, err = decoder.decode(b"fpfp") else: # we have something else. if DEBUG: print("don't have fileno or getvalue. just reading") + self.fp.seek(0) # UNDONE -- so much for that buffer size thing. 
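# [reviewer note] The new load_end earlier in this hunk now honours the TIFF
# ORIENTATION tag (0x0112) by transposing the decoded image; the mapping to
# transpose methods (FLIP_LEFT_RIGHT, ROTATE_180, ..., ROTATE_90) follows the
# EXIF orientation values 2-8. A quick sanity check, assuming a hypothetical
# file "cam.tif" whose orientation tag is 6 ("rotate 90 CW to display"):
from PIL import Image

im = Image.open("cam.tif")
im.load()        # load_end applies Image.ROTATE_270 for orientation 6
print(im.size)   # width/height swapped relative to the raw strips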
n, err = decoder.decode(self.fp.read()) self.tile = [] self.readonly = 0 + + self.load_end() + # libtiff closed the fp in a, we need to close self.fp, if possible - if self._exclusive_fp: - if self.__frame == 0 and not self.__next: - self.fp.close() - self.fp = None # might be shared + if close_self_fp: + self.fp.close() + self.fp = None # might be shared if err < 0: raise IOError(err) @@ -1161,7 +1211,7 @@ class TiffImageFile(ImageFile.ImageFile): return Image.Image.load(self) def _setup(self): - "Setup this image object based on current tags" + """Setup this image object based on current tags""" if 0xBC01 in self.tag_v2: raise IOError("Windows Media Photo files not yet supported") @@ -1174,6 +1224,10 @@ class TiffImageFile(ImageFile.ImageFile): # the specification photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0) + # old style jpeg compression images most certainly are YCbCr + if self._compression == "tiff_jpeg": + photo = 6 + fillorder = self.tag_v2.get(FILLORDER, 1) if DEBUG: @@ -1185,16 +1239,15 @@ class TiffImageFile(ImageFile.ImageFile): print("- YCbCr subsampling:", self.tag.get(530)) # size - xsize = self.tag_v2.get(IMAGEWIDTH) - ysize = self.tag_v2.get(IMAGELENGTH) + xsize = int(self.tag_v2.get(IMAGEWIDTH)) + ysize = int(self.tag_v2.get(IMAGELENGTH)) self._size = xsize, ysize if DEBUG: print("- size:", self.size) sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,)) - if (len(sampleFormat) > 1 - and max(sampleFormat) == min(sampleFormat) == 1): + if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1: # SAMPLEFORMAT is properly per band, so an RGB image will # be (1,1,1). But, we don't support per band pixel types, # and anything more than one band is a uint8. So, just @@ -1217,8 +1270,14 @@ class TiffImageFile(ImageFile.ImageFile): bps_tuple = bps_tuple * bps_count # mode: check photometric interpretation and bits per pixel - key = (self.tag_v2.prefix, photo, sampleFormat, fillorder, - bps_tuple, extra_tuple) + key = ( + self.tag_v2.prefix, + photo, + sampleFormat, + fillorder, + bps_tuple, + extra_tuple, + ) if DEBUG: print("format key:", key) try: @@ -1240,11 +1299,11 @@ class TiffImageFile(ImageFile.ImageFile): if xres and yres: resunit = self.tag_v2.get(RESOLUTION_UNIT) if resunit == 2: # dots per inch - self.info["dpi"] = xres, yres + self.info["dpi"] = int(xres + 0.5), int(yres + 0.5) elif resunit == 3: # dots per centimeter. convert to dpi - self.info["dpi"] = xres * 2.54, yres * 2.54 + self.info["dpi"] = int(xres * 2.54 + 0.5), int(yres * 2.54 + 0.5) elif resunit is None: # used to default to 1, but now 2) - self.info["dpi"] = xres, yres + self.info["dpi"] = int(xres + 0.5), int(yres + 0.5) # For backward compatibility, # we also preserve the old behavior self.info["resolution"] = xres, yres @@ -1254,7 +1313,7 @@ class TiffImageFile(ImageFile.ImageFile): # build tile descriptors x = y = layer = 0 self.tile = [] - self.use_load_libtiff = READ_LIBTIFF or self._compression != 'raw' + self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw" if self.use_load_libtiff: # Decoder expects entire file as one tile. # There's a buffer size limit in load (64k) @@ -1281,20 +1340,17 @@ class TiffImageFile(ImageFile.ImageFile): # we're expecting image byte order. So, if the rawmode # contains I;16, we need to convert from native to image # byte order. 
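# [reviewer note] Earlier in this hunk the reported DPI switched from raw
# rationals to integers via round-half-up; a quick illustration with a
# hypothetical resolution value:
xres = 72.009           # hypothetical X resolution read from the IFD
print(int(xres + 0.5))  # 72 -- what self.info["dpi"] now contains
# The unrounded values remain available through self.info["resolution"].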
- if rawmode == 'I;16': - rawmode = 'I;16N' - if ';16B' in rawmode: - rawmode = rawmode.replace(';16B', ';16N') - if ';16L' in rawmode: - rawmode = rawmode.replace(';16L', ';16N') + if rawmode == "I;16": + rawmode = "I;16N" + if ";16B" in rawmode: + rawmode = rawmode.replace(";16B", ";16N") + if ";16L" in rawmode: + rawmode = rawmode.replace(";16L", ";16N") # Offset in the tile tuple is 0, we go from 0,0 to # w,h, and we only do this once -- eds - a = (rawmode, self._compression, False) - self.tile.append( - (self._compression, - (0, 0, xsize, ysize), - 0, a)) + a = (rawmode, self._compression, False, self.tag_v2.offset) + self.tile.append(("libtiff", (0, 0, xsize, ysize), 0, a)) elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2: # striped image @@ -1323,9 +1379,13 @@ class TiffImageFile(ImageFile.ImageFile): a = (tile_rawmode, int(stride), 1) self.tile.append( - (self._compression, - (x, y, min(x+w, xsize), min(y+h, ysize)), - offset, a)) + ( + self._compression, + (x, y, min(x + w, xsize), min(y + h, ysize)), + offset, + a, + ) + ) x = x + w if x >= self.size[0]: x, y = 0, y + h @@ -1339,14 +1399,25 @@ class TiffImageFile(ImageFile.ImageFile): # Fix up info. if ICCPROFILE in self.tag_v2: - self.info['icc_profile'] = self.tag_v2[ICCPROFILE] + self.info["icc_profile"] = self.tag_v2[ICCPROFILE] # fixup palette descriptor - if self.mode == "P": + if self.mode in ["P", "PA"]: palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]] self.palette = ImagePalette.raw("RGB;L", b"".join(palette)) + self._tile_orientation = self.tag_v2.get(0x0112) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + # # -------------------------------------------------------------------- @@ -1373,7 +1444,6 @@ SAVE_INFO = { "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None), "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None), "LAB": ("LAB", II, 8, 1, (8, 8, 8), None), - "I;32BS": ("I;32BS", MM, 1, 2, (32,), None), "I;16B": ("I;16B", MM, 1, 1, (16,), None), "I;16BS": ("I;16BS", MM, 1, 2, (16,), None), @@ -1390,14 +1460,14 @@ def _save(im, fp, filename): ifd = ImageFileDirectory_v2(prefix=prefix) - compression = im.encoderinfo.get('compression', im.info.get('compression')) + compression = im.encoderinfo.get("compression", im.info.get("compression")) if compression is None: - compression = 'raw' + compression = "raw" - libtiff = WRITE_LIBTIFF or compression != 'raw' + libtiff = WRITE_LIBTIFF or compression != "raw" # required for color libtiff images - ifd[PLANAR_CONFIGURATION] = getattr(im, '_planar_configuration', 1) + ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1) ifd[IMAGEWIDTH] = im.size[0] ifd[IMAGELENGTH] = im.size[1] @@ -1412,15 +1482,21 @@ def _save(im, fp, filename): ifd[key] = info.get(key) try: ifd.tagtype[key] = info.tagtype[key] - except: - pass # might not be an IFD, Might not have populated type + except Exception: + pass # might not be an IFD. 
Might not have populated type # additions written by Greg Couch, gregc@cgl.ucsf.edu # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com - if hasattr(im, 'tag_v2'): + if hasattr(im, "tag_v2"): # preserve tags from original TIFF image file - for key in (RESOLUTION_UNIT, X_RESOLUTION, Y_RESOLUTION, - IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, XMP): + for key in ( + RESOLUTION_UNIT, + X_RESOLUTION, + Y_RESOLUTION, + IPTC_NAA_CHUNK, + PHOTOSHOP_CHUNK, + XMP, + ): if key in im.tag_v2: ifd[key] = im.tag_v2[key] ifd.tagtype[key] = im.tag_v2.tagtype[key] @@ -1430,24 +1506,26 @@ def _save(im, fp, filename): if "icc_profile" in im.info: ifd[ICCPROFILE] = im.info["icc_profile"] - for key, name in [(IMAGEDESCRIPTION, "description"), - (X_RESOLUTION, "resolution"), - (Y_RESOLUTION, "resolution"), - (X_RESOLUTION, "x_resolution"), - (Y_RESOLUTION, "y_resolution"), - (RESOLUTION_UNIT, "resolution_unit"), - (SOFTWARE, "software"), - (DATE_TIME, "date_time"), - (ARTIST, "artist"), - (COPYRIGHT, "copyright")]: + for key, name in [ + (IMAGEDESCRIPTION, "description"), + (X_RESOLUTION, "resolution"), + (Y_RESOLUTION, "resolution"), + (X_RESOLUTION, "x_resolution"), + (Y_RESOLUTION, "y_resolution"), + (RESOLUTION_UNIT, "resolution_unit"), + (SOFTWARE, "software"), + (DATE_TIME, "date_time"), + (ARTIST, "artist"), + (COPYRIGHT, "copyright"), + ]: if name in im.encoderinfo: ifd[key] = im.encoderinfo[name] dpi = im.encoderinfo.get("dpi") if dpi: ifd[RESOLUTION_UNIT] = 2 - ifd[X_RESOLUTION] = dpi[0] - ifd[Y_RESOLUTION] = dpi[1] + ifd[X_RESOLUTION] = int(dpi[0] + 0.5) + ifd[Y_RESOLUTION] = int(dpi[1] + 0.5) if bits != (1,): ifd[BITSPERSAMPLE] = bits @@ -1460,11 +1538,11 @@ def _save(im, fp, filename): ifd[PHOTOMETRIC_INTERPRETATION] = photo - if im.mode == "P": + if im.mode in ["P", "PA"]: lut = im.im.getpalette("RGB", "RGB;L") ifd[COLORMAP] = tuple(i8(v) * 256 for v in lut) # data orientation - stride = len(bits) * ((im.size[0]*bits[0]+7)//8) + stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8) ifd[ROWSPERSTRIP] = im.size[1] ifd[STRIPBYTECOUNTS] = stride * im.size[1] ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer @@ -1472,6 +1550,16 @@ def _save(im, fp, filename): ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1) if libtiff: + if "quality" in im.encoderinfo: + quality = im.encoderinfo["quality"] + if not isinstance(quality, int) or quality < 0 or quality > 100: + raise ValueError("Invalid quality setting") + if compression != "jpeg": + raise ValueError( + "quality setting only supported for 'jpeg' compression" + ) + ifd[JPEGQUALITY] = quality + if DEBUG: print("Saving using libtiff encoder") print("Items: %s" % sorted(ifd.items())) @@ -1483,9 +1571,24 @@ def _save(im, fp, filename): except io.UnsupportedOperation: pass + # optional types for non core tags + types = {} + # SAMPLEFORMAT is determined by the image format and should not be copied + # from legacy_ifd. # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library # based on the data in the strip. - blocklist = [STRIPOFFSETS, STRIPBYTECOUNTS] + # The other tags expect arrays with a certain length (fixed or depending on + # BITSPERSAMPLE, etc), passing arrays with a different length will result in + # segfaults. Block these tags until we add extra validation. + blocklist = [ + COLORMAP, + REFERENCEBLACKWHITE, + SAMPLEFORMAT, + STRIPBYTECOUNTS, + STRIPOFFSETS, + TRANSFERFUNCTION, + ] + atts = {} # bits per sample is a single short in the tiff directory, not a list. 
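# [reviewer note] The quality validation added above means JPEG-in-TIFF output
# can now be tuned from the save call: quality must be an int in 0..100 and is
# forwarded to libtiff through the JPEGQUALITY pseudo-tag (65537) declared near
# the top of this file; passing quality with any other compression raises
# ValueError. A usage sketch, assuming a hypothetical source file "photo.png":
from PIL import Image

im = Image.open("photo.png").convert("RGB")
im.save("out.tif", compression="jpeg", quality=90)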
atts[BITSPERSAMPLE] = bits[0] @@ -1493,21 +1596,33 @@ def _save(im, fp, filename): # the original file, e.g x,y resolution so that we can # save(load('')) == original file. legacy_ifd = {} - if hasattr(im, 'tag'): + if hasattr(im, "tag"): legacy_ifd = im.tag.to_v2() - for tag, value in itertools.chain(ifd.items(), - getattr(im, 'tag_v2', {}).items(), - legacy_ifd.items()): + for tag, value in itertools.chain( + ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items() + ): # Libtiff can only process certain core items without adding - # them to the custom dictionary. It will segfault if it attempts - # to add a custom tag without the dictionary entry - # - # UNDONE -- add code for the custom dictionary + # them to the custom dictionary. + # Custom items are supported for int, float, unicode, string and byte + # values. Other types and tuples require a tagtype. if tag not in TiffTags.LIBTIFF_CORE: - continue + if TiffTags.lookup(tag).type == TiffTags.UNDEFINED: + continue + if distutils.version.StrictVersion( + _libtiff_version() + ) < distutils.version.StrictVersion("4.0"): + continue + + if tag in ifd.tagtype: + types[tag] = ifd.tagtype[tag] + elif not ( + isinstance(value, (int, float, str, bytes)) + or (not py3 and isinstance(value, unicode)) # noqa: F821 + ): + continue if tag not in atts and tag not in blocklist: - if isinstance(value, str if py3 else unicode): - atts[tag] = value.encode('ascii', 'replace') + b"\0" + if isinstance(value, str if py3 else unicode): # noqa: F821 + atts[tag] = value.encode("ascii", "replace") + b"\0" elif isinstance(value, IFDRational): atts[tag] = float(value) else: @@ -1520,15 +1635,20 @@ def _save(im, fp, filename): # we're storing image byte order. So, if the rawmode # contains I;16, we need to convert from native to image # byte order. - if im.mode in ('I;16B', 'I;16'): - rawmode = 'I;16N' + if im.mode in ("I;16B", "I;16"): + rawmode = "I;16N" - a = (rawmode, compression, _fp, filename, atts) - e = Image._getencoder(im.mode, 'libtiff', a, im.encoderconfig) - e.setimage(im.im, (0, 0)+im.size) + # Pass tags as sorted list so that the tags are set in a fixed order. + # This is required by libtiff for some tags. For example, the JPEGQUALITY + # pseudo tag requires that the COMPRESS tag was already set. 
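# [reviewer note] Sorting by tag id is what enforces that ordering: COMPRESSION
# is tag 259 while JPEGQUALITY is the pseudo-tag 65537, so after sorting the
# compression scheme is always configured before the quality. Illustration
# with hypothetical values (quality=90, jpeg=7):
atts_demo = {65537: 90, 259: 7}
print(sorted(atts_demo.items()))  # [(259, 7), (65537, 90)] -- compression first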
+ tags = list(atts.items()) + tags.sort() + a = (rawmode, compression, _fp, filename, tags, types) + e = Image._getencoder(im.mode, "libtiff", a, im.encoderconfig) + e.setimage(im.im, (0, 0) + im.size) while True: # undone, change to self.decodermaxblock: - l, s, d = e.encode(16*1024) + l, s, d = e.encode(16 * 1024) if not _fp: fp.write(d) if s: @@ -1539,9 +1659,9 @@ def _save(im, fp, filename): else: offset = ifd.save(fp) - ImageFile._save(im, fp, [ - ("raw", (0, 0)+im.size, offset, (rawmode, stride, 1)) - ]) + ImageFile._save( + im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))] + ) # -- helper for multi-page save -- if "_debug_multipage" in im.encoderinfo: @@ -1575,7 +1695,7 @@ class AppendingTiffWriter: Tags = {273, 288, 324, 519, 520, 521} def __init__(self, fn, new=False): - if hasattr(fn, 'read'): + if hasattr(fn, "read"): self.f = fn self.close_fp = False else: @@ -1626,8 +1746,7 @@ class AppendingTiffWriter: return if IIMM != self.IIMM: - raise RuntimeError("IIMM of new page doesn't match IIMM of " - "first page") + raise RuntimeError("IIMM of new page doesn't match IIMM of first page") IFDoffset = self.readLong() IFDoffset += self.offsetOfNewPage @@ -1652,7 +1771,7 @@ class AppendingTiffWriter: def tell(self): return self.f.tell() - self.offsetOfNewPage - def seek(self, offset, whence): + def seek(self, offset, whence=io.SEEK_SET): if whence == os.SEEK_SET: offset += self.offsetOfNewPage @@ -1701,34 +1820,29 @@ class AppendingTiffWriter: self.f.seek(-2, os.SEEK_CUR) bytesWritten = self.f.write(struct.pack(self.longFmt, value)) if bytesWritten is not None and bytesWritten != 4: - raise RuntimeError("wrote only %u bytes but wanted 4" % - bytesWritten) + raise RuntimeError("wrote only %u bytes but wanted 4" % bytesWritten) def rewriteLastShort(self, value): self.f.seek(-2, os.SEEK_CUR) bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) if bytesWritten is not None and bytesWritten != 2: - raise RuntimeError("wrote only %u bytes but wanted 2" % - bytesWritten) + raise RuntimeError("wrote only %u bytes but wanted 2" % bytesWritten) def rewriteLastLong(self, value): self.f.seek(-4, os.SEEK_CUR) bytesWritten = self.f.write(struct.pack(self.longFmt, value)) if bytesWritten is not None and bytesWritten != 4: - raise RuntimeError("wrote only %u bytes but wanted 4" % - bytesWritten) + raise RuntimeError("wrote only %u bytes but wanted 4" % bytesWritten) def writeShort(self, value): bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) if bytesWritten is not None and bytesWritten != 2: - raise RuntimeError("wrote only %u bytes but wanted 2" % - bytesWritten) + raise RuntimeError("wrote only %u bytes but wanted 2" % bytesWritten) def writeLong(self, value): bytesWritten = self.f.write(struct.pack(self.longFmt, value)) if bytesWritten is not None and bytesWritten != 4: - raise RuntimeError("wrote only %u bytes but wanted 4" % - bytesWritten) + raise RuntimeError("wrote only %u bytes but wanted 4" % bytesWritten) def close(self): self.finalize() @@ -1738,12 +1852,11 @@ class AppendingTiffWriter: numTags = self.readShort() for i in range(numTags): - tag, fieldType, count = struct.unpack(self.tagFormat, - self.f.read(8)) + tag, fieldType, count = struct.unpack(self.tagFormat, self.f.read(8)) fieldSize = self.fieldSizes[fieldType] totalSize = fieldSize * count - isLocal = (totalSize <= 4) + isLocal = totalSize <= 4 if not isLocal: offset = self.readLong() offset += self.offsetOfNewPage @@ -1753,13 +1866,15 @@ class AppendingTiffWriter: curPos = self.f.tell() if 
isLocal: - self.fixOffsets(count, isShort=(fieldSize == 2), - isLong=(fieldSize == 4)) + self.fixOffsets( + count, isShort=(fieldSize == 2), isLong=(fieldSize == 4) + ) self.f.seek(curPos + 4) else: self.f.seek(offset) - self.fixOffsets(count, isShort=(fieldSize == 2), - isLong=(fieldSize == 4)) + self.fixOffsets( + count, isShort=(fieldSize == 2), isLong=(fieldSize == 4) + ) self.f.seek(curPos) offset = curPos = None @@ -1784,7 +1899,7 @@ class AppendingTiffWriter: # local (not referenced with another offset) self.rewriteLastShortToLong(offset) self.f.seek(-10, os.SEEK_CUR) - self.writeShort(4) # rewrite the type to LONG + self.writeShort(TiffTags.LONG) # rewrite the type to LONG self.f.seek(8, os.SEEK_CUR) elif isShort: self.rewriteLastShort(offset) @@ -1802,7 +1917,7 @@ def _save_all(im, fp, filename): cur_idx = im.tell() try: with AppendingTiffWriter(fp) as tf: - for ims in [im]+append_images: + for ims in [im] + append_images: ims.encoderinfo = encoderinfo ims.encoderconfig = encoderconfig if not hasattr(ims, "n_frames"): diff --git a/server/www/packages/packages-linux/x64/PIL/TiffTags.py b/server/www/packages/packages-linux/x64/PIL/TiffTags.py index c1e14af..82719db 100644 --- a/server/www/packages/packages-linux/x64/PIL/TiffTags.py +++ b/server/www/packages/packages-linux/x64/PIL/TiffTags.py @@ -23,13 +23,14 @@ from collections import namedtuple class TagInfo(namedtuple("_TagInfo", "value name type length enum")): __slots__ = [] - def __new__(cls, value=None, name="unknown", - type=None, length=None, enum=None): - return super(TagInfo, cls).__new__( - cls, value, name, type, length, enum or {}) + def __new__(cls, value=None, name="unknown", type=None, length=None, enum=None): + return super(TagInfo, cls).__new__(cls, value, name, type, length, enum or {}) def cvt_enum(self, value): - return self.enum.get(value, value) + # Using get will call hash(value), which can be expensive + # for some types (e.g. Fraction). Since self.enum is rarely + # used, it's usually better to test it first. 
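# A standalone sketch of the guard in the return statement just below:
# dict.get() always hashes its argument, and hashing e.g. a Fraction is
# comparatively expensive, so when the enum mapping is empty (the common
# case) the value is returned without computing a hash at all.
from fractions import Fraction

enum = {}                    # most TagInfo entries carry no enum
value = Fraction(1, 3)
result = enum.get(value, value) if enum else value  # skips hash(value)
assert result == value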
+ return self.enum.get(value, value) if self.enum else value def lookup(tag): @@ -41,7 +42,7 @@ def lookup(tag): """ - return TAGS_V2.get(tag, TagInfo(tag, TAGS.get(tag, 'unknown'))) + return TAGS_V2.get(tag, TagInfo(tag, TAGS.get(tag, "unknown"))) ## @@ -61,32 +62,56 @@ ASCII = 2 SHORT = 3 LONG = 4 RATIONAL = 5 +SIGNED_BYTE = 6 UNDEFINED = 7 +SIGNED_SHORT = 8 +SIGNED_LONG = 9 SIGNED_RATIONAL = 10 +FLOAT = 11 DOUBLE = 12 TAGS_V2 = { - 254: ("NewSubfileType", LONG, 1), 255: ("SubfileType", SHORT, 1), 256: ("ImageWidth", LONG, 1), 257: ("ImageLength", LONG, 1), 258: ("BitsPerSample", SHORT, 0), - 259: ("Compression", SHORT, 1, - {"Uncompressed": 1, "CCITT 1d": 2, "Group 3 Fax": 3, - "Group 4 Fax": 4, "LZW": 5, "JPEG": 6, "PackBits": 32773}), - - 262: ("PhotometricInterpretation", SHORT, 1, - {"WhiteIsZero": 0, "BlackIsZero": 1, "RGB": 2, "RGB Palette": 3, - "Transparency Mask": 4, "CMYK": 5, "YCbCr": 6, "CieLAB": 8, - "CFA": 32803, # TIFF/EP, Adobe DNG - "LinearRaw": 32892}), # Adobe DNG + 259: ( + "Compression", + SHORT, + 1, + { + "Uncompressed": 1, + "CCITT 1d": 2, + "Group 3 Fax": 3, + "Group 4 Fax": 4, + "LZW": 5, + "JPEG": 6, + "PackBits": 32773, + }, + ), + 262: ( + "PhotometricInterpretation", + SHORT, + 1, + { + "WhiteIsZero": 0, + "BlackIsZero": 1, + "RGB": 2, + "RGB Palette": 3, + "Transparency Mask": 4, + "CMYK": 5, + "YCbCr": 6, + "CieLAB": 8, + "CFA": 32803, # TIFF/EP, Adobe DNG + "LinearRaw": 32892, # Adobe DNG + }, + ), 263: ("Threshholding", SHORT, 1), 264: ("CellWidth", SHORT, 1), 265: ("CellLength", SHORT, 1), 266: ("FillOrder", SHORT, 1), 269: ("DocumentName", ASCII, 1), - 270: ("ImageDescription", ASCII, 1), 271: ("Make", ASCII, 1), 272: ("Model", ASCII, 1), @@ -95,7 +120,6 @@ TAGS_V2 = { 277: ("SamplesPerPixel", SHORT, 1), 278: ("RowsPerStrip", LONG, 1), 279: ("StripByteCounts", LONG, 0), - 280: ("MinSampleValue", LONG, 0), 281: ("MaxSampleValue", SHORT, 0), 282: ("XResolution", RATIONAL, 1), @@ -106,31 +130,26 @@ TAGS_V2 = { 287: ("YPosition", RATIONAL, 1), 288: ("FreeOffsets", LONG, 1), 289: ("FreeByteCounts", LONG, 1), - 290: ("GrayResponseUnit", SHORT, 1), 291: ("GrayResponseCurve", SHORT, 0), 292: ("T4Options", LONG, 1), 293: ("T6Options", LONG, 1), 296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}), 297: ("PageNumber", SHORT, 2), - 301: ("TransferFunction", SHORT, 0), 305: ("Software", ASCII, 1), 306: ("DateTime", ASCII, 1), - 315: ("Artist", ASCII, 1), 316: ("HostComputer", ASCII, 1), 317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}), 318: ("WhitePoint", RATIONAL, 2), 319: ("PrimaryChromaticities", RATIONAL, 6), - 320: ("ColorMap", SHORT, 0), 321: ("HalftoneHints", SHORT, 2), 322: ("TileWidth", LONG, 1), 323: ("TileLength", LONG, 1), 324: ("TileOffsets", LONG, 0), 325: ("TileByteCounts", LONG, 0), - 332: ("InkSet", SHORT, 1), 333: ("InkNames", ASCII, 1), 334: ("NumberOfInks", SHORT, 1), @@ -138,13 +157,10 @@ TAGS_V2 = { 337: ("TargetPrinter", ASCII, 1), 338: ("ExtraSamples", SHORT, 0), 339: ("SampleFormat", SHORT, 0), - 340: ("SMinSampleValue", DOUBLE, 0), 341: ("SMaxSampleValue", DOUBLE, 0), 342: ("TransferRange", SHORT, 6), - 347: ("JPEGTables", UNDEFINED, 1), - # obsolete JPEG tags 512: ("JPEGProc", SHORT, 1), 513: ("JPEGInterchangeFormat", LONG, 1), @@ -155,22 +171,18 @@ TAGS_V2 = { 519: ("JPEGQTables", LONG, 0), 520: ("JPEGDCTables", LONG, 0), 521: ("JPEGACTables", LONG, 0), - 529: ("YCbCrCoefficients", RATIONAL, 3), 530: ("YCbCrSubSampling", SHORT, 2), 531: ("YCbCrPositioning", SHORT, 1), 532: ("ReferenceBlackWhite", 
RATIONAL, 6), - - 700: ('XMP', BYTE, 1), - + 700: ("XMP", BYTE, 0), 33432: ("Copyright", ASCII, 1), - 34377: ('PhotoshopInfo', BYTE, 1), - + 33723: ("IptcNaaInfo", UNDEFINED, 0), + 34377: ("PhotoshopInfo", BYTE, 0), # FIXME add more tags here - 34665: ("ExifIFD", SHORT, 1), - 34675: ('ICCProfile', UNDEFINED, 1), - 34853: ('GPSInfoIFD', BYTE, 1), - + 34665: ("ExifIFD", LONG, 1), + 34675: ("ICCProfile", UNDEFINED, 1), + 34853: ("GPSInfoIFD", BYTE, 1), # MPInfo 45056: ("MPFVersion", UNDEFINED, 1), 45057: ("NumberOfImages", LONG, 1), @@ -191,159 +203,157 @@ TAGS_V2 = { 45579: ("YawAngle", SIGNED_RATIONAL, 1), 45580: ("PitchAngle", SIGNED_RATIONAL, 1), 45581: ("RollAngle", SIGNED_RATIONAL, 1), - 50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}), 50780: ("BestQualityScale", RATIONAL, 1), 50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one - 50839: ("ImageJMetaData", UNDEFINED, 1) # see Issue #2006 + 50839: ("ImageJMetaData", UNDEFINED, 1), # see Issue #2006 } # Legacy Tags structure # these tags aren't included above, but were in the previous versions -TAGS = {347: 'JPEGTables', - 700: 'XMP', - - # Additional Exif Info - 32932: 'Wang Annotation', - 33434: 'ExposureTime', - 33437: 'FNumber', - 33445: 'MD FileTag', - 33446: 'MD ScalePixel', - 33447: 'MD ColorTable', - 33448: 'MD LabName', - 33449: 'MD SampleInfo', - 33450: 'MD PrepDate', - 33451: 'MD PrepTime', - 33452: 'MD FileUnits', - 33550: 'ModelPixelScaleTag', - 33723: 'IptcNaaInfo', - 33918: 'INGR Packet Data Tag', - 33919: 'INGR Flag Registers', - 33920: 'IrasB Transformation Matrix', - 33922: 'ModelTiepointTag', - 34264: 'ModelTransformationTag', - 34377: 'PhotoshopInfo', - 34735: 'GeoKeyDirectoryTag', - 34736: 'GeoDoubleParamsTag', - 34737: 'GeoAsciiParamsTag', - 34850: 'ExposureProgram', - 34852: 'SpectralSensitivity', - 34855: 'ISOSpeedRatings', - 34856: 'OECF', - 34864: 'SensitivityType', - 34865: 'StandardOutputSensitivity', - 34866: 'RecommendedExposureIndex', - 34867: 'ISOSpeed', - 34868: 'ISOSpeedLatitudeyyy', - 34869: 'ISOSpeedLatitudezzz', - 34908: 'HylaFAX FaxRecvParams', - 34909: 'HylaFAX FaxSubAddress', - 34910: 'HylaFAX FaxRecvTime', - 36864: 'ExifVersion', - 36867: 'DateTimeOriginal', - 36868: 'DateTImeDigitized', - 37121: 'ComponentsConfiguration', - 37122: 'CompressedBitsPerPixel', - 37724: 'ImageSourceData', - 37377: 'ShutterSpeedValue', - 37378: 'ApertureValue', - 37379: 'BrightnessValue', - 37380: 'ExposureBiasValue', - 37381: 'MaxApertureValue', - 37382: 'SubjectDistance', - 37383: 'MeteringMode', - 37384: 'LightSource', - 37385: 'Flash', - 37386: 'FocalLength', - 37396: 'SubjectArea', - 37500: 'MakerNote', - 37510: 'UserComment', - 37520: 'SubSec', - 37521: 'SubSecTimeOriginal', - 37522: 'SubsecTimeDigitized', - 40960: 'FlashPixVersion', - 40961: 'ColorSpace', - 40962: 'PixelXDimension', - 40963: 'PixelYDimension', - 40964: 'RelatedSoundFile', - 40965: 'InteroperabilityIFD', - 41483: 'FlashEnergy', - 41484: 'SpatialFrequencyResponse', - 41486: 'FocalPlaneXResolution', - 41487: 'FocalPlaneYResolution', - 41488: 'FocalPlaneResolutionUnit', - 41492: 'SubjectLocation', - 41493: 'ExposureIndex', - 41495: 'SensingMethod', - 41728: 'FileSource', - 41729: 'SceneType', - 41730: 'CFAPattern', - 41985: 'CustomRendered', - 41986: 'ExposureMode', - 41987: 'WhiteBalance', - 41988: 'DigitalZoomRatio', - 41989: 'FocalLengthIn35mmFilm', - 41990: 'SceneCaptureType', - 41991: 'GainControl', - 41992: 'Contrast', - 41993: 'Saturation', - 41994: 'Sharpness', - 41995: 'DeviceSettingDescription', - 41996: 
'SubjectDistanceRange', - 42016: 'ImageUniqueID', - 42032: 'CameraOwnerName', - 42033: 'BodySerialNumber', - 42034: 'LensSpecification', - 42035: 'LensMake', - 42036: 'LensModel', - 42037: 'LensSerialNumber', - 42112: 'GDAL_METADATA', - 42113: 'GDAL_NODATA', - 42240: 'Gamma', - 50215: 'Oce Scanjob Description', - 50216: 'Oce Application Selector', - 50217: 'Oce Identification Number', - 50218: 'Oce ImageLogic Characteristics', - - # Adobe DNG - 50706: 'DNGVersion', - 50707: 'DNGBackwardVersion', - 50708: 'UniqueCameraModel', - 50709: 'LocalizedCameraModel', - 50710: 'CFAPlaneColor', - 50711: 'CFALayout', - 50712: 'LinearizationTable', - 50713: 'BlackLevelRepeatDim', - 50714: 'BlackLevel', - 50715: 'BlackLevelDeltaH', - 50716: 'BlackLevelDeltaV', - 50717: 'WhiteLevel', - 50718: 'DefaultScale', - 50719: 'DefaultCropOrigin', - 50720: 'DefaultCropSize', - 50721: 'ColorMatrix1', - 50722: 'ColorMatrix2', - 50723: 'CameraCalibration1', - 50724: 'CameraCalibration2', - 50725: 'ReductionMatrix1', - 50726: 'ReductionMatrix2', - 50727: 'AnalogBalance', - 50728: 'AsShotNeutral', - 50729: 'AsShotWhiteXY', - 50730: 'BaselineExposure', - 50731: 'BaselineNoise', - 50732: 'BaselineSharpness', - 50733: 'BayerGreenSplit', - 50734: 'LinearResponseLimit', - 50735: 'CameraSerialNumber', - 50736: 'LensInfo', - 50737: 'ChromaBlurRadius', - 50738: 'AntiAliasStrength', - 50740: 'DNGPrivateData', - 50778: 'CalibrationIlluminant1', - 50779: 'CalibrationIlluminant2', - 50784: 'Alias Layer Metadata' - } +TAGS = { + 347: "JPEGTables", + 700: "XMP", + # Additional Exif Info + 32932: "Wang Annotation", + 33434: "ExposureTime", + 33437: "FNumber", + 33445: "MD FileTag", + 33446: "MD ScalePixel", + 33447: "MD ColorTable", + 33448: "MD LabName", + 33449: "MD SampleInfo", + 33450: "MD PrepDate", + 33451: "MD PrepTime", + 33452: "MD FileUnits", + 33550: "ModelPixelScaleTag", + 33723: "IptcNaaInfo", + 33918: "INGR Packet Data Tag", + 33919: "INGR Flag Registers", + 33920: "IrasB Transformation Matrix", + 33922: "ModelTiepointTag", + 34264: "ModelTransformationTag", + 34377: "PhotoshopInfo", + 34735: "GeoKeyDirectoryTag", + 34736: "GeoDoubleParamsTag", + 34737: "GeoAsciiParamsTag", + 34850: "ExposureProgram", + 34852: "SpectralSensitivity", + 34855: "ISOSpeedRatings", + 34856: "OECF", + 34864: "SensitivityType", + 34865: "StandardOutputSensitivity", + 34866: "RecommendedExposureIndex", + 34867: "ISOSpeed", + 34868: "ISOSpeedLatitudeyyy", + 34869: "ISOSpeedLatitudezzz", + 34908: "HylaFAX FaxRecvParams", + 34909: "HylaFAX FaxSubAddress", + 34910: "HylaFAX FaxRecvTime", + 36864: "ExifVersion", + 36867: "DateTimeOriginal", + 36868: "DateTImeDigitized", + 37121: "ComponentsConfiguration", + 37122: "CompressedBitsPerPixel", + 37724: "ImageSourceData", + 37377: "ShutterSpeedValue", + 37378: "ApertureValue", + 37379: "BrightnessValue", + 37380: "ExposureBiasValue", + 37381: "MaxApertureValue", + 37382: "SubjectDistance", + 37383: "MeteringMode", + 37384: "LightSource", + 37385: "Flash", + 37386: "FocalLength", + 37396: "SubjectArea", + 37500: "MakerNote", + 37510: "UserComment", + 37520: "SubSec", + 37521: "SubSecTimeOriginal", + 37522: "SubsecTimeDigitized", + 40960: "FlashPixVersion", + 40961: "ColorSpace", + 40962: "PixelXDimension", + 40963: "PixelYDimension", + 40964: "RelatedSoundFile", + 40965: "InteroperabilityIFD", + 41483: "FlashEnergy", + 41484: "SpatialFrequencyResponse", + 41486: "FocalPlaneXResolution", + 41487: "FocalPlaneYResolution", + 41488: "FocalPlaneResolutionUnit", + 41492: "SubjectLocation", + 41493: 
"ExposureIndex", + 41495: "SensingMethod", + 41728: "FileSource", + 41729: "SceneType", + 41730: "CFAPattern", + 41985: "CustomRendered", + 41986: "ExposureMode", + 41987: "WhiteBalance", + 41988: "DigitalZoomRatio", + 41989: "FocalLengthIn35mmFilm", + 41990: "SceneCaptureType", + 41991: "GainControl", + 41992: "Contrast", + 41993: "Saturation", + 41994: "Sharpness", + 41995: "DeviceSettingDescription", + 41996: "SubjectDistanceRange", + 42016: "ImageUniqueID", + 42032: "CameraOwnerName", + 42033: "BodySerialNumber", + 42034: "LensSpecification", + 42035: "LensMake", + 42036: "LensModel", + 42037: "LensSerialNumber", + 42112: "GDAL_METADATA", + 42113: "GDAL_NODATA", + 42240: "Gamma", + 50215: "Oce Scanjob Description", + 50216: "Oce Application Selector", + 50217: "Oce Identification Number", + 50218: "Oce ImageLogic Characteristics", + # Adobe DNG + 50706: "DNGVersion", + 50707: "DNGBackwardVersion", + 50708: "UniqueCameraModel", + 50709: "LocalizedCameraModel", + 50710: "CFAPlaneColor", + 50711: "CFALayout", + 50712: "LinearizationTable", + 50713: "BlackLevelRepeatDim", + 50714: "BlackLevel", + 50715: "BlackLevelDeltaH", + 50716: "BlackLevelDeltaV", + 50717: "WhiteLevel", + 50718: "DefaultScale", + 50719: "DefaultCropOrigin", + 50720: "DefaultCropSize", + 50721: "ColorMatrix1", + 50722: "ColorMatrix2", + 50723: "CameraCalibration1", + 50724: "CameraCalibration2", + 50725: "ReductionMatrix1", + 50726: "ReductionMatrix2", + 50727: "AnalogBalance", + 50728: "AsShotNeutral", + 50729: "AsShotWhiteXY", + 50730: "BaselineExposure", + 50731: "BaselineNoise", + 50732: "BaselineSharpness", + 50733: "BayerGreenSplit", + 50734: "LinearResponseLimit", + 50735: "CameraSerialNumber", + 50736: "LensInfo", + 50737: "ChromaBlurRadius", + 50738: "AntiAliasStrength", + 50740: "DNGPrivateData", + 50778: "CalibrationIlluminant1", + 50779: "CalibrationIlluminant2", + 50784: "Alias Layer Metadata", +} def _populate(): @@ -423,15 +433,55 @@ TYPES = {} # 389: case TIFFTAG_REFERENCEBLACKWHITE: # 393: case TIFFTAG_INKNAMES: +# Following pseudo-tags are also handled by default in libtiff: +# TIFFTAG_JPEGQUALITY 65537 + # some of these are not in our TAGS_V2 dict and were included from tiff.h -LIBTIFF_CORE = {255, 256, 257, 258, 259, 262, 263, 266, 274, 277, - 278, 280, 281, 340, 341, 282, 283, 284, 286, 287, - 296, 297, 321, 320, 338, 32995, 322, 323, 32998, - 32996, 339, 32997, 330, 531, 530, 301, 532, 333, - # as above - 269 # this has been in our tests forever, and works - } +# This list also exists in encode.c +LIBTIFF_CORE = { + 255, + 256, + 257, + 258, + 259, + 262, + 263, + 266, + 274, + 277, + 278, + 280, + 281, + 340, + 341, + 282, + 283, + 284, + 286, + 287, + 296, + 297, + 321, + 320, + 338, + 32995, + 322, + 323, + 32998, + 32996, + 339, + 32997, + 330, + 531, + 530, + 301, + 532, + 333, + # as above + 269, # this has been in our tests forever, and works + 65537, +} LIBTIFF_CORE.remove(320) # Array of short, crashes LIBTIFF_CORE.remove(301) # Array of short, crashes diff --git a/server/www/packages/packages-linux/x64/PIL/WalImageFile.py b/server/www/packages/packages-linux/x64/PIL/WalImageFile.py index 6602cc8..e2e1cd4 100644 --- a/server/www/packages/packages-linux/x64/PIL/WalImageFile.py +++ b/server/www/packages/packages-linux/x64/PIL/WalImageFile.py @@ -28,6 +28,7 @@ try: import builtins except ImportError: import __builtin__ + builtins = __builtin__ @@ -46,7 +47,7 @@ def open(filename): def imopen(fp): # read header fields - header = fp.read(32+24+32+12) + header = fp.read(32 + 24 + 32 + 12) size = 
i32(header, 32), i32(header, 36) offset = i32(header, 40) @@ -62,7 +63,7 @@ def open(filename): # strings are null-terminated im.info["name"] = header[:32].split(b"\0", 1)[0] - next_name = header[56:56+32].split(b"\0", 1)[0] + next_name = header[56 : 56 + 32].split(b"\0", 1)[0] if next_name: im.info["next_name"] = next_name diff --git a/server/www/packages/packages-linux/x64/PIL/WebPImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/WebPImagePlugin.py index e6485c2..18eda6d 100644 --- a/server/www/packages/packages-linux/x64/PIL/WebPImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/WebPImagePlugin.py @@ -1,28 +1,24 @@ -from . import Image, ImageFile -try: - from . import _webp - SUPPORTED = True -except ImportError as e: - SUPPORTED = False from io import BytesIO +from . import Image, ImageFile -_VALID_WEBP_MODES = { - "RGBX": True, - "RGBA": True, - "RGB": True, - } +try: + from . import _webp -_VALID_WEBP_LEGACY_MODES = { - "RGB": True, - "RGBA": True, - } + SUPPORTED = True +except ImportError: + SUPPORTED = False + + +_VALID_WEBP_MODES = {"RGBX": True, "RGBA": True, "RGB": True} + +_VALID_WEBP_LEGACY_MODES = {"RGB": True, "RGBA": True} _VP8_MODES_BY_IDENTIFIER = { b"VP8 ": "RGB", b"VP8X": "RGBA", b"VP8L": "RGBA", # lossless - } +} def _accept(prefix): @@ -32,7 +28,9 @@ def _accept(prefix): if is_riff_file_format and is_webp_file and is_valid_vp8_mode: if not SUPPORTED: - return "image file could not be identified because WEBP support not installed" + return ( + "image file could not be identified because WEBP support not installed" + ) return True @@ -44,8 +42,9 @@ class WebPImageFile(ImageFile.ImageFile): def _open(self): if not _webp.HAVE_WEBPANIM: # Legacy mode - data, width, height, self.mode, icc_profile, exif = \ - _webp.WebPDecode(self.fp.read()) + data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode( + self.fp.read() + ) if icc_profile: self.info["icc_profile"] = icc_profile if exif: @@ -61,18 +60,18 @@ class WebPImageFile(ImageFile.ImageFile): self._decoder = _webp.WebPAnimDecoder(self.fp.read()) # Get info from decoder - width, height, loop_count, bgcolor, frame_count, mode = \ - self._decoder.get_info() + width, height, loop_count, bgcolor, frame_count, mode = self._decoder.get_info() self._size = width, height self.info["loop"] = loop_count - bg_a, bg_r, bg_g, bg_b = \ - (bgcolor >> 24) & 0xFF, \ - (bgcolor >> 16) & 0xFF, \ - (bgcolor >> 8) & 0xFF, \ - bgcolor & 0xFF + bg_a, bg_r, bg_g, bg_b = ( + (bgcolor >> 24) & 0xFF, + (bgcolor >> 16) & 0xFF, + (bgcolor >> 8) & 0xFF, + bgcolor & 0xFF, + ) self.info["background"] = (bg_r, bg_g, bg_b, bg_a) self._n_frames = frame_count - self.mode = 'RGB' if mode == 'RGBX' else mode + self.mode = "RGB" if mode == "RGBX" else mode self.rawmode = mode self.tile = [] @@ -92,8 +91,9 @@ class WebPImageFile(ImageFile.ImageFile): self.seek(0) def _getexif(self): - from .JpegImagePlugin import _getexif - return _getexif(self) + if "exif" not in self.info: + return None + return dict(self.getexif()) @property def n_frames(self): @@ -130,7 +130,7 @@ class WebPImageFile(ImageFile.ImageFile): # Check if an error occurred if ret is None: - self._reset() # Reset just to be safe + self._reset() # Reset just to be safe self.seek(0) raise EOFError("failed to decode next frame in WebP file") @@ -145,11 +145,11 @@ class WebPImageFile(ImageFile.ImageFile): def _seek(self, frame): if self.__physical_frame == frame: - return # Nothing to do + return # Nothing to do if frame < self.__physical_frame: - self._reset() # 
Rewind to beginning + self._reset() # Rewind to beginning while self.__physical_frame < frame: - self._get_next() # Advance to the requested frame + self._get_next() # Advance to the requested frame def load(self): if _webp.HAVE_WEBPANIM: @@ -163,7 +163,7 @@ class WebPImageFile(ImageFile.ImageFile): self.__loaded = self.__logical_frame # Set tile - if self.fp: + if self.fp and self._exclusive_fp: self.fp.close() self.fp = BytesIO(data) self.tile = [("raw", (0, 0) + self.size, 0, self.rawmode)] @@ -184,13 +184,25 @@ def _save_all(im, fp, filename): # If total frame count is 1, then save using the legacy API, which # will preserve non-alpha modes total = 0 - for ims in [im]+append_images: - total += 1 if not hasattr(ims, "n_frames") else ims.n_frames + for ims in [im] + append_images: + total += getattr(ims, "n_frames", 1) if total == 1: _save(im, fp, filename) return - background = encoderinfo.get("background", (0, 0, 0, 0)) + background = (0, 0, 0, 0) + if "background" in encoderinfo: + background = encoderinfo["background"] + elif "background" in im.info: + background = im.info["background"] + if isinstance(background, int): + # GifImagePlugin stores a global color table index in + # info["background"]. So it must be converted to an RGBA value + palette = im.getpalette() + if palette: + r, g, b = palette[background * 3 : (background + 1) * 3] + background = (r, g, b, 0) + duration = im.encoderinfo.get("duration", 0) loop = im.encoderinfo.get("loop", 0) minimize_size = im.encoderinfo.get("minimize_size", False) @@ -203,6 +215,8 @@ def _save_all(im, fp, filename): method = im.encoderinfo.get("method", 0) icc_profile = im.encoderinfo.get("icc_profile", "") exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() xmp = im.encoderinfo.get("xmp", "") if allow_mixed: lossless = False @@ -214,10 +228,15 @@ def _save_all(im, fp, filename): kmax = 17 if lossless else 5 # Validate background color - if (not isinstance(background, (list, tuple)) or len(background) != 4 or - not all(v >= 0 and v < 256 for v in background)): - raise IOError("Background color is not an RGBA tuple clamped " - "to (0-255): %s" % str(background)) + if ( + not isinstance(background, (list, tuple)) + or len(background) != 4 + or not all(v >= 0 and v < 256 for v in background) + ): + raise IOError( + "Background color is not an RGBA tuple clamped to (0-255): %s" + % str(background) + ) # Convert to packed uint bg_r, bg_g, bg_b, bg_a = background @@ -225,13 +244,15 @@ def _save_all(im, fp, filename): # Setup the WebP animation encoder enc = _webp.WebPAnimEncoder( - im.size[0], im.size[1], + im.size[0], + im.size[1], background, loop, minimize_size, - kmin, kmax, + kmin, + kmax, allow_mixed, - verbose + verbose, ) # Add each frame @@ -239,12 +260,9 @@ def _save_all(im, fp, filename): timestamp = 0 cur_idx = im.tell() try: - for ims in [im]+append_images: + for ims in [im] + append_images: # Get # of frames in this image - if not hasattr(ims, "n_frames"): - nfr = 1 - else: - nfr = ims.n_frames + nfr = getattr(ims, "n_frames", 1) for idx in range(nfr): ims.seek(idx) @@ -254,24 +272,28 @@ def _save_all(im, fp, filename): frame = ims rawmode = ims.mode if ims.mode not in _VALID_WEBP_MODES: - alpha = 'A' in ims.mode or 'a' in ims.mode \ - or (ims.mode == 'P' and 'A' in ims.im.getpalettemode()) - rawmode = 'RGBA' if alpha else 'RGB' + alpha = ( + "A" in ims.mode + or "a" in ims.mode + or (ims.mode == "P" and "A" in ims.im.getpalettemode()) + ) + rawmode = "RGBA" if alpha else "RGB" frame = 
ims.convert(rawmode) - if rawmode == 'RGB': + if rawmode == "RGB": # For faster conversion, use RGBX - rawmode = 'RGBX' + rawmode = "RGBX" # Append the frame to the animation encoder enc.add( - frame.tobytes('raw', rawmode), + frame.tobytes("raw", rawmode), timestamp, - frame.size[0], frame.size[1], + frame.size[0], + frame.size[1], rawmode, lossless, quality, - method + method, ) # Update timestamp and frame index @@ -285,11 +307,7 @@ def _save_all(im, fp, filename): im.seek(cur_idx) # Force encoder to flush frames - enc.add( - None, - timestamp, - 0, 0, "", lossless, quality, 0 - ) + enc.add(None, timestamp, 0, 0, "", lossless, quality, 0) # Get the final output from the encoder data = enc.assemble(icc_profile, exif, xmp) @@ -304,12 +322,17 @@ def _save(im, fp, filename): quality = im.encoderinfo.get("quality", 80) icc_profile = im.encoderinfo.get("icc_profile", "") exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() xmp = im.encoderinfo.get("xmp", "") if im.mode not in _VALID_WEBP_LEGACY_MODES: - alpha = 'A' in im.mode or 'a' in im.mode \ - or (im.mode == 'P' and 'A' in im.im.getpalettemode()) - im = im.convert('RGBA' if alpha else 'RGB') + alpha = ( + "A" in im.mode + or "a" in im.mode + or (im.mode == "P" and "A" in im.im.getpalettemode()) + ) + im = im.convert("RGBA" if alpha else "RGB") data = _webp.WebPEncode( im.tobytes(), @@ -320,7 +343,7 @@ def _save(im, fp, filename): im.mode, icc_profile, exif, - xmp + xmp, ) if data is None: raise IOError("cannot write file as WebP (encoder returned None)") diff --git a/server/www/packages/packages-linux/x64/PIL/WmfImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/WmfImagePlugin.py index 81699bd..416af6f 100644 --- a/server/www/packages/packages-linux/x64/PIL/WmfImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/WmfImagePlugin.py @@ -22,11 +22,11 @@ from __future__ import print_function from . import Image, ImageFile -from ._binary import i16le as word, si16le as short, \ - i32le as dword, si32le as _long +from ._binary import i16le as word, i32le as dword, si16le as short, si32le as _long from ._util import py3 - +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.2" _handler = None @@ -49,7 +49,6 @@ if hasattr(Image.core, "drawwmf"): # install default handler (windows only) class WmfHandler(object): - def open(self, im): im.mode = "RGB" self.bbox = im.info["wmf_bbox"] @@ -57,10 +56,14 @@ if hasattr(Image.core, "drawwmf"): def load(self, im): im.fp.seek(0) # rewind return Image.frombytes( - "RGB", im.size, + "RGB", + im.size, Image.core.drawwmf(im.fp.read(), im.size, self.bbox), - "raw", "BGR", (im.size[0]*3 + 3) & -4, -1 - ) + "raw", + "BGR", + (im.size[0] * 3 + 3) & -4, + -1, + ) register_handler(WmfHandler()) @@ -71,14 +74,14 @@ if hasattr(Image.core, "drawwmf"): def _accept(prefix): return ( - prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or - prefix[:4] == b"\x01\x00\x00\x00" - ) + prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00" + ) ## # Image plugin for Windows metafiles. 
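# A standalone sketch of the stride argument passed to the "raw" decoder
# in WmfHandler.load() above: rows of 24-bit BGR pixels are padded up to
# a multiple of 4 bytes, matching the DWORD alignment of Windows DIB
# rows; the trailing -1 flips the image because DIBs are bottom-up.
def dib_stride(width):
    return (width * 3 + 3) & -4

assert [dib_stride(w) for w in (1, 2, 3, 4, 5)] == [4, 8, 12, 12, 16]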
+ class WmfStubImageFile(ImageFile.StubImageFile): format = "WMF" @@ -129,8 +132,8 @@ class WmfStubImageFile(ImageFile.StubImageFile): size = x1 - x0, y1 - y0 # calculate dots per inch from bbox and frame - xdpi = 2540 * (x1 - y0) // (frame[2] - frame[0]) - ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1]) + xdpi = int(2540.0 * (x1 - y0) / (frame[2] - frame[0]) + 0.5) + ydpi = int(2540.0 * (y1 - y0) / (frame[3] - frame[1]) + 0.5) self.info["wmf_bbox"] = x0, y0, x1, y1 @@ -158,6 +161,7 @@ def _save(im, fp, filename): raise IOError("WMF save handler not installed") _handler.save(im, fp, filename) + # # -------------------------------------------------------------------- # Registry stuff diff --git a/server/www/packages/packages-linux/x64/PIL/XVThumbImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/XVThumbImagePlugin.py index 8cdd848..aa3536d 100644 --- a/server/www/packages/packages-linux/x64/PIL/XVThumbImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/XVThumbImagePlugin.py @@ -20,6 +20,8 @@ from . import Image, ImageFile, ImagePalette from ._binary import i8, o8 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.1" _MAGIC = b"P7 332" @@ -29,7 +31,9 @@ PALETTE = b"" for r in range(8): for g in range(8): for b in range(4): - PALETTE = PALETTE + (o8((r*255)//7)+o8((g*255)//7)+o8((b*255)//3)) + PALETTE = PALETTE + ( + o8((r * 255) // 7) + o8((g * 255) // 7) + o8((b * 255) // 3) + ) def _accept(prefix): @@ -39,6 +43,7 @@ def _accept(prefix): ## # Image plugin for XV thumbnail images. + class XVThumbImageFile(ImageFile.ImageFile): format = "XVThumb" @@ -69,10 +74,7 @@ class XVThumbImageFile(ImageFile.ImageFile): self.palette = ImagePalette.raw("RGB", PALETTE) - self.tile = [ - ("raw", (0, 0)+self.size, - self.fp.tell(), (self.mode, 0, 1) - )] + self.tile = [("raw", (0, 0) + self.size, self.fp.tell(), (self.mode, 0, 1))] # -------------------------------------------------------------------- diff --git a/server/www/packages/packages-linux/x64/PIL/XbmImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/XbmImagePlugin.py index 0cccda1..bc825c3 100644 --- a/server/www/packages/packages-linux/x64/PIL/XbmImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/XbmImagePlugin.py @@ -20,8 +20,11 @@ # import re + from . import Image, ImageFile +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.6" # XBM header @@ -43,6 +46,7 @@ def _accept(prefix): ## # Image plugin for X11 bitmaps. 
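# A standalone sketch (hypothetical numbers) of the DPI change in
# WmfStubImageFile above: the metafile frame is expressed in 0.01 mm
# units (2540 per inch), and the new expression rounds half up instead
# of truncating with integer floor division.
extent, frame_width = 1000, 26541
old = 2540 * extent // frame_width              # truncates to 95
new = int(2540.0 * extent / frame_width + 0.5)  # rounds to 96
assert (old, new) == (95, 96)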
+ class XbmImageFile(ImageFile.ImageFile): format = "XBM" @@ -58,14 +62,12 @@ class XbmImageFile(ImageFile.ImageFile): ysize = int(m.group("height")) if m.group("hotspot"): - self.info["hotspot"] = ( - int(m.group("xhot")), int(m.group("yhot")) - ) + self.info["hotspot"] = (int(m.group("xhot")), int(m.group("yhot"))) self.mode = "1" self._size = xsize, ysize - self.tile = [("xbm", (0, 0)+self.size, m.end(), None)] + self.tile = [("xbm", (0, 0) + self.size, m.end(), None)] def _save(im, fp, filename): @@ -73,17 +75,17 @@ def _save(im, fp, filename): if im.mode != "1": raise IOError("cannot write mode %s as XBM" % im.mode) - fp.write(("#define im_width %d\n" % im.size[0]).encode('ascii')) - fp.write(("#define im_height %d\n" % im.size[1]).encode('ascii')) + fp.write(("#define im_width %d\n" % im.size[0]).encode("ascii")) + fp.write(("#define im_height %d\n" % im.size[1]).encode("ascii")) hotspot = im.encoderinfo.get("hotspot") if hotspot: - fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode('ascii')) - fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode('ascii')) + fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode("ascii")) + fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode("ascii")) fp.write(b"static char im_bits[] = {\n") - ImageFile._save(im, fp, [("xbm", (0, 0)+im.size, 0, None)]) + ImageFile._save(im, fp, [("xbm", (0, 0) + im.size, 0, None)]) fp.write(b"};\n") diff --git a/server/www/packages/packages-linux/x64/PIL/XpmImagePlugin.py b/server/www/packages/packages-linux/x64/PIL/XpmImagePlugin.py index 02bc28a..2751488 100644 --- a/server/www/packages/packages-linux/x64/PIL/XpmImagePlugin.py +++ b/server/www/packages/packages-linux/x64/PIL/XpmImagePlugin.py @@ -16,13 +16,16 @@ import re + from . import Image, ImageFile, ImagePalette from ._binary import i8, o8 +# __version__ is deprecated and will be removed in a future version. Use +# PIL.__version__ instead. __version__ = "0.2" # XPM header -xpm_head = re.compile(b"\"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)") +xpm_head = re.compile(b'"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)') def _accept(prefix): @@ -32,6 +35,7 @@ def _accept(prefix): ## # Image plugin for X11 pixel maps. 
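# A standalone sketch of the ASCII header emitted by the XBM _save()
# above (hypothetical 8x2 image, no hotspot):
w, h = 8, 2
header = ("#define im_width %d\n" % w).encode("ascii")
header += ("#define im_height %d\n" % h).encode("ascii")
assert header == b"#define im_width 8\n#define im_height 2\n"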
+ class XpmImageFile(ImageFile.ImageFile): format = "XPM" @@ -67,9 +71,9 @@ class XpmImageFile(ImageFile.ImageFile): for i in range(pal): s = self.fp.readline() - if s[-2:] == b'\r\n': + if s[-2:] == b"\r\n": s = s[:-2] - elif s[-1:] in b'\r\n': + elif s[-1:] in b"\r\n": s = s[:-1] c = i8(s[1]) @@ -80,15 +84,15 @@ class XpmImageFile(ImageFile.ImageFile): if s[i] == b"c": # process colour key - rgb = s[i+1] + rgb = s[i + 1] if rgb == b"None": self.info["transparency"] = c elif rgb[0:1] == b"#": # FIXME: handle colour names (see ImagePalette.py) rgb = int(rgb[1:], 16) - palette[c] = (o8((rgb >> 16) & 255) + - o8((rgb >> 8) & 255) + - o8(rgb & 255)) + palette[c] = ( + o8((rgb >> 16) & 255) + o8((rgb >> 8) & 255) + o8(rgb & 255) + ) else: # unknown colour raise ValueError("cannot read this XPM file") @@ -102,7 +106,7 @@ class XpmImageFile(ImageFile.ImageFile): self.mode = "P" self.palette = ImagePalette.raw("RGB", b"".join(palette)) - self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), ("P", 0, 1))] + self.tile = [("raw", (0, 0) + self.size, self.fp.tell(), ("P", 0, 1))] def load_read(self, bytes): @@ -114,10 +118,11 @@ class XpmImageFile(ImageFile.ImageFile): s = [None] * ysize for i in range(ysize): - s[i] = self.fp.readline()[1:xsize+1].ljust(xsize) + s[i] = self.fp.readline()[1 : xsize + 1].ljust(xsize) return b"".join(s) + # # Registry diff --git a/server/www/packages/packages-linux/x64/PIL/__init__.py b/server/www/packages/packages-linux/x64/PIL/__init__.py index bc8cfed..59eccc9 100644 --- a/server/www/packages/packages-linux/x64/PIL/__init__.py +++ b/server/www/packages/packages-linux/x64/PIL/__init__.py @@ -16,57 +16,58 @@ PIL.VERSION is the old PIL version and will be removed in the future. from . import _version -# VERSION is deprecated and will be removed in Pillow 6.0.0. -# PILLOW_VERSION is deprecated and will be removed after that. +# VERSION was removed in Pillow 6.0.0. +# PILLOW_VERSION is deprecated and will be removed in Pillow 7.0.0. # Use __version__ instead. 
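# A standalone sketch of the supported way to read the version after
# this change (assumes Pillow is installed): PIL.VERSION is gone as of
# 6.0.0 and PIL.PILLOW_VERSION is scheduled for removal in 7.0.0.
import PIL

print(PIL.__version__)  # e.g. "6.2.1"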
-VERSION = '1.1.7' # PIL Version PILLOW_VERSION = __version__ = _version.__version__ del _version -_plugins = ['BlpImagePlugin', - 'BmpImagePlugin', - 'BufrStubImagePlugin', - 'CurImagePlugin', - 'DcxImagePlugin', - 'DdsImagePlugin', - 'EpsImagePlugin', - 'FitsStubImagePlugin', - 'FliImagePlugin', - 'FpxImagePlugin', - 'FtexImagePlugin', - 'GbrImagePlugin', - 'GifImagePlugin', - 'GribStubImagePlugin', - 'Hdf5StubImagePlugin', - 'IcnsImagePlugin', - 'IcoImagePlugin', - 'ImImagePlugin', - 'ImtImagePlugin', - 'IptcImagePlugin', - 'JpegImagePlugin', - 'Jpeg2KImagePlugin', - 'McIdasImagePlugin', - 'MicImagePlugin', - 'MpegImagePlugin', - 'MpoImagePlugin', - 'MspImagePlugin', - 'PalmImagePlugin', - 'PcdImagePlugin', - 'PcxImagePlugin', - 'PdfImagePlugin', - 'PixarImagePlugin', - 'PngImagePlugin', - 'PpmImagePlugin', - 'PsdImagePlugin', - 'SgiImagePlugin', - 'SpiderImagePlugin', - 'SunImagePlugin', - 'TgaImagePlugin', - 'TiffImagePlugin', - 'WebPImagePlugin', - 'WmfImagePlugin', - 'XbmImagePlugin', - 'XpmImagePlugin', - 'XVThumbImagePlugin'] +_plugins = [ + "BlpImagePlugin", + "BmpImagePlugin", + "BufrStubImagePlugin", + "CurImagePlugin", + "DcxImagePlugin", + "DdsImagePlugin", + "EpsImagePlugin", + "FitsStubImagePlugin", + "FliImagePlugin", + "FpxImagePlugin", + "FtexImagePlugin", + "GbrImagePlugin", + "GifImagePlugin", + "GribStubImagePlugin", + "Hdf5StubImagePlugin", + "IcnsImagePlugin", + "IcoImagePlugin", + "ImImagePlugin", + "ImtImagePlugin", + "IptcImagePlugin", + "JpegImagePlugin", + "Jpeg2KImagePlugin", + "McIdasImagePlugin", + "MicImagePlugin", + "MpegImagePlugin", + "MpoImagePlugin", + "MspImagePlugin", + "PalmImagePlugin", + "PcdImagePlugin", + "PcxImagePlugin", + "PdfImagePlugin", + "PixarImagePlugin", + "PngImagePlugin", + "PpmImagePlugin", + "PsdImagePlugin", + "SgiImagePlugin", + "SpiderImagePlugin", + "SunImagePlugin", + "TgaImagePlugin", + "TiffImagePlugin", + "WebPImagePlugin", + "WmfImagePlugin", + "XbmImagePlugin", + "XpmImagePlugin", + "XVThumbImagePlugin", +] diff --git a/server/www/packages/packages-linux/x64/PIL/__main__.py b/server/www/packages/packages-linux/x64/PIL/__main__.py new file mode 100644 index 0000000..a05323f --- /dev/null +++ b/server/www/packages/packages-linux/x64/PIL/__main__.py @@ -0,0 +1,3 @@ +from .features import pilinfo + +pilinfo() diff --git a/server/www/packages/packages-linux/x64/PIL/_binary.py b/server/www/packages/packages-linux/x64/PIL/_binary.py index 767c13b..53b1ca9 100644 --- a/server/www/packages/packages-linux/x64/PIL/_binary.py +++ b/server/www/packages/packages-linux/x64/PIL/_binary.py @@ -11,16 +11,21 @@ # See the README file for information on usage and redistribution. # -from struct import unpack_from, pack +from struct import pack, unpack_from + from ._util import py3 if py3: + def i8(c): return c if c.__class__ is int else c[0] def o8(i): return bytes((i & 255,)) + + else: + def i8(c): return ord(c) @@ -33,8 +38,8 @@ def i16le(c, o=0): """ Converts a 2-bytes (16 bits) string to an unsigned integer. - c: string containing bytes to convert - o: offset of bytes to convert in string + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string """ return unpack_from(" 2: else: from Tkinter import tkinter as tk -if hasattr(sys, 'pypy_find_executable'): +if hasattr(sys, "pypy_find_executable"): # Tested with packages at https://bitbucket.org/pypy/pypy/downloads. # PyPies 1.6, 2.0 do not have tkinter built in. PyPy3-2.3.1 gives an # OSError trying to import tkinter. 
Otherwise: diff --git a/server/www/packages/packages-linux/x64/PIL/_util.py b/server/www/packages/packages-linux/x64/PIL/_util.py index e6989d6..59964c7 100644 --- a/server/www/packages/packages-linux/x64/PIL/_util.py +++ b/server/www/packages/packages-linux/x64/PIL/_util.py @@ -2,19 +2,32 @@ import os import sys py3 = sys.version_info.major >= 3 +py36 = sys.version_info[0:2] >= (3, 6) if py3: + def isStringType(t): return isinstance(t, str) - def isPath(f): - return isinstance(f, (bytes, str)) + if py36: + from pathlib import Path + + def isPath(f): + return isinstance(f, (bytes, str, Path)) + + else: + + def isPath(f): + return isinstance(f, (bytes, str)) + + else: + def isStringType(t): - return isinstance(t, basestring) + return isinstance(t, basestring) # noqa: F821 def isPath(f): - return isinstance(f, basestring) + return isinstance(f, basestring) # noqa: F821 # Checks if an object is a string, and that it points to a directory. diff --git a/server/www/packages/packages-linux/x64/PIL/_version.py b/server/www/packages/packages-linux/x64/PIL/_version.py index b5e4f0d..e2747ce 100644 --- a/server/www/packages/packages-linux/x64/PIL/_version.py +++ b/server/www/packages/packages-linux/x64/PIL/_version.py @@ -1,2 +1,2 @@ # Master version for Pillow -__version__ = '5.3.0' +__version__ = "6.2.1" diff --git a/server/www/packages/packages-linux/x64/PIL/_webp.cpython-37m-x86_64-linux-gnu.so b/server/www/packages/packages-linux/x64/PIL/_webp.cpython-37m-x86_64-linux-gnu.so index 176915a..24eca11 100755 Binary files a/server/www/packages/packages-linux/x64/PIL/_webp.cpython-37m-x86_64-linux-gnu.so and b/server/www/packages/packages-linux/x64/PIL/_webp.cpython-37m-x86_64-linux-gnu.so differ diff --git a/server/www/packages/packages-linux/x64/PIL/features.py b/server/www/packages/packages-linux/x64/PIL/features.py index 9926445..9fd5223 100644 --- a/server/www/packages/packages-linux/x64/PIL/features.py +++ b/server/www/packages/packages-linux/x64/PIL/features.py @@ -1,3 +1,11 @@ +from __future__ import print_function, unicode_literals + +import collections +import os +import sys + +import PIL + from . 
import Image modules = { @@ -26,12 +34,7 @@ def get_supported_modules(): return [f for f in modules if check_module(f)] -codecs = { - "jpg": "jpeg", - "jpg_2000": "jpeg2k", - "zlib": "zip", - "libtiff": "libtiff" -} +codecs = {"jpg": "jpeg", "jpg_2000": "jpeg2k", "zlib": "zip", "libtiff": "libtiff"} def check_codec(feature): @@ -48,10 +51,11 @@ def get_supported_codecs(): features = { - "webp_anim": ("PIL._webp", 'HAVE_WEBPANIM'), - "webp_mux": ("PIL._webp", 'HAVE_WEBPMUX'), + "webp_anim": ("PIL._webp", "HAVE_WEBPANIM"), + "webp_mux": ("PIL._webp", "HAVE_WEBPMUX"), "transp_webp": ("PIL._webp", "HAVE_TRANSPARENCY"), - "raqm": ("PIL._imagingft", "HAVE_RAQM") + "raqm": ("PIL._imagingft", "HAVE_RAQM"), + "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO"), } @@ -62,7 +66,7 @@ def check_feature(feature): module, flag = features[feature] try: - imported_module = __import__(module, fromlist=['PIL']) + imported_module = __import__(module, fromlist=["PIL"]) return getattr(imported_module, flag) except ImportError: return None @@ -73,9 +77,14 @@ def get_supported_features(): def check(feature): - return (feature in modules and check_module(feature) or - feature in codecs and check_codec(feature) or - feature in features and check_feature(feature)) + return ( + feature in modules + and check_module(feature) + or feature in codecs + and check_codec(feature) + or feature in features + and check_feature(feature) + ) def get_supported(): @@ -83,3 +92,78 @@ def get_supported(): ret.extend(get_supported_features()) ret.extend(get_supported_codecs()) return ret + + +def pilinfo(out=None): + if out is None: + out = sys.stdout + + Image.init() + + print("-" * 68, file=out) + print("Pillow {}".format(PIL.__version__), file=out) + print("-" * 68, file=out) + print( + "Python modules loaded from {}".format(os.path.dirname(Image.__file__)), + file=out, + ) + print( + "Binary modules loaded from {}".format(os.path.dirname(Image.core.__file__)), + file=out, + ) + print("-" * 68, file=out) + + v = sys.version.splitlines() + print("Python {}".format(v[0].strip()), file=out) + for v in v[1:]: + print(" {}".format(v.strip()), file=out) + print("-" * 68, file=out) + + for name, feature in [ + ("pil", "PIL CORE"), + ("tkinter", "TKINTER"), + ("freetype2", "FREETYPE2"), + ("littlecms2", "LITTLECMS2"), + ("webp", "WEBP"), + ("transp_webp", "WEBP Transparency"), + ("webp_mux", "WEBPMUX"), + ("webp_anim", "WEBP Animation"), + ("jpg", "JPEG"), + ("jpg_2000", "OPENJPEG (JPEG2000)"), + ("zlib", "ZLIB (PNG/ZIP)"), + ("libtiff", "LIBTIFF"), + ("raqm", "RAQM (Bidirectional Text)"), + ]: + if check(name): + print("---", feature, "support ok", file=out) + else: + print("***", feature, "support not installed", file=out) + print("-" * 68, file=out) + + extensions = collections.defaultdict(list) + for ext, i in Image.EXTENSION.items(): + extensions[i].append(ext) + + for i in sorted(Image.ID): + line = "{}".format(i) + if i in Image.MIME: + line = "{} {}".format(line, Image.MIME[i]) + print(line, file=out) + + if i in extensions: + print("Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out) + + features = [] + if i in Image.OPEN: + features.append("open") + if i in Image.SAVE: + features.append("save") + if i in Image.SAVE_ALL: + features.append("save_all") + if i in Image.DECODERS: + features.append("decode") + if i in Image.ENCODERS: + features.append("encode") + + print("Features: {}".format(", ".join(features)), file=out) + print("-" * 68, file=out) diff --git 
a/server/www/packages/packages-linux/x64/_cffi_backend.cpython-37m-x86_64-linux-gnu.so b/server/www/packages/packages-linux/x64/_cffi_backend.cpython-37m-x86_64-linux-gnu.so new file mode 100755 index 0000000..7b041a6 Binary files /dev/null and b/server/www/packages/packages-linux/x64/_cffi_backend.cpython-37m-x86_64-linux-gnu.so differ diff --git a/server/www/packages/packages-linux/x64/cffi/__init__.py b/server/www/packages/packages-linux/x64/cffi/__init__.py new file mode 100644 index 0000000..ddc3614 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/__init__.py @@ -0,0 +1,14 @@ +__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError', + 'FFIError'] + +from .api import FFI +from .error import CDefError, FFIError, VerificationError, VerificationMissing +from .error import PkgConfigError + +__version__ = "1.13.2" +__version_info__ = (1, 13, 2) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/server/www/packages/packages-linux/x64/cffi/_cffi_errors.h b/server/www/packages/packages-linux/x64/cffi/_cffi_errors.h new file mode 100644 index 0000000..83cdad0 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/_cffi_errors.h @@ -0,0 +1,147 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. + If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. 
+*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " try:\n" + " of.write(x)\n" + " except: pass\n" + " self.buf += x\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. */ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. */ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/server/www/packages/packages-linux/x64/cffi/_cffi_include.h b/server/www/packages/packages-linux/x64/cffi/_cffi_include.h new file mode 100644 index 0000000..37ea74f --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/_cffi_include.h @@ -0,0 +1,308 @@ +#define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+ + Issue #350 is still open: on Windows, the code here causes it to link + with PYTHON36.DLL (for example) instead of PYTHON3.DLL. A fix was + attempted in 164e526a5515 and 14ce6985e1c3, but reverted: virtualenv + does not make PYTHON3.DLL available, and so the "correctly" compiled + version would not run inside a virtualenv. We will re-apply the fix + after virtualenv has been fixed for some time. For explanation, see + issue #355. For a workaround if you want PYTHON3.DLL and don't worry + about virtualenv, see issue #350. See also 'py_limited_api' in + setuptools_ext.py. +*/ +#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API) +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + +#include +#ifdef __cplusplus +extern "C" { +#endif +#include +#include "parse_c_type.h" + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include +# endif +#endif + +#ifdef __GNUC__ +# define _CFFI_UNUSED_FN __attribute__((unused)) +#else +# define _CFFI_UNUSED_FN /* nothing */ +#endif + +#ifdef __cplusplus +# ifndef _Bool + typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */ +# endif +#endif + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + + +#if PY_MAJOR_VERSION >= 3 +# define PyInt_FromLong PyLong_FromLong +#endif + +#define _cffi_from_c_double PyFloat_FromDouble +#define _cffi_from_c_float PyFloat_FromDouble +#define _cffi_from_c_long PyInt_FromLong +#define _cffi_from_c_ulong PyLong_FromUnsignedLong +#define _cffi_from_c_longlong PyLong_FromLongLong +#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong + +#define _cffi_to_c_double PyFloat_AsDouble +#define _cffi_to_c_float PyFloat_AsDouble + +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? 
\ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) + +#define _cffi_to_c_int(o, type) \ + ((type)( \ + sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ + (Py_FatalError("unsupported size for type " #type), (type)0))) + +#define _cffi_to_c_i8 \ + ((int(*)(PyObject *))_cffi_exports[1]) +#define _cffi_to_c_u8 \ + ((int(*)(PyObject *))_cffi_exports[2]) +#define _cffi_to_c_i16 \ + ((int(*)(PyObject *))_cffi_exports[3]) +#define _cffi_to_c_u16 \ + ((int(*)(PyObject *))_cffi_exports[4]) +#define _cffi_to_c_i32 \ + ((int(*)(PyObject *))_cffi_exports[5]) +#define _cffi_to_c_u32 \ + ((unsigned int(*)(PyObject *))_cffi_exports[6]) +#define _cffi_to_c_i64 \ + ((long long(*)(PyObject *))_cffi_exports[7]) +#define _cffi_to_c_u64 \ + ((unsigned long long(*)(PyObject *))_cffi_exports[8]) +#define _cffi_to_c_char \ + ((int(*)(PyObject *))_cffi_exports[9]) +#define _cffi_from_c_pointer \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[10]) +#define _cffi_to_c_pointer \ + ((char *(*)(PyObject *, struct _cffi_ctypedescr *))_cffi_exports[11]) +#define _cffi_get_struct_layout \ + not used any more +#define _cffi_restore_errno \ + ((void(*)(void))_cffi_exports[13]) +#define _cffi_save_errno \ + ((void(*)(void))_cffi_exports[14]) +#define _cffi_from_c_char \ + ((PyObject *(*)(char))_cffi_exports[15]) +#define _cffi_from_c_deref \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[16]) +#define _cffi_to_c \ + ((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[17]) +#define _cffi_from_c_struct \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18]) +#define _cffi_to_c_wchar_t \ + ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19]) +#define _cffi_from_c_wchar_t \ + ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20]) +#define _cffi_to_c_long_double \ + ((long double(*)(PyObject *))_cffi_exports[21]) +#define _cffi_to_c__Bool \ + ((_Bool(*)(PyObject *))_cffi_exports[22]) +#define _cffi_prepare_pointer_call_argument \ + ((Py_ssize_t(*)(struct _cffi_ctypedescr *, \ + PyObject *, char **))_cffi_exports[23]) +#define _cffi_convert_array_from_object \ + ((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) +#define _cffi_to_c_wchar3216_t \ + ((int(*)(PyObject *))_cffi_exports[26]) +#define _cffi_from_c_wchar3216_t \ + ((PyObject *(*)(int))_cffi_exports[27]) +#define _CFFI_NUM_EXPORTS 28 + +struct _cffi_ctypedescr; + +static void *_cffi_exports[_CFFI_NUM_EXPORTS]; + +#define _cffi_type(index) ( \ + assert((((uintptr_t)_cffi_types[index]) & 1) == 0), \ + (struct _cffi_ctypedescr *)_cffi_types[index]) + +static PyObject *_cffi_init(const char *module_name, Py_ssize_t version, + const struct _cffi_type_context_s *ctx) +{ + PyObject *module, *o_arg, *new_module; + void *raw[] = { + (void *)module_name, + (void *)version, + (void *)_cffi_exports, + (void *)ctx, + }; + + module = PyImport_ImportModule("_cffi_backend"); + if (module == NULL) + goto failure; + + o_arg = PyLong_FromVoidPtr((void *)raw); + if (o_arg == NULL) + goto failure; + + new_module = 
PyObject_CallMethod( + module, (char *)"_init_cffi_1_0_external_module", (char *)"O", o_arg); + + Py_DECREF(o_arg); + Py_DECREF(module); + return new_module; + + failure: + Py_XDECREF(module); + return NULL; +} + + +#ifdef HAVE_WCHAR_H +typedef wchar_t _cffi_wchar_t; +#else +typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */ +#endif + +_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 2) + return (uint16_t)_cffi_to_c_wchar_t(o); + else + return (uint16_t)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x) +{ + if (sizeof(_cffi_wchar_t) == 2) + return _cffi_from_c_wchar_t((_cffi_wchar_t)x); + else + return _cffi_from_c_wchar3216_t((int)x); +} + +_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 4) + return (int)_cffi_to_c_wchar_t(o); + else + return (int)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x) +{ + if (sizeof(_cffi_wchar_t) == 4) + return _cffi_from_c_wchar_t((_cffi_wchar_t)x); + else + return _cffi_from_c_wchar3216_t(x); +} + + +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org +#endif + + +#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) + +#define _cffi_prim_int(size, sign) \ + ((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \ + (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \ + (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \ + (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ + _CFFI__UNKNOWN_PRIM) + +#define _cffi_prim_float(size) \ + ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \ + (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \ + (size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \ + _CFFI__UNKNOWN_FLOAT_PRIM) + +#define _cffi_check_int(got, got_nonpos, expected) \ + ((got_nonpos) == (expected <= 0) && \ + (got) == (unsigned long long)expected) + +#ifdef MS_WIN32 +# define _cffi_stdcall __stdcall +#else +# define _cffi_stdcall /* nothing */ +#endif + +#ifdef __cplusplus +} +#endif diff --git a/server/www/packages/packages-linux/x64/cffi/_embedding.h b/server/www/packages/packages-linux/x64/cffi/_embedding.h new file mode 100644 index 0000000..abd474a --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/_embedding.h @@ -0,0 +1,520 @@ + +/***** Support code for embedding *****/ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) +# define CFFI_DLLEXPORT __declspec(dllexport) +#elif defined(__GNUC__) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) +#else +# define CFFI_DLLEXPORT /* nothing */ +#endif + + +/* There are two global variables of type _cffi_call_python_fnptr: + + * _cffi_call_python, which we declare just below, is the one called + by ``extern "Python"`` implementations. + + * _cffi_call_python_org, which on CPython is actually part of the + _cffi_exports[] array, is the function pointer copied from + _cffi_backend. + + After initialization is complete, both are equal. However, the + first one remains equal to &_cffi_start_and_call_python until the + very end of initialization, when we are (or should be) sure that + concurrent threads also see a completely initialized world, and + only then is it changed. 
+*/ +#undef _cffi_call_python +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *); +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *); +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python; + + +#ifndef _MSC_VER + /* --- Assuming a GCC not infinitely old --- */ +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif +#else + /* --- Windows threads version --- */ +# include <windows.h> +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 +static volatile LONG _cffi_dummy; +#endif + +#ifdef WITH_THREAD +# ifndef _MSC_VER +# include <pthread.h> + static pthread_mutex_t _cffi_embed_startup_lock; +# else + static CRITICAL_SECTION _cffi_embed_startup_lock; +# endif + static char _cffi_embed_startup_lock_ready = 0; +#endif + +static void _cffi_acquire_reentrant_mutex(void) +{ + static void *volatile lock = NULL; + + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: pthread_mutex_init() should be very fast, and + this is only run at start-up anyway. */ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + +#include "_cffi_errors.h" + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. + */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + PyObject *builtins; + + state = PyGILState_Ensure(); +
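+ /* Illustration (an assumed sketch, not part of the generated code): + _CFFI_PYTHON_STARTUP_CODE below holds whatever string was passed + to ffi.embedding_init_code(); for a hypothetical module 'my_plugin' + it would typically look like: + + from my_plugin import ffi + + @ffi.def_extern() + def my_entry(arg): + return 0 + + Compiling and running it once, below, registers the + @ffi.def_extern() implementations that subsequent + ``extern "Python"`` calls dispatch to. */ +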
+ /* Call the initxxx() function from the present module. It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "<init code for '" _CFFI_MODULE_NAME "'>", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + builtins = PyEval_GetBuiltins(); + if (builtins == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. + Debugging load-time failures with embedding is not fun + */ + PyObject *ecap; + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.13.2" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + _cffi_stop_error_capture(ecap); + } + result = -1; + goto done; +} + +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + (What it really does used to be completely different in Python 2 + and Python 3, with the Python 2 solution avoiding the spin-lock + around the Py_InitializeEx() call. However, after recent changes + to CPython 2.7 (issue #358) it no longer works. So we use the + Python 3 solution everywhere.) + + This initializes Python by calling Py_InitializeEx(). + Important: this must not be called concurrently at all. + So we use a global variable as a simple spin lock.
This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. + + In Python < 3.8, we choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + In Python >= 3.8, this string array is no longer writable, so + instead we pick PyCapsuleType.tp_version_tag. We can't change + Python < 3.8 because someone might use a mixture of cffi + embedded modules, some of which were compiled before this file + changed. + */ + +#ifdef WITH_THREAD +# if PY_VERSION_HEX < 0x03080000 + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value, *locked_value; + + while (1) { /* spin loop */ + old_value = *lock; + locked_value = old_value + 1; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# else + int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag; + int old_value, locked_value; + assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG)); + + while (1) { /* spin loop */ + old_value = *lock; + locked_value = -42; + if (old_value == 0) { + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { + assert(old_value == locked_value); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# endif +#endif + + /* call Py_InitializeEx() */ + if (!Py_IsInitialized()) { + _cffi_py_initialize(); + PyEval_InitThreads(); + PyEval_SaveThread(); /* release the GIL */ + /* the returned tstate must be the one that has been stored into the + autoTLSkey by _PyGILState_Init() called from Py_Initialize(). */ + } + else { + PyGILState_STATE state = PyGILState_Ensure(); + PyEval_InitThreads(); + PyGILState_Release(state); + } + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, locked_value, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void (*func)(const void *[]); + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + (void(*)(const void *[]))_CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. 
This function can be + called multiple times concurrently, e.g. when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. + */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + + /* Here the GIL exists, but we don't have it. We're only protected + from concurrency by the reentrant mutex. */ + + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ + if (!called) { + called = 1; /* invoke _cffi_initialize_python() only once, + but don't set '_cffi_call_python' right now, + otherwise concurrent threads won't call + this function at all (we need them to wait) */ + if (_cffi_initialize_python() == 0) { + /* now initialization is finished. Switch to the fast-path. */ + + /* We would like nobody to see the new value of + '_cffi_call_python' without also seeing the rest of the + data initialized. However, this is not possible. But + the new value of '_cffi_call_python' is the function + 'cffi_call_python()' from _cffi_backend. So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. + */ + + assert(_cffi_call_python_org != NULL); + _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; + } + else { + /* initialization failed. Reset this to NULL, even if it was + already set to some other value. Future calls to + _cffi_start_python() are still forced to occur, and will + always return NULL from now on. */ + _cffi_call_python_org = NULL; + } + } + + _cffi_release_reentrant_mutex(); + + return (_cffi_call_python_fnptr)_cffi_call_python_org; +} + +static +void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args) +{ + _cffi_call_python_fnptr fnptr; + int current_err = errno; +#ifdef _MSC_VER + int current_lasterr = GetLastError(); +#endif + fnptr = _cffi_start_python(); + if (fnptr == NULL) { + fprintf(stderr, "function %s() called, but initialization code " + "failed. 
Returning 0.\n", externpy->name); + memset(args, 0, externpy->size_of_result); + } +#ifdef _MSC_VER + SetLastError(current_lasterr); +#endif + errno = current_err; + + if (fnptr != NULL) + fnptr(externpy, args); +} + + +/* The cffi_start_python() function makes sure Python is initialized + and our cffi module is set up. It can be called manually from the + user C code. The same effect is obtained automatically from any + dll-exported ``extern "Python"`` function. This function returns + -1 if initialization failed, 0 if all is OK. */ +_CFFI_UNUSED_FN +static int cffi_start_python(void) +{ + if (_cffi_call_python == &_cffi_start_and_call_python) { + if (_cffi_start_python() == NULL) + return -1; + } + cffi_read_barrier(); + return 0; +} + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif diff --git a/server/www/packages/packages-linux/x64/cffi/api.py b/server/www/packages/packages-linux/x64/cffi/api.py new file mode 100644 index 0000000..32fe620 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/api.py @@ -0,0 +1,961 @@ +import sys, types +from .lock import allocate_lock +from .error import CDefError +from . import model + +try: + callable +except NameError: + # Python 3.1 + from collections import Callable + callable = lambda x: isinstance(x, Callable) + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +_unspecified = object() + + + +class FFI(object): + r''' + The main top-level class that you instantiate once, or once per module. + + Example usage: + + ffi = FFI() + ffi.cdef(""" + int printf(const char *, ...); + """) + + C = ffi.dlopen(None) # standard library + -or- + C = ffi.verify() # use a C compiler: verify the decl above is right + + C.printf("hello, %s!\n", ffi.new("char[]", "world")) + ''' + + def __init__(self, backend=None): + """Create an FFI instance. The 'backend' argument is used to + select a non-default backend, mostly for tests. + """ + if backend is None: + # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with + # _cffi_backend.so compiled. + import _cffi_backend as backend + from . import __version__ + if backend.__version__ != __version__: + # bad version! Try to be as explicit as possible. + if hasattr(backend, '__file__'): + # CPython + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % ( + __version__, __file__, + backend.__version__, backend.__file__)) + else: + # PyPy + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % ( + __version__, __file__, backend.__version__)) + # (If you insist you can also try to pass the option + # 'backend=backend_ctypes.CTypesBackend()', but don't + # rely on it! It's probably not going to work well.) + + from . 
import cparser + self._backend = backend + self._lock = allocate_lock() + self._parser = cparser.Parser() + self._cached_btypes = {} + self._parsed_types = types.ModuleType('parsed_types').__dict__ + self._new_types = types.ModuleType('new_types').__dict__ + self._function_caches = [] + self._libraries = [] + self._cdefsources = [] + self._included_ffis = [] + self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None + self._embedding = None + self._typecache = model.get_typecache(backend) + if hasattr(backend, 'set_ffi'): + backend.set_ffi(self) + for name in list(backend.__dict__): + if name.startswith('RTLD_'): + setattr(self, name, getattr(backend, name)) + # + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) + if isinstance(backend, types.ModuleType): + # _cffi_backend: attach these constants to the class + if not hasattr(FFI, 'NULL'): + FFI.NULL = self.cast(self.BVoidP, 0) + FFI.CData, FFI.CType = backend._get_types() + else: + # ctypes backend: attach these constants to the instance + self.NULL = self.cast(self.BVoidP, 0) + self.CData, self.CType = backend._get_types() + self.buffer = backend.buffer + + def cdef(self, csource, override=False, packed=False, pack=None): + """Parse the given C source. This registers all declared functions, + types, and global variables. The functions and global variables can + then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. + The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. + Alternatively, 'pack' can be a small integer, and requests for + alignment greater than that are ignored (pack=1 is equivalent to + packed=True). + """ + self._cdef(csource, override=override, packed=packed, pack=pack) + + def embedding_api(self, csource, packed=False, pack=None): + self._cdef(csource, packed=packed, pack=pack, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): + if not isinstance(csource, str): # unicode, on Python 2 + if not isinstance(csource, basestring): + raise TypeError("cdef() argument must be a string") + csource = csource.encode('ascii') + with self._lock: + self._cdef_version = object() + self._parser.parse(csource, override=override, **options) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() + finishlist = self._parser._recomplete + if finishlist: + self._parser._recomplete = [] + for tp in finishlist: + tp.finish_backend_type(self, finishlist) + + def dlopen(self, name, flags=0): + """Load and return a dynamic library identified by 'name'. + The standard C library can be loaded by passing None. + Note that functions and types declared by 'ffi.cdef()' are not + linked to a particular library, just like C headers; in the + library we only look for the actual (untyped) symbols. + """ + assert isinstance(name, basestring) or name is None + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) + return lib + + def dlclose(self, lib): + """Close a library obtained with ffi.dlopen(). After this call, + access to functions or variables from the library will fail + (possibly with a segmentation fault). 
+ """ + type(lib).__cffi_close__(lib) + + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + + def _typeof(self, cdecl, consider_function_as_funcptr=False): + # string -> ctype object + try: + result = self._parsed_types[cdecl] + except KeyError: + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) + return btype + + def typeof(self, cdecl): + """Parse the C type given as a string and return the + corresponding object. + It can also be used on 'cdata' instance to get its C type. + """ + if isinstance(cdecl, basestring): + return self._typeof(cdecl) + if isinstance(cdecl, self.CData): + return self._backend.typeof(cdecl) + if isinstance(cdecl, types.BuiltinFunctionType): + res = _builtin_function_type(cdecl) + if res is not None: + return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) + raise TypeError(type(cdecl)) + + def sizeof(self, cdecl): + """Return the size in bytes of the argument. It can be a + string naming a C type, or a 'cdata' instance. + """ + if isinstance(cdecl, basestring): + BType = self._typeof(cdecl) + return self._backend.sizeof(BType) + else: + return self._backend.sizeof(cdecl) + + def alignof(self, cdecl): + """Return the natural alignment size in bytes of the C type + given as a string. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.alignof(cdecl) + + def offsetof(self, cdecl, *fields_or_indexes): + """Return the offset of the named field inside the given + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] + + def new(self, cdecl, init=None): + """Allocate an instance according to the specified C type and + return a pointer to it. The specified C type must be either a + pointer or an array: ``new('X *')`` allocates an X and returns + a pointer to it, whereas ``new('X[n]')`` allocates an array of + n X'es and returns an array referencing it (which works + mostly like a pointer, like in C). You can also use + ``new('X[]', n)`` to allocate an array of a non-constant + length n. + + The memory is initialized following the rules of declaring a + global variable in C: by default it is zero-initialized, but + an explicit initializer can be given which can be used to + fill all or part of the memory. + + When the returned object goes out of scope, the memory + is freed. In other words the returned object has + ownership of the value of type 'cdecl' that it points to. 
This + means that the raw data can be used as long as this object is + kept alive, but must not be used for a longer time. Be careful + about that when copying the pointer to the memory somewhere + else, e.g. into another structure. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.newp(cdecl, init) + + def new_allocator(self, alloc=None, free=None, + should_clear_after_alloc=True): + """Return a new allocator, i.e. a function that behaves like ffi.new() + but uses the provided low-level 'alloc' and 'free' functions. + + 'alloc' is called with the size as argument. If it returns NULL, a + MemoryError is raised. 'free' is called with the result of 'alloc' + as argument. Both can be either Python function or directly C + functions. If 'free' is None, then no free function is called. + If both 'alloc' and 'free' are None, the default is used. + + If 'should_clear_after_alloc' is set to False, then the memory + returned by 'alloc' is assumed to be already cleared (or you are + fine with garbage); otherwise CFFI will clear it. + """ + compiled_ffi = self._backend.FFI() + allocator = compiled_ffi.new_allocator(alloc, free, + should_clear_after_alloc) + def allocate(cdecl, init=None): + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return allocator(cdecl, init) + return allocate + + def cast(self, cdecl, source): + """Similar to a C cast: returns an instance of the named C + type initialized with the given 'source'. The source is + casted between integers or pointers of any type. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.cast(cdecl, source) + + def string(self, cdata, maxlen=-1): + """Return a Python string (or unicode string) from the 'cdata'. + If 'cdata' is a pointer or array of characters or bytes, returns + the null-terminated string. The returned string extends until + the first null character, or at most 'maxlen' characters. If + 'cdata' is an array then 'maxlen' defaults to its length. + + If 'cdata' is a pointer or array of wchar_t, returns a unicode + string following the same rules. + + If 'cdata' is a single character or byte or a wchar_t, returns + it as a string or unicode string. + + If 'cdata' is an enum, returns the value of the enumerator as a + string, or 'NUMBER' if the value is out of range. + """ + return self._backend.string(cdata, maxlen) + + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + + #def buffer(self, cdata, size=-1): + # """Return a read-write buffer object that references the raw C data + # pointed to by the given 'cdata'. The 'cdata' must be a pointer or + # an array. Can be passed to functions expecting a buffer, or directly + # manipulated with: + # + # buf[:] get a copy of it in a regular string, or + # buf[idx] as a single character + # buf[:] = ... + # buf[idx] = ... 
change the content + # """ + # note that 'buffer' is a type, set on this instance by __init__ + + def from_buffer(self, cdecl, python_buffer=_unspecified, + require_writable=False): + """Return a cdata of the given type pointing to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types + str or unicode (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + + The first argument is optional and default to 'char[]'. + """ + if python_buffer is _unspecified: + cdecl, python_buffer = self.BCharA, cdecl + elif isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.from_buffer(cdecl, python_buffer, + require_writable) + + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + + def callback(self, cdecl, python_callable=None, error=None, onerror=None): + """Return a callback object or a decorator making such a + callback object. 'cdecl' must name a C function pointer type. + The callback invokes the specified 'python_callable' (which may + be provided either directly or via a decorator). Important: the + callback object must be manually kept alive for as long as the + callback may be invoked from the C level. + """ + def callback_decorator_wrap(python_callable): + if not callable(python_callable): + raise TypeError("the 'python_callable' argument " + "is not callable") + return self._backend.callback(cdecl, python_callable, + error, onerror) + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl, consider_function_as_funcptr=True) + if python_callable is None: + return callback_decorator_wrap # decorator mode + else: + return callback_decorator_wrap(python_callable) # direct mode + + def getctype(self, cdecl, replace_with=''): + """Return a string giving the C type 'cdecl', which may be itself + a string or a <ctype> object. If 'replace_with' is given, it gives + extra text to append (or insert for more complicated C types), like + a variable name, or '*' to get actually the C type 'pointer-to-cdecl'. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + replace_with = replace_with.strip() + if (replace_with.startswith('*') + and '&[' in self._backend.getcname(cdecl, '&')): + replace_with = '(%s)' % replace_with + elif replace_with and not replace_with[0] in '[(': + replace_with = ' ' + replace_with + return self._backend.getcname(cdecl, replace_with) + + def gc(self, cdata, destructor, size=0): + """Return a new cdata object that points to the same + data. Later, when this new cdata object is garbage-collected, + 'destructor(old_cdata_object)' will be called. + + The optional 'size' gives an estimate of the size, used to + trigger the garbage collection more eagerly. So far only used + on PyPy. It tells the GC that the returned object keeps alive + roughly 'size' bytes of external memory. + """ + return self._backend.gcp(cdata, destructor, size) +
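+ # Illustrative usage sketch (an assumed example, not part of upstream + # api.py), tying together the allocation APIs documented above: + # + # ffi = FFI() + # p = ffi.new("int[4]", [1, 2, 3, 4]) # owned; freed when 'p' dies + # raw = ffi.buffer(p, ffi.sizeof("int") * 4)[:] # copy the raw bytes out + # q = ffi.gc(ffi.new("int *"), lambda ptr: None) # destructor runs at GC time +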
+ def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! + try: + BType = self._cached_btypes[type] + except KeyError: + finishlist = [] + BType = type.get_cached_btype(self, finishlist) + for type in finishlist: + type.finish_backend_type(self, finishlist) + return BType + + def verify(self, source='', tmpdir=None, **kwargs): + """Verify that the current ffi signatures compile on this + machine, and return a dynamic library object. The dynamic + library can be used to call functions and access global + variables declared in this 'ffi'. The library is compiled + by the C compiler: it gives you C-level API compatibility + (including calling macros). This is unlike 'ffi.dlopen()', + which requires binary compatibility in the signatures. + """ + from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Verifier(). + tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. + self.verifier = Verifier(self, source, tmpdir, **kwargs) + lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). + self._libraries.append(lib) + return lib + + def _get_errno(self): + return self._backend.get_errno() + def _set_errno(self, errno): + self._backend.set_errno(errno) + errno = property(_get_errno, _set_errno, None, + "the value of 'errno' from/to the C calls") + + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + + def _pointer_to(self, ctype): + with self._lock: + return model.pointer_cache(self, ctype) + + def addressof(self, cdata, *fields_or_indexes): + """Return the address of a <cdata 'struct-or-union'>. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. + """ + try: + ctype = self._backend.typeof(cdata) + except TypeError: + if '__addressof__' in type(cdata).__dict__: + return type(cdata).__addressof__(cdata, *fields_or_indexes) + raise + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 + ctypeptr = self._pointer_to(ctype) + return self._backend.rawaddressof(ctypeptr, cdata, offset) + + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + + def include(self, ffi_to_include): + """Includes the typedefs, structs, unions and enums defined + in another FFI instance. Usage is similar to a #include in C, + where a part of the program might include types defined in + another part for its own usage.
Note that the include() + method has no effect on functions, constants and global + variables, which must anyway be accessed directly from the + lib object returned by the original FFI instance. + """ + if not isinstance(ffi_to_include, FFI): + raise TypeError("ffi.include() expects an argument that is also of" + " type cffi.FFI, not %r" % ( + type(ffi_to_include).__name__,)) + if ffi_to_include is self: + raise ValueError("self.include(self)") + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') + self._included_ffis.append(ffi_to_include) + + def new_handle(self, x): + return self._backend.newp_handle(self.BVoidP, x) + + def from_handle(self, x): + return self._backend.from_handle(x) + + def release(self, x): + self._backend.release(x) + + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. + """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + def ensure(key, value): + lst = kwds.setdefault(key, []) + if value not in lst: + lst.append(value) + # + if '__pypy__' in sys.builtin_module_names: + import os + if sys.platform == "win32": + # we need 'libpypy-c.lib'. Current distributions of + # pypy (>= 4.1) contain it as 'libs/python27.lib'. + pythonlib = "python{0[0]}{0[1]}".format(sys.version_info) + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'libs')) + else: + # we need 'libpypy-c.{so,dylib}', which should be by + # default located in 'sys.prefix/bin' for installed + # systems. + if sys.version_info < (3,): + pythonlib = "pypy-c" + else: + pythonlib = "pypy3-c" + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + # On uninstalled pypy's, the libpypy-c is typically found in + # .../pypy/goal/. 
+ if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) + else: + if sys.platform == "win32": + template = "python%d%d" + if hasattr(sys, 'gettotalrefcount'): + template += '_d' + else: + try: + import sysconfig + except ImportError: # 2.6 + from distutils import sysconfig + template = "python%d.%d" + if sysconfig.get_config_var('DEBUG_EXT'): + template += sysconfig.get_config_var('DEBUG_EXT') + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + ensure('libraries', pythonlib) + if sys.platform == "win32": + ensure('extra_link_args', '/MANIFEST') + + def set_source(self, module_name, source, source_extension='.c', **kwds): + import os + if hasattr(self, '_assigned_source'): + raise ValueError("set_source() cannot be called several times " + "per ffi object") + if not isinstance(module_name, basestring): + raise TypeError("'module_name' must be a string") + if os.sep in module_name or (os.altsep and os.altsep in module_name): + raise ValueError("'module_name' must not contain '/': use a dotted " + "name to make a 'package.module' location") + self._assigned_source = (str(module_name), source, + source_extension, kwds) + + def set_source_pkgconfig(self, module_name, pkgconfig_libs, source, + source_extension='.c', **kwds): + from . import pkgconfig + if not isinstance(pkgconfig_libs, list): + raise TypeError("the pkgconfig_libs argument must be a list " + "of package names") + kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs) + pkgconfig.merge_flags(kwds, kwds2) + self.set_source(module_name, source, source_extension, **kwds) + + def distutils_extension(self, tmpdir='build', verbose=True): + from distutils.dir_util import mkpath + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored + return self.verifier.get_extension() + raise ValueError("set_source() must be called before" + " distutils_extension()") + module_name, source, source_extension, kwds = self._assigned_source + if source is None: + raise TypeError("distutils_extension() is only for C extension " + "modules, not for dlopen()-style pure Python " + "modules") + mkpath(tmpdir) + ext, updated = recompile(self, module_name, + source, tmpdir=tmpdir, extradir=tmpdir, + source_extension=source_extension, + call_c_compiler=False, **kwds) + if verbose: + if updated: + sys.stderr.write("regenerated: %r\n" % (ext.sources[0],)) + else: + sys.stderr.write("not modified: %r\n" % (ext.sources[0],)) + return ext + + def emit_c_code(self, filename): + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before emit_c_code()") + module_name, source, source_extension, kwds = self._assigned_source + if source is None: + raise TypeError("emit_c_code() is only for C extension modules, " + "not for dlopen()-style pure Python modules") + recompile(self, module_name, source, + c_file=filename, call_c_compiler=False, **kwds) + + def emit_python_code(self, filename): + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before emit_c_code()") + module_name, source, source_extension, kwds = self._assigned_source + if source is not None: + raise TypeError("emit_python_code() is only for dlopen()-style " + "pure Python modules, not for C extension modules") + recompile(self, module_name, 
source, + c_file=filename, call_c_compiler=False, **kwds) + + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll/.dylib). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ + from .recompiler import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before compile()") + module_name, source, source_extension, kwds = self._assigned_source + return recompile(self, module_name, source, tmpdir=tmpdir, + target=target, source_extension=source_extension, + compiler_verbose=verbose, debug=debug, **kwds) + + def init_once(self, func, tag): + # Read _init_once_cache[tag], which is either (False, lock) if + # we're calling the function now in some thread, or (True, result). + # Don't call setdefault() in most cases, to avoid allocating and + # immediately freeing a lock; but still use setdefault() to avoid + # races. + try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. + result = func() + self._init_once_cache[tag] = (True, result) + return result + + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + + def def_extern(self, *args, **kwds): + raise ValueError("ffi.def_extern() is only available on API-mode FFI " + "objects") + + def list_types(self): + """Returns the user type names known to this FFI instance.
+ This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + + +def _load_backend_lib(backend, name, flags): + import os + if name is None: + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # on Python 2 (backward compatibility hack only) + first_error = None + if '.' in name or '/' in name or os.sep in name: + try: + return backend.load_library(name, flags) + except OSError as e: + first_error = e + import ctypes.util + path = ctypes.util.find_library(name) + if path is None: + if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): + raise OSError("dlopen(None) cannot work on Windows for Python 3 " + "(see http://bugs.python.org/issue23606)") + msg = ("ctypes.util.find_library() did not manage " + "to locate a library called %r" % (name,)) + if first_error is not None: + msg = "%s. Additionally, %s" % (first_error, msg) + raise OSError(msg) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) + # + def accessor_function(name): + key = 'function ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + value = backendlib.load_function(BType, name) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def addressof_var(name): + try: + return addr_variables[name] + except KeyError: + with ffi._lock: + if name not in addr_variables: + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + if BType.kind != 'array': + BType = model.pointer_cache(ffi, BType) + p = backendlib.load_function(BType, name) + addr_variables[name] = p + return addr_variables[name] + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + addr_variables = {} + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: + return + # + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, 
accessor_int_constant) + accessors_version[0] = ffi._cdef_version + # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) + # + class FFILibrary(object): + def __getattr__(self, name): + make_accessor(name) + return getattr(self, name) + def __setattr__(self, name, value): + try: + property = getattr(self.__class__, name) + except AttributeError: + make_accessor(name) + setattr(self, name, value) + else: + property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() + def __addressof__(self, name): + if name in library.__dict__: + return library.__dict__[name] + if name in FFILibrary.__dict__: + return addressof_var(name) + make_accessor(name) + if name in library.__dict__: + return library.__dict__[name] + if name in FFILibrary.__dict__: + return addressof_var(name) + raise AttributeError("cffi library has no function or " + "global variable named '%s'" % (name,)) + def __cffi_close__(self): + backendlib.close_lib() + self.__dict__.clear() + # + if libname is not None: + try: + if not isinstance(libname, str): # unicode, on Python 2 + libname = libname.encode('utf-8') + FFILibrary.__name__ = 'FFILibrary_%s' % libname + except UnicodeError: + pass + library = FFILibrary() + return library, library.__dict__ + +def _builtin_function_type(func): + # a hack to make at least ffi.typeof(builtin_function) work, + # if the builtin function was obtained by 'vengine_cpy'. + import sys + try: + module = sys.modules[func.__module__] + ffi = module._cffi_original_ffi + types_of_builtin_funcs = module._cffi_types_of_builtin_funcs + tp = types_of_builtin_funcs[func] + except (KeyError, AttributeError, TypeError): + return None + else: + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/server/www/packages/packages-linux/x64/cffi/backend_ctypes.py b/server/www/packages/packages-linux/x64/cffi/backend_ctypes.py new file mode 100644 index 0000000..e7956a7 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/backend_ctypes.py @@ -0,0 +1,1121 @@ +import ctypes, ctypes.util, operator, sys +from . 
import model + +if sys.version_info < (3,): + bytechr = chr +else: + unicode = str + long = int + xrange = range + bytechr = lambda num: bytes([num]) + +class CTypesType(type): + pass + +class CTypesData(object): + __metaclass__ = CTypesType + __slots__ = ['__weakref__'] + __name__ = '<cdata>' + + def __init__(self, *args): + raise TypeError("cannot instantiate %r" % (self.__class__,)) + + @classmethod + def _newp(cls, init): + raise TypeError("expected a pointer or array ctype, got '%s'" + % (cls._get_c_name(),)) + + @staticmethod + def _to_ctypes(value): + raise TypeError + + @classmethod + def _arg_to_ctypes(cls, *value): + try: + ctype = cls._ctype + except AttributeError: + raise TypeError("cannot create an instance of %r" % (cls,)) + if value: + res = cls._to_ctypes(*value) + if not isinstance(res, ctype): + res = cls._ctype(res) + else: + res = cls._ctype() + return res + + @classmethod + def _create_ctype_obj(cls, init): + if init is None: + return cls._arg_to_ctypes() + else: + return cls._arg_to_ctypes(init) + + @staticmethod + def _from_ctypes(ctypes_value): + raise TypeError + + @classmethod + def _get_c_name(cls, replace_with=''): + return cls._reftypename.replace(' &', replace_with) + + @classmethod + def _fix_class(cls): + cls.__name__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__module__ = 'ffi' + + def _get_own_repr(self): + raise NotImplementedError + + def _addr_repr(self, address): + if address == 0: + return 'NULL' + else: + if address < 0: + address += 1 << (8*ctypes.sizeof(ctypes.c_void_p)) + return '0x%x' % address + + def __repr__(self, c_name=None): + own = self._get_own_repr() + return '<cdata %r %s>' % (c_name or self._get_c_name(), own) + + def _convert_to_address(self, BClass): + if BClass is None: + raise TypeError("cannot convert %r to an address" % ( + self._get_c_name(),)) + else: + raise TypeError("cannot convert %r to %r" % ( + self._get_c_name(), BClass._get_c_name())) + + @classmethod + def _get_size(cls): + return ctypes.sizeof(cls._ctype) + + def _get_size_of_instance(self): + return ctypes.sizeof(self._ctype) + + @classmethod + def _cast_from(cls, source): + raise TypeError("cannot cast to %r" % (cls._get_c_name(),)) + + def _cast_to_integer(self): + return self._convert_to_address(None) + + @classmethod + def _alignment(cls): + return ctypes.alignment(cls._ctype) + + def __iter__(self): + raise TypeError("cdata %r does not support iteration" % ( + self._get_c_name()),) + + def _make_cmp(name): + cmpfunc = getattr(operator, name) + def cmp(self, other): + v_is_ptr = not isinstance(self, CTypesGenericPrimitive) + w_is_ptr = (isinstance(other, CTypesData) and + not isinstance(other, CTypesGenericPrimitive)) + if v_is_ptr and w_is_ptr: + return cmpfunc(self._convert_to_address(None), + other._convert_to_address(None)) + elif v_is_ptr or w_is_ptr: + return NotImplemented + else: + if isinstance(self, CTypesGenericPrimitive): + self = self._value + if isinstance(other, CTypesGenericPrimitive): + other = other._value + return cmpfunc(self, other) + cmp.func_name = name + return cmp + + __eq__ = _make_cmp('__eq__') + __ne__ = _make_cmp('__ne__') + __lt__ = _make_cmp('__lt__') + __le__ = _make_cmp('__le__') + __gt__ = _make_cmp('__gt__') + __ge__ = _make_cmp('__ge__') + + def __hash__(self): + return hash(self._convert_to_address(None)) + + def _to_string(self, maxlen): + raise TypeError("string(): %r" % (self,)) + + +class CTypesGenericPrimitive(CTypesData): + __slots__ = [] + + def __hash__(self): + return
hash(self._value) + + def _get_own_repr(self): + return repr(self._from_ctypes(self._value)) + + +class CTypesGenericArray(CTypesData): + __slots__ = [] + + @classmethod + def _newp(cls, init): + return cls(init) + + def __iter__(self): + for i in xrange(len(self)): + yield self[i] + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + +class CTypesGenericPtr(CTypesData): + __slots__ = ['_address', '_as_ctype_ptr'] + _automatic_casts = False + kind = "pointer" + + @classmethod + def _newp(cls, init): + return cls(init) + + @classmethod + def _cast_from(cls, source): + if source is None: + address = 0 + elif isinstance(source, CTypesData): + address = source._cast_to_integer() + elif isinstance(source, (int, long)): + address = source + else: + raise TypeError("bad type for cast to %r: %r" % + (cls, type(source).__name__)) + return cls._new_pointer_at(address) + + @classmethod + def _new_pointer_at(cls, address): + self = cls.__new__(cls) + self._address = address + self._as_ctype_ptr = ctypes.cast(address, cls._ctype) + return self + + def _get_own_repr(self): + try: + return self._addr_repr(self._address) + except AttributeError: + return '???' + + def _cast_to_integer(self): + return self._address + + def __nonzero__(self): + return bool(self._address) + __bool__ = __nonzero__ + + @classmethod + def _to_ctypes(cls, value): + if not isinstance(value, CTypesData): + raise TypeError("unexpected %s object" % type(value).__name__) + address = value._convert_to_address(cls) + return ctypes.cast(address, cls._ctype) + + @classmethod + def _from_ctypes(cls, ctypes_ptr): + address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 + return cls._new_pointer_at(address) + + @classmethod + def _initialize(cls, ctypes_ptr, value): + if value: + ctypes_ptr.contents = cls._to_ctypes(value).contents + + def _convert_to_address(self, BClass): + if (BClass in (self.__class__, None) or BClass._automatic_casts + or self._automatic_casts): + return self._address + else: + return CTypesData._convert_to_address(self, BClass) + + +class CTypesBaseStructOrUnion(CTypesData): + __slots__ = ['_blob'] + + @classmethod + def _create_ctype_obj(cls, init): + # may be overridden + raise TypeError("cannot instantiate opaque type %s" % (cls,)) + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + @classmethod + def _offsetof(cls, fieldname): + return getattr(cls._ctype, fieldname).offset + + def _convert_to_address(self, BClass): + if getattr(BClass, '_BItem', None) is self.__class__: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @classmethod + def _from_ctypes(cls, ctypes_struct_or_union): + self = cls.__new__(cls) + self._blob = ctypes_struct_or_union + return self + + @classmethod + def _to_ctypes(cls, value): + return value._blob + + def __repr__(self, c_name=None): + return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) + + +class CTypesBackend(object): + + PRIMITIVE_TYPES = { + 'char': ctypes.c_char, + 'short': ctypes.c_short, + 'int': ctypes.c_int, + 'long': ctypes.c_long, + 'long long': ctypes.c_longlong, + 'signed char': ctypes.c_byte, + 'unsigned char': ctypes.c_ubyte, + 'unsigned short': ctypes.c_ushort, + 'unsigned int': ctypes.c_uint, + 'unsigned long': ctypes.c_ulong, + 'unsigned long long': ctypes.c_ulonglong, + 'float': ctypes.c_float, + 'double': ctypes.c_double, + '_Bool': ctypes.c_bool, + } + + for _name in ['unsigned long long', 'unsigned long', + 'unsigned int', 
'unsigned short', 'unsigned char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] + + for _name in ['long long', 'long', 'int', 'short', 'signed char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] + PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] + + + def __init__(self): + self.RTLD_LAZY = 0 # not supported anyway by ctypes + self.RTLD_NOW = 0 + self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL + self.RTLD_LOCAL = ctypes.RTLD_LOCAL + + def set_ffi(self, ffi): + self.ffi = ffi + + def _get_types(self): + return CTypesData, CTypesType + + def load_library(self, path, flags=0): + cdll = ctypes.CDLL(path, flags) + return CTypesLibrary(self, cdll) + + def new_void_type(self): + class CTypesVoid(CTypesData): + __slots__ = [] + _reftypename = 'void &' + @staticmethod + def _from_ctypes(novalue): + return None + @staticmethod + def _to_ctypes(novalue): + if novalue is not None: + raise TypeError("None expected, got %s object" % + (type(novalue).__name__,)) + return None + CTypesVoid._fix_class() + return CTypesVoid + + def new_primitive_type(self, name): + if name == 'wchar_t': + raise NotImplementedError(name) + ctype = self.PRIMITIVE_TYPES[name] + if name == 'char': + kind = 'char' + elif name in ('float', 'double'): + kind = 'float' + else: + if name in ('signed char', 'unsigned char'): + kind = 'byte' + elif name == '_Bool': + kind = 'bool' + else: + kind = 'int' + is_signed = (ctype(-1).value == -1) + # + def _cast_source_to_int(source): + if isinstance(source, (int, long, float)): + source = int(source) + elif isinstance(source, CTypesData): + source = source._cast_to_integer() + elif isinstance(source, bytes): + source = ord(source) + elif source is None: + source = 0 + else: + raise TypeError("bad type for cast to %r: %r" % + (CTypesPrimitive, type(source).__name__)) + return source + # + kind1 = kind + class CTypesPrimitive(CTypesGenericPrimitive): + __slots__ = ['_value'] + _ctype = ctype + _reftypename = '%s &' % name + kind = kind1 + + def __init__(self, value): + self._value = value + + @staticmethod + def _create_ctype_obj(init): + if init is None: + return ctype() + return ctype(CTypesPrimitive._to_ctypes(init)) + + if kind == 'int' or kind == 'byte': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = ctype(source).value # cast within range + return cls(source) + def __int__(self): + return self._value + + if kind == 'bool': + @classmethod + def _cast_from(cls, source): + if not isinstance(source, (int, long, float)): + source = _cast_source_to_int(source) + return cls(bool(source)) + def __int__(self): + return int(self._value) + + if kind == 'char': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = bytechr(source & 0xFF) + return cls(source) + def __int__(self): + return ord(self._value) + + if kind == 'float': + @classmethod + def _cast_from(cls, source): + if isinstance(source, float): + pass + elif isinstance(source, CTypesGenericPrimitive): + if hasattr(source, 
'__float__'): + source = float(source) + else: + source = int(source) + else: + source = _cast_source_to_int(source) + source = ctype(source).value # fix precision + return cls(source) + def __int__(self): + return int(self._value) + def __float__(self): + return self._value + + _cast_to_integer = __int__ + + if kind == 'int' or kind == 'byte' or kind == 'bool': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long)): + if isinstance(x, CTypesData): + x = int(x) + else: + raise TypeError("integer expected, got %s" % + type(x).__name__) + if ctype(x).value != x: + if not is_signed and x < 0: + raise OverflowError("%s: negative integer" % name) + else: + raise OverflowError("%s: integer out of bounds" + % name) + return x + + if kind == 'char': + @staticmethod + def _to_ctypes(x): + if isinstance(x, bytes) and len(x) == 1: + return x + if isinstance(x, CTypesPrimitive): # > + return x._value + raise TypeError("character expected, got %s" % + type(x).__name__) + def __nonzero__(self): + return ord(self._value) != 0 + else: + def __nonzero__(self): + return self._value != 0 + __bool__ = __nonzero__ + + if kind == 'float': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long, float, CTypesData)): + raise TypeError("float expected, got %s" % + type(x).__name__) + return ctype(x).value + + @staticmethod + def _from_ctypes(value): + return getattr(value, 'value', value) + + @staticmethod + def _initialize(blob, init): + blob.value = CTypesPrimitive._to_ctypes(init) + + if kind == 'char': + def _to_string(self, maxlen): + return self._value + if kind == 'byte': + def _to_string(self, maxlen): + return chr(self._value & 0xff) + # + CTypesPrimitive._fix_class() + return CTypesPrimitive + + def new_pointer_type(self, BItem): + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'charp' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' + else: + kind = 'generic' + # + class CTypesPtr(CTypesGenericPtr): + __slots__ = ['_own'] + if kind == 'charp': + __slots__ += ['__as_strbuf'] + _BItem = BItem + if hasattr(BItem, '_ctype'): + _ctype = ctypes.POINTER(BItem._ctype) + _bitem_size = ctypes.sizeof(BItem._ctype) + else: + _ctype = ctypes.c_void_p + if issubclass(BItem, CTypesGenericArray): + _reftypename = BItem._get_c_name('(* &)') + else: + _reftypename = BItem._get_c_name(' * &') + + def __init__(self, init): + ctypeobj = BItem._create_ctype_obj(init) + if kind == 'charp': + self.__as_strbuf = ctypes.create_string_buffer( + ctypeobj.value + b'\x00') + self._as_ctype_ptr = ctypes.cast( + self.__as_strbuf, self._ctype) + else: + self._as_ctype_ptr = ctypes.pointer(ctypeobj) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own = True + + def __add__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address + + other * self._bitem_size) + else: + return NotImplemented + + def __sub__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address - + other * self._bitem_size) + elif type(self) is type(other): + return (self._address - other._address) // self._bitem_size + else: + return NotImplemented + + def __getitem__(self, index): + if getattr(self, '_own', False) and index != 0: + raise IndexError + return BItem._from_ctypes(self._as_ctype_ptr[index]) + + def __setitem__(self, index, 
value): + self._as_ctype_ptr[index] = BItem._to_ctypes(value) + + if kind == 'charp' or kind == 'voidp': + @classmethod + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) + else: + return super(CTypesPtr, cls)._arg_to_ctypes(*value) + + if kind == 'charp' or kind == 'bytep': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = sys.maxsize + p = ctypes.cast(self._as_ctype_ptr, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % ( + ctypes.sizeof(self._as_ctype_ptr.contents),) + return super(CTypesPtr, self)._get_own_repr() + # + if (BItem is self.ffi._get_cached_btype(model.void_type) or + BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): + CTypesPtr._automatic_casts = True + # + CTypesPtr._fix_class() + return CTypesPtr + + def new_array_type(self, CTypesPtr, length): + if length is None: + brackets = ' &[]' + else: + brackets = ' &[%d]' % length + BItem = CTypesPtr._BItem + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'char' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'byte' + else: + kind = 'generic' + # + class CTypesArray(CTypesGenericArray): + __slots__ = ['_blob', '_own'] + if length is not None: + _ctype = BItem._ctype * length + else: + __slots__.append('_ctype') + _reftypename = BItem._get_c_name(brackets) + _declared_length = length + _CTPtr = CTypesPtr + + def __init__(self, init): + if length is None: + if isinstance(init, (int, long)): + len1 = init + init = None + elif kind == 'char' and isinstance(init, bytes): + len1 = len(init) + 1 # extra null + else: + init = tuple(init) + len1 = len(init) + self._ctype = BItem._ctype * len1 + self._blob = self._ctype() + self._own = True + if init is not None: + self._initialize(self._blob, init) + + @staticmethod + def _initialize(blob, init): + if isinstance(init, bytes): + init = [init[i:i+1] for i in range(len(init))] + else: + if isinstance(init, CTypesGenericArray): + if (len(init) != len(blob) or + not isinstance(init, CTypesArray)): + raise TypeError("length/type mismatch: %s" % (init,)) + init = tuple(init) + if len(init) > len(blob): + raise IndexError("too many initializers") + addr = ctypes.cast(blob, ctypes.c_void_p).value + PTR = ctypes.POINTER(BItem._ctype) + itemsize = ctypes.sizeof(BItem._ctype) + for i, value in enumerate(init): + p = ctypes.cast(addr + i * itemsize, PTR) + BItem._initialize(p.contents, value) + + def __len__(self): + return len(self._blob) + + def __getitem__(self, index): + if not (0 <= index < len(self._blob)): + raise IndexError + return BItem._from_ctypes(self._blob[index]) + + def __setitem__(self, index, value): + if not (0 <= index < len(self._blob)): + raise IndexError + self._blob[index] = BItem._to_ctypes(value) + + if kind == 'char' or kind == 'byte': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = len(self._blob) + p = ctypes.cast(self._blob, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % (ctypes.sizeof(self._blob),) + return super(CTypesArray, self)._get_own_repr() + + def _convert_to_address(self, BClass): + if 
BClass in (CTypesPtr, None) or BClass._automatic_casts: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @staticmethod + def _from_ctypes(ctypes_array): + self = CTypesArray.__new__(CTypesArray) + self._blob = ctypes_array + return self + + @staticmethod + def _arg_to_ctypes(value): + return CTypesPtr._arg_to_ctypes(value) + + def __add__(self, other): + if isinstance(other, (int, long)): + return CTypesPtr._new_pointer_at( + ctypes.addressof(self._blob) + + other * ctypes.sizeof(BItem._ctype)) + else: + return NotImplemented + + @classmethod + def _cast_from(cls, source): + raise NotImplementedError("casting to %r" % ( + cls._get_c_name(),)) + # + CTypesArray._fix_class() + return CTypesArray + + def _new_struct_or_union(self, kind, name, base_ctypes_class): + # + class struct_or_union(base_ctypes_class): + pass + struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind + # + class CTypesStructOrUnion(CTypesBaseStructOrUnion): + __slots__ = ['_blob'] + _ctype = struct_or_union + _reftypename = '%s &' % (name,) + _kind = kind = kind1 + # + CTypesStructOrUnion._fix_class() + return CTypesStructOrUnion + + def new_struct_type(self, name): + return self._new_struct_or_union('struct', name, ctypes.Structure) + + def new_union_type(self, name): + return self._new_struct_or_union('union', name, ctypes.Union) + + def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, + totalsize=-1, totalalignment=-1, sflags=0, + pack=0): + if totalsize >= 0 or totalalignment >= 0: + raise NotImplementedError("the ctypes backend of CFFI does not support " + "structures completed by verify(); please " + "compile and install the _cffi_backend module.") + struct_or_union = CTypesStructOrUnion._ctype + fnames = [fname for (fname, BField, bitsize) in fields] + btypes = [BField for (fname, BField, bitsize) in fields] + bitfields = [bitsize for (fname, BField, bitsize) in fields] + # + bfield_types = {} + cfields = [] + for (fname, BField, bitsize) in fields: + if bitsize < 0: + cfields.append((fname, BField._ctype)) + bfield_types[fname] = BField + else: + cfields.append((fname, BField._ctype, bitsize)) + bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 + elif pack: + struct_or_union._pack_ = pack + struct_or_union._fields_ = cfields + CTypesStructOrUnion._bfield_types = bfield_types + # + @staticmethod + def _create_ctype_obj(init): + result = struct_or_union() + if init is not None: + initialize(result, init) + return result + CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj + # + def initialize(blob, init): + if is_union: + if len(init) > 1: + raise ValueError("union initializer: %d items given, but " + "only one supported (use a dict if needed)" + % (len(init),)) + if not isinstance(init, dict): + if isinstance(init, (bytes, unicode)): + raise TypeError("union initializer: got a str") + init = tuple(init) + if len(init) > len(fnames): + raise ValueError("too many values for %s initializer" % + CTypesStructOrUnion._get_c_name()) + init = dict(zip(fnames, init)) + addr = ctypes.addressof(blob) + for fname, value in init.items(): + BField, bitsize = name2fieldtype[fname] + assert bitsize < 0, \ + "not implemented: initializer with bit fields" + offset = CTypesStructOrUnion._offsetof(fname) + PTR = ctypes.POINTER(BField._ctype) + p = ctypes.cast(addr + offset, PTR) + BField._initialize(p.contents, value) + is_union = CTypesStructOrUnion._kind == 'union' + name2fieldtype = dict(zip(fnames, zip(btypes, 
bitfields))) + # + for fname, BField, bitsize in fields: + if fname == '': + raise NotImplementedError("nested anonymous structs/unions") + if hasattr(CTypesStructOrUnion, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + if bitsize < 0: + def getter(self, fname=fname, BField=BField, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BField._from_ctypes(p.contents) + def setter(self, value, fname=fname, BField=BField): + setattr(self._blob, fname, BField._to_ctypes(value)) + # + if issubclass(BField, CTypesGenericArray): + setter = None + if BField._declared_length == 0: + def getter(self, fname=fname, BFieldPtr=BField._CTPtr, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BFieldPtr._from_ctypes(p) + # + else: + def getter(self, fname=fname, BField=BField): + return BField._from_ctypes(getattr(self._blob, fname)) + def setter(self, value, fname=fname, BField=BField): + # xxx obscure workaround + value = BField._to_ctypes(value) + oldvalue = getattr(self._blob, fname) + setattr(self._blob, fname, value) + if value != getattr(self._blob, fname): + setattr(self._blob, fname, oldvalue) + raise OverflowError("value too large for bitfield") + setattr(CTypesStructOrUnion, fname, property(getter, setter)) + # + CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp)) + for fname in fnames: + if hasattr(CTypesPtr, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + def getter(self, fname=fname): + return getattr(self[0], fname) + def setter(self, value, fname=fname): + setattr(self[0], fname, value) + setattr(CTypesPtr, fname, property(getter, setter)) + + def new_function_type(self, BArgs, BResult, has_varargs): + nameargs = [BArg._get_c_name() for BArg in BArgs] + if has_varargs: + nameargs.append('...') + nameargs = ', '.join(nameargs) + # + class CTypesFunctionPtr(CTypesGenericPtr): + __slots__ = ['_own_callback', '_name'] + _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), + *[BArg._ctype for BArg in BArgs], + use_errno=True) + _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) + + def __init__(self, init, error=None): + # create a callback to the Python callable init() + import traceback + assert not has_varargs, "varargs not supported for callbacks" + if getattr(BResult, '_ctype', None) is not None: + error = BResult._from_ctypes( + BResult._create_ctype_obj(error)) + else: + error = None + def callback(*args): + args2 = [] + for arg, BArg in zip(args, BArgs): + args2.append(BArg._from_ctypes(arg)) + try: + res2 = init(*args2) + res2 = BResult._to_ctypes(res2) + except: + traceback.print_exc() + res2 = error + if issubclass(BResult, CTypesGenericPtr): + if res2: + res2 = ctypes.cast(res2, ctypes.c_void_p).value + # .value: http://bugs.python.org/issue1574593 + else: + res2 = None + #print repr(res2) + return res2 + if issubclass(BResult, CTypesGenericPtr): + # The only pointers callbacks can return are void*s: + # http://bugs.python.org/issue5710 + callback_ctype = ctypes.CFUNCTYPE( + ctypes.c_void_p, + *[BArg._ctype for BArg in BArgs], + use_errno=True) + else: + callback_ctype = CTypesFunctionPtr._ctype + self._as_ctype_ptr = callback_ctype(callback) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + 
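# keep a strong reference to the Python callable: the CFUNCTYPE object
+ # in self._as_ctype_ptr (and 'init' itself) must stay alive for as long
+ # as C code may still call this function pointer
+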
self._own_callback = init + + @staticmethod + def _initialize(ctypes_ptr, value): + if value: + raise NotImplementedError("ctypes backend: not supported: " + "initializers for function pointers") + + def __repr__(self): + c_name = getattr(self, '_name', None) + if c_name: + i = self._reftypename.index('(* &)') + if self._reftypename[i-1] not in ' )*': + c_name = ' ' + c_name + c_name = self._reftypename.replace('(* &)', c_name) + return CTypesData.__repr__(self, c_name) + + def _get_own_repr(self): + if getattr(self, '_own_callback', None) is not None: + return 'calling %r' % (self._own_callback,) + return super(CTypesFunctionPtr, self)._get_own_repr() + + def __call__(self, *args): + if has_varargs: + assert len(args) >= len(BArgs) + extraargs = args[len(BArgs):] + args = args[:len(BArgs)] + else: + assert len(args) == len(BArgs) + ctypes_args = [] + for arg, BArg in zip(args, BArgs): + ctypes_args.append(BArg._arg_to_ctypes(arg)) + if has_varargs: + for i, arg in enumerate(extraargs): + if arg is None: + ctypes_args.append(ctypes.c_void_p(0)) # NULL + continue + if not isinstance(arg, CTypesData): + raise TypeError( + "argument %d passed in the variadic part " + "needs to be a cdata object (got %s)" % + (1 + len(BArgs) + i, type(arg).__name__)) + ctypes_args.append(arg._arg_to_ctypes(arg)) + result = self._as_ctype_ptr(*ctypes_args) + return BResult._from_ctypes(result) + # + CTypesFunctionPtr._fix_class() + return CTypesFunctionPtr + + def new_enum_type(self, name, enumerators, enumvalues, CTypesInt): + assert isinstance(name, str) + reverse_mapping = dict(zip(reversed(enumvalues), + reversed(enumerators))) + # + class CTypesEnum(CTypesInt): + __slots__ = [] + _reftypename = '%s &' % name + + def _get_own_repr(self): + value = self._value + try: + return '%d: %s' % (value, reverse_mapping[value]) + except KeyError: + return str(value) + + def _to_string(self, maxlen): + value = self._value + try: + return reverse_mapping[value] + except KeyError: + return str(value) + # + CTypesEnum._fix_class() + return CTypesEnum + + def get_errno(self): + return ctypes.get_errno() + + def set_errno(self, value): + ctypes.set_errno(value) + + def string(self, b, maxlen=-1): + return b._to_string(maxlen) + + def buffer(self, bptr, size=-1): + raise NotImplementedError("buffer() with ctypes backend") + + def sizeof(self, cdata_or_BType): + if isinstance(cdata_or_BType, CTypesData): + return cdata_or_BType._get_size_of_instance() + else: + assert issubclass(cdata_or_BType, CTypesData) + return cdata_or_BType._get_size() + + def alignof(self, BType): + assert issubclass(BType, CTypesData) + return BType._alignment() + + def newp(self, BType, source): + if not issubclass(BType, CTypesData): + raise TypeError + return BType._newp(source) + + def cast(self, BType, source): + return BType._cast_from(source) + + def callback(self, BType, source, error, onerror): + assert onerror is None # XXX not implemented + return BType(source, error) + + _weakref_cache_ref = None + + def gcp(self, cdata, destructor, size=0): + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref + + if destructor is None: + try: + del 
weak_cache[MyRef(cdata)] + except KeyError: + raise TypeError("Can remove destructor only on an object " + "previously returned by ffi.gc()") + return None + + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) + return new_cdata + + typeof = type + + def getcname(self, BType, replace_with): + return BType._get_c_name(replace_with) + + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") + BField = BType._bfield_types[fieldname] + if BField is Ellipsis: + raise TypeError("not supported for bitfields") + return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) + + def rawaddressof(self, BTypePtr, cdata, offset=None): + if isinstance(cdata, CTypesBaseStructOrUnion): + ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) + elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): + ptr = type(cdata)._to_ctypes(cdata) + else: + raise TypeError("expected a <cdata 'struct-or-union'>") + if offset: + ptr = ctypes.cast( + ctypes.c_void_p( + ctypes.cast(ptr, ctypes.c_void_p).value + offset), + type(ptr)) + return BTypePtr._from_ctypes(ptr) + + +class CTypesLibrary(object): + + def __init__(self, backend, cdll): + self.backend = backend + self.cdll = cdll + + def load_function(self, BType, name): + c_func = getattr(self.cdll, name) + funcobj = BType._from_ctypes(c_func) + funcobj._name = name + return funcobj + + def read_variable(self, BType, name): + try: + ctypes_obj = BType._ctype.in_dll(self.cdll, name) + except AttributeError as e: + raise NotImplementedError(e) + return BType._from_ctypes(ctypes_obj) + + def write_variable(self, BType, name, value): + new_ctypes_obj = BType._to_ctypes(value) + ctypes_obj = BType._ctype.in_dll(self.cdll, name) + ctypes.memmove(ctypes.addressof(ctypes_obj), + ctypes.addressof(new_ctypes_obj), + ctypes.sizeof(BType._ctype)) diff --git a/server/www/packages/packages-linux/x64/cffi/cffi_opcode.py b/server/www/packages/packages-linux/x64/cffi/cffi_opcode.py new file mode 100644 index 0000000..a0df98d --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/cffi_opcode.py @@ -0,0 +1,187 @@ +from .error import VerificationError + +class CffiOp(object): + def __init__(self, op, arg): + self.op = op + self.arg = arg + + def as_c_expr(self): + if self.op is None: + assert isinstance(self.arg, str) + return '(_cffi_opcode_t)(%s)' % (self.arg,) + classname = CLASS_NAME[self.op] + return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg) + + def as_python_bytes(self): + if self.op is None and self.arg.isdigit(): + value = int(self.arg) # non-negative: '-' not in self.arg + if value >= 2**31: + raise
OverflowError("cannot emit %r: limited to 2**31-1" + % (self.arg,)) + return format_four_bytes(value) + if isinstance(self.arg, str): + raise VerificationError("cannot emit to Python: %r" % (self.arg,)) + return format_four_bytes((self.arg << 8) | self.op) + + def __str__(self): + classname = CLASS_NAME.get(self.op, self.op) + return '(%s %s)' % (classname, self.arg) + +def format_four_bytes(num): + return '\\x%02X\\x%02X\\x%02X\\x%02X' % ( + (num >> 24) & 0xFF, + (num >> 16) & 0xFF, + (num >> 8) & 0xFF, + (num ) & 0xFF) + +OP_PRIMITIVE = 1 +OP_POINTER = 3 +OP_ARRAY = 5 +OP_OPEN_ARRAY = 7 +OP_STRUCT_UNION = 9 +OP_ENUM = 11 +OP_FUNCTION = 13 +OP_FUNCTION_END = 15 +OP_NOOP = 17 +OP_BITFIELD = 19 +OP_TYPENAME = 21 +OP_CPYTHON_BLTN_V = 23 # varargs +OP_CPYTHON_BLTN_N = 25 # noargs +OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg) +OP_CONSTANT = 29 +OP_CONSTANT_INT = 31 +OP_GLOBAL_VAR = 33 +OP_DLOPEN_FUNC = 35 +OP_DLOPEN_CONST = 37 +OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 + +PRIM_VOID = 0 +PRIM_BOOL = 1 +PRIM_CHAR = 2 +PRIM_SCHAR = 3 +PRIM_UCHAR = 4 +PRIM_SHORT = 5 +PRIM_USHORT = 6 +PRIM_INT = 7 +PRIM_UINT = 8 +PRIM_LONG = 9 +PRIM_ULONG = 10 +PRIM_LONGLONG = 11 +PRIM_ULONGLONG = 12 +PRIM_FLOAT = 13 +PRIM_DOUBLE = 14 +PRIM_LONGDOUBLE = 15 + +PRIM_WCHAR = 16 +PRIM_INT8 = 17 +PRIM_UINT8 = 18 +PRIM_INT16 = 19 +PRIM_UINT16 = 20 +PRIM_INT32 = 21 +PRIM_UINT32 = 22 +PRIM_INT64 = 23 +PRIM_UINT64 = 24 +PRIM_INTPTR = 25 +PRIM_UINTPTR = 26 +PRIM_PTRDIFF = 27 +PRIM_SIZE = 28 +PRIM_SSIZE = 29 +PRIM_INT_LEAST8 = 30 +PRIM_UINT_LEAST8 = 31 +PRIM_INT_LEAST16 = 32 +PRIM_UINT_LEAST16 = 33 +PRIM_INT_LEAST32 = 34 +PRIM_UINT_LEAST32 = 35 +PRIM_INT_LEAST64 = 36 +PRIM_UINT_LEAST64 = 37 +PRIM_INT_FAST8 = 38 +PRIM_UINT_FAST8 = 39 +PRIM_INT_FAST16 = 40 +PRIM_UINT_FAST16 = 41 +PRIM_INT_FAST32 = 42 +PRIM_UINT_FAST32 = 43 +PRIM_INT_FAST64 = 44 +PRIM_UINT_FAST64 = 45 +PRIM_INTMAX = 46 +PRIM_UINTMAX = 47 +PRIM_FLOATCOMPLEX = 48 +PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 + +_NUM_PRIM = 52 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 + +_IO_FILE_STRUCT = -1 + +PRIMITIVE_TO_INDEX = { + 'char': PRIM_CHAR, + 'short': PRIM_SHORT, + 'int': PRIM_INT, + 'long': PRIM_LONG, + 'long long': PRIM_LONGLONG, + 'signed char': PRIM_SCHAR, + 'unsigned char': PRIM_UCHAR, + 'unsigned short': PRIM_USHORT, + 'unsigned int': PRIM_UINT, + 'unsigned long': PRIM_ULONG, + 'unsigned long long': PRIM_ULONGLONG, + 'float': PRIM_FLOAT, + 'double': PRIM_DOUBLE, + 'long double': PRIM_LONGDOUBLE, + 'float _Complex': PRIM_FLOATCOMPLEX, + 'double _Complex': PRIM_DOUBLECOMPLEX, + '_Bool': PRIM_BOOL, + 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, + 'int8_t': PRIM_INT8, + 'uint8_t': PRIM_UINT8, + 'int16_t': PRIM_INT16, + 'uint16_t': PRIM_UINT16, + 'int32_t': PRIM_INT32, + 'uint32_t': PRIM_UINT32, + 'int64_t': PRIM_INT64, + 'uint64_t': PRIM_UINT64, + 'intptr_t': PRIM_INTPTR, + 'uintptr_t': PRIM_UINTPTR, + 'ptrdiff_t': PRIM_PTRDIFF, + 'size_t': PRIM_SIZE, + 'ssize_t': PRIM_SSIZE, + 'int_least8_t': PRIM_INT_LEAST8, + 'uint_least8_t': PRIM_UINT_LEAST8, + 'int_least16_t': PRIM_INT_LEAST16, + 'uint_least16_t': PRIM_UINT_LEAST16, + 'int_least32_t': PRIM_INT_LEAST32, + 'uint_least32_t': PRIM_UINT_LEAST32, + 'int_least64_t': PRIM_INT_LEAST64, + 'uint_least64_t': PRIM_UINT_LEAST64, + 'int_fast8_t': PRIM_INT_FAST8, + 'uint_fast8_t': PRIM_UINT_FAST8, + 'int_fast16_t': PRIM_INT_FAST16, + 'uint_fast16_t': PRIM_UINT_FAST16, + 'int_fast32_t': PRIM_INT_FAST32, + 'uint_fast32_t': PRIM_UINT_FAST32, + 
'int_fast64_t': PRIM_INT_FAST64, + 'uint_fast64_t': PRIM_UINT_FAST64, + 'intmax_t': PRIM_INTMAX, + 'uintmax_t': PRIM_UINTMAX, + } + +F_UNION = 0x01 +F_CHECK_FIELDS = 0x02 +F_PACKED = 0x04 +F_EXTERNAL = 0x08 +F_OPAQUE = 0x10 + +G_FLAGS = dict([('_CFFI_' + _key, globals()[_key]) + for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED', + 'F_EXTERNAL', 'F_OPAQUE']]) + +CLASS_NAME = {} +for _name, _value in list(globals().items()): + if _name.startswith('OP_') and isinstance(_value, int): + CLASS_NAME[_value] = _name[3:] diff --git a/server/www/packages/packages-linux/x64/cffi/commontypes.py b/server/www/packages/packages-linux/x64/cffi/commontypes.py new file mode 100644 index 0000000..8ec97c7 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/commontypes.py @@ -0,0 +1,80 @@ +import sys +from . import model +from .error import FFIError + + +COMMON_TYPES = {} + +try: + # fetch "bool" and all simple Windows types + from _cffi_backend import _get_common_types + _get_common_types(COMMON_TYPES) +except ImportError: + pass + +COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE') +COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above + +for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + if _type.endswith('_t'): + COMMON_TYPES[_type] = _type +del _type + +_CACHE = {} + +def resolve_common_type(parser, commontype): + try: + return _CACHE[commontype] + except KeyError: + cdecl = COMMON_TYPES.get(commontype, commontype) + if not isinstance(cdecl, str): + result, quals = cdecl, 0 # cdecl is already a BaseType + elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + result, quals = model.PrimitiveType(cdecl), 0 + elif cdecl == 'set-unicode-needed': + raise FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) + else: + if commontype == cdecl: + raise FFIError( + "Unsupported type: %r. Please look at " + "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations " + "and file an issue if you think this type should really " + "be supported." % (commontype,)) + result, quals = parser.parse_type_and_quals(cdecl) # recursive + + assert isinstance(result, model.BaseTypeByIdentity) + _CACHE[commontype] = result, quals + return result, quals + + +# ____________________________________________________________ +# extra types for Windows (most of them are in commontypes.c) + + +def win_common_types(): + return { + "UNICODE_STRING": model.StructType( + "_UNICODE_STRING", + ["Length", + "MaximumLength", + "Buffer"], + [model.PrimitiveType("unsigned short"), + model.PrimitiveType("unsigned short"), + model.PointerType(model.PrimitiveType("wchar_t"))], + [-1, -1, -1]), + "PUNICODE_STRING": "UNICODE_STRING *", + "PCUNICODE_STRING": "const UNICODE_STRING *", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", + } + +if sys.platform == 'win32': + COMMON_TYPES.update(win_common_types()) diff --git a/server/www/packages/packages-linux/x64/cffi/cparser.py b/server/www/packages/packages-linux/x64/cffi/cparser.py new file mode 100644 index 0000000..ea27c48 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/cparser.py @@ -0,0 +1,963 @@ +from . import model +from .commontypes import COMMON_TYPES, resolve_common_type +from .error import FFIError, CDefError +try: + from . 
import _pycparser as pycparser +except ImportError: + import pycparser +import weakref, re, sys + +try: + if sys.version_info < (3,): + import thread as _thread + else: + import _thread + lock = _thread.allocate_lock() +except ImportError: + lock = None + +def _workaround_for_static_import_finders(): + # Issue #392: packaging tools like cx_Freeze cannot find these + # because pycparser uses exec dynamic import. This is an obscure + # workaround. This function is never called. + import pycparser.yacctab + import pycparser.lextab + +CDEF_SOURCE_STRING = "<cdef source string>" +_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", + re.DOTALL | re.MULTILINE) +_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" + r"\b((?:[^\n\\]|\\.)*?)$", + re.DOTALL | re.MULTILINE) +_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}") +_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$") +_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") +_r_words = re.compile(r"\w+|\S") +_parser_cache = None +_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") +_r_extern_python = re.compile(r'\bextern\s*"' + r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') +_r_star_const_space = re.compile( # matches "* const " + r"[*]\s*((const|volatile|restrict)\b\s*)+") +_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+" + r"\.\.\.") +_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.") + +def _get_parser(): + global _parser_cache + if _parser_cache is None: + _parser_cache = pycparser.CParser() + return _parser_cache + +def _workaround_for_old_pycparser(csource): + # Workaround for a pycparser issue (fixed between pycparser 2.10 and + # 2.14): "char*const***" gives us a wrong syntax tree, the same as + # for "char***(*const)". This means we can't tell the difference + # afterwards. But "char(*const(***))" gives us the right syntax + # tree. The issue only occurs if there are several stars in + # sequence with no parenthesis in between, just possibly qualifiers. + # Attempt to fix it by adding some parentheses in the source: each + # time we see "* const" or "* const *", we add an opening + # parenthesis before each star---the hard part is figuring out where + # to close them. + parts = [] + while True: + match = _r_star_const_space.search(csource) + if not match: + break + #print repr(''.join(parts)+csource), '=>', + parts.append(csource[:match.start()]) + parts.append('('); closing = ')' + parts.append(match.group()) # e.g.
"* const " + endpos = match.end() + if csource.startswith('*', endpos): + parts.append('('); closing += ')' + level = 0 + i = endpos + while i < len(csource): + c = csource[i] + if c == '(': + level += 1 + elif c == ')': + if level == 0: + break + level -= 1 + elif c in ',;=': + if level == 0: + break + i += 1 + csource = csource[endpos:i] + closing + csource[i:] + #print repr(''.join(parts)+csource) + parts.append(csource) + return ''.join(parts) + +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + parts.append(csource[:match.start()]) + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + +def _warn_for_string_literal(csource): + if '"' not in csource: + return + for line in csource.splitlines(): + if '"' in line and not line.lstrip().startswith('#'): + import warnings + warnings.warn("String literal found in cdef() or type source. " + "String literals are ignored here, but you should " + "remove them anyway because some character sequences " + "confuse pre-parsing.") + break + +def _warn_for_non_extern_non_static_global_variable(decl): + if not decl.storage: + import warnings + warnings.warn("Global variable '%s' in cdef(): for consistency " + "with C it should have a storage class specifier " + "(usually 'extern')" % (decl.name,)) + +def _preprocess(csource): + # Remove comments. NOTE: this only work because the cdef() section + # should not contain any string literal! + csource = _r_comment.sub(' ', csource) + # Remove the "#define FOO x" lines + macros = {} + for match in _r_define.finditer(csource): + macroname, macrovalue = match.groups() + macrovalue = macrovalue.replace('\\\n', '').strip() + macros[macroname] = macrovalue + csource = _r_define.sub('', csource) + # + if pycparser.__version__ < '2.14': + csource = _workaround_for_old_pycparser(csource) + # + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. 
+ csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) + # + # Replace `extern "Python"` with start/end markers + csource = _preprocess_extern_python(csource) + # + # Now there should not be any string literal left; warn if we get one + _warn_for_string_literal(csource) + # + # Replace "[...]" with "[__dotdotdotarray__]" + csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) + # + # Replace "...}" with "__dotdotdotNUM__}". This construction should + # occur only at the end of enums; at the end of structs we have "...;}" + # and at the end of vararg functions "...);". Also replace "=...[,}]" + # with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when + # giving an unknown value. + matches = list(_r_partial_enum.finditer(csource)) + for number, match in enumerate(reversed(matches)): + p = match.start() + if csource[p] == '=': + p2 = csource.find('...', p, match.end()) + assert p2 > p + csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number, + csource[p2+3:]) + else: + assert csource[p:p+3] == '...' + csource = '%s __dotdotdot%d__ %s' % (csource[:p], number, + csource[p+3:]) + # Replace "int ..." or "unsigned long int..." with "__dotdotdotint__" + csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource) + # Replace "float ..." or "double..." with "__dotdotdotfloat__" + csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource) + # Replace all remaining "..." with the same name, "__dotdotdot__", + # which is declared with a typedef for the purpose of C parsing. + return csource.replace('...', ' __dotdotdot__ '), macros + +def _common_type_names(csource): + # Look in the source for what looks like usages of types from the + # list of common types. A "usage" is approximated here as the + # appearance of the word, minus a "definition" of the type, which + # is the last word in a "typedef" statement. Approximative only + # but should be fine for all the common types. + look_for_words = set(COMMON_TYPES) + look_for_words.add(';') + look_for_words.add(',') + look_for_words.add('(') + look_for_words.add(')') + look_for_words.add('typedef') + words_used = set() + is_typedef = False + paren = 0 + previous_word = '' + for word in _r_words.findall(csource): + if word in look_for_words: + if word == ';': + if is_typedef: + words_used.discard(previous_word) + look_for_words.discard(previous_word) + is_typedef = False + elif word == 'typedef': + is_typedef = True + paren = 0 + elif word == '(': + paren += 1 + elif word == ')': + paren -= 1 + elif word == ',': + if is_typedef and paren == 0: + words_used.discard(previous_word) + look_for_words.discard(previous_word) + else: # word in COMMON_TYPES + words_used.add(word) + previous_word = word + return words_used + + +class Parser(object): + + def __init__(self): + self._declarations = {} + self._included_declarations = set() + self._anonymous_counter = 0 + self._structnode2type = weakref.WeakKeyDictionary() + self._options = {} + self._int_constants = {} + self._recomplete = [] + self._uses_new_feature = None + + def _parse(self, csource): + csource, macros = _preprocess(csource) + # XXX: for more efficiency we would need to poke into the + # internals of CParser... 
the following registers the + # typedefs, because their presence or absence influences the + # parsing itself (but what they are typedef'ed to plays no role) + ctn = _common_type_names(csource) + typenames = [] + for name in sorted(self._declarations): + if name.startswith('typedef '): + name = name[8:] + typenames.append(name) + ctn.discard(name) + typenames += sorted(ctn) + # + csourcelines = [] + csourcelines.append('# 1 "<cdef automatic initialization code>"') + for typename in typenames: + csourcelines.append('typedef int %s;' % typename) + csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,' + ' __dotdotdot__;') + # this forces pycparser to consider the following in the file + # called <cdef source string> from line 1 + csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,)) + csourcelines.append(csource) + fullcsource = '\n'.join(csourcelines) + if lock is not None: + lock.acquire() # pycparser is not thread-safe... + try: + ast = _get_parser().parse(fullcsource) + except pycparser.c_parser.ParseError as e: + self.convert_pycparser_error(e, csource) + finally: + if lock is not None: + lock.release() + # csource will be used to find buggy source text + return ast, macros, csource + + def _convert_pycparser_error(self, e, csource): + # xxx look for ":NUM:" at the start of str(e) + # and interpret that as a line number. This will not work if + # the user gives explicit ``# NUM "FILE"`` directives. + line = None + msg = str(e) + match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) + if match: + linenum = int(match.group(1), 10) + csourcelines = csource.splitlines() + if 1 <= linenum <= len(csourcelines): + line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) + if line: + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) + else: + msg = 'parse error\n%s' % (msg,) + raise CDefError(msg) + + def parse(self, csource, override=False, packed=False, pack=None, + dllexport=False): + if packed: + if packed != True: + raise ValueError("'packed' should be False or True; use " + "'pack' to give another value") + if pack: + raise ValueError("cannot give both 'pack' and 'packed'") + pack = 1 + elif pack: + if pack & (pack - 1): + raise ValueError("'pack' must be a power of two, not %r" % + (pack,)) + else: + pack = 0 + prev_options = self._options + try: + self._options = {'override': override, + 'packed': pack, + 'dllexport': dllexport} + self._internal_parse(csource) + finally: + self._options = prev_options + + def _internal_parse(self, csource): + ast, macros, csource = self._parse(csource) + # add the macros + self._process_macros(macros) + # find the first "__dotdotdot__" and use that as a separator + # between the repeated typedefs and the real csource + iterator = iter(ast.ext) + for decl in iterator: + if decl.name == '__dotdotdot__': + break + else: + assert 0 + current_decl = None + # + try: + self._inside_extern_python = '__cffi_extern_python_stop' + for decl in iterator: + current_decl = decl + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise CDefError("typedef does not declare any name", + decl) + quals = 0 + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and + decl.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_type(decl) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and +
isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_ptr_type(decl) + else: + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name, partial_length_ok=True) + self._declare('typedef ' + decl.name, realtype, quals=quals) + elif decl.__class__.__name__ == 'Pragma': + pass # skip pragma, only in pycparser 2.15 + else: + raise CDefError("unexpected <%s>: this construct is valid " + "C but not valid in cdef()" % + decl.__class__.__name__, decl) + except CDefError as e: + if len(e.args) == 1: + e.args = e.args + (current_decl,) + raise + except FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations + raise FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _add_integer_constant(self, name, int_str): + int_str = int_str.lower().rstrip("ul") + neg = int_str.startswith('-') + if neg: + int_str = int_str[1:] + # "010" is not valid oct in py3 + if (int_str.startswith("0") and int_str != '0' + and not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + pyvalue = int(int_str, 0) + if neg: + pyvalue = -pyvalue + self._add_constants(name, pyvalue) + self._declare('macro ' + name, pyvalue) + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + if _r_int_literal.match(value): + self._add_integer_constant(key, value) + elif value == '...': + self._declare('macro ' + key, value) + else: + raise CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... 
(literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) + + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._options.get('dllexport'): + tag = 'dllexport_python ' + elif self._inside_extern_python == '__cffi_extern_python_start': + tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' + else: + tag = 'function ' + self._declare(tag + decl.name, tp) + + def _parse_decl(self, decl): + node = decl.type + if isinstance(node, pycparser.c_ast.FuncDecl): + tp, quals = self._get_type_and_quals(node, name=decl.name) + assert isinstance(tp, model.RawFunctionType) + self._declare_function(tp, quals, decl) + else: + if isinstance(node, pycparser.c_ast.Struct): + self._get_struct_union_enum_type('struct', node) + elif isinstance(node, pycparser.c_ast.Union): + self._get_struct_union_enum_type('union', node) + elif isinstance(node, pycparser.c_ast.Enum): + self._get_struct_union_enum_type('enum', node) + elif not decl.name: + raise CDefError("construct does not declare any variable", + decl) + # + if decl.name: + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) + if tp.is_raw_function: + self._declare_function(tp, quals, decl) + elif (tp.is_integer_type() and + hasattr(decl, 'init') and + hasattr(decl.init, 'value') and + _r_int_literal.match(decl.init.value)): + self._add_integer_constant(decl.name, decl.init.value) + elif (tp.is_integer_type() and + isinstance(decl.init, pycparser.c_ast.UnaryOp) and + decl.init.op == '-' and + hasattr(decl.init.expr, 'value') and + _r_int_literal.match(decl.init.expr.value)): + self._add_integer_constant(decl.name, + '-' + decl.init.expr.value) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # "void __cffi_extern_python_stop;" + self._inside_extern_python = decl.name + else: + if self._inside_extern_python !='__cffi_extern_python_stop': + raise CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + _warn_for_non_extern_non_static_global_variable(decl) + self._declare('variable ' + decl.name, tp, quals=quals) + + def parse_type(self, cdecl): + return self.parse_type_and_quals(cdecl)[0] + + def parse_type_and_quals(self, cdecl): + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] + assert not macros + exprnode = ast.ext[-1].type.args.params[0] + if isinstance(exprnode, pycparser.c_ast.ID): + raise CDefError("unknown identifier '%s'" % (exprnode.name,)) + return self._get_type_and_quals(exprnode.type) + + def _declare(self, name, obj, included=False, quals=0): + if name in self._declarations: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: + return + if not self._options.get('override'): + raise FFIError( + "multiple declarations of %s (for interactive usage, " + "try cdef(xx, override=True))" % (name,)) + assert '__dotdotdot__' not in name.split() + self._declarations[name] = (obj, quals) + if included: + self._included_declarations.add(obj) + + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 
'const' in type.quals: + quals |= model.Q_CONST + if 'volatile' in type.quals: + quals |= model.Q_VOLATILE + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): + if isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + if (isinstance(type, model.StructOrUnionOrEnum) and + type.name.startswith('$') and type.name[1:].isdigit() and + type.forcename is None and declname is not None): + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) + + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): + # first, dereference typedefs, if we have it already parsed, we're good + if (isinstance(typenode, pycparser.c_ast.TypeDecl) and + isinstance(typenode.type, pycparser.c_ast.IdentifierType) and + len(typenode.type.names) == 1 and + ('typedef ' + typenode.type.names[0]) in self._declarations): + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.ArrayDecl): + # array type + if typenode.dim is None: + length = None + else: + length = self._parse_constant( + typenode.dim, partial_length_ok=partial_length_ok) + tp, quals = self._get_type_and_quals(typenode.type, + partial_length_ok=partial_length_ok) + return model.ArrayType(tp, length), quals + # + if isinstance(typenode, pycparser.c_ast.PtrDecl): + # pointer type + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) + type = typenode.type + if isinstance(type, pycparser.c_ast.IdentifierType): + # assume a primitive type. 
get it from .names, but reduce + # synonyms to a single chosen combination + names = list(type.names) + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names + ident = ' '.join(names) + if ident == 'void': + return model.void_type, quals + if ident == '__dotdotdot__': + raise FFIError(':%d: bad usage of "..."' % + typenode.coord.line) + tp0, quals0 = resolve_common_type(self, ident) + return tp0, (quals | quals0) + # + if isinstance(type, pycparser.c_ast.Struct): + # 'struct foobar' + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals + # + if isinstance(type, pycparser.c_ast.Union): + # 'union foobar' + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals + # + if isinstance(type, pycparser.c_ast.Enum): + # 'enum foobar' + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.FuncDecl): + # a function type + return self._parse_function_type(typenode, name), 0 + # + # nested anonymous structs or unions end up here + if isinstance(typenode, pycparser.c_ast.Struct): + return self._get_struct_union_enum_type('struct', typenode, name, + nested=True), 0 + if isinstance(typenode, pycparser.c_ast.Union): + return self._get_struct_union_enum_type('union', typenode, name, + nested=True), 0 + # + raise FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) + + def _parse_function_type(self, typenode, funcname=None): + params = list(getattr(typenode.args, 'params', [])) + for i, arg in enumerate(params): + if not hasattr(arg, 'type'): + raise CDefError("%s arg %d: unknown type '%s'" + " (if you meant to use the old C syntax of giving" + " untyped arguments, it is not supported)" + % (funcname or 'in expression', i + 1, + getattr(arg, 'name', '?'))) + ellipsis = ( + len(params) > 0 and + isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and + isinstance(params[-1].type.type, + pycparser.c_ast.IdentifierType) and + params[-1].type.type.names == ['__dotdotdot__']) + if ellipsis: + params.pop() + if not params: + raise CDefError( + "%s: a function with only '(...)' as argument" + " is not correct C" % (funcname or 'in expression')) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) + for argdeclnode in params] + if not ellipsis and args == [model.void_type]: + args = [] + result, quals = self._get_type_and_quals(typenode.type) + # the 'quals' on the result type are ignored. HACK: we abuse them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above.
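+ # (illustrative: after the rewrite above, "int __stdcall f(int);" reaches
+ # this point as "int volatile volatile const f(int);", so the result
+ # type's quals end in ['volatile', 'volatile', 'const'] and the check
+ # below restores abi = '__stdcall')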
+ abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) + + def _as_func_arg(self, type, quals): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item, quals) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): + # First, a level of caching on the exact 'type' node of the AST. + # This is obscure, but needed because pycparser "unrolls" declarations + # such as "typedef struct { } foo_t, *foo_p" and we end up with + # an AST that is not a tree, but a DAG, with the "type" node of the + # two branches foo_t and foo_p of the trees being the same node. + # It's a bit silly but detecting "DAG-ness" in the AST tree seems + # to be the only way to distinguish this case from two independent + # structs. See test_struct_with_two_usages. + try: + return self._structnode2type[type] + except KeyError: + pass + # + # Note that this must handle parsing "struct foo" any number of + # times and always return the same StructType object. Additionally, + # one of these times (not necessarily the first), the fields of + # the struct can be specified with "struct foo { ...fields... }". + # If no name is given, then we have to create a new anonymous struct + # with no caching; in this case, the fields are either specified + # right now or never. + # + force_name = name + name = type.name + # + # get the type or create it if needed + if name is None: + # 'force_name' is used to guess a more readable name for + # anonymous structs, for the common case "typedef struct { } foo". + if force_name is not None: + explicit_name = '$%s' % force_name + else: + self._anonymous_counter += 1 + explicit_name = '$%d' % self._anonymous_counter + tp = None + else: + explicit_name = name + key = '%s %s' % (kind, name) + tp, _ = self._declarations.get(key, (None, None)) + # + if tp is None: + if kind == 'struct': + tp = model.StructType(explicit_name, None, None, None) + elif kind == 'union': + tp = model.UnionType(explicit_name, None, None, None) + elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") + tp = self._build_enum_type(explicit_name, type.values) + else: + raise AssertionError("kind = %r" % (kind,)) + if name is not None: + self._declare(key, tp) + else: + if kind == 'enum' and type.values is not None: + raise NotImplementedError( + "enum %s: the '{}' declaration should appear on the first " + "time the enum is mentioned, not later" % explicit_name) + if not tp.forcename: + tp.force_the_name(force_name) + if tp.forcename and '$' in tp.name: + self._declare('anonymous %s' % tp.forcename, tp) + # + self._structnode2type[type] = tp + # + # enums: done here + if kind == 'enum': + return tp + # + # is there a 'type.decls'? If yes, then this is the place in the + # C sources that declare the fields. If no, then just return the + # existing type, possibly still incomplete. 
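+ # (e.g. a bare "struct foo;" or "struct foo *p;" mention carries no
+ # 'decls' and returns the possibly-opaque cached type here, while
+ # "struct foo { int x; };" goes on to fill in the fields below)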
+ if type.decls is None: + return tp + # + if tp.fldnames is not None: + raise CDefError("duplicate declaration of struct %s" % name) + fldnames = [] + fldtypes = [] + fldbitsize = [] + fldquals = [] + for decl in type.decls: + if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and + ''.join(decl.type.names) == '__dotdotdot__'): + # XXX pycparser is inconsistent: 'names' should be a list + # of strings, but is sometimes just one string. Use + # str.join() as a way to cope with both. + self._make_partial(tp, nested) + continue + if decl.bitsize is None: + bitsize = -1 + else: + bitsize = self._parse_constant(decl.bitsize) + self._partial_length = False + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) + if self._partial_length: + self._make_partial(tp, nested) + if isinstance(type, model.StructType) and type.partial: + self._make_partial(tp, nested) + fldnames.append(decl.name or '') + fldtypes.append(type) + fldbitsize.append(bitsize) + fldquals.append(fqual) + tp.fldnames = tuple(fldnames) + tp.fldtypes = tuple(fldtypes) + tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) + if fldbitsize != [-1] * len(fldbitsize): + if isinstance(tp, model.StructType) and tp.partial: + raise NotImplementedError("%s: using both bitfields and '...;'" + % (tp,)) + tp.packed = self._options.get('packed') + if tp.completed: # must be re-completed: it is not opaque any more + tp.completed = 0 + self._recomplete.append(tp) + return tp + + def _make_partial(self, tp, nested): + if not isinstance(tp, model.StructOrUnion): + raise CDefError("%s cannot be partial" % (tp,)) + if not tp.has_c_name() and not nested: + raise NotImplementedError("%s is partial but has no C name" %(tp,)) + tp.partial = True + + def _parse_constant(self, exprnode, partial_length_ok=False): + # for now, limited to expressions that are an immediate number + # or positive/negative number + if isinstance(exprnode, pycparser.c_ast.Constant): + s = exprnode.value + if '0' <= s[0] <= '9': + s = s.rstrip('uUlL') + try: + if s.startswith('0'): + return int(s, 8) + else: + return int(s, 10) + except ValueError: + if len(s) > 1: + if s.lower()[0:2] == '0x': + return int(s, 16) + elif s.lower()[0:2] == '0b': + return int(s, 2) + raise CDefError("invalid constant %r" % (s,)) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '+'): + return self._parse_constant(exprnode.expr) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '-'): + return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] + # + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name == '__dotdotdotarray__'): + if partial_length_ok: + self._partial_length = True + return '...' 
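+ # ('[...]' array lengths are accepted only where the caller can complete
+ # them later, i.e. when partial_length_ok=True as for typedefs and
+ # struct fields; anywhere else the length cannot be derived)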
+ raise FFIError(":%d: unsupported '[...]' here, cannot derive " + "the actual array length in this context" + % exprnode.coord.line) + # + if isinstance(exprnode, pycparser.c_ast.BinaryOp): + left = self._parse_constant(exprnode.left) + right = self._parse_constant(exprnode.right) + if exprnode.op == '+': + return left + right + elif exprnode.op == '-': + return left - right + elif exprnode.op == '*': + return left * right + elif exprnode.op == '/': + return self._c_div(left, right) + elif exprnode.op == '%': + return left - self._c_div(left, right) * right + elif exprnode.op == '<<': + return left << right + elif exprnode.op == '>>': + return left >> right + elif exprnode.op == '&': + return left & right + elif exprnode.op == '|': + return left | right + elif exprnode.op == '^': + return left ^ right + # + raise FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) + + def _c_div(self, a, b): + result = a // b + if ((a < 0) ^ (b < 0)) and (a % b) != 0: + result += 1 + return result + + def _build_enum_type(self, explicit_name, decls): + if decls is not None: + partial = False + enumerators = [] + enumvalues = [] + nextenumvalue = 0 + for enum in decls.enumerators: + if _r_enum_dotdotdot.match(enum.name): + partial = True + continue + if enum.value is not None: + nextenumvalue = self._parse_constant(enum.value) + enumerators.append(enum.name) + enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) + nextenumvalue += 1 + enumerators = tuple(enumerators) + enumvalues = tuple(enumvalues) + tp = model.EnumType(explicit_name, enumerators, enumvalues) + tp.partial = partial + else: # opaque enum + tp = model.EnumType(explicit_name, (), ()) + return tp + + def include(self, other): + for name, (tp, quals) in other._declarations.items(): + if name.startswith('anonymous $enum_$'): + continue # fix for test_anonymous_enum_include + kind = name.split(' ', 1)[0] + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) + for k, v in other._int_constants.items(): + self._add_constants(k, v) + + def _get_unknown_type(self, decl): + typenames = decl.type.type.names + if typenames == ['__dotdotdot__']: + return model.unknown_type(decl.name) + + if typenames == ['__dotdotdotint__']: + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef int... %s'" % decl.name + return model.UnknownIntegerType(decl.name) + + if typenames == ['__dotdotdotfloat__']: + # note: not for 'long double' so far + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef float... %s'" % decl.name + return model.UnknownFloatType(decl.name) + + raise FFIError(':%d: unsupported usage of "..." in typedef' + % decl.coord.line) + + def _get_unknown_ptr_type(self, decl): + if decl.type.type.type.names == ['__dotdotdot__']: + return model.unknown_ptr_type(decl.name) + raise FFIError(':%d: unsupported usage of "..." 
in typedef' + % decl.coord.line) diff --git a/server/www/packages/packages-linux/x64/cffi/error.py b/server/www/packages/packages-linux/x64/cffi/error.py new file mode 100644 index 0000000..0a27247 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/error.py @@ -0,0 +1,31 @@ + +class FFIError(Exception): + __module__ = 'cffi' + +class CDefError(Exception): + __module__ = 'cffi' + def __str__(self): + try: + current_decl = self.args[1] + filename = current_decl.coord.file + linenum = current_decl.coord.line + prefix = '%s:%d: ' % (filename, linenum) + except (AttributeError, TypeError, IndexError): + prefix = '' + return '%s%s' % (prefix, self.args[0]) + +class VerificationError(Exception): + """ An error raised when verification fails + """ + __module__ = 'cffi' + +class VerificationMissing(Exception): + """ An error raised when incomplete structures are passed into + cdef, but no verification has been done + """ + __module__ = 'cffi' + +class PkgConfigError(Exception): + """ An error raised for missing modules in pkg-config + """ + __module__ = 'cffi' diff --git a/server/www/packages/packages-linux/x64/cffi/ffiplatform.py b/server/www/packages/packages-linux/x64/cffi/ffiplatform.py new file mode 100644 index 0000000..8531346 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/ffiplatform.py @@ -0,0 +1,127 @@ +import sys, os +from .error import VerificationError + + +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + +def get_extension(srcfilename, modname, sources=(), **kwds): + _hack_at_distutils() + from distutils.core import Extension + allsources = [srcfilename] + for src in sources: + allsources.append(os.path.normpath(src)) + return Extension(name=modname, sources=allsources, **kwds) + +def compile(tmpdir, ext, compiler_verbose=0, debug=None): + """Compile a C extension module using distutils.""" + + _hack_at_distutils() + saved_environ = os.environ.copy() + try: + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) + outputfilename = os.path.abspath(outputfilename) + finally: + # workaround for a distutils bugs where some env vars can + # become longer and longer every time it is used + for key, value in saved_environ.items(): + if os.environ.get(key) != value: + os.environ[key] = value + return outputfilename + +def _build(tmpdir, ext, compiler_verbose=0, debug=None): + # XXX compact but horrible :-( + from distutils.core import Distribution + import distutils.errors, distutils.log + # + dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() + options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) + options['force'] = ('ffiplatform', True) + options['build_lib'] = ('ffiplatform', tmpdir) + options['build_temp'] = ('ffiplatform', tmpdir) + # + try: + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() + finally: + distutils.log.set_threshold(old_level) + except (distutils.errors.CompileError, + distutils.errors.LinkError) as e: + raise VerificationError('%s: %s' % (e.__class__.__name__, e)) + # + return soname + +try: + from os.path import samefile +except ImportError: + def samefile(f1, f2): + return os.path.abspath(f1) == os.path.abspath(f2) + +def maybe_relative_path(path): + if not os.path.isabs(path): + return path # already relative + dir = 
path + names = [] + while True: + prevdir = dir + dir, name = os.path.split(prevdir) + if dir == prevdir or not dir: + return path # failed to make it relative + names.append(name) + try: + if samefile(dir, os.curdir): + names.reverse() + return os.path.join(*names) + except OSError: + pass + +# ____________________________________________________________ + +try: + int_or_long = (int, long) + import cStringIO +except NameError: + int_or_long = int # Python 3 + import io as cStringIO + +def _flatten(x, f): + if isinstance(x, str): + f.write('%ds%s' % (len(x), x)) + elif isinstance(x, dict): + keys = sorted(x.keys()) + f.write('%dd' % len(keys)) + for key in keys: + _flatten(key, f) + _flatten(x[key], f) + elif isinstance(x, (list, tuple)): + f.write('%dl' % len(x)) + for value in x: + _flatten(value, f) + elif isinstance(x, int_or_long): + f.write('%di' % (x,)) + else: + raise TypeError( + "the keywords to verify() contains unsupported object %r" % (x,)) + +def flatten(x): + f = cStringIO.StringIO() + _flatten(x, f) + return f.getvalue() + +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass diff --git a/server/www/packages/packages-linux/x64/cffi/lock.py b/server/www/packages/packages-linux/x64/cffi/lock.py new file mode 100644 index 0000000..db91b71 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/server/www/packages/packages-linux/x64/cffi/model.py b/server/www/packages/packages-linux/x64/cffi/model.py new file mode 100644 index 0000000..5f1b0d2 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/model.py @@ -0,0 +1,614 @@ +import types +import weakref + +from .lock import allocate_lock +from .error import CDefError, VerificationError, VerificationMissing + +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 +Q_VOLATILE = 0x04 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_VOLATILE: + replace_with = ' volatile ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + +class BaseTypeByIdentity(object): + is_array_type = False + is_raw_function = False + + def get_c_name(self, replace_with='', context='a C file', quals=0): + result = self.c_name_with_marker + assert result.count('&') == 1 + # some logic duplication with ffi.getctype()... 
:-( + replace_with = replace_with.strip() + if replace_with: + if replace_with.startswith('*') and '&[' in result: + replace_with = '(%s)' % replace_with + elif not replace_with[0] in '[(': + replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) + result = result.replace('&', replace_with) + if '$' in result: + raise VerificationError( + "cannot generate '%s' in %s: unknown type name" + % (self._get_c_name(), context)) + return result + + def _get_c_name(self): + return self.c_name_with_marker.replace('&', '') + + def has_c_name(self): + return '$' not in self._get_c_name() + + def is_integer_type(self): + return False + + def get_cached_btype(self, ffi, finishlist, can_delay=False): + try: + BType = ffi._cached_btypes[self] + except KeyError: + BType = self.build_backend_type(ffi, finishlist) + BType2 = ffi._cached_btypes.setdefault(self, BType) + assert BType2 is BType + return BType + + def __repr__(self): + return '<%s>' % (self._get_c_name(),) + + def _get_items(self): + return [(name, getattr(self, name)) for name in self._attrs_] + + +class BaseType(BaseTypeByIdentity): + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self._get_items() == other._get_items()) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.__class__, tuple(self._get_items()))) + + +class VoidType(BaseType): + _attrs_ = () + + def __init__(self): + self.c_name_with_marker = 'void&' + + def build_backend_type(self, ffi, finishlist): + return global_cache(self, ffi, 'new_void_type') + +void_type = VoidType() + + +class BasePrimitiveType(BaseType): + def is_complex_type(self): + return False + + +class PrimitiveType(BasePrimitiveType): + _attrs_ = ('name',) + + ALL_PRIMITIVE_TYPES = { + 'char': 'c', + 'short': 'i', + 'int': 'i', + 'long': 'i', + 'long long': 'i', + 'signed char': 'i', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', + 'float': 'f', + 'double': 'f', + 'long double': 'f', + 'float _Complex': 'j', + 'double _Complex': 'j', + '_Bool': 'i', + # the following types are not primitive in the C sense + 'wchar_t': 'c', + 'char16_t': 'c', + 'char32_t': 'c', + 'int8_t': 'i', + 'uint8_t': 'i', + 'int16_t': 'i', + 'uint16_t': 'i', + 'int32_t': 'i', + 'uint32_t': 'i', + 'int64_t': 'i', + 'uint64_t': 'i', + 'int_least8_t': 'i', + 'uint_least8_t': 'i', + 'int_least16_t': 'i', + 'uint_least16_t': 'i', + 'int_least32_t': 'i', + 'uint_least32_t': 'i', + 'int_least64_t': 'i', + 'uint_least64_t': 'i', + 'int_fast8_t': 'i', + 'uint_fast8_t': 'i', + 'int_fast16_t': 'i', + 'uint_fast16_t': 'i', + 'int_fast32_t': 'i', + 'uint_fast32_t': 'i', + 'int_fast64_t': 'i', + 'uint_fast64_t': 'i', + 'intptr_t': 'i', + 'uintptr_t': 'i', + 'intmax_t': 'i', + 'uintmax_t': 'i', + 'ptrdiff_t': 'i', + 'size_t': 'i', + 'ssize_t': 'i', + } + + def __init__(self, name): + assert name in self.ALL_PRIMITIVE_TYPES + self.name = name + self.c_name_with_marker = name + '&' + + def is_char_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' + def is_integer_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' + def is_float_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' + def is_complex_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' + + def build_backend_type(self, ffi, finishlist): + return global_cache(self, ffi, 'new_primitive_type', self.name) + + +class UnknownIntegerType(BasePrimitiveType): + _attrs_ = ('name',) 
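+    # (review note, illustrative only) This type models cdef lines such as
+    #   ffi.cdef("typedef int... my_handle_t;")
+    # where size and signedness are only discovered when set_source()
+    # compiles the module, which is why build_backend_type() below refuses
+    # to build anything before compilation.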
+ + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def is_integer_type(self): + return True + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("integer type '%s' can only be used after " + "compilation" % self.name) + +class UnknownFloatType(BasePrimitiveType): + _attrs_ = ('name', ) + + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("float type '%s' can only be used after " + "compilation" % self.name) + + +class BaseFunctionType(BaseType): + _attrs_ = ('args', 'result', 'ellipsis', 'abi') + + def __init__(self, args, result, ellipsis, abi=None): + self.args = args + self.result = result + self.ellipsis = ellipsis + self.abi = abi + # + reprargs = [arg._get_c_name() for arg in self.args] + if self.ellipsis: + reprargs.append('...') + reprargs = reprargs or ['void'] + replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] + self.c_name_with_marker = ( + self.result.c_name_with_marker.replace('&', replace_with)) + + +class RawFunctionType(BaseFunctionType): + # Corresponds to a C type like 'int(int)', which is the C type of + # a function, but not a pointer-to-function. The backend has no + # notion of such a type; it's used temporarily by parsing. + _base_pattern = '(&)(%s)' + is_raw_function = True + + def build_backend_type(self, ffi, finishlist): + raise CDefError("cannot render the type %r: it is a function " + "type, not a pointer-to-function type" % (self,)) + + def as_function_pointer(self): + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) + + +class FunctionPtrType(BaseFunctionType): + _base_pattern = '(*&)(%s)' + + def build_backend_type(self, ffi, finishlist): + result = self.result.get_cached_btype(ffi, finishlist) + args = [] + for tp in self.args: + args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass + return global_cache(self, ffi, 'new_function_type', + tuple(args), result, self.ellipsis, *abi_args) + + def as_raw_function(self): + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) + + +class PointerType(BaseType): + _attrs_ = ('totype', 'quals') + + def __init__(self, totype, quals=0): + self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") + if totype.is_array_type: + extra = "(%s)" % (extra.lstrip(),) + self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) + + def build_backend_type(self, ffi, finishlist): + BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) + return global_cache(self, ffi, 'new_pointer_type', BItem) + +voidp_type = PointerType(void_type) + +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) + +const_voidp_type = ConstPointerType(void_type) + + +class NamedPointerType(PointerType): + _attrs_ = ('totype', 'name') + + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) + self.name = name + self.c_name_with_marker = name + '&' + + +class ArrayType(BaseType): + _attrs_ = ('item', 'length') + is_array_type = True + + def __init__(self, item, length): + self.item = item + self.length = length + # + if length is None: + brackets = '&[]' + elif length == 
'...': + brackets = '&[/*...*/]' + else: + brackets = '&[%s]' % length + self.c_name_with_marker = ( + self.item.c_name_with_marker.replace('&', brackets)) + + def resolve_length(self, newlength): + return ArrayType(self.item, newlength) + + def build_backend_type(self, ffi, finishlist): + if self.length == '...': + raise CDefError("cannot render the type %r: unknown length" % + (self,)) + self.item.get_cached_btype(ffi, finishlist) # force the item BType + BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) + return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) + +char_array_type = ArrayType(PrimitiveType('char'), None) + + +class StructOrUnionOrEnum(BaseTypeByIdentity): + _attrs_ = ('name',) + forcename = None + + def build_c_name_with_marker(self): + name = self.forcename or '%s %s' % (self.kind, self.name) + self.c_name_with_marker = name + '&' + + def force_the_name(self, forcename): + self.forcename = forcename + self.build_c_name_with_marker() + + def get_official_name(self): + assert self.c_name_with_marker.endswith('&') + return self.c_name_with_marker[:-1] + + +class StructOrUnion(StructOrUnionOrEnum): + fixedlayout = None + completed = 0 + partial = False + packed = 0 + + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): + self.name = name + self.fldnames = fldnames + self.fldtypes = fldtypes + self.fldbitsize = fldbitsize + self.fldquals = fldquals + self.build_c_name_with_marker() + + def anonymous_struct_fields(self): + if self.fldtypes is not None: + for name, type in zip(self.fldnames, self.fldtypes): + if name == '' and isinstance(type, StructOrUnion): + yield type + + def enumfields(self, expand_anonymous_struct_union=True): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): + if (name == '' and isinstance(type, StructOrUnion) + and expand_anonymous_struct_union): + # nested anonymous struct/union + for result in type.enumfields(): + yield result + else: + yield (name, type, bitsize, quals) + + def force_flatten(self): + # force the struct or union to have a declaration that lists + # directly all fields returned by enumfields(), flattening + # nested anonymous structs/unions. 
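+        # (review note, illustrative only) For example, given
+        #   struct s { struct { int a; }; int b; };
+        # enumfields() yields ('a', <int>, -1, 0) then ('b', <int>, -1, 0),
+        # so after force_flatten() the struct lists 'a' and 'b' directly
+        # instead of going through the anonymous inner struct.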
+ names = [] + types = [] + bitsizes = [] + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): + names.append(name) + types.append(type) + bitsizes.append(bitsize) + fldquals.append(quals) + self.fldnames = tuple(names) + self.fldtypes = tuple(types) + self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) + + def get_cached_btype(self, ffi, finishlist, can_delay=False): + BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, + can_delay) + if not can_delay: + self.finish_backend_type(ffi, finishlist) + return BType + + def finish_backend_type(self, ffi, finishlist): + if self.completed: + if self.completed != 2: + raise NotImplementedError("recursive structure declaration " + "for '%s'" % (self.name,)) + return + BType = ffi._cached_btypes[self] + # + self.completed = 1 + # + if self.fldtypes is None: + pass # not completing it: it's an opaque struct + # + elif self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] + lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) + extra_flags = () + if self.packed: + if self.packed == 1: + extra_flags = (8,) # SF_PACKED + else: + extra_flags = (0, self.packed) + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, *extra_flags) + # + else: + fldtypes = [] + fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout + for i in range(len(self.fldnames)): + fsize = fieldsize[i] + ftype = self.fldtypes[i] + # + if isinstance(ftype, ArrayType) and ftype.length == '...': + # fix the length to match the total size + BItemType = ftype.item.get_cached_btype(ffi, finishlist) + nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) + if nrest != 0: + self._verification_error( + "field '%s.%s' has a bogus size?" 
% ( + self.name, self.fldnames[i] or '{}')) + ftype = ftype.resolve_length(nlen) + self.fldtypes = (self.fldtypes[:i] + (ftype,) + + self.fldtypes[i+1:]) + # + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # + lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) + ffi._backend.complete_struct_or_union(BType, lst, self, + totalsize, totalalignment) + self.completed = 2 + + def _verification_error(self, msg): + raise VerificationError(msg) + + def check_not_partial(self): + if self.partial and self.fixedlayout is None: + raise VerificationMissing(self._get_c_name()) + + def build_backend_type(self, ffi, finishlist): + self.check_not_partial() + finishlist.append(self) + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, + self.get_official_name(), key=self) + + +class StructType(StructOrUnion): + kind = 'struct' + + +class UnionType(StructOrUnion): + kind = 'union' + + +class EnumType(StructOrUnionOrEnum): + kind = 'enum' + partial = False + partial_resolved = False + + def __init__(self, name, enumerators, enumvalues, baseinttype=None): + self.name = name + self.enumerators = enumerators + self.enumvalues = enumvalues + self.baseinttype = baseinttype + self.build_c_name_with_marker() + + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + + def check_not_partial(self): + if self.partial and not self.partial_resolved: + raise VerificationMissing(self._get_c_name()) + + def build_backend_type(self, ffi, finishlist): + self.check_not_partial() + base_btype = self.build_baseinttype(ffi, finishlist) + return global_cache(self, ffi, 'new_enum_type', + self.get_official_name(), + self.enumerators, self.enumvalues, + base_btype, key=self) + + def build_baseinttype(self, ffi, finishlist): + if self.baseinttype is not None: + return self.baseinttype.get_cached_btype(ffi, finishlist) + # + if self.enumvalues: + smallest_value = min(self.enumvalues) + largest_value = max(self.enumvalues) + else: + import warnings + try: + # XXX! The goal is to ensure that the warnings.warn() + # will not suppress the warning. We want to get it + # several times if we reach this point several times. 
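+                # (review note, illustrative only) warnings.warn() keeps a
+                # per-module __warningregistry__ and skips duplicates; the
+                # clear() below re-arms it so every opaque enum triggers a
+                # fresh warning.  Standalone sketch of the same trick:
+                #
+                #   import warnings
+                #   def nag():
+                #       try: __warningregistry__.clear()
+                #       except NameError: pass
+                #       warnings.warn("still opaque")
+                #   nag(); nag()   # warns twice, not once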
+ __warningregistry__.clear() + except NameError: + pass + warnings.warn("%r has no values explicitly defined; " + "guessing that it is equivalent to 'unsigned int'" + % self._get_c_name()) + smallest_value = largest_value = 0 + if smallest_value < 0: # needs a signed type + sign = 1 + candidate1 = PrimitiveType("int") + candidate2 = PrimitiveType("long") + else: + sign = 0 + candidate1 = PrimitiveType("unsigned int") + candidate2 = PrimitiveType("unsigned long") + btype1 = candidate1.get_cached_btype(ffi, finishlist) + btype2 = candidate2.get_cached_btype(ffi, finishlist) + size1 = ffi.sizeof(btype1) + size2 = ffi.sizeof(btype2) + if (smallest_value >= ((-1) << (8*size1-1)) and + largest_value < (1 << (8*size1-sign))): + return btype1 + if (smallest_value >= ((-1) << (8*size2-1)) and + largest_value < (1 << (8*size2-sign))): + return btype2 + raise CDefError("%s values don't all fit into either 'long' " + "or 'unsigned long'" % self._get_c_name()) + +def unknown_type(name, structname=None): + if structname is None: + structname = '$%s' % name + tp = StructType(structname, None, None, None) + tp.force_the_name(name) + tp.origin = "unknown_type" + return tp + +def unknown_ptr_type(name, structname=None): + if structname is None: + structname = '$$%s' % name + tp = StructType(structname, None, None, None) + return NamedPointerType(tp, name) + + +global_lock = allocate_lock() +_typecache_cffi_backend = weakref.WeakValueDictionary() + +def get_typecache(backend): + # returns _typecache_cffi_backend if backend is the _cffi_backend + # module, or type(backend).__typecache if backend is an instance of + # CTypesBackend (or some FakeBackend class during tests) + if isinstance(backend, types.ModuleType): + return _typecache_cffi_backend + with global_lock: + if not hasattr(type(backend), '__typecache'): + type(backend).__typecache = weakref.WeakValueDictionary() + return type(backend).__typecache + +def global_cache(srctype, ffi, funcname, *args, **kwds): + key = kwds.pop('key', (funcname, args)) + assert not kwds + try: + return ffi._typecache[key] + except KeyError: + pass + try: + res = getattr(ffi._backend, funcname)(*args) + except NotImplementedError as e: + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 + +def pointer_cache(ffi, BType): + return global_cache('?', ffi, 'new_pointer_type', BType) + +def attach_exception_info(e, name): + if e.args and type(e.args[0]) is str: + e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:] diff --git a/server/www/packages/packages-linux/x64/cffi/parse_c_type.h b/server/www/packages/packages-linux/x64/cffi/parse_c_type.h new file mode 100644 index 0000000..84e4ef8 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/parse_c_type.h @@ -0,0 +1,181 @@ + +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). 
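+
+   (review note, illustrative only) Each _cffi_opcode_t defined below
+   packs an 8-bit opcode with an argument shifted left by 8 bits, so
+   for example:
+
+       _cffi_opcode_t op = _CFFI_OP(_CFFI_OP_POINTER, 7);
+       _CFFI_GETOP(op)  == _CFFI_OP_POINTER
+       _CFFI_GETARG(op) == 7        (a type index into _cffi_types)
+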
*/ + +typedef void *_cffi_opcode_t; + +#define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) +#define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) + +#define _CFFI_OP_PRIMITIVE 1 +#define _CFFI_OP_POINTER 3 +#define _CFFI_OP_ARRAY 5 +#define _CFFI_OP_OPEN_ARRAY 7 +#define _CFFI_OP_STRUCT_UNION 9 +#define _CFFI_OP_ENUM 11 +#define _CFFI_OP_FUNCTION 13 +#define _CFFI_OP_FUNCTION_END 15 +#define _CFFI_OP_NOOP 17 +#define _CFFI_OP_BITFIELD 19 +#define _CFFI_OP_TYPENAME 21 +#define _CFFI_OP_CPYTHON_BLTN_V 23 // varargs +#define _CFFI_OP_CPYTHON_BLTN_N 25 // noargs +#define _CFFI_OP_CPYTHON_BLTN_O 27 // O (i.e. a single arg) +#define _CFFI_OP_CONSTANT 29 +#define _CFFI_OP_CONSTANT_INT 31 +#define _CFFI_OP_GLOBAL_VAR 33 +#define _CFFI_OP_DLOPEN_FUNC 35 +#define _CFFI_OP_DLOPEN_CONST 37 +#define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 + +#define _CFFI_PRIM_VOID 0 +#define _CFFI_PRIM_BOOL 1 +#define _CFFI_PRIM_CHAR 2 +#define _CFFI_PRIM_SCHAR 3 +#define _CFFI_PRIM_UCHAR 4 +#define _CFFI_PRIM_SHORT 5 +#define _CFFI_PRIM_USHORT 6 +#define _CFFI_PRIM_INT 7 +#define _CFFI_PRIM_UINT 8 +#define _CFFI_PRIM_LONG 9 +#define _CFFI_PRIM_ULONG 10 +#define _CFFI_PRIM_LONGLONG 11 +#define _CFFI_PRIM_ULONGLONG 12 +#define _CFFI_PRIM_FLOAT 13 +#define _CFFI_PRIM_DOUBLE 14 +#define _CFFI_PRIM_LONGDOUBLE 15 + +#define _CFFI_PRIM_WCHAR 16 +#define _CFFI_PRIM_INT8 17 +#define _CFFI_PRIM_UINT8 18 +#define _CFFI_PRIM_INT16 19 +#define _CFFI_PRIM_UINT16 20 +#define _CFFI_PRIM_INT32 21 +#define _CFFI_PRIM_UINT32 22 +#define _CFFI_PRIM_INT64 23 +#define _CFFI_PRIM_UINT64 24 +#define _CFFI_PRIM_INTPTR 25 +#define _CFFI_PRIM_UINTPTR 26 +#define _CFFI_PRIM_PTRDIFF 27 +#define _CFFI_PRIM_SIZE 28 +#define _CFFI_PRIM_SSIZE 29 +#define _CFFI_PRIM_INT_LEAST8 30 +#define _CFFI_PRIM_UINT_LEAST8 31 +#define _CFFI_PRIM_INT_LEAST16 32 +#define _CFFI_PRIM_UINT_LEAST16 33 +#define _CFFI_PRIM_INT_LEAST32 34 +#define _CFFI_PRIM_UINT_LEAST32 35 +#define _CFFI_PRIM_INT_LEAST64 36 +#define _CFFI_PRIM_UINT_LEAST64 37 +#define _CFFI_PRIM_INT_FAST8 38 +#define _CFFI_PRIM_UINT_FAST8 39 +#define _CFFI_PRIM_INT_FAST16 40 +#define _CFFI_PRIM_UINT_FAST16 41 +#define _CFFI_PRIM_INT_FAST32 42 +#define _CFFI_PRIM_UINT_FAST32 43 +#define _CFFI_PRIM_INT_FAST64 44 +#define _CFFI_PRIM_UINT_FAST64 45 +#define _CFFI_PRIM_INTMAX 46 +#define _CFFI_PRIM_UINTMAX 47 +#define _CFFI_PRIM_FLOATCOMPLEX 48 +#define _CFFI_PRIM_DOUBLECOMPLEX 49 +#define _CFFI_PRIM_CHAR16 50 +#define _CFFI_PRIM_CHAR32 51 + +#define _CFFI__NUM_PRIM 52 +#define _CFFI__UNKNOWN_PRIM (-1) +#define _CFFI__UNKNOWN_FLOAT_PRIM (-2) +#define _CFFI__UNKNOWN_LONG_DOUBLE (-3) + +#define _CFFI__IO_FILE_STRUCT (-1) + + +struct _cffi_global_s { + const char *name; + void *address; + _cffi_opcode_t type_op; + void *size_or_direct_fn; // OP_GLOBAL_VAR: size, or 0 if unknown + // OP_CPYTHON_BLTN_*: addr of direct function +}; + +struct _cffi_getconst_s { + unsigned long long value; + const struct _cffi_type_context_s *ctx; + int gindex; +}; + +struct _cffi_struct_union_s { + const char *name; + int type_index; // -> _cffi_types, on a OP_STRUCT_UNION + int flags; // _CFFI_F_* flags below + size_t size; + int alignment; + int first_field_index; // -> _cffi_fields array + int num_fields; +}; +#define _CFFI_F_UNION 0x01 // is a union, not a struct +#define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the + // "standard layout" or if some are missing +#define 
_CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a packed struct +#define _CFFI_F_EXTERNAL 0x08 // in some other ffi.include() +#define _CFFI_F_OPAQUE 0x10 // opaque + +struct _cffi_field_s { + const char *name; + size_t field_offset; + size_t field_size; + _cffi_opcode_t field_type_op; +}; + +struct _cffi_enum_s { + const char *name; + int type_index; // -> _cffi_types, on a OP_ENUM + int type_prim; // _CFFI_PRIM_xxx + const char *enumerators; // comma-delimited string +}; + +struct _cffi_typename_s { + const char *name; + int type_index; /* if opaque, points to a possibly artificial + OP_STRUCT which is itself opaque */ +}; + +struct _cffi_type_context_s { + _cffi_opcode_t *types; + const struct _cffi_global_s *globals; + const struct _cffi_field_s *fields; + const struct _cffi_struct_union_s *struct_unions; + const struct _cffi_enum_s *enums; + const struct _cffi_typename_s *typenames; + int num_globals; + int num_struct_unions; + int num_enums; + int num_typenames; + const char *const *includes; + int num_types; + int flags; /* future extension */ +}; + +struct _cffi_parse_info_s { + const struct _cffi_type_context_s *ctx; + _cffi_opcode_t *output; + unsigned int output_size; + size_t error_location; + const char *error_message; +}; + +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + +#ifdef _CFFI_INTERNAL +static int parse_c_type(struct _cffi_parse_info_s *info, const char *input); +static int search_in_globals(const struct _cffi_type_context_s *ctx, + const char *search, size_t search_len); +static int search_in_struct_unions(const struct _cffi_type_context_s *ctx, + const char *search, size_t search_len); +#endif diff --git a/server/www/packages/packages-linux/x64/cffi/pkgconfig.py b/server/www/packages/packages-linux/x64/cffi/pkgconfig.py new file mode 100644 index 0000000..5c93f15 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/pkgconfig.py @@ -0,0 +1,121 @@ +# pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi +import sys, os, subprocess + +from .error import PkgConfigError + + +def merge_flags(cfg1, cfg2): + """Merge values from cffi config flags cfg2 to cf1 + + Example: + merge_flags({"libraries": ["one"]}, {"libraries": ["two"]}) + {"libraries": ["one", "two"]} + """ + for key, value in cfg2.items(): + if key not in cfg1: + cfg1[key] = value + else: + if not isinstance(cfg1[key], list): + raise TypeError("cfg1[%r] should be a list of strings" % (key,)) + if not isinstance(value, list): + raise TypeError("cfg2[%r] should be a list of strings" % (key,)) + cfg1[key].extend(value) + return cfg1 + + +def call(libname, flag, encoding=sys.getfilesystemencoding()): + """Calls pkg-config and returns the output if found + """ + a = ["pkg-config", "--print-errors"] + a.append(flag) + a.append(libname) + try: + pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except EnvironmentError as e: + raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),)) + + bout, berr = pc.communicate() + if pc.returncode != 0: + try: + berr = berr.decode(encoding) + except Exception: + pass + raise PkgConfigError(berr.strip()) + + if sys.version_info >= (3,) and not isinstance(bout, str): # Python 3.x + try: + bout = bout.decode(encoding) + except UnicodeDecodeError: + raise PkgConfigError("pkg-config %s %s returned bytes that cannot " + "be decoded with encoding %r:\n%r" % + (flag, libname, encoding, bout)) + + if os.altsep != '\\' and '\\' in bout: + raise 
PkgConfigError("pkg-config %s %s returned an unsupported " + "backslash-escaped output:\n%r" % + (flag, libname, bout)) + return bout + + +def flags_from_pkgconfig(libs): + r"""Return compiler line flags for FFI.set_source based on pkg-config output + + Usage + ... + ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"]) + + If pkg-config is installed on build machine, then arguments include_dirs, + library_dirs, libraries, define_macros, extra_compile_args and + extra_link_args are extended with an output of pkg-config for libfoo and + libbar. + + Raises PkgConfigError in case the pkg-config call fails. + """ + + def get_include_dirs(string): + return [x[2:] for x in string.split() if x.startswith("-I")] + + def get_library_dirs(string): + return [x[2:] for x in string.split() if x.startswith("-L")] + + def get_libraries(string): + return [x[2:] for x in string.split() if x.startswith("-l")] + + # convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils + def get_macros(string): + def _macro(x): + x = x[2:] # drop "-D" + if '=' in x: + return tuple(x.split("=", 1)) # "-Dfoo=bar" => ("foo", "bar") + else: + return (x, None) # "-Dfoo" => ("foo", None) + return [_macro(x) for x in string.split() if x.startswith("-D")] + + def get_other_cflags(string): + return [x for x in string.split() if not x.startswith("-I") and + not x.startswith("-D")] + + def get_other_libs(string): + return [x for x in string.split() if not x.startswith("-L") and + not x.startswith("-l")] + + # return kwargs for given libname + def kwargs(libname): + fse = sys.getfilesystemencoding() + all_cflags = call(libname, "--cflags") + all_libs = call(libname, "--libs") + return { + "include_dirs": get_include_dirs(all_cflags), + "library_dirs": get_library_dirs(all_libs), + "libraries": get_libraries(all_libs), + "define_macros": get_macros(all_cflags), + "extra_compile_args": get_other_cflags(all_cflags), + "extra_link_args": get_other_libs(all_libs), + } + + # merge all arguments together + ret = {} + for libname in libs: + lib_flags = kwargs(libname) + merge_flags(ret, lib_flags) + return ret diff --git a/server/www/packages/packages-linux/x64/cffi/recompiler.py b/server/www/packages/packages-linux/x64/cffi/recompiler.py new file mode 100644 index 0000000..d6530e5 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/recompiler.py @@ -0,0 +1,1543 @@ +import os, sys, io +from . 
import ffiplatform, model +from .error import VerificationError +from .cffi_opcode import * + +VERSION_BASE = 0x2601 +VERSION_EMBEDDED = 0x2701 +VERSION_CHAR16CHAR32 = 0x2801 + + +class GlobalExpr: + def __init__(self, name, address, type_op, size=0, check_value=0): + self.name = name + self.address = address + self.type_op = type_op + self.size = size + self.check_value = check_value + + def as_c_expr(self): + return ' { "%s", (void *)%s, %s, (void *)%s },' % ( + self.name, self.address, self.type_op.as_c_expr(), self.size) + + def as_python_expr(self): + return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, + self.check_value) + +class FieldExpr: + def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): + self.name = name + self.field_offset = field_offset + self.field_size = field_size + self.fbitsize = fbitsize + self.field_type_op = field_type_op + + def as_c_expr(self): + spaces = " " * len(self.name) + return (' { "%s", %s,\n' % (self.name, self.field_offset) + + ' %s %s,\n' % (spaces, self.field_size) + + ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) + + def as_python_expr(self): + raise NotImplementedError + + def as_field_python_expr(self): + if self.field_type_op.op == OP_NOOP: + size_expr = '' + elif self.field_type_op.op == OP_BITFIELD: + size_expr = format_four_bytes(self.fbitsize) + else: + raise NotImplementedError + return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), + size_expr, + self.name) + +class StructUnionExpr: + def __init__(self, name, type_index, flags, size, alignment, comment, + first_field_index, c_fields): + self.name = name + self.type_index = type_index + self.flags = flags + self.size = size + self.alignment = alignment + self.comment = comment + self.first_field_index = first_field_index + self.c_fields = c_fields + + def as_c_expr(self): + return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) + + '\n %s, %s, ' % (self.size, self.alignment) + + '%d, %d ' % (self.first_field_index, len(self.c_fields)) + + ('/* %s */ ' % self.comment if self.comment else '') + + '},') + + def as_python_expr(self): + flags = eval(self.flags, G_FLAGS) + fields_expr = [c_field.as_field_python_expr() + for c_field in self.c_fields] + return "(b'%s%s%s',%s)" % ( + format_four_bytes(self.type_index), + format_four_bytes(flags), + self.name, + ','.join(fields_expr)) + +class EnumExpr: + def __init__(self, name, type_index, size, signed, allenums): + self.name = name + self.type_index = type_index + self.size = size + self.signed = signed + self.allenums = allenums + + def as_c_expr(self): + return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' + ' "%s" },' % (self.name, self.type_index, + self.size, self.signed, self.allenums)) + + def as_python_expr(self): + prim_index = { + (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, + (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, + (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, + (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64, + }[self.size, self.signed] + return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), + format_four_bytes(prim_index), + self.name, self.allenums) + +class TypenameExpr: + def __init__(self, name, type_index): + self.name = name + self.type_index = type_index + + def as_c_expr(self): + return ' { "%s", %d },' % (self.name, self.type_index) + + def as_python_expr(self): + return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) + + +# ____________________________________________________________ + + +class Recompiler: + _num_externpy = 0 + + def __init__(self, ffi, 
module_name, target_is_python=False): + self.ffi = ffi + self.module_name = module_name + self.target_is_python = target_is_python + self._version = VERSION_BASE + + def needs_version(self, ver): + self._version = max(self._version, ver) + + def collect_type_table(self): + self._typesdict = {} + self._generate("collecttype") + # + all_decls = sorted(self._typesdict, key=str) + # + # prepare all FUNCTION bytecode sequences first + self.cffi_types = [] + for tp in all_decls: + if tp.is_raw_function: + assert self._typesdict[tp] is None + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + for tp1 in tp.args: + assert isinstance(tp1, (model.VoidType, + model.BasePrimitiveType, + model.PointerType, + model.StructOrUnionOrEnum, + model.FunctionPtrType)) + if self._typesdict[tp1] is None: + self._typesdict[tp1] = len(self.cffi_types) + self.cffi_types.append(tp1) # placeholder + self.cffi_types.append('END') # placeholder + # + # prepare all OTHER bytecode sequences + for tp in all_decls: + if not tp.is_raw_function and self._typesdict[tp] is None: + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + if tp.is_array_type and tp.length is not None: + self.cffi_types.append('LEN') # placeholder + assert None not in self._typesdict.values() + # + # collect all structs and unions and enums + self._struct_unions = {} + self._enums = {} + for tp in all_decls: + if isinstance(tp, model.StructOrUnion): + self._struct_unions[tp] = None + elif isinstance(tp, model.EnumType): + self._enums[tp] = None + for i, tp in enumerate(sorted(self._struct_unions, + key=lambda tp: tp.name)): + self._struct_unions[tp] = i + for i, tp in enumerate(sorted(self._enums, + key=lambda tp: tp.name)): + self._enums[tp] = i + # + # emit all bytecode sequences now + for tp in all_decls: + method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) + method(tp, self._typesdict[tp]) + # + # consistency check + for op in self.cffi_types: + assert isinstance(op, CffiOp) + self.cffi_types = tuple(self.cffi_types) # don't change any more + + def _do_collect_type(self, tp): + if not isinstance(tp, model.BaseTypeByIdentity): + if isinstance(tp, tuple): + for x in tp: + self._do_collect_type(x) + return + if tp not in self._typesdict: + self._typesdict[tp] = None + if isinstance(tp, model.FunctionPtrType): + self._do_collect_type(tp.as_raw_function()) + elif isinstance(tp, model.StructOrUnion): + if tp.fldtypes is not None and ( + tp not in self.ffi._parser._included_declarations): + for name1, tp1, _, _ in tp.enumfields(): + self._do_collect_type(self._field_type(tp, name1, tp1)) + else: + for _, x in tp._get_items(): + self._do_collect_type(x) + + def _generate(self, step_name): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in recompile(): %r" % name) + try: + self._current_quals = quals + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + # ---------- + + ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] + + def collect_step_tables(self): + # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. 
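+        # (review note, illustrative only) Each name in ALL_STEPS feeds one
+        # generated C array; e.g. a cdef "typedef uint32_t u32;" appends a
+        # TypenameExpr to _lsts["typename"], which later prints as one
+        #   { "u32", <type_index> },
+        # entry inside 'static const struct _cffi_typename_s
+        # _cffi_typenames[]'.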
+ self._lsts = {} + for step_name in self.ALL_STEPS: + self._lsts[step_name] = [] + self._seen_struct_unions = set() + self._generate("ctx") + self._add_missing_struct_unions() + # + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if step_name != "field": + lst.sort(key=lambda entry: entry.name) + self._lsts[step_name] = tuple(lst) # don't change any more + # + # check for a possible internal inconsistency: _cffi_struct_unions + # should have been generated with exactly self._struct_unions + lst = self._lsts["struct_union"] + for tp, i in self._struct_unions.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._struct_unions) + # same with enums + lst = self._lsts["enum"] + for tp, i in self._enums.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._enums) + + # ---------- + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self, f, preamble): + if self.target_is_python: + assert preamble is None + self.write_py_source_to_f(f) + else: + assert preamble is not None + self.write_c_source_to_f(f, preamble) + + def _rel_readlines(self, filename): + g = open(os.path.join(os.path.dirname(__file__), filename), 'r') + lines = g.readlines() + g.close() + return lines + + def write_c_source_to_f(self, f, preamble): + self._f = f + prnt = self._prnt + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') + # + # first the '#include' (actually done by inlining the file's content) + lines = self._rel_readlines('_cffi_include.h') + i = lines.index('#include "parse_c_type.h"\n') + lines[i:i+1] = self._rel_readlines('parse_c_type.h') + prnt(''.join(lines)) + # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') + self._print_string_literal_in_array(self.ffi._embedding) + prnt('0 };') + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + i = lines.index('#include "_cffi_errors.h"\n') + lines[i:i+1] = self._rel_readlines('_cffi_errors.h') + prnt(''.join(lines)) + self.needs_version(VERSION_EMBEDDED) + # + # then paste the C source given by the user, verbatim. + prnt('/************************************************************/') + prnt() + prnt(preamble) + prnt() + prnt('/************************************************************/') + prnt() + # + # the declaration of '_cffi_types' + prnt('static void *_cffi_types[] = {') + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + for i, op in enumerate(self.cffi_types): + comment = '' + if i in typeindex2type: + comment = ' // ' + typeindex2type[i]._get_c_name() + prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) + if not self.cffi_types: + prnt(' 0') + prnt('};') + prnt() + # + # call generate_cpy_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. 
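+        # (review note, illustrative only) For a cdef such as
+        #   int add(int, int);
+        # the "decl" step below emits, in the CPython branch, roughly:
+        #
+        #   static int _cffi_d_add(int x0, int x1) { return add(x0, x1); }
+        #   static PyObject *
+        #   _cffi_f_add(PyObject *self, PyObject *args) { ... }
+        #
+        # The _cffi_d_ variant backs ffi.addressof(lib, 'add'); the
+        # _cffi_f_ variant is the Python-callable wrapper.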
+ self._seen_constants = set() + self._generate("decl") + # + # the declaration of '_cffi_globals' and '_cffi_typenames' + nums = {} + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + nums[step_name] = len(lst) + if nums[step_name] > 0: + prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( + step_name, step_name)) + for entry in lst: + prnt(entry.as_c_expr()) + prnt('};') + prnt() + # + # the declaration of '_cffi_includes' + if self.ffi._included_ffis: + prnt('static const char * const _cffi_includes[] = {') + for ffi_to_include in self.ffi._included_ffis: + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is None: + raise VerificationError( + "not implemented yet: ffi.include() of a Python-based " + "ffi inside a C-based ffi") + prnt(' "%s",' % (included_module_name,)) + prnt(' NULL') + prnt('};') + prnt() + # + # the declaration of '_cffi_type_context' + prnt('static const struct _cffi_type_context_s _cffi_type_context = {') + prnt(' _cffi_types,') + for step_name in self.ALL_STEPS: + if nums[step_name] > 0: + prnt(' _cffi_%ss,' % step_name) + else: + prnt(' NULL, /* no %ss */' % step_name) + for step_name in self.ALL_STEPS: + if step_name != "field": + prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) + if self.ffi._included_ffis: + prnt(' _cffi_includes,') + else: + prnt(' NULL, /* no includes */') + prnt(' %d, /* num_types */' % (len(self.cffi_types),)) + flags = 0 + if self._num_externpy: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) + prnt('};') + prnt() + # + # the init function + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') + prnt('#endif') + prnt() + prnt('#ifdef PYPY_VERSION') + prnt('PyMODINIT_FUNC') + prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) + prnt('{') + if self._num_externpy: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python_org = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') + prnt(' p[0] = (const void *)0x%x;' % self._version) + prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') + prnt('}') + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + prnt('# ifdef _MSC_VER') + prnt(' PyMODINIT_FUNC') + prnt('# if PY_MAJOR_VERSION >= 3') + prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) + prnt('# else') + prnt(' init%s(void) { }' % (base_module_name,)) + prnt('# endif') + prnt('# endif') + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % (base_module_name,)) + prnt('{') + prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#else') + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % (base_module_name,)) + prnt('{') + prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#endif') + prnt() + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility pop') + prnt('#endif') + self._version = None + + def _to_py(self, x): + if isinstance(x, str): + return "b'%s'" % (x,) + if isinstance(x, (list, tuple)): + rep = [self._to_py(item) for 
item in x] + if len(rep) == 1: + rep.append('') + return "(%s)" % (','.join(rep),) + return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. + + def write_py_source_to_f(self, f): + self._f = f + prnt = self._prnt + # + # header + prnt("# auto-generated file") + prnt("import _cffi_backend") + # + # the 'import' of the included ffis + num_includes = len(self.ffi._included_ffis or ()) + for i in range(num_includes): + ffi_to_include = self.ffi._included_ffis[i] + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is not None: + raise VerificationError( + "not implemented yet: ffi.include() of a C-based " + "ffi inside a Python-based ffi") + prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) + prnt() + prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) + prnt(" _version = 0x%x," % (self._version,)) + self._version = None + # + # the '_types' keyword argument + self.cffi_types = tuple(self.cffi_types) # don't change any more + types_lst = [op.as_python_bytes() for op in self.cffi_types] + prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + # + # the keyword arguments from ALL_STEPS + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if len(lst) > 0 and step_name != "field": + prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) + # + # the '_includes' keyword argument + if num_includes > 0: + prnt(' _includes = (%s,),' % ( + ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) + # + # the footer + prnt(')') + + # ---------- + + def _gettypenum(self, type): + # a KeyError here is a bug. please report it! 
:-) + return self._typesdict[type] + + def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): + extraarg = '' + if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type(): + if tp.is_integer_type() and tp.name != '_Bool': + converter = '_cffi_to_c_int' + extraarg = ', %s' % tp.name + elif isinstance(tp, model.UnknownFloatType): + # don't check with is_float_type(): it may be a 'long + # double' here, and _cffi_to_c_double would loose precision + converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) + else: + cname = tp.get_c_name('') + converter = '(%s)_cffi_to_c_%s' % (cname, + tp.name.replace(' ', '_')) + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + errvalue = '-1' + # + elif isinstance(tp, model.PointerType): + self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, + tovar, errcode) + return + # + elif (isinstance(tp, model.StructOrUnionOrEnum) or + isinstance(tp, model.BasePrimitiveType)): + # a struct (not a struct pointer) as a function argument; + # or, a complex (the same code works) + self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' + % (tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + return + # + elif isinstance(tp, model.FunctionPtrType): + converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') + extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) + errvalue = 'NULL' + # + else: + raise NotImplementedError(tp) + # + self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) + self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( + tovar, tp.get_c_name(''), errvalue)) + self._prnt(' %s;' % errcode) + + def _extra_local_variables(self, tp, localvars): + if isinstance(tp, model.PointerType): + localvars.add('Py_ssize_t datasize') + + def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): + self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') + self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( + self._gettypenum(tp), fromvar, tovar)) + self._prnt(' if (datasize != 0) {') + self._prnt(' if (datasize < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' %s = (%s)alloca((size_t)datasize);' % ( + tovar, tp.get_c_name(''))) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) + self._prnt(' if (_cffi_convert_array_from_object(' + '(char *)%s, _cffi_type(%d), %s) < 0)' % ( + tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.BasePrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif isinstance(tp, model.UnknownFloatType): + return '_cffi_from_c_double(%s)' % (var,) + elif tp.name != 'long double' and not tp.is_complex_type(): + cname = tp.name.replace(' ', '_') + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + return '_cffi_from_c_%s(%s)' % (cname, var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + 
tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs + + def _typedef_type(self, tp, name): + return self._global_type(tp, "(*(%s *)0)" % (name,)) + + def _generate_cpy_typedef_collecttype(self, tp, name): + self._do_collect_type(self._typedef_type(tp, name)) + + def _generate_cpy_typedef_decl(self, tp, name): + pass + + def _typedef_ctx(self, tp, name): + type_index = self._typesdict[tp] + self._lsts["typename"].append(TypenameExpr(name, type_index)) + + def _generate_cpy_typedef_ctx(self, tp, name): + tp = self._typedef_type(tp, name) + self._typedef_ctx(tp, name) + if getattr(tp, "origin", None) == "unknown_type": + self._struct_ctx(tp, tp.name, approxname=None) + elif isinstance(tp, model.NamedPointerType): + self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, + named_ptr=tp) + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + self._do_collect_type(tp.as_raw_function()) + if tp.ellipsis and not self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_function_decl(self, tp, name): + assert not self.target_is_python + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_constant_decl(tp, name) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + # + # ------------------------------ + # the 'd' version of the function, only for addressof(lib, 'func') + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arguments.append(type.get_c_name(' x%d' % i, context)) + call_arguments.append('x%d' % i) + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) + prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) + prnt('{') + call_arguments = ', '.join(call_arguments) + result_code = 'return ' + if isinstance(tp.result, model.VoidType): + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, call_arguments)) + prnt('}') + # + prnt('#ifndef PYPY_VERSION') # ------------------------------ + # + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' x%d' % i, context) + prnt(' %s;' % arg) + # + localvars = set() + for type in tp.args: + self._extra_local_variables(type, localvars) + for decl in localvars: + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + result_decl = ' %s;' % tp.result.get_c_name(' result', context) + prnt(result_decl) + else: + result_decl = None + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( + name, len(rng), 
len(rng), + ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + call_arguments = ['x%d' % i for i in range(len(tp.args))] + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' return %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + else: + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + # + prnt('#else') # ------------------------------ + # + # the PyPy version: need to replace struct/union arguments with + # pointers, and if the result is a struct/union, insert a first + # arg that is a pointer to the result. We also do that for + # complex args and return type. + def need_indirection(type): + return (isinstance(type, model.StructOrUnion) or + (isinstance(type, model.PrimitiveType) and + type.is_complex_type())) + difference = False + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + indirection = '' + if need_indirection(type): + indirection = '*' + difference = True + arg = type.get_c_name(' %sx%d' % (indirection, i), context) + arguments.append(arg) + call_arguments.append('%sx%d' % (indirection, i)) + tp_result = tp.result + if need_indirection(tp_result): + context = 'result of %s' % name + arg = tp_result.get_c_name(' *result', context) + arguments.insert(0, arg) + tp_result = model.void_type + result_decl = None + result_code = '*result = ' + difference = True + if difference: + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) + prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) + prnt('{') + if result_decl: + prnt(result_decl) + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + if result_decl: + prnt(' return result;') + prnt('}') + else: + prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) + # + prnt('#endif') # ------------------------------ + prnt() + + def _generate_cpy_function_ctx(self, tp, name): + if tp.ellipsis and not self.target_is_python: + self._generate_cpy_constant_ctx(tp, name) + return + type_index = self._typesdict[tp.as_raw_function()] + numargs = len(tp.args) + if self.target_is_python: + meth_kind = OP_DLOPEN_FUNC + elif numargs == 0: + meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' + elif numargs == 1: + meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' + else: + meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' + self._lsts["global"].append( + GlobalExpr(name, '_cffi_f_%s' % name, + CffiOp(meth_kind, type_index), + size='_cffi_d_%s' % name)) + + # ---------- + # named structs or unions + + def _field_type(self, tp_struct, field_name, tp_field): + if isinstance(tp_field, model.ArrayType): + actual_length = tp_field.length + if actual_length == '...': + ptr_struct_name = tp_struct.get_c_name('*') + actual_length = '_cffi_array_len(((%s)0)->%s)' % ( + ptr_struct_name, field_name) + tp_item = self._field_type(tp_struct, '%s[0]' % field_name, + tp_field.item) + tp_field = model.ArrayType(tp_item, actual_length) + return tp_field 
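+
+    # (Editor's note, illustrative only, not part of upstream cffi.)  A short
+    # sketch of what _field_type() above achieves for a '...'-length array
+    # field: given a cdef such as
+    #
+    #     ffi.cdef("struct s { int a[...]; };")
+    #
+    # the unknown length is replaced by a C expression, roughly
+    #
+    #     _cffi_array_len(((struct s *)0)->a)
+    #
+    # which the C compiler of the generated module evaluates, so the real
+    # length is recovered at compile time instead of being guessed by cffi.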
+ + def _struct_collecttype(self, tp): + self._do_collect_type(tp) + if self.target_is_python: + # also requires nested anon struct/unions in ABI mode, recursively + for fldtype in tp.anonymous_struct_fields(): + self._struct_collecttype(fldtype) + + def _struct_decl(self, tp, cname, approxname): + if tp.fldtypes is None: + return + prnt = self._prnt + checkfuncname = '_cffi_checkfld_%s' % (approxname,) + prnt('_CFFI_UNUSED_FN') + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + try: + if ftype.is_integer_type() or fbitsize >= 0: + # accept all integers, but complain on float or double + if fname != '': + prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " + "an integer */" % (fname, cname, fname)) + continue + # only accept exactly the type declared, except that '[]' + # is interpreted as a '*' and so will match any array length. + # (It would also match '*', but that's harder to detect...) + while (isinstance(ftype, model.ArrayType) + and (ftype.length is None or ftype.length == '...')): + ftype = ftype.item + fname = fname + '[0]' + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) + prnt() + + def _struct_ctx(self, tp, cname, approxname, named_ptr=None): + type_index = self._typesdict[tp] + reason_for_not_expanding = None + flags = [] + if isinstance(tp, model.UnionType): + flags.append("_CFFI_F_UNION") + if tp.fldtypes is None: + flags.append("_CFFI_F_OPAQUE") + reason_for_not_expanding = "opaque" + if (tp not in self.ffi._parser._included_declarations and + (named_ptr is None or + named_ptr not in self.ffi._parser._included_declarations)): + if tp.fldtypes is None: + pass # opaque + elif tp.partial or any(tp.anonymous_struct_fields()): + pass # field layout obtained silently from the C compiler + else: + flags.append("_CFFI_F_CHECK_FIELDS") + if tp.packed: + if tp.packed > 1: + raise NotImplementedError( + "%r is declared with 'pack=%r'; only 0 or 1 are " + "supported in API mode (try to use \"...;\", which " + "does not require a 'pack' declaration)" % + (tp, tp.packed)) + flags.append("_CFFI_F_PACKED") + else: + flags.append("_CFFI_F_EXTERNAL") + reason_for_not_expanding = "external" + flags = '|'.join(flags) or '0' + c_fields = [] + if reason_for_not_expanding is None: + expand_anonymous_struct_union = not self.target_is_python + enumfields = list(tp.enumfields(expand_anonymous_struct_union)) + for fldname, fldtype, fbitsize, fqual in enumfields: + fldtype = self._field_type(tp, fldname, fldtype) + self._check_not_opaque(fldtype, + "field '%s.%s'" % (tp.name, fldname)) + # cname is None for _add_missing_struct_unions() only + op = OP_NOOP + if fbitsize >= 0: + op = OP_BITFIELD + size = '%d /* bits */' % fbitsize + elif cname is None or ( + isinstance(fldtype, model.ArrayType) and + fldtype.length is None): + size = '(size_t)-1' + else: + size = 'sizeof(((%s)0)->%s)' % ( + tp.get_c_name('*') if named_ptr is None + else named_ptr.name, + fldname) + if cname is None or fbitsize >= 0: + offset = '(size_t)-1' + elif named_ptr is not None: + offset = '((char *)&((%s)0)->%s) - (char *)0' % ( + named_ptr.name, fldname) + else: + offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) + 
c_fields.append( + FieldExpr(fldname, offset, size, fbitsize, + CffiOp(op, self._typesdict[fldtype]))) + first_field_index = len(self._lsts["field"]) + self._lsts["field"].extend(c_fields) + # + if cname is None: # unknown name, for _add_missing_struct_unions + size = '(size_t)-2' + align = -2 + comment = "unnamed" + else: + if named_ptr is not None: + size = 'sizeof(*(%s)0)' % (named_ptr.name,) + align = '-1 /* unknown alignment */' + else: + size = 'sizeof(%s)' % (cname,) + align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) + comment = None + else: + size = '(size_t)-1' + align = -1 + first_field_index = -1 + comment = reason_for_not_expanding + self._lsts["struct_union"].append( + StructUnionExpr(tp.name, type_index, flags, size, align, comment, + first_field_index, c_fields)) + self._seen_struct_unions.add(tp) + + def _check_not_opaque(self, tp, location): + while isinstance(tp, model.ArrayType): + tp = tp.item + if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: + raise TypeError( + "%s is of an opaque type (not declared in cdef())" % location) + + def _add_missing_struct_unions(self): + # not very nice, but some struct declarations might be missing + # because they don't have any known C name. Check that they are + # not partial (we can't complete or verify them!) and emit them + # anonymously. + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: + if tp not in self._seen_struct_unions: + if tp.partial: + raise NotImplementedError("internal inconsistency: %r is " + "partial but was not seen at " + "this point" % (tp,)) + if tp.name.startswith('$') and tp.name[1:].isdigit(): + approxname = tp.name[1:] + elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': + approxname = 'FILE' + self._typedef_ctx(tp, 'FILE') + else: + raise NotImplementedError("internal inconsistency: %r" % + (tp,)) + self._struct_ctx(tp, None, approxname) + + def _generate_cpy_struct_collecttype(self, tp, name): + self._struct_collecttype(tp) + _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype + + def _struct_names(self, tp): + cname = tp.get_c_name('') + if ' ' in cname: + return cname, cname.replace(' ', '_') + else: + return cname, '_' + cname + + def _generate_cpy_struct_decl(self, tp, name): + self._struct_decl(tp, *self._struct_names(tp)) + _generate_cpy_union_decl = _generate_cpy_struct_decl + + def _generate_cpy_struct_ctx(self, tp, name): + self._struct_ctx(tp, *self._struct_names(tp)) + _generate_cpy_union_ctx = _generate_cpy_struct_ctx + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. + + def _generate_cpy_anonymous_collecttype(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_collecttype(tp, name) + else: + self._struct_collecttype(tp) + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp) + else: + self._struct_decl(tp, name, 'typedef_' + name) + + def _generate_cpy_anonymous_ctx(self, tp, name): + if isinstance(tp, model.EnumType): + self._enum_ctx(tp, name) + else: + self._struct_ctx(tp, name, 'typedef_' + name) + + # ---------- + # constants, declared with "static const ..." 
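+    # (Editor's note, illustrative only, not part of upstream cffi.)  For
+    # orientation: for an integer constant the methods below emit a small C
+    # reader along these lines, with 'FOO' standing for the constant's name:
+    #
+    #     static int _cffi_const_FOO(unsigned long long *o)
+    #     {
+    #       int n = (FOO) <= 0;
+    #       *o = (unsigned long long)((FOO) | 0);
+    #       return n;
+    #     }
+    #
+    # '(FOO) | 0' fails to compile unless FOO is an integer, and 'n' records
+    # whether the value is negative so the runtime can reinterpret *o.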
+ + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + check_value=None): + if (category, name) in self._seen_constants: + raise VerificationError( + "duplicate declaration of %s '%s'" % (category, name)) + self._seen_constants.add((category, name)) + # + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + if is_int: + prnt('static int %s(unsigned long long *o)' % funcname) + prnt('{') + prnt(' int n = (%s) <= 0;' % (name,)) + prnt(' *o = (unsigned long long)((%s) | 0);' + ' /* check that %s is an integer */' % (name, name)) + if check_value is not None: + if check_value > 0: + check_value = '%dU' % (check_value,) + prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) + prnt(' n |= 2;') + prnt(' return n;') + prnt('}') + else: + assert check_value is None + prnt('static void %s(char *o)' % funcname) + prnt('{') + prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = tp.is_integer_type() + if not is_int or self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + def _generate_cpy_constant_ctx(self, tp, name): + if not self.target_is_python and tp.is_integer_type(): + type_op = CffiOp(OP_CONSTANT_INT, -1) + else: + if self.target_is_python: + const_kind = OP_DLOPEN_CONST + else: + const_kind = OP_CONSTANT + type_index = self._typesdict[tp] + type_op = CffiOp(const_kind, type_index) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op)) + + # ---------- + # enums + + def _generate_cpy_enum_collecttype(self, tp, name): + self._do_collect_type(tp) + + def _generate_cpy_enum_decl(self, tp, name=None): + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator) + + def _enum_ctx(self, tp, cname): + type_index = self._typesdict[tp] + type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._lsts["global"].append( + GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, + check_value=enumvalue)) + # + if cname is not None and '$' not in cname and not self.target_is_python: + size = "sizeof(%s)" % cname + signed = "((%s)-1) <= 0" % cname + else: + basetp = tp.build_baseinttype(self.ffi, []) + size = self.ffi.sizeof(basetp) + signed = int(int(self.ffi.cast(basetp, -1)) < 0) + allenums = ",".join(tp.enumerators) + self._lsts["enum"].append( + EnumExpr(tp.name, type_index, size, signed, allenums)) + + def _generate_cpy_enum_ctx(self, tp, name): + self._enum_ctx(tp, tp._get_c_name()) + + # ---------- + # macros: for now only for integers + + def _generate_cpy_macro_collecttype(self, tp, name): + pass + + def _generate_cpy_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) + + def _generate_cpy_macro_ctx(self, tp, name): + if tp == '...': + if self.target_is_python: + raise VerificationError( + "cannot use the syntax '...' in '#define %s ...' 
when " + "using the ABI mode" % (name,)) + check_value = None + else: + check_value = tp # an integer + type_op = CffiOp(OP_CONSTANT_INT, -1) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op, + check_value=check_value)) + + # ---------- + # global variables + + def _global_type(self, tp, global_name): + if isinstance(tp, model.ArrayType): + actual_length = tp.length + if actual_length == '...': + actual_length = '_cffi_array_len(%s)' % (global_name,) + tp_item = self._global_type(tp.item, '%s[0]' % global_name) + tp = model.ArrayType(tp_item, actual_length) + return tp + + def _generate_cpy_variable_collecttype(self, tp, name): + self._do_collect_type(self._global_type(tp, name)) + + def _generate_cpy_variable_decl(self, tp, name): + prnt = self._prnt + tp = self._global_type(tp, name) + if isinstance(tp, model.ArrayType) and tp.length is None: + tp = tp.item + ampersand = '' + else: + ampersand = '&' + # This code assumes that casts from "tp *" to "void *" is a + # no-op, i.e. a function that returns a "tp *" can be called + # as if it returned a "void *". This should be generally true + # on any modern machine. The only exception to that rule (on + # uncommon architectures, and as far as I can tell) might be + # if 'tp' were a function type, but that is not possible here. + # (If 'tp' is a function _pointer_ type, then casts from "fn_t + # **" to "void *" are again no-ops, as far as I can tell.) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) + prnt('{') + prnt(' return %s(%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_cpy_variable_ctx(self, tp, name): + tp = self._global_type(tp, name) + type_index = self._typesdict[tp] + if self.target_is_python: + op = OP_GLOBAL_VAR + else: + op = OP_GLOBAL_VAR_F + self._lsts["global"].append( + GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) + + # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype + + def _extern_python_decl(self, tp, name, tag_and_space): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s.%s", %s };' % (self.module_name, name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + if tp.abi == "__stdcall": + name_and_arguments = '_cffi_stdcall ' + name_and_arguments + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx + + def _print_string_literal_in_array(self, s): + prnt = self._prnt + prnt('// # NB. this is not a string because of a size limit in MSVC') + for line in s.splitlines(True): + prnt(('// ' + line).rstrip()) + printed_line = '' + for c in line: + if len(printed_line) >= 76: + prnt(printed_line) + printed_line = '' + printed_line += '%d,' % (ord(c),) + prnt(printed_line) + + # ---------- + # emitting the opcodes for individual types + + def _emit_bytecode_VoidType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) + + def _emit_bytecode_PrimitiveType(self, tp, index): + prim_index = PRIMITIVE_TO_INDEX[tp.name] + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) + + def _emit_bytecode_UnknownIntegerType(self, tp, index): + s = ('_cffi_prim_int(sizeof(%s), (\n' + ' ((%s)-1) | 0 /* check that %s is an integer type */\n' + ' ) <= 0)' % (tp.name, tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_UnknownFloatType(self, tp, index): + s = ('_cffi_prim_float(sizeof(%s) *\n' + ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' + ' )' % (tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_RawFunctionType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) + index += 1 + for tp1 in tp.args: + realindex = self._typesdict[tp1] + if index != realindex: + if isinstance(tp1, model.PrimitiveType): + self._emit_bytecode_PrimitiveType(tp1, index) + else: + self.cffi_types[index] = CffiOp(OP_NOOP, realindex) + index += 1 + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) + + def _emit_bytecode_PointerType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) + + _emit_bytecode_ConstPointerType = 
_emit_bytecode_PointerType + _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType + + def _emit_bytecode_FunctionPtrType(self, tp, index): + raw = tp.as_raw_function() + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) + + def _emit_bytecode_ArrayType(self, tp, index): + item_index = self._typesdict[tp.item] + if tp.length is None: + self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) + elif tp.length == '...': + raise VerificationError( + "type %s badly placed: the '...' array length can only be " + "used on global arrays or on fields of structures" % ( + str(tp).replace('/*...*/', '...'),)) + else: + assert self.cffi_types[index + 1] == 'LEN' + self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) + self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) + + def _emit_bytecode_StructType(self, tp, index): + struct_index = self._struct_unions[tp] + self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) + _emit_bytecode_UnionType = _emit_bytecode_StructType + + def _emit_bytecode_EnumType(self, tp, index): + enum_index = self._enums[tp] + self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) + recompiler = Recompiler(ffi, module_name, + target_is_python=(preamble is None)) + recompiler.collect_type_table() + recompiler.collect_step_tables() + f = NativeIO() + recompiler.write_source_to_f(f, preamble) + output = f.getvalue() + try: + with open(target_file, 'r') as f1: + if f1.read(len(output) + 1) != output: + raise IOError + if verbose: + print("(already up-to-date)") + return False # already up-to-date + except IOError: + tmp_file = '%s.~%d' % (target_file, os.getpid()) + with open(tmp_file, 'w') as f1: + f1.write(output) + try: + os.rename(tmp_file, target_file) + except OSError: + os.unlink(target_file) + os.rename(tmp_file, target_file) + return True + +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): + assert preamble is not None + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) + +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) + +def _modname_to_file(outputdir, modname, extension): + parts = modname.split('.') + try: + os.makedirs(os.path.join(outputdir, *parts[:-1])) + except OSError: + pass + parts[-1] += extension + return os.path.join(outputdir, *parts), parts + + +# Aaargh. Distutils is not tested at all for the purpose of compiling +# DLLs that are not extension modules. Here are some hacks to work +# around that, in the _patch_for_*() functions... + +def _patch_meth(patchlist, cls, name, new_meth): + old = getattr(cls, name) + patchlist.append((cls, name, old)) + setattr(cls, name, new_meth) + return old + +def _unpatch_meths(patchlist): + for cls, name, old_meth in reversed(patchlist): + setattr(cls, name, old_meth) + +def _patch_for_embedding(patchlist): + if sys.platform == 'win32': + # we must not remove the manifest when building for embedding! 
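+        # (Editor's note, illustrative only, not part of upstream cffi.)
+        # _patch_meth()/_unpatch_meths() above implement a reversible
+        # monkeypatch; recompile() further down uses them roughly as:
+        #
+        #     patchlist = []
+        #     try:
+        #         _patch_for_embedding(patchlist)
+        #         ...   # run the distutils/setuptools build
+        #     finally:
+        #         _unpatch_meths(patchlist)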
+ from distutils.msvc9compiler import MSVCCompiler + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + + if sys.platform == 'darwin': + # we must not make a '-bundle', but a '-dynamiclib' instead + from distutils.ccompiler import CCompiler + def my_link_shared_object(self, *args, **kwds): + if '-bundle' in self.linker_so: + self.linker_so = list(self.linker_so) + i = self.linker_so.index('-bundle') + self.linker_so[i] = '-dynamiclib' + return old_link_shared_object(self, *args, **kwds) + old_link_shared_object = _patch_meth(patchlist, CCompiler, + 'link_shared_object', + my_link_shared_object) + +def _patch_for_target(patchlist, target): + from distutils.command.build_ext import build_ext + # if 'target' is different from '*', we need to patch some internal + # method to just return this 'target' value, instead of having it + # built from module_name + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + elif sys.platform == 'darwin': + target += '.dylib' + else: + target += '.so' + _patch_meth(patchlist, build_ext, 'get_ext_filename', + lambda self, ext_name: target) + + +def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, target=None, debug=None, **kwds): + if not isinstance(module_name, str): + module_name = module_name.encode('ascii') + if ffi._windows_unicode: + ffi._apply_windows_unicode(kwds) + if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) + if c_file is None: + c_file, parts = _modname_to_file(tmpdir, module_name, + source_extension) + if extradir: + parts = [extradir] + parts + ext_c_file = os.path.join(*parts) + else: + ext_c_file = c_file + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + # + ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) + if call_c_compiler: + patchlist = [] + cwd = os.getcwd() + try: + if embedding: + _patch_for_embedding(patchlist) + if target != '*': + _patch_for_target(patchlist, target) + if compiler_verbose: + if tmpdir == '.': + msg = 'the current directory is' + else: + msg = 'setting the current directory to' + print('%s %r' % (msg, os.path.abspath(tmpdir))) + os.chdir(tmpdir) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) + finally: + os.chdir(cwd) + _unpatch_meths(patchlist) + return outputfilename + else: + return ext, updated + else: + if c_file is None: + c_file, _ = _modname_to_file(tmpdir, module_name, '.py') + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) + if call_c_compiler: + return c_file + else: + return None, updated + diff --git a/server/www/packages/packages-linux/x64/cffi/setuptools_ext.py b/server/www/packages/packages-linux/x64/cffi/setuptools_ext.py new file mode 100644 index 0000000..df5a518 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/setuptools_ext.py @@ -0,0 +1,217 @@ +import os +import sys + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +def error(msg): + from distutils.errors import DistutilsSetupError + raise DistutilsSetupError(msg) + + +def execfile(filename, glob): + # We use execfile() (here rewritten for Python 3) instead of + # __import__() to load the build script. 
The problem with + # a normal import is that in some packages, the intermediate + # __init__.py files may already try to import the file that + # we are generating. + with open(filename) as f: + src = f.read() + src += '\n' # Python 2.6 compatibility + code = compile(src, filename, 'exec') + exec(code, glob, glob) + + +def add_cffi_module(dist, mod_spec): + from cffi.api import FFI + + if not isinstance(mod_spec, basestring): + error("argument to 'cffi_modules=...' must be a str or a list of str," + " not %r" % (type(mod_spec).__name__,)) + mod_spec = str(mod_spec) + try: + build_file_name, ffi_var_name = mod_spec.split(':') + except ValueError: + error("%r must be of the form 'path/build.py:ffi_variable'" % + (mod_spec,)) + if not os.path.exists(build_file_name): + ext = '' + rewritten = build_file_name.replace('.', '/') + '.py' + if os.path.exists(rewritten): + ext = ' (rewrite cffi_modules to [%r])' % ( + rewritten + ':' + ffi_var_name,) + error("%r does not name an existing file%s" % (build_file_name, ext)) + + mod_vars = {'__name__': '__cffi__', '__file__': build_file_name} + execfile(build_file_name, mod_vars) + + try: + ffi = mod_vars[ffi_var_name] + except KeyError: + error("%r: object %r not found in module" % (mod_spec, + ffi_var_name)) + if not isinstance(ffi, FFI): + ffi = ffi() # maybe it's a function instead of directly an ffi + if not isinstance(ffi, FFI): + error("%r is not an FFI instance (got %r)" % (mod_spec, + type(ffi).__name__)) + if not hasattr(ffi, '_assigned_source'): + error("%r: the set_source() method was not called" % (mod_spec,)) + module_name, source, source_extension, kwds = ffi._assigned_source + if ffi._windows_unicode: + kwds = kwds.copy() + ffi._apply_windows_unicode(kwds) + + if source is None: + _add_py_module(dist, ffi, module_name) + else: + _add_c_module(dist, ffi, module_name, source, source_extension, kwds) + +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + + CPython itself should ignore the flag in a debugging version + (by not listing .abi3.so in the extensions it supports), but + it doesn't so far, creating troubles. That's why we check + for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent + of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401) + + On Windows, with CPython <= 3.4, it's better not to use py_limited_api + because virtualenv *still* doesn't copy PYTHON3.DLL on these versions. + For now we'll skip py_limited_api on all Windows versions to avoid an + inconsistent mess. + """ + if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount') + and sys.platform != 'win32'): + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds + +def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): + from distutils.core import Extension + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+    from setuptools.command.build_ext import build_ext
+    from distutils.dir_util import mkpath
+    from distutils import log
+    from cffi import recompiler
+
+    allsources = ['$PLACEHOLDER']
+    allsources.extend(kwds.pop('sources', []))
+    kwds = _set_py_limited_api(Extension, kwds)
+    ext = Extension(name=module_name, sources=allsources, **kwds)
+
+    def make_mod(tmpdir, pre_run=None):
+        c_file = os.path.join(tmpdir, module_name + source_extension)
+        log.info("generating cffi module %r" % c_file)
+        mkpath(tmpdir)
+        # a setuptools-only, API-only hook: called with the "ext" and "ffi"
+        # arguments just before we turn the ffi into C code.  To use it,
+        # subclass the 'distutils.command.build_ext.build_ext' class and
+        # add a method 'def pre_run(self, ext, ffi)'.
+        if pre_run is not None:
+            pre_run(ext, ffi)
+        updated = recompiler.make_c_source(ffi, module_name, source, c_file)
+        if not updated:
+            log.info("already up-to-date")
+        return c_file
+
+    if dist.ext_modules is None:
+        dist.ext_modules = []
+    dist.ext_modules.append(ext)
+
+    base_class = dist.cmdclass.get('build_ext', build_ext)
+    class build_ext_make_mod(base_class):
+        def run(self):
+            if ext.sources[0] == '$PLACEHOLDER':
+                pre_run = getattr(self, 'pre_run', None)
+                ext.sources[0] = make_mod(self.build_temp, pre_run)
+            base_class.run(self)
+    dist.cmdclass['build_ext'] = build_ext_make_mod
+    # NB. multiple runs here will create multiple 'build_ext_make_mod'
+    # classes.  Even in this case the 'build_ext' command should be
+    # run once; but just in case, the logic above does nothing if
+    # called again.
+
+
+def _add_py_module(dist, ffi, module_name):
+    from distutils.dir_util import mkpath
+    from setuptools.command.build_py import build_py
+    from setuptools.command.build_ext import build_ext
+    from distutils import log
+    from cffi import recompiler
+
+    def generate_mod(py_file):
+        log.info("generating cffi module %r" % py_file)
+        mkpath(os.path.dirname(py_file))
+        updated = recompiler.make_py_source(ffi, module_name, py_file)
+        if not updated:
+            log.info("already up-to-date")
+
+    base_class = dist.cmdclass.get('build_py', build_py)
+    class build_py_make_mod(base_class):
+        def run(self):
+            base_class.run(self)
+            module_path = module_name.split('.')
+            module_path[-1] += '.py'
+            generate_mod(os.path.join(self.build_lib, *module_path))
+        def get_source_files(self):
+            # This is called from 'setup.py sdist' only.  Exclude
+            # the generated .py module in this case.
+            saved_py_modules = self.py_modules
+            try:
+                if saved_py_modules:
+                    self.py_modules = [m for m in saved_py_modules
+                                       if m != module_name]
+                return base_class.get_source_files(self)
+            finally:
+                self.py_modules = saved_py_modules
+    dist.cmdclass['build_py'] = build_py_make_mod
+
+    # distutils and setuptools have no notion I could find of a
+    # generated python module.  If we don't add module_name to
+    # dist.py_modules, then things mostly work but there are some
+    # combinations of options (--root and --record) that will miss
+    # the module.  So we add it here, which gives a few apparently
+    # harmless warnings about not finding the file outside the
+    # build directory.
+    # Then we need to hack more in get_source_files(); see above.
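+
+    # (Editor's note, illustrative only, not part of upstream cffi.)  For
+    # context: _add_py_module() is the ABI-mode path, reached when the build
+    # script gave ffi.set_source(module_name, None), so only a pure-Python
+    # stub module is generated, with no C extension.  A hypothetical minimal
+    # setup.py driving it:
+    #
+    #     from setuptools import setup
+    #     setup(name='demo',
+    #           setup_requires=['cffi'],
+    #           cffi_modules=['demo_build.py:ffi'],
+    #           install_requires=['cffi'])
+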
+    if dist.py_modules is None:
+        dist.py_modules = []
+    dist.py_modules.append(module_name)
+
+    # the following is only for "build_ext -i"
+    base_class_2 = dist.cmdclass.get('build_ext', build_ext)
+    class build_ext_make_mod(base_class_2):
+        def run(self):
+            base_class_2.run(self)
+            if self.inplace:
+                # from get_ext_fullpath() in distutils/command/build_ext.py
+                module_path = module_name.split('.')
+                package = '.'.join(module_path[:-1])
+                build_py = self.get_finalized_command('build_py')
+                package_dir = build_py.get_package_dir(package)
+                file_name = module_path[-1] + '.py'
+                generate_mod(os.path.join(package_dir, file_name))
+    dist.cmdclass['build_ext'] = build_ext_make_mod
+
+def cffi_modules(dist, attr, value):
+    assert attr == 'cffi_modules'
+    if isinstance(value, basestring):
+        value = [value]
+
+    for cffi_module in value:
+        add_cffi_module(dist, cffi_module)
diff --git a/server/www/packages/packages-linux/x64/cffi/vengine_cpy.py b/server/www/packages/packages-linux/x64/cffi/vengine_cpy.py
new file mode 100644
index 0000000..536f11f
--- /dev/null
+++ b/server/www/packages/packages-linux/x64/cffi/vengine_cpy.py
@@ -0,0 +1,1015 @@
+#
+# DEPRECATED: implementation for ffi.verify()
+#
+import sys, imp
+from . import model
+from .error import VerificationError
+
+
+class VCPythonEngine(object):
+    _class_key = 'x'
+    _gen_python_module = True
+
+    def __init__(self, verifier):
+        self.verifier = verifier
+        self.ffi = verifier.ffi
+        self._struct_pending_verification = {}
+        self._types_of_builtin_functions = {}
+
+    def patch_extension_kwds(self, kwds):
+        pass
+
+    def find_module(self, module_name, path, so_suffixes):
+        try:
+            f, filename, descr = imp.find_module(module_name, path)
+        except ImportError:
+            return None
+        if f is not None:
+            f.close()
+        # Note that after a setuptools installation, there are both .py
+        # and .so files with the same basename.  The code here relies on
+        # imp.find_module() locating the .so in priority.
+        if descr[0] not in so_suffixes:
+            return None
+        return filename
+
+    def collect_types(self):
+        self._typesdict = {}
+        self._generate("collecttype")
+
+    def _prnt(self, what=''):
+        self._f.write(what + '\n')
+
+    def _gettypenum(self, type):
+        # a KeyError here is a bug.  please report it! :-)
+        return self._typesdict[type]
+
+    def _do_collect_type(self, tp):
+        if ((not isinstance(tp, model.PrimitiveType)
+             or tp.name == 'long double')
+                and tp not in self._typesdict):
+            num = len(self._typesdict)
+            self._typesdict[tp] = num
+
+    def write_source_to_f(self):
+        self.collect_types()
+        #
+        # The new module will have a _cffi_setup() function that receives
+        # objects from the ffi world, and that calls some setup code in
+        # the module.  This setup code is split in several independent
+        # functions, e.g. one per constant.  The functions are "chained"
+        # by ending in a tail call to each other.
+        #
+        # This is further split in two chained lists, depending on if we
+        # can do it at import-time or if we must wait for _cffi_setup() to
+        # provide us with the <ctype> objects.  This is needed because we
+        # need the values of the enum constants in order to build the
+        # <ctype 'enum'> that we may have to pass to _cffi_setup().
+        #
+        # The following two 'chained_list_constants' items contain
+        # the head of these two chained lists, as a string that gives the
+        # call to do, if any.
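+        #
+        # (Editor's note, illustrative only, not part of upstream cffi.)
+        # Sketch of the chaining with two hypothetical constants FOO and BAR:
+        # each generated setup function ends by tail-calling the previous
+        # head of the list,
+        #
+        #     static int _cffi_const_FOO(PyObject *lib) { ...; return ((void)lib,0); }
+        #     static int _cffi_const_BAR(PyObject *lib) { ...; return _cffi_const_FOO(lib); }
+        #
+        # so invoking the final head runs every setup function in turn;
+        # '((void)lib,0)' below is the terminator that ends a chain with 0.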
+        self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
+        #
+        prnt = self._prnt
+        # first paste some standard set of lines that are mostly '#define'
+        prnt(cffimod_header)
+        prnt()
+        # then paste the C source given by the user, verbatim.
+        prnt(self.verifier.preamble)
+        prnt()
+        #
+        # call generate_cpy_xxx_decl(), for every xxx found from
+        # ffi._parser._declarations.  This generates all the functions.
+        self._generate("decl")
+        #
+        # implement the function _cffi_setup_custom() as calling the
+        # head of the chained list.
+        self._generate_setup_custom()
+        prnt()
+        #
+        # produce the method table, including the entries for the
+        # generated Python->C function wrappers, which are done
+        # by generate_cpy_function_method().
+        prnt('static PyMethodDef _cffi_methods[] = {')
+        self._generate("method")
+        prnt('  {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
+        prnt('  {NULL, NULL, 0, NULL}    /* Sentinel */')
+        prnt('};')
+        prnt()
+        #
+        # standard init.
+        modname = self.verifier.get_module_name()
+        constants = self._chained_list_constants[False]
+        prnt('#if PY_MAJOR_VERSION >= 3')
+        prnt()
+        prnt('static struct PyModuleDef _cffi_module_def = {')
+        prnt('  PyModuleDef_HEAD_INIT,')
+        prnt('  "%s",' % modname)
+        prnt('  NULL,')
+        prnt('  -1,')
+        prnt('  _cffi_methods,')
+        prnt('  NULL, NULL, NULL, NULL')
+        prnt('};')
+        prnt()
+        prnt('PyMODINIT_FUNC')
+        prnt('PyInit_%s(void)' % modname)
+        prnt('{')
+        prnt('  PyObject *lib;')
+        prnt('  lib = PyModule_Create(&_cffi_module_def);')
+        prnt('  if (lib == NULL)')
+        prnt('    return NULL;')
+        prnt('  if (%s < 0 || _cffi_init() < 0) {' % (constants,))
+        prnt('    Py_DECREF(lib);')
+        prnt('    return NULL;')
+        prnt('  }')
+        prnt('  return lib;')
+        prnt('}')
+        prnt()
+        prnt('#else')
+        prnt()
+        prnt('PyMODINIT_FUNC')
+        prnt('init%s(void)' % modname)
+        prnt('{')
+        prnt('  PyObject *lib;')
+        prnt('  lib = Py_InitModule("%s", _cffi_methods);' % modname)
+        prnt('  if (lib == NULL)')
+        prnt('    return;')
+        prnt('  if (%s < 0 || _cffi_init() < 0)' % (constants,))
+        prnt('    return;')
+        prnt('  return;')
+        prnt('}')
+        prnt()
+        prnt('#endif')
+
+    def load_library(self, flags=None):
+        # XXX review all usages of 'self' here!
+        # import it as a new extension module
+        imp.acquire_lock()
+        try:
+            if hasattr(sys, "getdlopenflags"):
+                previous_flags = sys.getdlopenflags()
+            try:
+                if hasattr(sys, "setdlopenflags") and flags is not None:
+                    sys.setdlopenflags(flags)
+                module = imp.load_dynamic(self.verifier.get_module_name(),
+                                          self.verifier.modulefilename)
+            except ImportError as e:
+                error = "importing %r: %s" % (self.verifier.modulefilename, e)
+                raise VerificationError(error)
+            finally:
+                if hasattr(sys, "setdlopenflags"):
+                    sys.setdlopenflags(previous_flags)
+        finally:
+            imp.release_lock()
+        #
+        # call loading_cpy_struct() to get the struct layout inferred by
+        # the C compiler
+        self._load(module, 'loading')
+        #
+        # the C code will need the <ctype> objects.  Collect them in
+        # order in a list.
+        revmapping = dict([(value, key)
+                           for (key, value) in self._typesdict.items()])
+        lst = [revmapping[i] for i in range(len(revmapping))]
+        lst = list(map(self.ffi._get_cached_btype, lst))
+        #
+        # build the FFILibrary class and instance and call _cffi_setup().
+        # this will set up some fields like '_cffi_types', and only then
+        # it will invoke the chained list of functions that will really
+        # build (notably) the constant objects, as <cdata> objects if they
+        # are pointers, and store them as attributes on the 'library' object.
+ class FFILibrary(object): + _cffi_python_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) + library = FFILibrary() + if module._cffi_setup(lst, VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) + # + # finally, call the loaded_cpy_xxx() functions. This will perform + # the final adjustments, like copying the Python->C wrapper + # functions from the module to the 'library' object, and setting + # up the FFILibrary class with properties for the global C variables. + self._load(module, 'loaded', library=library) + module._cffi_original_ffi = self.ffi + module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_cpy_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + + def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): + extraarg = '' + if isinstance(tp, model.PrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + converter = '_cffi_to_c_int' + extraarg = ', %s' % tp.name + else: + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) + errvalue = '-1' + # + elif isinstance(tp, model.PointerType): + self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, + tovar, errcode) + return + # + elif isinstance(tp, (model.StructOrUnion, model.EnumType)): + # a struct (not a struct pointer) as a function argument + self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' + % (tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + return + # + elif isinstance(tp, model.FunctionPtrType): + converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') + extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) + errvalue = 'NULL' + # + else: + raise NotImplementedError(tp) + # + self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) + self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( + tovar, tp.get_c_name(''), errvalue)) + self._prnt(' %s;' % errcode) + + def _extra_local_variables(self, tp, localvars): + if isinstance(tp, model.PointerType): + localvars.add('Py_ssize_t datasize') + + def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): + self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') + self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( + self._gettypenum(tp), fromvar, tovar)) + self._prnt(' if (datasize != 0) {') + self._prnt(' if (datasize < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 
0, (size_t)datasize);' % (tovar,)) + self._prnt(' if (_cffi_convert_array_from_object(' + '(char *)%s, _cffi_type(%d), %s) < 0)' % ( + tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.PrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif tp.name != 'long double': + return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs: generates no code so far + + _generate_cpy_typedef_collecttype = _generate_nothing + _generate_cpy_typedef_decl = _generate_nothing + _generate_cpy_typedef_method = _generate_nothing + _loading_cpy_typedef = _loaded_noop + _loaded_cpy_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + self._do_collect_type(tp) + else: + # don't call _do_collect_type(tp) in this common case, + # otherwise test_autofilled_struct_as_argument fails + for type in tp.args: + self._do_collect_type(type) + self._do_collect_type(tp.result) + + def _generate_cpy_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + prnt(' %s;' % type.get_c_name(' x%d' % i, context)) + # + localvars = set() + for type in tp.args: + self._extra_local_variables(type, localvars) + for decl in localvars: + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + prnt(' %s;' % tp.result.get_c_name(' result', context)) + else: + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % ( + 'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' 
Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + prnt(' { %s%s(%s); }' % ( + result_code, name, + ', '.join(['x%d' % i for i in range(len(tp.args))]))) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' return %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + else: + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + prnt() + + def _generate_cpy_function_method(self, tp, name): + if tp.ellipsis: + return + numargs = len(tp.args) + if numargs == 0: + meth = 'METH_NOARGS' + elif numargs == 1: + meth = 'METH_O' + else: + meth = 'METH_VARARGS' + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) + + _loading_cpy_function = _loaded_noop + + def _loaded_cpy_function(self, tp, name, module, library): + if tp.ellipsis: + return + func = getattr(module, name) + setattr(library, name, func) + self._types_of_builtin_functions[func] = tp + + # ---------- + # named structs + + _generate_cpy_struct_collecttype = _generate_nothing + def _generate_cpy_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + def _generate_cpy_struct_method(self, tp, name): + self._generate_struct_or_union_method(tp, 'struct', name) + def _loading_cpy_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + def _loaded_cpy_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + _generate_cpy_union_collecttype = _generate_nothing + def _generate_cpy_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + def _generate_cpy_union_method(self, tp, name): + self._generate_struct_or_union_method(tp, 'union', name) + def _loading_cpy_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + def _loaded_cpy_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + and ftype.is_integer_type()) or fbitsize >= 0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. 
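+                # (Editor's note, illustrative only, not part of upstream
+                # cffi.)  The emitted line is a compile-time type check:
+                # it takes '&p->field' and assigns it to a pointer of the
+                # cdef'd type.  E.g. if the cdef said 'long n;' but the real
+                # struct has 'int n;', the generated
+                #     { long *tmp = &p->n; (void)tmp; }
+                # makes the C compiler warn or error out during verify().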
+ try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('static PyObject *') + prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static Py_ssize_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') + prnt(' return _cffi_get_struct_layout(nums);') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _generate_struct_or_union_method(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + function = getattr(module, layoutfuncname) + layout = function() + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. 
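+    # (Editor's note, illustrative only, not part of upstream cffi.)  Example
+    # of a declaration that takes this path: the cdef
+    #
+    #     typedef struct { int x, y; } point_t;
+    #
+    # has no struct tag, so the struct is 'anonymous' and its usable name,
+    # 'point_t', comes from the typedef.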
+ + _generate_cpy_anonymous_collecttype = _generate_nothing + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _generate_cpy_anonymous_method(self, tp, name): + if not isinstance(tp, model.EnumType): + self._generate_struct_or_union_method(tp, '', name) + + def _loading_cpy_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_cpy_enum(tp, name, module) + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_cpy_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_cpy_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + vartp=None, delayed=True, size_too=False, + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + prnt('static int %s(PyObject *lib)' % funcname) + prnt('{') + prnt(' PyObject *o;') + prnt(' int res;') + if not is_int: + prnt(' %s;' % (vartp or tp).get_c_name(' i', name)) + else: + assert category == 'const' + # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # + if not is_int: + if category == 'var': + realexpr = '&' + name + else: + realexpr = name + prnt(' i = (%s);' % (realexpr,)) + prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i', + 'variable type'),)) + assert delayed + else: + prnt(' o = _cffi_from_c_int_const(%s);' % name) + prnt(' if (o == NULL)') + prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') + prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) + prnt(' Py_DECREF(o);') + prnt(' if (res < 0)') + prnt(' return -1;') + prnt(' return %s;' % self._chained_list_constants[delayed]) + self._chained_list_constants[delayed] = funcname + '(lib)' + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + if not is_int: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + _generate_cpy_constant_method = _generate_nothing + _loading_cpy_constant = _loaded_noop + _loaded_cpy_constant = _loaded_noop + + # ---------- + # enums + + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def 
_generate_cpy_enum_decl(self, tp, name, prefix='enum'):
+        if tp.partial:
+            for enumerator in tp.enumerators:
+                self._generate_cpy_const(True, enumerator, delayed=False)
+            return
+        #
+        funcname = self._enum_funcname(prefix, name)
+        prnt = self._prnt
+        prnt('static int %s(PyObject *lib)' % funcname)
+        prnt('{')
+        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
+            self._check_int_constant_value(enumerator, enumvalue,
+                                           "enum %s: " % name)
+        prnt('  return %s;' % self._chained_list_constants[True])
+        self._chained_list_constants[True] = funcname + '(lib)'
+        prnt('}')
+        prnt()
+
+    _generate_cpy_enum_collecttype = _generate_nothing
+    _generate_cpy_enum_method = _generate_nothing
+
+    def _loading_cpy_enum(self, tp, name, module):
+        if tp.partial:
+            enumvalues = [getattr(module, enumerator)
+                          for enumerator in tp.enumerators]
+            tp.enumvalues = tuple(enumvalues)
+            tp.partial_resolved = True
+
+    def _loaded_cpy_enum(self, tp, name, module, library):
+        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
+            setattr(library, enumerator, enumvalue)
+
+    # ----------
+    # macros: for now only for integers
+
+    def _generate_cpy_macro_decl(self, tp, name):
+        if tp == '...':
+            check_value = None
+        else:
+            check_value = tp     # an integer
+        self._generate_cpy_const(True, name, check_value=check_value)
+
+    _generate_cpy_macro_collecttype = _generate_nothing
+    _generate_cpy_macro_method = _generate_nothing
+    _loading_cpy_macro = _loaded_noop
+    _loaded_cpy_macro = _loaded_noop
+
+    # ----------
+    # global variables
+
+    def _generate_cpy_variable_collecttype(self, tp, name):
+        if isinstance(tp, model.ArrayType):
+            tp_ptr = model.PointerType(tp.item)
+        else:
+            tp_ptr = model.PointerType(tp)
+        self._do_collect_type(tp_ptr)
+
+    def _generate_cpy_variable_decl(self, tp, name):
+        if isinstance(tp, model.ArrayType):
+            tp_ptr = model.PointerType(tp.item)
+            self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
+                                     size_too = (tp.length == '...'))
+        else:
+            tp_ptr = model.PointerType(tp)
+            self._generate_cpy_const(False, name, tp_ptr, category='var')
+
+    _generate_cpy_variable_method = _generate_nothing
+    _loading_cpy_variable = _loaded_noop
+
+    def _loaded_cpy_variable(self, tp, name, module, library):
+        value = getattr(library, name)
+        if isinstance(tp, model.ArrayType):   # int a[5] is "constant" in the
+                                              # sense that "a=..." is forbidden
+            if tp.length == '...':
+                assert isinstance(value, tuple)
+                (value, size) = value
+                BItemType = self.ffi._get_cached_btype(tp.item)
+                length, rest = divmod(size, self.ffi.sizeof(BItemType))
+                if rest != 0:
+                    raise VerificationError(
+                        "bad size: %r does not seem to be an array of %s" %
+                        (name, tp.item))
+                tp = tp.resolve_length(length)
+            # 'value' is a <cdata 'type *'> which we have to replace with
+            # a <cdata 'type[N]'> if the N is actually known
+            if tp.length is not None:
+                BArray = self.ffi._get_cached_btype(tp)
+                value = self.ffi.cast(BArray, value)
+                setattr(library, name, value)
+            return
+        # remove ptr=<cdata 'int *'> from the library instance, and replace
+        # it by a property on the class, which reads/writes into ptr[0].
+        ptr = value
+        delattr(library, name)
+        def getter(library):
+            return ptr[0]
+        def setter(library, value):
+            ptr[0] = value
+        setattr(type(library), name, property(getter, setter))
+        type(library)._cffi_dir.append(name)
+
+    # ----------
+
+    def _generate_setup_custom(self):
+        prnt = self._prnt
+        prnt('static int _cffi_setup_custom(PyObject *lib)')
+        prnt('{')
+        prnt('  return %s;' % self._chained_list_constants[True])
+        prnt('}')
+
+cffimod_header = r'''
+#include <Python.h>
+#include <stddef.h>
+
+/* this block of #ifs should be kept exactly identical between
+   c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
+   and cffi/_cffi_include.h */
+#if defined(_MSC_VER)
+# include <malloc.h>   /* for alloca() */
+# if _MSC_VER < 1600   /* MSVC < 2010 */
+   typedef __int8 int8_t;
+   typedef __int16 int16_t;
+   typedef __int32 int32_t;
+   typedef __int64 int64_t;
+   typedef unsigned __int8 uint8_t;
+   typedef unsigned __int16 uint16_t;
+   typedef unsigned __int32 uint32_t;
+   typedef unsigned __int64 uint64_t;
+   typedef __int8 int_least8_t;
+   typedef __int16 int_least16_t;
+   typedef __int32 int_least32_t;
+   typedef __int64 int_least64_t;
+   typedef unsigned __int8 uint_least8_t;
+   typedef unsigned __int16 uint_least16_t;
+   typedef unsigned __int32 uint_least32_t;
+   typedef unsigned __int64 uint_least64_t;
+   typedef __int8 int_fast8_t;
+   typedef __int16 int_fast16_t;
+   typedef __int32 int_fast32_t;
+   typedef __int64 int_fast64_t;
+   typedef unsigned __int8 uint_fast8_t;
+   typedef unsigned __int16 uint_fast16_t;
+   typedef unsigned __int32 uint_fast32_t;
+   typedef unsigned __int64 uint_fast64_t;
+   typedef __int64 intmax_t;
+   typedef unsigned __int64 uintmax_t;
+# else
+#  include <stdint.h>
+# endif
+# if _MSC_VER < 1800   /* MSVC < 2013 */
+#  ifndef __cplusplus
+    typedef unsigned char _Bool;
+#  endif
+# endif
+#else
+# include <stdint.h>
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
+#  include <alloca.h>
+# endif
+#endif
+
+#if PY_MAJOR_VERSION < 3
+# undef PyCapsule_CheckExact
+# undef PyCapsule_GetPointer
+# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
+# define PyCapsule_GetPointer(capsule, name) \
+    (PyCObject_AsVoidPtr(capsule))
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+# define PyInt_FromLong PyLong_FromLong
+#endif
+
+#define _cffi_from_c_double PyFloat_FromDouble
+#define _cffi_from_c_float PyFloat_FromDouble
+#define _cffi_from_c_long PyInt_FromLong
+#define _cffi_from_c_ulong PyLong_FromUnsignedLong
+#define _cffi_from_c_longlong PyLong_FromLongLong
+#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
+#define _cffi_from_c__Bool PyBool_FromLong
+
+#define _cffi_to_c_double PyFloat_AsDouble
+#define _cffi_to_c_float PyFloat_AsDouble
+
+#define _cffi_from_c_int_const(x)                                        \
+    (((x) > 0) ?                                                         \
+        ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ?      \
+            PyInt_FromLong((long)(x)) :                                  \
+            PyLong_FromUnsignedLongLong((unsigned long long)(x)) :       \
+        ((long long)(x) >= (long long)LONG_MIN) ?                        \
+            PyInt_FromLong((long)(x)) :                                  \
+            PyLong_FromLongLong((long long)(x)))
+
+#define _cffi_from_c_int(x, type)                                        \
+    (((type)-1) > 0 ? /* unsigned */                                     \
+        (sizeof(type) < sizeof(long) ?                                   \
+            PyInt_FromLong((long)x) :                                    \
+         sizeof(type) == sizeof(long) ?                                  \
+            PyLong_FromUnsignedLong((unsigned long)x) :                  \
+            PyLong_FromUnsignedLongLong((unsigned long long)x)) :        \
+        (sizeof(type) <= sizeof(long) ?                                  \
+            PyInt_FromLong((long)x) :                                    \
+            PyLong_FromLongLong((long long)x)))
+
+#define _cffi_to_c_int(o, type)                                          \
+    ((type)(                                                             \
+     sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o)        \
+                                         : (type)_cffi_to_c_i8(o)) :     \
+     sizeof(type) == 2 ?
(((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ + (Py_FatalError("unsupported size for type " #type), (type)0))) + +#define _cffi_to_c_i8 \ + ((int(*)(PyObject *))_cffi_exports[1]) +#define _cffi_to_c_u8 \ + ((int(*)(PyObject *))_cffi_exports[2]) +#define _cffi_to_c_i16 \ + ((int(*)(PyObject *))_cffi_exports[3]) +#define _cffi_to_c_u16 \ + ((int(*)(PyObject *))_cffi_exports[4]) +#define _cffi_to_c_i32 \ + ((int(*)(PyObject *))_cffi_exports[5]) +#define _cffi_to_c_u32 \ + ((unsigned int(*)(PyObject *))_cffi_exports[6]) +#define _cffi_to_c_i64 \ + ((long long(*)(PyObject *))_cffi_exports[7]) +#define _cffi_to_c_u64 \ + ((unsigned long long(*)(PyObject *))_cffi_exports[8]) +#define _cffi_to_c_char \ + ((int(*)(PyObject *))_cffi_exports[9]) +#define _cffi_from_c_pointer \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10]) +#define _cffi_to_c_pointer \ + ((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11]) +#define _cffi_get_struct_layout \ + ((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12]) +#define _cffi_restore_errno \ + ((void(*)(void))_cffi_exports[13]) +#define _cffi_save_errno \ + ((void(*)(void))_cffi_exports[14]) +#define _cffi_from_c_char \ + ((PyObject *(*)(char))_cffi_exports[15]) +#define _cffi_from_c_deref \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16]) +#define _cffi_to_c \ + ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17]) +#define _cffi_from_c_struct \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18]) +#define _cffi_to_c_wchar_t \ + ((wchar_t(*)(PyObject *))_cffi_exports[19]) +#define _cffi_from_c_wchar_t \ + ((PyObject *(*)(wchar_t))_cffi_exports[20]) +#define _cffi_to_c_long_double \ + ((long double(*)(PyObject *))_cffi_exports[21]) +#define _cffi_to_c__Bool \ + ((_Bool(*)(PyObject *))_cffi_exports[22]) +#define _cffi_prepare_pointer_call_argument \ + ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) +#define _cffi_convert_array_from_object \ + ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_NUM_EXPORTS 25 + +typedef struct _ctypedescr CTypeDescrObject; + +static void *_cffi_exports[_CFFI_NUM_EXPORTS]; +static PyObject *_cffi_types, *_cffi_VerificationError; + +static int _cffi_setup_custom(PyObject *lib); /* forward */ + +static PyObject *_cffi_setup(PyObject *self, PyObject *args) +{ + PyObject *library; + int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ + if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, + &library)) + return NULL; + Py_INCREF(_cffi_types); + Py_INCREF(_cffi_VerificationError); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); +} + +static int _cffi_init(void) +{ + PyObject *module, *c_api_object = NULL; + + module = PyImport_ImportModule("_cffi_backend"); + if (module == NULL) + goto failure; + + c_api_object = PyObject_GetAttrString(module, "_C_API"); + if (c_api_object == NULL) + goto failure; + if (!PyCapsule_CheckExact(c_api_object)) { + PyErr_SetNone(PyExc_ImportError); + goto failure; + } + memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), + _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); + Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + 
Py_XDECREF(c_api_object); + return -1; +} + +#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) + +/**********/ +''' diff --git a/server/www/packages/packages-linux/x64/cffi/vengine_gen.py b/server/www/packages/packages-linux/x64/cffi/vengine_gen.py new file mode 100644 index 0000000..a64ff64 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cffi/vengine_gen.py @@ -0,0 +1,675 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, os +import types + +from . import model +from .error import VerificationError + + +class VGenericEngine(object): + _class_key = 'g' + _gen_python_module = False + + def __init__(self, verifier): + self.verifier = verifier + self.ffi = verifier.ffi + self.export_symbols = [] + self._struct_pending_verification = {} + + def patch_extension_kwds(self, kwds): + # add 'export_symbols' to the dictionary. Note that we add the + # list before filling it. When we fill it, it will thus also show + # up in kwds['export_symbols']. + kwds.setdefault('export_symbols', self.export_symbols) + + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + + def collect_types(self): + pass # not needed in the generic engine + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self): + prnt = self._prnt + # first paste some standard set of lines that are mostly '#include' + prnt(cffimod_header) + # then paste the C source given by the user, verbatim. + prnt(self.verifier.preamble) + # + # call generate_gen_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. + self._generate('decl') + # + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + if sys.platform == 'win32': + if sys.version_info >= (3,): + prefix = 'PyInit_' + else: + prefix = 'init' + modname = self.verifier.get_module_name() + prnt("void %s%s(void) { }\n" % (prefix, modname)) + + def load_library(self, flags=0): + # import it with the CFFI backend + backend = self.ffi._backend + # needs to make a path that contains '/', on Posix + filename = os.path.join(os.curdir, self.verifier.modulefilename) + module = backend.load_library(filename, flags) + # + # call loading_gen_struct() to get the struct layout inferred by + # the C compiler + self._load(module, 'loading') + + # build the FFILibrary class and instance, this is a module subclass + # because modules are expected to have usually-constant-attributes and + # in PyPy this means the JIT is able to treat attributes as constant, + # which we want. + class FFILibrary(types.ModuleType): + _cffi_generic_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + library = FFILibrary("") + # + # finally, call the loaded_gen_xxx() functions. This will set + # up the 'library' object. 
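(For orientation: the generic engine drives every parsed declaration through three steps — 'decl' while the C source is written, 'loading' right after the module is dlopen'ed to read struct layouts back, and 'loaded' to populate the library object. The per-kind step methods are resolved by name, see _generate()/_load() just below; a minimal sketch of that dispatch, using a hypothetical declaration key:)

    # declaration keys in ffi._parser._declarations look like
    # 'function foo' or 'struct point'
    name, step_name = 'function foo', 'decl'
    kind, realname = name.split(' ', 1)
    method = getattr(self, '_generate_gen_%s_%s' % (kind, step_name))
    # -> self._generate_gen_function_decl, invoked as method(tp, realname)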
+ self._load(module, 'loaded', library=library) + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_gen_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_gen_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + # typedefs: generates no code so far + + _generate_gen_typedef_decl = _generate_nothing + _loading_gen_typedef = _loaded_noop + _loaded_gen_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_gen_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no _cffi_f_%s wrapper) + self._generate_gen_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + argnames = [] + for i, type in enumerate(tp.args): + indirection = '' + if isinstance(type, model.StructOrUnion): + indirection = '*' + argnames.append('%sx%d' % (indirection, i)) + context = 'argument of %s' % name + arglist = [type.get_c_name(' %s' % arg, context) + for type, arg in zip(tp.args, argnames)] + tpresult = tp.result + if isinstance(tpresult, model.StructOrUnion): + arglist.insert(0, tpresult.get_c_name(' *r', context)) + tpresult = model.void_type + arglist = ', '.join(arglist) or 'void' + wrappername = '_cffi_f_%s' % name + self.export_symbols.append(wrappername) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) + context = 'result of %s' % name + prnt(tpresult.get_c_name(funcdecl, context)) + prnt('{') + # + if isinstance(tp.result, model.StructOrUnion): + result_code = '*r = ' + elif not isinstance(tp.result, model.VoidType): + result_code = 'return ' + else: + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames))) + prnt('}') + prnt() + + _loading_gen_function = _loaded_noop + + def _loaded_gen_function(self, tp, name, module, library): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + newfunction = self._load_constant(False, tp, name, module) + else: + indirections = [] + base_tp = tp + if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) + or isinstance(tp.result, model.StructOrUnion)): + indirect_args = [] + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) + indirect_result = tp.result + if isinstance(indirect_result, model.StructOrUnion): + if indirect_result.fldtypes is None: + raise TypeError("'%s' is used as result type, " + "but is opaque" % ( + indirect_result._get_c_name(),)) + indirect_result = 
model.PointerType(indirect_result) + indirect_args.insert(0, indirect_result) + indirections.insert(0, ("result", indirect_result)) + indirect_result = model.void_type + tp = model.FunctionPtrType(tuple(indirect_args), + indirect_result, tp.ellipsis) + BFunc = self.ffi._get_cached_btype(tp) + wrappername = '_cffi_f_%s' % name + newfunction = module.load_function(BFunc, wrappername) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) + setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) + + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): + backend = self.ffi._backend + BType = self.ffi._get_cached_btype(tp) + if i == "result": + ffi = self.ffi + def newfunc(*args): + res = ffi.new(BType) + oldfunc(res, *args) + return res[0] + else: + def newfunc(*args): + args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] + return oldfunc(*args) + newfunc._cffi_base_type = base_tp + return newfunc + + # ---------- + # named structs + + def _generate_gen_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + + def _loading_gen_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + + def _loaded_gen_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_gen_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + + def _loading_gen_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + + def _loaded_gen_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + and ftype.is_integer_type()) or fbitsize >= 0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. 
+ try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + self.export_symbols.append(layoutfuncname) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static intptr_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' return nums[i];') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] + function = module.load_function(BFunc, layoutfuncname) + layout = [] + num = 0 + while True: + x = function(num) + if x < 0: break + layout.append(x) + num += 1 + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. 
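(Concretely, for a hypothetical 'struct point { int x, y; }' the generator above would emit roughly the following accessor — an abridged sketch, not verbatim output. _loading_struct_or_union then calls it with i = 0, 1, 2, ... until it returns a negative value:)

    generated = r'''
    intptr_t _cffi_layout_struct_point(intptr_t i)
    {
      struct _cffi_aligncheck { char x; struct point y; };
      static intptr_t nums[] = {
        sizeof(struct point),
        offsetof(struct _cffi_aligncheck, y),
        offsetof(struct point, x), sizeof(((struct point *)0)->x),
        offsetof(struct point, y), sizeof(((struct point *)0)->y),
        -1 };
      return nums[i];
    }
    '''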
+ + def _generate_gen_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_gen_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _loading_gen_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_gen_enum(tp, name, module, '') + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_gen_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_gen_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + self.export_symbols.append(funcname) + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: + assert category == 'const' + prnt('int %s(long long *out_value)' % funcname) + prnt('{') + prnt(' *out_value = (long long)(%s);' % (name,)) + prnt(' return (%s) <= 0;' % (name,)) + prnt('}') + else: + assert tp is not None + assert check_value is None + if category == 'var': + ampersand = '&' + else: + ampersand = '' + extra = '' + if category == 'const' and isinstance(tp, model.StructOrUnion): + extra = 'const *' + ampersand = '&' + prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) + prnt('{') + prnt(' return (%s%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_gen_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_gen_const(is_int, name, tp) + + _loading_gen_constant = _loaded_noop + + def _load_constant(self, is_int, tp, name, module, check_value=None): + funcname = '_cffi_const_%s' % name + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType) + negative = function(p) + value = int(p[0]) + if value < 0 and not negative: + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) + else: + assert check_value is None + fntypeextra = '(*)(void)' + if isinstance(tp, model.StructOrUnion): + fntypeextra = '*' + fntypeextra + BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] + function = module.load_function(BFunc, funcname) + value = function() + if isinstance(tp, model.StructOrUnion): + value = value[0] + return value + + def _loaded_gen_constant(self, tp, name, module, library): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + value = self._load_constant(is_int, tp, name, module) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # enums + + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", 
(unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise VerificationError(error) + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): + if tp.partial: + for enumerator in tp.enumerators: + self._generate_gen_const(True, enumerator) + return + # + funcname = self._enum_funcname(prefix, name) + self.export_symbols.append(funcname) + prnt = self._prnt + prnt('int %s(char *out_error)' % funcname) + prnt('{') + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._check_int_constant_value(enumerator, enumvalue) + prnt(' return 0;') + prnt('}') + prnt() + + def _loading_gen_enum(self, tp, name, module, prefix='enum'): + if tp.partial: + enumvalues = [self._load_constant(True, tp, enumerator, module) + for enumerator in tp.enumerators] + tp.enumvalues = tuple(enumvalues) + tp.partial_resolved = True + else: + funcname = self._enum_funcname(prefix, name) + self._load_known_int_constant(module, funcname) + + def _loaded_gen_enum(self, tp, name, module, library): + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) + + # ---------- + # macros: for now only for integers + + def _generate_gen_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) + + _loading_gen_macro = _loaded_noop + + def _loaded_gen_macro(self, tp, name, module, library): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # global variables + + def _generate_gen_variable_decl(self, tp, name): + if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") + tp_ptr = model.PointerType(tp.item) + self._generate_gen_const(False, name, tp_ptr) + else: + tp_ptr = model.PointerType(tp) + self._generate_gen_const(False, name, tp_ptr, category='var') + + _loading_gen_variable = _loaded_noop + + def _loaded_gen_variable(self, tp, name, module, library): + if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the + # sense that "a=..." 
is forbidden
+            if tp.length == '...':
+                funcname = '_cffi_sizeof_%s' % (name,)
+                BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0]
+                function = module.load_function(BFunc, funcname)
+                size = function()
+                BItemType = self.ffi._get_cached_btype(tp.item)
+                length, rest = divmod(size, self.ffi.sizeof(BItemType))
+                if rest != 0:
+                    raise VerificationError(
+                        "bad size: %r does not seem to be an array of %s" %
+                        (name, tp.item))
+                tp = tp.resolve_length(length)
+            tp_ptr = model.PointerType(tp.item)
+            value = self._load_constant(False, tp_ptr, name, module)
+            # 'value' is a <cdata 'type *'> which we have to replace with
+            # a <cdata 'type[N]'> if the N is actually known
+            if tp.length is not None:
+                BArray = self.ffi._get_cached_btype(tp)
+                value = self.ffi.cast(BArray, value)
+            setattr(library, name, value)
+            type(library)._cffi_dir.append(name)
+            return
+        # remove ptr=<cdata 'int *'> from the library instance, and replace
+        # it by a property on the class, which reads/writes into ptr[0].
+        funcname = '_cffi_var_%s' % name
+        BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0]
+        function = module.load_function(BFunc, funcname)
+        ptr = function()
+        def getter(library):
+            return ptr[0]
+        def setter(library, value):
+            ptr[0] = value
+        setattr(type(library), name, property(getter, setter))
+        type(library)._cffi_dir.append(name)
+
+cffimod_header = r'''
+#include <stdio.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/types.h>   /* XXX for ssize_t on some platforms */
+
+/* this block of #ifs should be kept exactly identical between
+   c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
+   and cffi/_cffi_include.h */
+#if defined(_MSC_VER)
+# include <malloc.h>   /* for alloca() */
+# if _MSC_VER < 1600   /* MSVC < 2010 */
+   typedef __int8 int8_t;
+   typedef __int16 int16_t;
+   typedef __int32 int32_t;
+   typedef __int64 int64_t;
+   typedef unsigned __int8 uint8_t;
+   typedef unsigned __int16 uint16_t;
+   typedef unsigned __int32 uint32_t;
+   typedef unsigned __int64 uint64_t;
+   typedef __int8 int_least8_t;
+   typedef __int16 int_least16_t;
+   typedef __int32 int_least32_t;
+   typedef __int64 int_least64_t;
+   typedef unsigned __int8 uint_least8_t;
+   typedef unsigned __int16 uint_least16_t;
+   typedef unsigned __int32 uint_least32_t;
+   typedef unsigned __int64 uint_least64_t;
+   typedef __int8 int_fast8_t;
+   typedef __int16 int_fast16_t;
+   typedef __int32 int_fast32_t;
+   typedef __int64 int_fast64_t;
+   typedef unsigned __int8 uint_fast8_t;
+   typedef unsigned __int16 uint_fast16_t;
+   typedef unsigned __int32 uint_fast32_t;
+   typedef unsigned __int64 uint_fast64_t;
+   typedef __int64 intmax_t;
+   typedef unsigned __int64 uintmax_t;
+# else
+#  include <stdint.h>
+# endif
+# if _MSC_VER < 1800   /* MSVC < 2013 */
+#  ifndef __cplusplus
+    typedef unsigned char _Bool;
+#  endif
+# endif
+#else
+# include <stdint.h>
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
+#  include <alloca.h>
+# endif
+#endif
+'''
diff --git a/server/www/packages/packages-linux/x64/cffi/verifier.py b/server/www/packages/packages-linux/x64/cffi/verifier.py
new file mode 100644
index 0000000..59b78c2
--- /dev/null
+++ b/server/www/packages/packages-linux/x64/cffi/verifier.py
@@ -0,0 +1,306 @@
+#
+# DEPRECATED: implementation for ffi.verify()
+#
+import sys, os, binascii, shutil, io
+from . import __version_verifier_modules__
+from . 
import ffiplatform +from .error import VerificationError + +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + + +class Verifier(object): + + def __init__(self, ffi, preamble, tmpdir=None, modulename=None, + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): + if ffi._parser._uses_new_feature: + raise VerificationError( + "feature not supported with ffi.verify(), but only " + "with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,)) + self.ffi = ffi + self.preamble = preamble + if not modulename: + flattened_kwds = ffiplatform.flatten(kwds) + vengine_class = _locate_engine_class(ffi, force_generic_engine) + self._vengine = vengine_class(self) + self._vengine.patch_extension_kwds(kwds) + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) + # + if modulename: + if tag: + raise TypeError("can't specify both 'modulename' and 'tag'") + else: + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + + ffi._cdefsources) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, + k1, k2) + suffix = _get_so_suffixes()[0] + self.tmpdir = tmpdir or _caller_dir_pycache() + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) + self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) + self.ext_package = ext_package + self._has_source = False + self._has_module = False + + def write_source(self, file=None): + """Write the C source code. It is produced in 'self.sourcefilename', + which can be tweaked beforehand.""" + with self.ffi._lock: + if self._has_source and file is None: + raise VerificationError( + "source code already written") + self._write_source(file) + + def compile_module(self): + """Write the C source code (if not done already) and compile it. + This produces a dynamic link library in 'self.modulefilename'.""" + with self.ffi._lock: + if self._has_module: + raise VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() + + def load_library(self): + """Get a C module from this Verifier instance. + Returns an instance of a FFILibrary class that behaves like the + objects returned by ffi.dlopen(), but that delegates all + operations to the C module. If necessary, the C code is written + and compiled first. 
+ """ + with self.ffi._lock: + if not self._has_module: + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() + + def get_module_name(self): + basename = os.path.basename(self.modulefilename) + # kill both the .so extension and the other .'s, as introduced + # by Python 3: 'basename.cpython-33m.so' + basename = basename.split('.', 1)[0] + # and the _d added in Python 2 debug builds --- but try to be + # conservative and not kill a legitimate _d + if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'): + basename = basename[:-2] + return basename + + def get_extension(self): + ffiplatform._hack_at_distutils() # backward compatibility hack + if not self._has_source: + with self.ffi._lock: + if not self._has_source: + self._write_source() + sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) + modname = self.get_module_name() + return ffiplatform.get_extension(sourcename, modname, **self.kwds) + + def generates_python_module(self): + return self._vengine._gen_python_module + + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + + # ---------- + + def _locate_module(self): + if not os.path.isfile(self.modulefilename): + if self.ext_package: + try: + pkg = __import__(self.ext_package, None, None, ['__doc__']) + except ImportError: + return # cannot import the package itself, give up + # (e.g. it might be called differently before installation) + path = pkg.__path__ + else: + path = None + filename = self._vengine.find_module(self.get_module_name(), path, + _get_so_suffixes()) + if filename is None: + return + self.modulefilename = filename + self._vengine.collect_types() + self._has_module = True + + def _write_source_to(self, file): + self._vengine._f = file + try: + self._vengine.write_source_to_f() + finally: + del self._vengine._f + + def _write_source(self, file=None): + if file is not None: + self._write_source_to(file) + else: + # Write our source file to an in memory file. 
+ f = NativeIO() + self._write_source_to(f) + source_data = f.getvalue() + + # Determine if this matches the current file + if os.path.exists(self.sourcefilename): + with open(self.sourcefilename, "r") as fp: + needs_written = not (fp.read() == source_data) + else: + needs_written = True + + # Actually write the file out if it doesn't match + if needs_written: + _ensure_dir(self.sourcefilename) + with open(self.sourcefilename, "w") as fp: + fp.write(source_data) + + # Set this flag + self._has_source = True + + def _compile_module(self): + # compile this C source + tmpdir = os.path.dirname(self.sourcefilename) + outputfilename = ffiplatform.compile(tmpdir, self.get_extension()) + try: + same = ffiplatform.samefile(outputfilename, self.modulefilename) + except OSError: + same = False + if not same: + _ensure_dir(self.modulefilename) + shutil.move(outputfilename, self.modulefilename) + self._has_module = True + + def _load_library(self): + assert self._has_module + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() + +# ____________________________________________________________ + +_FORCE_GENERIC_ENGINE = False # for tests + +def _locate_engine_class(ffi, force_generic_engine): + if _FORCE_GENERIC_ENGINE: + force_generic_engine = True + if not force_generic_engine: + if '__pypy__' in sys.builtin_module_names: + force_generic_engine = True + else: + try: + import _cffi_backend + except ImportError: + _cffi_backend = '?' + if ffi._backend is not _cffi_backend: + force_generic_engine = True + if force_generic_engine: + from . import vengine_gen + return vengine_gen.VGenericEngine + else: + from . import vengine_cpy + return vengine_cpy.VCPythonEngine + +# ____________________________________________________________ + +_TMPDIR = None + +def _caller_dir_pycache(): + if _TMPDIR: + return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result + filename = sys._getframe(2).f_code.co_filename + return os.path.abspath(os.path.join(os.path.dirname(filename), + '__pycache__')) + +def set_tmpdir(dirname): + """Set the temporary directory to use instead of __pycache__.""" + global _TMPDIR + _TMPDIR = dirname + +def cleanup_tmpdir(tmpdir=None, keep_so=False): + """Clean up the temporary directory by removing all files in it + called `_cffi_*.{c,so}` as well as the `build` subdirectory.""" + tmpdir = tmpdir or _caller_dir_pycache() + try: + filelist = os.listdir(tmpdir) + except OSError: + return + if keep_so: + suffix = '.c' # only remove .c files + else: + suffix = _get_so_suffixes()[0].lower() + for fn in filelist: + if fn.lower().startswith('_cffi_') and ( + fn.lower().endswith(suffix) or fn.lower().endswith('.c')): + try: + os.unlink(os.path.join(tmpdir, fn)) + except OSError: + pass + clean_dir = [os.path.join(tmpdir, 'build')] + for dir in clean_dir: + try: + for fn in os.listdir(dir): + fn = os.path.join(dir, fn) + if os.path.isdir(fn): + clean_dir.append(fn) + else: + os.unlink(fn) + except OSError: + pass + +def _get_so_suffixes(): + suffixes = _extension_suffixes() + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes + +def _ensure_dir(filename): + dirname = os.path.dirname(filename) + if dirname and not os.path.isdir(dirname): + os.makedirs(dirname) diff --git a/server/www/packages/packages-linux/x64/cryptography/__about__.py b/server/www/packages/packages-linux/x64/cryptography/__about__.py index ac18bb4..4055e29 100644 --- a/server/www/packages/packages-linux/x64/cryptography/__about__.py +++ b/server/www/packages/packages-linux/x64/cryptography/__about__.py @@ -14,10 +14,10 @@ __summary__ = ("cryptography is a package which provides cryptographic recipes" " and primitives to Python developers.") __uri__ = "https://github.com/pyca/cryptography" -__version__ = "2.4.2" +__version__ = "2.8" __author__ = "The cryptography developers" __email__ = "cryptography-dev@python.org" __license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2013-2017 {0}".format(__author__) +__copyright__ = "Copyright 2013-2019 {}".format(__author__) diff --git a/server/www/packages/packages-linux/x64/cryptography/exceptions.py b/server/www/packages/packages-linux/x64/cryptography/exceptions.py index 648cf9d..1d52d7d 100644 --- a/server/www/packages/packages-linux/x64/cryptography/exceptions.py +++ b/server/www/packages/packages-linux/x64/cryptography/exceptions.py @@ -19,6 +19,7 @@ class _Reasons(Enum): UNSUPPORTED_X509 = 8 UNSUPPORTED_EXCHANGE_ALGORITHM = 9 UNSUPPORTED_DIFFIE_HELLMAN = 10 + UNSUPPORTED_MAC = 11 class UnsupportedAlgorithm(Exception): diff --git a/server/www/packages/packages-linux/x64/cryptography/fernet.py b/server/www/packages/packages-linux/x64/cryptography/fernet.py index ac2dd0b..b990def 100644 --- a/server/www/packages/packages-linux/x64/cryptography/fernet.py +++ b/server/www/packages/packages-linux/x64/cryptography/fernet.py @@ -12,6 +12,7 @@ import time import six +from cryptography import utils from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, padding @@ -51,8 +52,7 @@ class Fernet(object): return self._encrypt_from_parts(data, current_time, iv) def _encrypt_from_parts(self, data, current_time, iv): - if not isinstance(data, bytes): - raise TypeError("data must be bytes.") + utils._check_bytes("data", data) padder = padding.PKCS7(algorithms.AES.block_size).padder() padded_data = padder.update(data) + padder.finalize() @@ -82,9 +82,7 @@ class Fernet(object): @staticmethod def _get_unverified_token_data(token): - if not isinstance(token, bytes): - raise TypeError("token must be bytes.") - + utils._check_bytes("token", token) try: data = base64.urlsafe_b64decode(token) except (TypeError, binascii.Error): diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/_der.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/_der.py new file mode 100644 index 0000000..51518d6 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/_der.py @@ -0,0 +1,156 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import six + +from cryptography.utils import int_from_bytes, int_to_bytes + + +# This module contains a lightweight DER encoder and decoder. See X.690 for the +# specification. 
This module intentionally does not implement the more complex +# BER encoding, only DER. +# +# Note this implementation treats an element's constructed bit as part of the +# tag. This is fine for DER, where the bit is always computable from the type. + + +CONSTRUCTED = 0x20 +CONTEXT_SPECIFIC = 0x80 + +INTEGER = 0x02 +BIT_STRING = 0x03 +OCTET_STRING = 0x04 +NULL = 0x05 +OBJECT_IDENTIFIER = 0x06 +SEQUENCE = 0x10 | CONSTRUCTED +SET = 0x11 | CONSTRUCTED +PRINTABLE_STRING = 0x13 +UTC_TIME = 0x17 +GENERALIZED_TIME = 0x18 + + +class DERReader(object): + def __init__(self, data): + self.data = memoryview(data) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_value is None: + self.check_empty() + + def is_empty(self): + return len(self.data) == 0 + + def check_empty(self): + if not self.is_empty(): + raise ValueError("Invalid DER input: trailing data") + + def read_byte(self): + if len(self.data) < 1: + raise ValueError("Invalid DER input: insufficient data") + ret = six.indexbytes(self.data, 0) + self.data = self.data[1:] + return ret + + def read_bytes(self, n): + if len(self.data) < n: + raise ValueError("Invalid DER input: insufficient data") + ret = self.data[:n] + self.data = self.data[n:] + return ret + + def read_any_element(self): + tag = self.read_byte() + # Tag numbers 31 or higher are stored in multiple bytes. No supported + # ASN.1 types use such tags, so reject these. + if tag & 0x1f == 0x1f: + raise ValueError("Invalid DER input: unexpected high tag number") + length_byte = self.read_byte() + if length_byte & 0x80 == 0: + # If the high bit is clear, the first length byte is the length. + length = length_byte + else: + # If the high bit is set, the first length byte encodes the length + # of the length. + length_byte &= 0x7f + if length_byte == 0: + raise ValueError( + "Invalid DER input: indefinite length form is not allowed " + "in DER" + ) + length = 0 + for i in range(length_byte): + length <<= 8 + length |= self.read_byte() + if length == 0: + raise ValueError( + "Invalid DER input: length was not minimally-encoded" + ) + if length < 0x80: + # If the length could have been encoded in short form, it must + # not use long form. + raise ValueError( + "Invalid DER input: length was not minimally-encoded" + ) + body = self.read_bytes(length) + return tag, DERReader(body) + + def read_element(self, expected_tag): + tag, body = self.read_any_element() + if tag != expected_tag: + raise ValueError("Invalid DER input: unexpected tag") + return body + + def read_single_element(self, expected_tag): + with self: + return self.read_element(expected_tag) + + def read_optional_element(self, expected_tag): + if len(self.data) > 0 and six.indexbytes(self.data, 0) == expected_tag: + return self.read_element(expected_tag) + return None + + def as_integer(self): + if len(self.data) == 0: + raise ValueError("Invalid DER input: empty integer contents") + first = six.indexbytes(self.data, 0) + if first & 0x80 == 0x80: + raise ValueError("Negative DER integers are not supported") + # The first 9 bits must not all be zero or all be ones. Otherwise, the + # encoding should have been one byte shorter. 
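+        # For instance, b"\x00\x7f" is invalid (it must be encoded as
+        # b"\x7f"), while b"\x00\x80" is the correct two-byte encoding
+        # of 128: the leading zero byte keeps the value non-negative.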
+        if len(self.data) > 1:
+            second = six.indexbytes(self.data, 1)
+            if first == 0 and second & 0x80 == 0:
+                raise ValueError(
+                    "Invalid DER input: integer not minimally-encoded"
+                )
+        return int_from_bytes(self.data, "big")
+
+
+def encode_der_integer(x):
+    if not isinstance(x, six.integer_types):
+        raise ValueError("Value must be an integer")
+    if x < 0:
+        raise ValueError("Negative integers are not supported")
+    n = x.bit_length() // 8 + 1
+    return int_to_bytes(x, n)
+
+
+def encode_der(tag, *children):
+    length = 0
+    for child in children:
+        length += len(child)
+    chunks = [six.int2byte(tag)]
+    if length < 0x80:
+        chunks.append(six.int2byte(length))
+    else:
+        length_bytes = int_to_bytes(length)
+        chunks.append(six.int2byte(0x80 | len(length_bytes)))
+        chunks.append(length_bytes)
+    chunks.extend(children)
+    return b"".join(chunks)
diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/_oid.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/_oid.py
index cfe906c..4b08722 100644
--- a/server/www/packages/packages-linux/x64/cryptography/hazmat/_oid.py
+++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/_oid.py
@@ -50,7 +50,7 @@ class ObjectIdentifier(object):
         return not self == other
 
     def __repr__(self):
-        return "<ObjectIdentifier(oid={0}, name={1})>".format(
+        return "<ObjectIdentifier(oid={}, name={})>".format(
             self.dotted_string, self._name
         )
diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/interfaces.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/interfaces.py
index 0a476b9..20f4164 100644
--- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/interfaces.py
+++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/interfaces.py
@@ -57,7 +57,7 @@ class HMACBackend(object):
     @abc.abstractmethod
     def create_hmac_ctx(self, key, algorithm):
         """
-        Create a MACContext for calculating a message authentication code.
+        Create a context for calculating a message authentication code.
         """
 
 
@@ -72,7 +72,7 @@ class CMACBackend(object):
     @abc.abstractmethod
     def create_cmac_ctx(self, algorithm):
         """
-        Create a MACContext for calculating a message authentication code.
+        Create a context for calculating a message authentication code.
""" diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/aead.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/aead.py index 9cec3e2..0cad15c 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/aead.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/aead.py @@ -18,10 +18,10 @@ def _aead_cipher_name(cipher): if isinstance(cipher, ChaCha20Poly1305): return b"chacha20-poly1305" elif isinstance(cipher, AESCCM): - return "aes-{0}-ccm".format(len(cipher._key) * 8).encode("ascii") + return "aes-{}-ccm".format(len(cipher._key) * 8).encode("ascii") else: assert isinstance(cipher, AESGCM) - return "aes-{0}-gcm".format(len(cipher._key) * 8).encode("ascii") + return "aes-{}-gcm".format(len(cipher._key) * 8).encode("ascii") def _aead_setup(backend, cipher_name, key, nonce, tag, tag_len, operation): @@ -49,17 +49,20 @@ def _aead_setup(backend, cipher_name, key, nonce, tag, tag_len, operation): ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag ) backend.openssl_assert(res != 0) - else: + elif cipher_name.endswith(b"-ccm"): res = backend._lib.EVP_CIPHER_CTX_ctrl( ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, tag_len, backend._ffi.NULL ) + backend.openssl_assert(res != 0) + nonce_ptr = backend._ffi.from_buffer(nonce) + key_ptr = backend._ffi.from_buffer(key) res = backend._lib.EVP_CipherInit_ex( ctx, backend._ffi.NULL, backend._ffi.NULL, - key, - nonce, + key_ptr, + nonce_ptr, int(operation == _ENCRYPT) ) backend.openssl_assert(res != 0) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/backend.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/backend.py index 19734a5..7e9fa20 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/backend.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/backend.py @@ -5,18 +5,19 @@ from __future__ import absolute_import, division, print_function import base64 -import calendar import collections import contextlib import itertools from contextlib import contextmanager -import asn1crypto.core - import six +from six.moves import range from cryptography import utils, x509 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat._der import ( + INTEGER, NULL, SEQUENCE, encode_der, encode_der_integer +) from cryptography.hazmat.backends.interfaces import ( CMACBackend, CipherBackend, DERSerializationBackend, DHBackend, DSABackend, EllipticCurveBackend, HMACBackend, HashBackend, PBKDF2HMACBackend, @@ -26,7 +27,7 @@ from cryptography.hazmat.backends.openssl import aead from cryptography.hazmat.backends.openssl.ciphers import _CipherContext from cryptography.hazmat.backends.openssl.cmac import _CMACContext from cryptography.hazmat.backends.openssl.decode_asn1 import ( - _CRL_ENTRY_REASON_ENUM_TO_CODE, _Integers + _CRL_ENTRY_REASON_ENUM_TO_CODE ) from cryptography.hazmat.backends.openssl.dh import ( _DHParameters, _DHPrivateKey, _DHPublicKey, _dh_params_dup @@ -37,6 +38,12 @@ from cryptography.hazmat.backends.openssl.dsa import ( from cryptography.hazmat.backends.openssl.ec import ( _EllipticCurvePrivateKey, _EllipticCurvePublicKey ) +from cryptography.hazmat.backends.openssl.ed25519 import ( + _Ed25519PrivateKey, _Ed25519PublicKey +) +from cryptography.hazmat.backends.openssl.ed448 import ( + _ED448_KEY_SIZE, _Ed448PrivateKey, _Ed448PublicKey +) from 
cryptography.hazmat.backends.openssl.encode_asn1 import ( _CRL_ENTRY_EXTENSION_ENCODE_HANDLERS, _CRL_EXTENSION_ENCODE_HANDLERS, _EXTENSION_ENCODE_HANDLERS, @@ -49,19 +56,27 @@ from cryptography.hazmat.backends.openssl.hmac import _HMACContext from cryptography.hazmat.backends.openssl.ocsp import ( _OCSPRequest, _OCSPResponse ) +from cryptography.hazmat.backends.openssl.poly1305 import ( + _POLY1305_KEY_SIZE, _Poly1305Context +) from cryptography.hazmat.backends.openssl.rsa import ( _RSAPrivateKey, _RSAPublicKey ) from cryptography.hazmat.backends.openssl.x25519 import ( _X25519PrivateKey, _X25519PublicKey ) +from cryptography.hazmat.backends.openssl.x448 import ( + _X448PrivateKey, _X448PublicKey +) from cryptography.hazmat.backends.openssl.x509 import ( _Certificate, _CertificateRevocationList, _CertificateSigningRequest, _RevokedCertificate ) from cryptography.hazmat.bindings.openssl import binding from cryptography.hazmat.primitives import hashes, serialization -from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa +from cryptography.hazmat.primitives.asymmetric import ( + dsa, ec, ed25519, ed448, rsa +) from cryptography.hazmat.primitives.asymmetric.padding import ( MGF1, OAEP, PKCS1v15, PSS ) @@ -72,6 +87,7 @@ from cryptography.hazmat.primitives.ciphers.modes import ( CBC, CFB, CFB8, CTR, ECB, GCM, OFB, XTS ) from cryptography.hazmat.primitives.kdf import scrypt +from cryptography.hazmat.primitives.serialization import ssh from cryptography.x509 import ocsp @@ -115,21 +131,23 @@ class Backend(object): return binding._openssl_assert(self._lib, ok) def activate_builtin_random(self): - # Obtain a new structural reference. - e = self._lib.ENGINE_get_default_RAND() - if e != self._ffi.NULL: - self._lib.ENGINE_unregister_RAND(e) - # Reset the RNG to use the new engine. - self._lib.RAND_cleanup() - # decrement the structural reference from get_default_RAND - res = self._lib.ENGINE_finish(e) - self.openssl_assert(res == 1) + if self._lib.Cryptography_HAS_ENGINE: + # Obtain a new structural reference. + e = self._lib.ENGINE_get_default_RAND() + if e != self._ffi.NULL: + self._lib.ENGINE_unregister_RAND(e) + # Reset the RNG to use the built-in. + res = self._lib.RAND_set_rand_method(self._ffi.NULL) + self.openssl_assert(res == 1) + # decrement the structural reference from get_default_RAND + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) @contextlib.contextmanager def _get_osurandom_engine(self): # Fetches an engine by id and returns it. This creates a structural # reference. - e = self._lib.ENGINE_by_id(self._binding._osrandom_engine_id) + e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id) self.openssl_assert(e != self._ffi.NULL) # Initialize the engine for use. This adds a functional reference. res = self._lib.ENGINE_init(e) @@ -146,14 +164,16 @@ class Backend(object): self.openssl_assert(res == 1) def activate_osrandom_engine(self): - # Unregister and free the current engine. - self.activate_builtin_random() - with self._get_osurandom_engine() as e: - # Set the engine as the default RAND provider. - res = self._lib.ENGINE_set_default_RAND(e) + if self._lib.Cryptography_HAS_ENGINE: + # Unregister and free the current engine. + self.activate_builtin_random() + with self._get_osurandom_engine() as e: + # Set the engine as the default RAND provider. 
+ res = self._lib.ENGINE_set_default_RAND(e) + self.openssl_assert(res == 1) + # Reset the RNG to use the engine + res = self._lib.RAND_set_rand_method(self._ffi.NULL) self.openssl_assert(res == 1) - # Reset the RNG to use the new engine. - self._lib.RAND_cleanup() def osrandom_engine_implementation(self): buf = self._ffi.new("char[]", 64) @@ -183,7 +203,7 @@ class Backend(object): def _evp_md_from_algorithm(self, algorithm): if algorithm.name == "blake2b" or algorithm.name == "blake2s": - alg = "{0}{1}".format( + alg = "{}{}".format( algorithm.name, algorithm.digest_size * 8 ).encode("ascii") else: @@ -217,7 +237,7 @@ class Backend(object): def register_cipher_adapter(self, cipher_cls, mode_cls, adapter): if (cipher_cls, mode_cls) in self._cipher_registry: - raise ValueError("Duplicate registration for: {0} {1}.".format( + raise ValueError("Duplicate registration for: {} {}.".format( cipher_cls, mode_cls) ) self._cipher_registry[cipher_cls, mode_cls] = adapter @@ -292,8 +312,9 @@ class Backend(object): key_material): buf = self._ffi.new("unsigned char[]", length) evp_md = self._evp_md_non_null_from_algorithm(algorithm) + key_material_ptr = self._ffi.from_buffer(key_material) res = self._lib.PKCS5_PBKDF2_HMAC( - key_material, + key_material_ptr, len(key_material), salt, len(salt), @@ -318,7 +339,10 @@ class Backend(object): bin_len = self._lib.BN_bn2bin(bn, bin_ptr) # A zero length means the BN has value 0 self.openssl_assert(bin_len >= 0) - return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big") + val = int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big") + if self._lib.BN_is_negative(bn): + val = -val + return val else: # Under Python 2 the best we can do is hex() hex_cdata = self._lib.BN_bn2hex(bn) @@ -446,13 +470,13 @@ class Backend(object): The char* is the storage for the BIO and it must stay alive until the BIO is finished with. 
""" - data_char_p = self._ffi.new("char[]", data) + data_ptr = self._ffi.from_buffer(data) bio = self._lib.BIO_new_mem_buf( - data_char_p, len(data) + data_ptr, len(data) ) self.openssl_assert(bio != self._ffi.NULL) - return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p) + return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_ptr) def _create_mem_bio_gc(self): """ @@ -504,6 +528,18 @@ class Backend(object): self.openssl_assert(dh_cdata != self._ffi.NULL) dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) return _DHPrivateKey(self, dh_cdata, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED25519", None): + # EVP_PKEY_ED25519 is not present in OpenSSL < 1.1.1 + return _Ed25519PrivateKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X448", None): + # EVP_PKEY_X448 is not present in OpenSSL < 1.1.1 + return _X448PrivateKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X25519", None): + # EVP_PKEY_X25519 is not present in OpenSSL < 1.1.0 + return _X25519PrivateKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED448", None): + # EVP_PKEY_ED448 is not present in OpenSSL < 1.1.1 + return _Ed448PrivateKey(self, evp_pkey) else: raise UnsupportedAlgorithm("Unsupported key type.") @@ -535,6 +571,18 @@ class Backend(object): self.openssl_assert(dh_cdata != self._ffi.NULL) dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) return _DHPublicKey(self, dh_cdata, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED25519", None): + # EVP_PKEY_ED25519 is not present in OpenSSL < 1.1.1 + return _Ed25519PublicKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X448", None): + # EVP_PKEY_X448 is not present in OpenSSL < 1.1.1 + return _X448PublicKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_X25519", None): + # EVP_PKEY_X25519 is not present in OpenSSL < 1.1.0 + return _X25519PublicKey(self, evp_pkey) + elif key_type == getattr(self._lib, "EVP_PKEY_ED448", None): + # EVP_PKEY_X25519 is not present in OpenSSL < 1.1.1 + return _Ed448PublicKey(self, evp_pkey) else: raise UnsupportedAlgorithm("Unsupported key type.") @@ -676,10 +724,18 @@ class Backend(object): return _CMACContext(self, algorithm) def create_x509_csr(self, builder, private_key, algorithm): - if not isinstance(algorithm, hashes.HashAlgorithm): - raise TypeError('Algorithm must be a registered hash algorithm.') + if not isinstance(builder, x509.CertificateSigningRequestBuilder): + raise TypeError('Builder type mismatch.') - if ( + if isinstance(private_key, + (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)): + if algorithm is not None: + raise ValueError( + "algorithm must be None when signing via ed25519 or ed448" + ) + elif not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + elif ( isinstance(algorithm, hashes.MD5) and not isinstance(private_key, rsa.RSAPrivateKey) ): @@ -688,7 +744,7 @@ class Backend(object): ) # Resolve the signature algorithm. - evp_md = self._evp_md_non_null_from_algorithm(algorithm) + evp_md = self._evp_md_x509_null_if_eddsa(private_key, algorithm) # Create an empty request. 
x509_req = self._lib.X509_REQ_new() @@ -755,7 +811,13 @@ class Backend(object): def create_x509_certificate(self, builder, private_key, algorithm): if not isinstance(builder, x509.CertificateBuilder): raise TypeError('Builder type mismatch.') - if not isinstance(algorithm, hashes.HashAlgorithm): + if isinstance(private_key, + (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)): + if algorithm is not None: + raise ValueError( + "algorithm must be None when signing via ed25519 or ed448" + ) + elif not isinstance(algorithm, hashes.HashAlgorithm): raise TypeError('Algorithm must be a registered hash algorithm.') if ( @@ -763,11 +825,11 @@ class Backend(object): isinstance(private_key, rsa.RSAPrivateKey) ): raise ValueError( - "MD5 is not a supported hash algorithm for EC/DSA certificates" + "MD5 is only (reluctantly) supported for RSA certificates" ) # Resolve the signature algorithm. - evp_md = self._evp_md_non_null_from_algorithm(algorithm) + evp_md = self._evp_md_x509_null_if_eddsa(private_key, algorithm) # Create an empty certificate. x509_cert = self._lib.X509_new() @@ -796,12 +858,12 @@ class Backend(object): # Set the "not before" time. self._set_asn1_time( - self._lib.X509_get_notBefore(x509_cert), builder._not_valid_before + self._lib.X509_getm_notBefore(x509_cert), builder._not_valid_before ) # Set the "not after" time. self._set_asn1_time( - self._lib.X509_get_notAfter(x509_cert), builder._not_valid_after + self._lib.X509_getm_notAfter(x509_cert), builder._not_valid_after ) # Add extensions. @@ -835,21 +897,21 @@ class Backend(object): return _Certificate(self, x509_cert) + def _evp_md_x509_null_if_eddsa(self, private_key, algorithm): + if isinstance(private_key, + (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)): + # OpenSSL requires us to pass NULL for EVP_MD for ed25519/ed448 + return self._ffi.NULL + else: + return self._evp_md_non_null_from_algorithm(algorithm) + def _set_asn1_time(self, asn1_time, time): - timestamp = calendar.timegm(time.timetuple()) - res = self._lib.ASN1_TIME_set(asn1_time, timestamp) - if res == self._ffi.NULL: - errors = self._consume_errors() - self.openssl_assert( - errors[0]._lib_reason_match( - self._lib.ERR_LIB_ASN1, - self._lib.ASN1_R_ERROR_GETTING_TIME - ) - ) - raise ValueError( - "Invalid time. This error can occur if you set a time too far " - "in the future on Windows." - ) + if time.year >= 2050: + asn1_str = time.strftime('%Y%m%d%H%M%SZ').encode('ascii') + else: + asn1_str = time.strftime('%y%m%d%H%M%SZ').encode('ascii') + res = self._lib.ASN1_TIME_set_string(asn1_time, asn1_str) + self.openssl_assert(res == 1) def _create_asn1_time(self, time): asn1_time = self._lib.ASN1_TIME_new() @@ -861,7 +923,13 @@ class Backend(object): def create_x509_crl(self, builder, private_key, algorithm): if not isinstance(builder, x509.CertificateRevocationListBuilder): raise TypeError('Builder type mismatch.') - if not isinstance(algorithm, hashes.HashAlgorithm): + if isinstance(private_key, + (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)): + if algorithm is not None: + raise ValueError( + "algorithm must be None when signing via ed25519 or ed448" + ) + elif not isinstance(algorithm, hashes.HashAlgorithm): raise TypeError('Algorithm must be a registered hash algorithm.') if ( @@ -872,7 +940,7 @@ class Backend(object): "MD5 is not a supported hash algorithm for EC/DSA CRLs" ) - evp_md = self._evp_md_non_null_from_algorithm(algorithm) + evp_md = self._evp_md_x509_null_if_eddsa(private_key, algorithm) # Create an empty CRL. 
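The rewritten _set_asn1_time above renders the timestamp itself instead of round-tripping through time_t, which removes the far-future failure mode on Windows: per RFC 5280, UTCTime (two-digit year) is used before 2050 and GeneralizedTime from 2050 onward. The same rule in isolation, pure stdlib:

    # Sketch: the UTCTime/GeneralizedTime cutover used by _set_asn1_time.
    import datetime

    def asn1_time_string(t):
        fmt = '%Y%m%d%H%M%SZ' if t.year >= 2050 else '%y%m%d%H%M%SZ'
        return t.strftime(fmt).encode('ascii')

    assert asn1_time_string(datetime.datetime(2019, 11, 1)) == b'191101000000Z'
    assert asn1_time_string(datetime.datetime(2050, 1, 1)) == b'20500101000000Z'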
x509_crl = self._lib.X509_CRL_new() @@ -959,19 +1027,24 @@ class Backend(object): value = _encode_asn1_str_gc(self, extension.value.value) return self._create_raw_x509_extension(extension, value) elif isinstance(extension.value, x509.TLSFeature): - asn1 = _Integers([x.value for x in extension.value]).dump() + asn1 = encode_der( + SEQUENCE, + *[ + encode_der(INTEGER, encode_der_integer(x.value)) + for x in extension.value + ] + ) value = _encode_asn1_str_gc(self, asn1) return self._create_raw_x509_extension(extension, value) elif isinstance(extension.value, x509.PrecertPoison): - asn1 = asn1crypto.core.Null().dump() - value = _encode_asn1_str_gc(self, asn1) + value = _encode_asn1_str_gc(self, encode_der(NULL)) return self._create_raw_x509_extension(extension, value) else: try: encode = handlers[extension.oid] except KeyError: raise NotImplementedError( - 'Extension not supported: {0}'.format(extension.oid) + 'Extension not supported: {}'.format(extension.oid) ) ext_struct = encode(self, extension.value) @@ -1137,7 +1210,10 @@ class Backend(object): ) if x509 == self._ffi.NULL: self._consume_errors() - raise ValueError("Unable to load certificate") + raise ValueError( + "Unable to load certificate. See https://cryptography.io/en/la" + "test/faq/#why-can-t-i-import-my-pem-file for more details." + ) x509 = self._ffi.gc(x509, self._lib.X509_free) return _Certificate(self, x509) @@ -1159,7 +1235,10 @@ class Backend(object): ) if x509_crl == self._ffi.NULL: self._consume_errors() - raise ValueError("Unable to load CRL") + raise ValueError( + "Unable to load CRL. See https://cryptography.io/en/la" + "test/faq/#why-can-t-i-import-my-pem-file for more details." + ) x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free) return _CertificateRevocationList(self, x509_crl) @@ -1181,7 +1260,10 @@ class Backend(object): ) if x509_req == self._ffi.NULL: self._consume_errors() - raise ValueError("Unable to load request") + raise ValueError( + "Unable to load request. See https://cryptography.io/en/la" + "test/faq/#why-can-t-i-import-my-pem-file for more details." 
+ ) x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free) return _CertificateSigningRequest(self, x509_req) @@ -1199,13 +1281,11 @@ class Backend(object): def _load_key(self, openssl_read_func, convert_func, data, password): mem_bio = self._bytes_to_bio(data) - if password is not None and not isinstance(password, bytes): - raise TypeError("Password must be bytes") - userdata = self._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *") if password is not None: - password_buf = self._ffi.new("char []", password) - userdata.password = password_buf + utils._check_byteslike("password", password) + password_ptr = self._ffi.from_buffer(password) + userdata.password = password_ptr userdata.length = len(password) evp_pkey = openssl_read_func( @@ -1228,7 +1308,7 @@ class Backend(object): else: assert userdata.error == -2 raise ValueError( - "Passwords longer than {0} bytes are not supported " + "Passwords longer than {} bytes are not supported " "by this backend.".format(userdata.maxsize - 1) ) else: @@ -1330,11 +1410,7 @@ class Backend(object): """ if self.elliptic_curve_supported(curve): - curve_nid = self._elliptic_curve_to_nid(curve) - - ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) - self.openssl_assert(ec_cdata != self._ffi.NULL) - ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + ec_cdata = self._ec_key_new_by_curve(curve) res = self._lib.EC_KEY_generate_key(ec_cdata) self.openssl_assert(res == 1) @@ -1344,18 +1420,14 @@ class Backend(object): return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) else: raise UnsupportedAlgorithm( - "Backend object does not support {0}.".format(curve.name), + "Backend object does not support {}.".format(curve.name), _Reasons.UNSUPPORTED_ELLIPTIC_CURVE ) def load_elliptic_curve_private_numbers(self, numbers): public = numbers.public_numbers - curve_nid = self._elliptic_curve_to_nid(public.curve) - - ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) - self.openssl_assert(ec_cdata != self._ffi.NULL) - ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + ec_cdata = self._ec_key_new_by_curve(public.curve) private_value = self._ffi.gc( self._int_to_bn(numbers.private_value), self._lib.BN_clear_free @@ -1371,24 +1443,35 @@ class Backend(object): return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) def load_elliptic_curve_public_numbers(self, numbers): - curve_nid = self._elliptic_curve_to_nid(numbers.curve) - - ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) - self.openssl_assert(ec_cdata != self._ffi.NULL) - ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) - + ec_cdata = self._ec_key_new_by_curve(numbers.curve) ec_cdata = self._ec_key_set_public_key_affine_coordinates( ec_cdata, numbers.x, numbers.y) evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) - def derive_elliptic_curve_private_key(self, private_value, curve): - curve_nid = self._elliptic_curve_to_nid(curve) + def load_elliptic_curve_public_bytes(self, curve, point_bytes): + ec_cdata = self._ec_key_new_by_curve(curve) + group = self._lib.EC_KEY_get0_group(ec_cdata) + self.openssl_assert(group != self._ffi.NULL) + point = self._lib.EC_POINT_new(group) + self.openssl_assert(point != self._ffi.NULL) + point = self._ffi.gc(point, self._lib.EC_POINT_free) + with self._tmp_bn_ctx() as bn_ctx: + res = self._lib.EC_POINT_oct2point( + group, point, point_bytes, len(point_bytes), bn_ctx + ) + if res != 1: + self._consume_errors() + raise ValueError("Invalid public bytes for the given curve") - ec_cdata = 
self._lib.EC_KEY_new_by_curve_name(curve_nid) - self.openssl_assert(ec_cdata != self._ffi.NULL) - ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + res = self._lib.EC_KEY_set_public_key(ec_cdata, point) + self.openssl_assert(res == 1) + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + + def derive_elliptic_curve_private_key(self, private_value, curve): + ec_cdata = self._ec_key_new_by_curve(curve) get_func, group = self._ec_key_determine_group_get_func(ec_cdata) @@ -1421,6 +1504,12 @@ class Backend(object): return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + def _ec_key_new_by_curve(self, curve): + curve_nid = self._elliptic_curve_to_nid(curve) + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + return self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + def load_der_ocsp_request(self, data): mem_bio = self._bytes_to_bio(data) request = self._lib.d2i_OCSP_REQUEST_bio(mem_bio.bio, self._ffi.NULL) @@ -1507,7 +1596,7 @@ class Backend(object): ) self.openssl_assert(res != self._ffi.NULL) # okay, now sign the basic structure - evp_md = self._evp_md_non_null_from_algorithm(algorithm) + evp_md = self._evp_md_x509_null_if_eddsa(private_key, algorithm) responder_cert, responder_encoding = builder._responder_id flags = self._lib.OCSP_NOCERTS if responder_encoding is ocsp.OCSPResponderEncoding.HASH: @@ -1585,7 +1674,7 @@ class Backend(object): curve_nid = self._lib.OBJ_sn2nid(curve_name.encode()) if curve_nid == self._lib.NID_undef: raise UnsupportedAlgorithm( - "{0} is not a supported elliptic curve".format(curve.name), + "{} is not a supported elliptic curve".format(curve.name), _Reasons.UNSUPPORTED_ELLIPTIC_CURVE ) return curve_nid @@ -1656,6 +1745,20 @@ class Backend(object): "format must be an item from the PrivateFormat enum" ) + # X9.62 encoding is only valid for EC public keys + if encoding is serialization.Encoding.X962: + raise ValueError("X9.62 format is only valid for EC public keys") + + # Raw format and encoding are only valid for X25519, Ed25519, X448, and + # Ed448 keys. We capture those cases before this method is called so if + # we see those enum values here it means the caller has passed them to + # a key that doesn't support raw type + if format is serialization.PrivateFormat.Raw: + raise ValueError("raw format is invalid with this key or encoding") + + if encoding is serialization.Encoding.Raw: + raise ValueError("raw encoding is invalid with this key or format") + if not isinstance(encryption_algorithm, serialization.KeySerializationEncryption): raise TypeError( @@ -1715,7 +1818,7 @@ class Backend(object): write_bio = self._lib.i2d_PKCS8PrivateKey_bio key = evp_pkey else: - raise TypeError("encoding must be an item from the Encoding enum") + raise TypeError("encoding must be Encoding.PEM or Encoding.DER") bio = self._create_mem_bio_gc() res = write_bio( @@ -1748,6 +1851,23 @@ class Backend(object): if not isinstance(encoding, serialization.Encoding): raise TypeError("encoding must be an item from the Encoding enum") + # Compressed/UncompressedPoint are only valid for EC keys and those + # cases are handled by the ECPublicKey public_bytes method before this + # method is called + if format in (serialization.PublicFormat.UncompressedPoint, + serialization.PublicFormat.CompressedPoint): + raise ValueError("Point formats are not valid for this key type") + + # Raw format and encoding are only valid for X25519, Ed25519, X448, and + # Ed448 keys. 
We capture those cases before this method is called so if + # we see those enum values here it means the caller has passed them to + # a key that doesn't support raw type + if format is serialization.PublicFormat.Raw: + raise ValueError("raw format is invalid with this key or encoding") + + if encoding is serialization.Encoding.Raw: + raise ValueError("raw encoding is invalid with this key or format") + if ( format is serialization.PublicFormat.OpenSSH or encoding is serialization.Encoding.OpenSSH @@ -1792,22 +1912,28 @@ class Backend(object): if isinstance(key, rsa.RSAPublicKey): public_numbers = key.public_numbers() return b"ssh-rsa " + base64.b64encode( - serialization._ssh_write_string(b"ssh-rsa") + - serialization._ssh_write_mpint(public_numbers.e) + - serialization._ssh_write_mpint(public_numbers.n) + ssh._ssh_write_string(b"ssh-rsa") + + ssh._ssh_write_mpint(public_numbers.e) + + ssh._ssh_write_mpint(public_numbers.n) ) elif isinstance(key, dsa.DSAPublicKey): public_numbers = key.public_numbers() parameter_numbers = public_numbers.parameter_numbers return b"ssh-dss " + base64.b64encode( - serialization._ssh_write_string(b"ssh-dss") + - serialization._ssh_write_mpint(parameter_numbers.p) + - serialization._ssh_write_mpint(parameter_numbers.q) + - serialization._ssh_write_mpint(parameter_numbers.g) + - serialization._ssh_write_mpint(public_numbers.y) + ssh._ssh_write_string(b"ssh-dss") + + ssh._ssh_write_mpint(parameter_numbers.p) + + ssh._ssh_write_mpint(parameter_numbers.q) + + ssh._ssh_write_mpint(parameter_numbers.g) + + ssh._ssh_write_mpint(public_numbers.y) ) - else: - assert isinstance(key, ec.EllipticCurvePublicKey) + elif isinstance(key, ed25519.Ed25519PublicKey): + raw_bytes = key.public_bytes(serialization.Encoding.Raw, + serialization.PublicFormat.Raw) + return b"ssh-ed25519 " + base64.b64encode( + ssh._ssh_write_string(b"ssh-ed25519") + + ssh._ssh_write_string(raw_bytes) + ) + elif isinstance(key, ec.EllipticCurvePublicKey): public_numbers = key.public_numbers() try: curve_name = { @@ -1820,10 +1946,19 @@ class Backend(object): "Only SECP256R1, SECP384R1, and SECP521R1 curves are " "supported by the SSH public key format" ) + + point = key.public_bytes( + serialization.Encoding.X962, + serialization.PublicFormat.UncompressedPoint + ) return b"ecdsa-sha2-" + curve_name + b" " + base64.b64encode( - serialization._ssh_write_string(b"ecdsa-sha2-" + curve_name) + - serialization._ssh_write_string(curve_name) + - serialization._ssh_write_string(public_numbers.encode_point()) + ssh._ssh_write_string(b"ecdsa-sha2-" + curve_name) + + ssh._ssh_write_string(curve_name) + + ssh._ssh_write_string(point) + ) + else: + raise ValueError( + "OpenSSH encoding is not supported for this key type" ) def _parameter_bytes(self, encoding, format, cdata): @@ -2027,6 +2162,11 @@ class Backend(object): return self._ffi.buffer(pp[0], res)[:] def x25519_load_public_bytes(self, data): + # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can + # switch this to EVP_PKEY_new_raw_public_key + if len(data) != 32: + raise ValueError("An X25519 public key is 32 bytes long") + evp_pkey = self._create_evp_pkey_gc() res = self._lib.EVP_PKEY_set_type(evp_pkey, self._lib.NID_X25519) backend.openssl_assert(res == 1) @@ -2037,6 +2177,9 @@ class Backend(object): return _X25519PublicKey(self, evp_pkey) def x25519_load_private_bytes(self, data): + # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can + # switch this to EVP_PKEY_new_raw_private_key and drop the + # zeroed_bytearray garbage. 
# OpenSSL only has facilities for loading PKCS8 formatted private # keys using the algorithm identifiers specified in # https://tools.ietf.org/html/draft-ietf-curdle-pkix-09. @@ -2050,9 +2193,16 @@ class Backend(object): # Of course there's a bit more complexity. In reality OCTET STRING # contains an OCTET STRING of length 32! So the last two bytes here # are \x04\x20, which is an OCTET STRING of length 32. + if len(data) != 32: + raise ValueError("An X25519 private key is 32 bytes long") + pkcs8_prefix = b'0.\x02\x01\x000\x05\x06\x03+en\x04"\x04 ' - bio = self._bytes_to_bio(pkcs8_prefix + data) - evp_pkey = backend._lib.d2i_PrivateKey_bio(bio.bio, self._ffi.NULL) + with self._zeroed_bytearray(48) as ba: + ba[0:16] = pkcs8_prefix + ba[16:] = data + bio = self._bytes_to_bio(ba) + evp_pkey = backend._lib.d2i_PrivateKey_bio(bio.bio, self._ffi.NULL) + self.openssl_assert(evp_pkey != self._ffi.NULL) evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) self.openssl_assert( @@ -2060,14 +2210,10 @@ class Backend(object): ) return _X25519PrivateKey(self, evp_pkey) - def x25519_generate_key(self): - evp_pkey_ctx = self._lib.EVP_PKEY_CTX_new_id( - self._lib.NID_X25519, self._ffi.NULL - ) + def _evp_pkey_keygen_gc(self, nid): + evp_pkey_ctx = self._lib.EVP_PKEY_CTX_new_id(nid, self._ffi.NULL) self.openssl_assert(evp_pkey_ctx != self._ffi.NULL) - evp_pkey_ctx = self._ffi.gc( - evp_pkey_ctx, self._lib.EVP_PKEY_CTX_free - ) + evp_pkey_ctx = self._ffi.gc(evp_pkey_ctx, self._lib.EVP_PKEY_CTX_free) res = self._lib.EVP_PKEY_keygen_init(evp_pkey_ctx) self.openssl_assert(res == 1) evp_ppkey = self._ffi.new("EVP_PKEY **") @@ -2075,18 +2221,143 @@ class Backend(object): self.openssl_assert(res == 1) self.openssl_assert(evp_ppkey[0] != self._ffi.NULL) evp_pkey = self._ffi.gc(evp_ppkey[0], self._lib.EVP_PKEY_free) + return evp_pkey + + def x25519_generate_key(self): + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_X25519) return _X25519PrivateKey(self, evp_pkey) def x25519_supported(self): return self._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER + def x448_load_public_bytes(self, data): + if len(data) != 56: + raise ValueError("An X448 public key is 56 bytes long") + + evp_pkey = self._lib.EVP_PKEY_new_raw_public_key( + self._lib.NID_X448, self._ffi.NULL, data, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return _X448PublicKey(self, evp_pkey) + + def x448_load_private_bytes(self, data): + if len(data) != 56: + raise ValueError("An X448 private key is 56 bytes long") + + data_ptr = self._ffi.from_buffer(data) + evp_pkey = self._lib.EVP_PKEY_new_raw_private_key( + self._lib.NID_X448, self._ffi.NULL, data_ptr, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return _X448PrivateKey(self, evp_pkey) + + def x448_generate_key(self): + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_X448) + return _X448PrivateKey(self, evp_pkey) + + def x448_supported(self): + return not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 + + def ed25519_supported(self): + return not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B + + def ed25519_load_public_bytes(self, data): + utils._check_bytes("data", data) + + if len(data) != ed25519._ED25519_KEY_SIZE: + raise ValueError("An Ed25519 public key is 32 bytes long") + + evp_pkey = self._lib.EVP_PKEY_new_raw_public_key( + self._lib.NID_ED25519, self._ffi.NULL, data, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + 
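The new x448_* and ed25519_*/ed448_* backend methods in this hunk surface through the regular key classes; a round-trip sketch, assuming OpenSSL >= 1.1.1 (the paths that x448_supported() and ed25519_supported() gate on):

    # Sketch: raw-bytes round trip through the new backend entry points.
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import x448

    priv = x448.X448PrivateKey.generate()          # -> x448_generate_key()
    raw = priv.private_bytes(
        serialization.Encoding.Raw,
        serialization.PrivateFormat.Raw,
        serialization.NoEncryption(),
    )
    assert len(raw) == 56                          # an X448 key is 56 bytes
    clone = x448.X448PrivateKey.from_private_bytes(raw)
    shared = priv.exchange(clone.public_key())     # same key, same secret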
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed25519PublicKey(self, evp_pkey) + + def ed25519_load_private_bytes(self, data): + if len(data) != ed25519._ED25519_KEY_SIZE: + raise ValueError("An Ed25519 private key is 32 bytes long") + + utils._check_byteslike("data", data) + data_ptr = self._ffi.from_buffer(data) + evp_pkey = self._lib.EVP_PKEY_new_raw_private_key( + self._lib.NID_ED25519, self._ffi.NULL, data_ptr, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed25519PrivateKey(self, evp_pkey) + + def ed25519_generate_key(self): + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_ED25519) + return _Ed25519PrivateKey(self, evp_pkey) + + def ed448_supported(self): + return not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B + + def ed448_load_public_bytes(self, data): + utils._check_bytes("data", data) + if len(data) != _ED448_KEY_SIZE: + raise ValueError("An Ed448 public key is 57 bytes long") + + evp_pkey = self._lib.EVP_PKEY_new_raw_public_key( + self._lib.NID_ED448, self._ffi.NULL, data, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed448PublicKey(self, evp_pkey) + + def ed448_load_private_bytes(self, data): + utils._check_byteslike("data", data) + if len(data) != _ED448_KEY_SIZE: + raise ValueError("An Ed448 private key is 57 bytes long") + + data_ptr = self._ffi.from_buffer(data) + evp_pkey = self._lib.EVP_PKEY_new_raw_private_key( + self._lib.NID_ED448, self._ffi.NULL, data_ptr, len(data) + ) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + return _Ed448PrivateKey(self, evp_pkey) + + def ed448_generate_key(self): + evp_pkey = self._evp_pkey_keygen_gc(self._lib.NID_ED448) + return _Ed448PrivateKey(self, evp_pkey) + def derive_scrypt(self, key_material, salt, length, n, r, p): buf = self._ffi.new("unsigned char[]", length) + key_material_ptr = self._ffi.from_buffer(key_material) res = self._lib.EVP_PBE_scrypt( - key_material, len(key_material), salt, len(salt), n, r, p, + key_material_ptr, len(key_material), salt, len(salt), n, r, p, scrypt._MEM_LIMIT, buf, length ) - self.openssl_assert(res == 1) + if res != 1: + errors = self._consume_errors() + if not self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_111: + # This error is only added to the stack in 1.1.1+ + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, + self._lib.ERR_R_MALLOC_FAILURE + ) or + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, + self._lib.EVP_R_MEMORY_LIMIT_EXCEEDED + ) + ) + + # memory required formula explained here: + # https://blog.filippo.io/the-scrypt-parameters/ + min_memory = 128 * n * r // (1024**2) + raise MemoryError( + "Not enough memory to derive key. These parameters require" + " {} MB of memory.".format(min_memory) + ) return self._ffi.buffer(buf)[:] def aead_cipher_supported(self, cipher): @@ -2095,6 +2366,105 @@ class Backend(object): self._lib.EVP_get_cipherbyname(cipher_name) != self._ffi.NULL ) + @contextlib.contextmanager + def _zeroed_bytearray(self, length): + """ + This method creates a bytearray, which we copy data into (hopefully + also from a mutable buffer that can be dynamically erased!), and then + zero when we're done. 
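derive_scrypt above now converts OpenSSL's allocation failure into a MemoryError that states the requirement up front: scrypt needs roughly 128 * n * r bytes, so the common n=2**14, r=8 costs 16 MiB. Worked through the public KDF:

    # Sketch: the 128 * n * r memory estimate quoted in the MemoryError above.
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.kdf.scrypt import Scrypt

    n, r, p = 2 ** 14, 8, 1
    print(128 * n * r // (1024 ** 2), "MB required")  # 16 MB

    kdf = Scrypt(salt=b"\x00" * 16, length=32, n=n, r=r, p=p,
                 backend=default_backend())
    key = kdf.derive(b"correct horse battery staple")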
+ """ + ba = bytearray(length) + try: + yield ba + finally: + self._zero_data(ba, length) + + def _zero_data(self, data, length): + # We clear things this way because at the moment we're not + # sure of a better way that can guarantee it overwrites the + # memory of a bytearray and doesn't just replace the underlying char *. + for i in range(length): + data[i] = 0 + + @contextlib.contextmanager + def _zeroed_null_terminated_buf(self, data): + """ + This method takes bytes, which can be a bytestring or a mutable + buffer like a bytearray, and yields a null-terminated version of that + data. This is required because PKCS12_parse doesn't take a length with + its password char * and ffi.from_buffer doesn't provide null + termination. So, to support zeroing the data via bytearray we + need to build this ridiculous construct that copies the memory, but + zeroes it after use. + """ + if data is None: + yield self._ffi.NULL + else: + data_len = len(data) + buf = self._ffi.new("char[]", data_len + 1) + self._ffi.memmove(buf, data, data_len) + try: + yield buf + finally: + # Cast to a uint8_t * so we can assign by integer + self._zero_data(self._ffi.cast("uint8_t *", buf), data_len) + + def load_key_and_certificates_from_pkcs12(self, data, password): + if password is not None: + utils._check_byteslike("password", password) + + bio = self._bytes_to_bio(data) + p12 = self._lib.d2i_PKCS12_bio(bio.bio, self._ffi.NULL) + if p12 == self._ffi.NULL: + self._consume_errors() + raise ValueError("Could not deserialize PKCS12 data") + + p12 = self._ffi.gc(p12, self._lib.PKCS12_free) + evp_pkey_ptr = self._ffi.new("EVP_PKEY **") + x509_ptr = self._ffi.new("X509 **") + sk_x509_ptr = self._ffi.new("Cryptography_STACK_OF_X509 **") + with self._zeroed_null_terminated_buf(password) as password_buf: + res = self._lib.PKCS12_parse( + p12, password_buf, evp_pkey_ptr, x509_ptr, sk_x509_ptr + ) + + if res == 0: + self._consume_errors() + raise ValueError("Invalid password or PKCS12 data") + + cert = None + key = None + additional_certificates = [] + + if evp_pkey_ptr[0] != self._ffi.NULL: + evp_pkey = self._ffi.gc(evp_pkey_ptr[0], self._lib.EVP_PKEY_free) + key = self._evp_pkey_to_private_key(evp_pkey) + + if x509_ptr[0] != self._ffi.NULL: + x509 = self._ffi.gc(x509_ptr[0], self._lib.X509_free) + cert = _Certificate(self, x509) + + if sk_x509_ptr[0] != self._ffi.NULL: + sk_x509 = self._ffi.gc(sk_x509_ptr[0], self._lib.sk_X509_free) + num = self._lib.sk_X509_num(sk_x509_ptr[0]) + for i in range(num): + x509 = self._lib.sk_X509_value(sk_x509, i) + x509 = self._ffi.gc(x509, self._lib.X509_free) + self.openssl_assert(x509 != self._ffi.NULL) + additional_certificates.append(_Certificate(self, x509)) + + return (key, cert, additional_certificates) + + def poly1305_supported(self): + return self._lib.Cryptography_HAS_POLY1305 == 1 + + def create_poly1305_ctx(self, key): + utils._check_byteslike("key", key) + if len(key) != _POLY1305_KEY_SIZE: + raise ValueError("A poly1305 key is 32 bytes long") + + return _Poly1305Context(self, key) + class GetCipherByName(object): def __init__(self, fmt): @@ -2106,7 +2476,7 @@ class GetCipherByName(object): def _get_xts_cipher(backend, cipher, mode): - cipher_name = "aes-{0}-xts".format(cipher.key_size // 2) + cipher_name = "aes-{}-xts".format(cipher.key_size // 2) return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii")) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ciphers.py 
b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ciphers.py index e0ee06e..94b48f5 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ciphers.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ciphers.py @@ -40,7 +40,7 @@ class _CipherContext(object): adapter = registry[type(cipher), type(mode)] except KeyError: raise UnsupportedAlgorithm( - "cipher {0} in {1} mode is not supported " + "cipher {} in {} mode is not supported " "by this backend.".format( cipher.name, mode.name if mode else mode), _Reasons.UNSUPPORTED_CIPHER @@ -48,21 +48,25 @@ class _CipherContext(object): evp_cipher = adapter(self._backend, cipher, mode) if evp_cipher == self._backend._ffi.NULL: - raise UnsupportedAlgorithm( - "cipher {0} in {1} mode is not supported " - "by this backend.".format( - cipher.name, mode.name if mode else mode), - _Reasons.UNSUPPORTED_CIPHER - ) + msg = "cipher {0.name} ".format(cipher) + if mode is not None: + msg += "in {0.name} mode ".format(mode) + msg += ( + "is not supported by this backend (Your version of OpenSSL " + "may be too old. Current version: {}.)" + ).format(self._backend.openssl_version_text()) + raise UnsupportedAlgorithm(msg, _Reasons.UNSUPPORTED_CIPHER) if isinstance(mode, modes.ModeWithInitializationVector): - iv_nonce = mode.initialization_vector + iv_nonce = self._backend._ffi.from_buffer( + mode.initialization_vector + ) elif isinstance(mode, modes.ModeWithTweak): - iv_nonce = mode.tweak + iv_nonce = self._backend._ffi.from_buffer(mode.tweak) elif isinstance(mode, modes.ModeWithNonce): - iv_nonce = mode.nonce + iv_nonce = self._backend._ffi.from_buffer(mode.nonce) elif isinstance(cipher, modes.ModeWithNonce): - iv_nonce = cipher.nonce + iv_nonce = self._backend._ffi.from_buffer(cipher.nonce) else: iv_nonce = self._backend._ffi.NULL # begin init with cipher and operation type @@ -105,7 +109,7 @@ class _CipherContext(object): ctx, self._backend._ffi.NULL, self._backend._ffi.NULL, - cipher.key, + self._backend._ffi.from_buffer(cipher.key), iv_nonce, operation ) @@ -123,7 +127,7 @@ class _CipherContext(object): def update_into(self, data, buf): if len(buf) < (len(data) + self._block_size_bytes - 1): raise ValueError( - "buffer must be at least {0} bytes for this " + "buffer must be at least {} bytes for this " "payload".format(len(data) + self._block_size_bytes - 1) ) @@ -131,8 +135,10 @@ class _CipherContext(object): "unsigned char *", self._backend._ffi.from_buffer(buf) ) outlen = self._backend._ffi.new("int *") - res = self._backend._lib.EVP_CipherUpdate(self._ctx, buf, outlen, - data, len(data)) + res = self._backend._lib.EVP_CipherUpdate( + self._ctx, buf, outlen, + self._backend._ffi.from_buffer(data), len(data) + ) self._backend.openssl_assert(res != 0) return outlen[0] @@ -201,7 +207,7 @@ class _CipherContext(object): ) if len(tag) < self._mode._min_tag_length: raise ValueError( - "Authentication tag must be {0} bytes or longer.".format( + "Authentication tag must be {} bytes or longer.".format( self._mode._min_tag_length) ) res = self._backend._lib.EVP_CIPHER_CTX_ctrl( @@ -215,7 +221,8 @@ class _CipherContext(object): def authenticate_additional_data(self, data): outlen = self._backend._ffi.new("int *") res = self._backend._lib.EVP_CipherUpdate( - self._ctx, self._backend._ffi.NULL, outlen, data, len(data) + self._ctx, self._backend._ffi.NULL, outlen, + self._backend._ffi.from_buffer(data), len(data) ) self._backend.openssl_assert(res != 0) diff --git 
a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/cmac.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/cmac.py index e20f66d..d4d46f5 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/cmac.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/cmac.py @@ -9,11 +9,10 @@ from cryptography import utils from cryptography.exceptions import ( InvalidSignature, UnsupportedAlgorithm, _Reasons ) -from cryptography.hazmat.primitives import constant_time, mac +from cryptography.hazmat.primitives import constant_time from cryptography.hazmat.primitives.ciphers.modes import CBC -@utils.register_interface(mac.MACContext) class _CMACContext(object): def __init__(self, backend, algorithm, ctx=None): if not backend.cmac_algorithm_supported(algorithm): @@ -36,8 +35,9 @@ class _CMACContext(object): self._backend.openssl_assert(ctx != self._backend._ffi.NULL) ctx = self._backend._ffi.gc(ctx, self._backend._lib.CMAC_CTX_free) + key_ptr = self._backend._ffi.from_buffer(self._key) res = self._backend._lib.CMAC_Init( - ctx, self._key, len(self._key), + ctx, key_ptr, len(self._key), evp_cipher, self._backend._ffi.NULL ) self._backend.openssl_assert(res == 1) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/decode_asn1.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/decode_asn1.py index e06e8cd..47c6c65 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/decode_asn1.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/decode_asn1.py @@ -7,11 +7,10 @@ from __future__ import absolute_import, division, print_function import datetime import ipaddress -import asn1crypto.core - import six from cryptography import x509 +from cryptography.hazmat._der import DERReader, INTEGER, NULL, SEQUENCE from cryptography.x509.extensions import _TLS_FEATURE_TYPE_TO_ENUM from cryptography.x509.name import _ASN1_TYPE_TO_ENUM from cryptography.x509.oid import ( @@ -20,10 +19,6 @@ from cryptography.x509.oid import ( ) -class _Integers(asn1crypto.core.SequenceOf): - _child_spec = asn1crypto.core.Integer - - def _obj2txt(backend, obj): # Set to 80 on the recommendation of # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values @@ -135,7 +130,7 @@ def _decode_general_name(backend, gn): if "1" in bits[prefix:]: raise ValueError("Invalid netmask") - ip = ipaddress.ip_network(base.exploded + u"/{0}".format(prefix)) + ip = ipaddress.ip_network(base.exploded + u"/{}".format(prefix)) else: ip = ipaddress.ip_address(data) @@ -160,7 +155,7 @@ def _decode_general_name(backend, gn): else: # x400Address or ediPartyName raise x509.UnsupportedGeneralNameType( - "{0} is not a supported type".format( + "{} is not a supported type".format( x509._GENERAL_NAMES.get(gn.type, gn.type) ), gn.type @@ -202,27 +197,32 @@ class _X509ExtensionParser(object): ) if oid in seen_oids: raise x509.DuplicateExtension( - "Duplicate {0} extension found".format(oid), oid + "Duplicate {} extension found".format(oid), oid ) # These OIDs are only supported in OpenSSL 1.1.0+ but we want # to support them in all versions of OpenSSL so we decode them # ourselves. if oid == ExtensionOID.TLS_FEATURE: + # The extension contents are a SEQUENCE OF INTEGERs. 
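That SEQUENCE OF INTEGERs is parsed just below with the in-tree DERReader that replaced asn1crypto. Stand-alone, the same walk looks like this (the sample bytes encode status_request = 5 and status_request_v2 = 17):

    # Sketch: hand-parsing a TLSFeature payload with cryptography's DERReader,
    # mirroring the loop in the hunk below.
    from cryptography.hazmat._der import DERReader, INTEGER, SEQUENCE

    der = b"\x30\x06\x02\x01\x05\x02\x01\x11"  # SEQUENCE { INTEGER 5, INTEGER 17 }
    features = DERReader(der).read_single_element(SEQUENCE)
    values = []
    while not features.is_empty():
        values.append(features.read_element(INTEGER).as_integer())
    assert values == [5, 17]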
data = backend._lib.X509_EXTENSION_get_data(ext) - parsed = _Integers.load(_asn1_string_to_bytes(backend, data)) + data_bytes = _asn1_string_to_bytes(backend, data) + features = DERReader(data_bytes).read_single_element(SEQUENCE) + parsed = [] + while not features.is_empty(): + parsed.append(features.read_element(INTEGER).as_integer()) + # Map the features to their enum value. value = x509.TLSFeature( - [_TLS_FEATURE_TYPE_TO_ENUM[x.native] for x in parsed] + [_TLS_FEATURE_TYPE_TO_ENUM[x] for x in parsed] ) extensions.append(x509.Extension(oid, critical, value)) seen_oids.add(oid) continue elif oid == ExtensionOID.PRECERT_POISON: data = backend._lib.X509_EXTENSION_get_data(ext) - parsed = asn1crypto.core.Null.load( - _asn1_string_to_bytes(backend, data) - ) - assert parsed == asn1crypto.core.Null() + # The contents of the extension must be an ASN.1 NULL. + reader = DERReader(_asn1_string_to_bytes(backend, data)) + reader.read_single_element(NULL).check_empty() extensions.append(x509.Extension( oid, critical, x509.PrecertPoison() )) @@ -245,7 +245,7 @@ class _X509ExtensionParser(object): if ext_data == backend._ffi.NULL: backend._consume_errors() raise ValueError( - "The {0} extension is invalid and can't be " + "The {} extension is invalid and can't be " "parsed".format(oid) ) @@ -379,7 +379,14 @@ def _decode_authority_key_identifier(backend, akid): def _decode_authority_information_access(backend, aia): aia = backend._ffi.cast("Cryptography_STACK_OF_ACCESS_DESCRIPTION *", aia) - aia = backend._ffi.gc(aia, backend._lib.sk_ACCESS_DESCRIPTION_free) + aia = backend._ffi.gc( + aia, + lambda x: backend._lib.sk_ACCESS_DESCRIPTION_pop_free( + x, backend._ffi.addressof( + backend._lib._original_lib, "ACCESS_DESCRIPTION_free" + ) + ) + ) num = backend._lib.sk_ACCESS_DESCRIPTION_num(aia) access_descriptions = [] for i in range(num): @@ -464,6 +471,30 @@ def _decode_general_subtrees(backend, stack_subtrees): return subtrees +def _decode_issuing_dist_point(backend, idp): + idp = backend._ffi.cast("ISSUING_DIST_POINT *", idp) + idp = backend._ffi.gc(idp, backend._lib.ISSUING_DIST_POINT_free) + if idp.distpoint != backend._ffi.NULL: + full_name, relative_name = _decode_distpoint(backend, idp.distpoint) + else: + full_name = None + relative_name = None + + only_user = idp.onlyuser == 255 + only_ca = idp.onlyCA == 255 + indirect_crl = idp.indirectCRL == 255 + only_attr = idp.onlyattr == 255 + if idp.onlysomereasons != backend._ffi.NULL: + only_some_reasons = _decode_reasons(backend, idp.onlysomereasons) + else: + only_some_reasons = None + + return x509.IssuingDistributionPoint( + full_name, relative_name, only_user, only_ca, only_some_reasons, + indirect_crl, only_attr + ) + + def _decode_policy_constraints(backend, pc): pc = backend._ffi.cast("POLICY_CONSTRAINTS *", pc) pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free) @@ -674,7 +705,7 @@ def _decode_crl_reason(backend, enum): try: return x509.CRLReason(_CRL_ENTRY_REASON_CODE_TO_ENUM[code]) except KeyError: - raise ValueError("Unsupported reason code: {0}".format(code)) + raise ValueError("Unsupported reason code: {}".format(code)) def _decode_invalidity_date(backend, inv_date): @@ -734,7 +765,7 @@ def _asn1_string_to_utf8(backend, asn1_string): res = backend._lib.ASN1_STRING_to_UTF8(buf, asn1_string) if res == -1: raise ValueError( - "Unsupported ASN1 string type. Type: {0}".format(asn1_string.type) + "Unsupported ASN1 string type. 
Type: {}".format(asn1_string.type) ) backend.openssl_assert(buf[0] != backend._ffi.NULL) @@ -814,6 +845,8 @@ _CRL_EXTENSION_HANDLERS = { ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( _decode_authority_information_access ), + ExtensionOID.ISSUING_DISTRIBUTION_POINT: _decode_issuing_dist_point, + ExtensionOID.FRESHEST_CRL: _decode_freshest_crl, } _OCSP_REQ_EXTENSION_HANDLERS = { diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/dsa.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/dsa.py index 48886e4..de61f08 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/dsa.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/dsa.py @@ -211,8 +211,7 @@ class _DSAPublicKey(object): def verifier(self, signature, signature_algorithm): _warn_sign_verify_deprecated() - if not isinstance(signature, bytes): - raise TypeError("signature must be bytes.") + utils._check_bytes("signature", signature) _check_not_prehashed(signature_algorithm) return _DSAVerificationContext( diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ec.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ec.py index 69da234..3d8681b 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ec.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ec.py @@ -34,7 +34,19 @@ def _ec_key_curve_sn(backend, ec_key): # an error for now. if nid == backend._lib.NID_undef: raise NotImplementedError( - "ECDSA certificates with unnamed curves are unsupported " + "ECDSA keys with unnamed curves are unsupported " + "at this time" + ) + + # This is like the above check, but it also catches the case where you + # explicitly encoded a curve with the same parameters as a named curve. + # Don't do that. 
+ if ( + backend._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER and + backend._lib.EC_GROUP_get_asn1_flag(group) == 0 + ): + raise NotImplementedError( + "ECDSA keys with unnamed curves are unsupported " "at this time" ) @@ -62,7 +74,7 @@ def _sn_to_elliptic_curve(backend, sn): return ec._CURVE_TYPES[sn]() except KeyError: raise UnsupportedAlgorithm( - "{0} is not a supported elliptic curve".format(sn), + "{} is not a supported elliptic curve".format(sn), _Reasons.UNSUPPORTED_ELLIPTIC_CURVE ) @@ -127,12 +139,12 @@ class _ECDSAVerificationContext(object): class _EllipticCurvePrivateKey(object): def __init__(self, backend, ec_key_cdata, evp_pkey): self._backend = backend - _mark_asn1_named_ec_curve(backend, ec_key_cdata) self._ec_key = ec_key_cdata self._evp_pkey = evp_pkey sn = _ec_key_curve_sn(backend, ec_key_cdata) self._curve = _sn_to_elliptic_curve(backend, sn) + _mark_asn1_named_ec_curve(backend, ec_key_cdata) curve = utils.read_only_property("_curve") @@ -229,12 +241,12 @@ class _EllipticCurvePrivateKey(object): class _EllipticCurvePublicKey(object): def __init__(self, backend, ec_key_cdata, evp_pkey): self._backend = backend - _mark_asn1_named_ec_curve(backend, ec_key_cdata) self._ec_key = ec_key_cdata self._evp_pkey = evp_pkey sn = _ec_key_curve_sn(backend, ec_key_cdata) self._curve = _sn_to_elliptic_curve(backend, sn) + _mark_asn1_named_ec_curve(backend, ec_key_cdata) curve = utils.read_only_property("_curve") @@ -244,8 +256,7 @@ class _EllipticCurvePublicKey(object): def verifier(self, signature, signature_algorithm): _warn_sign_verify_deprecated() - if not isinstance(signature, bytes): - raise TypeError("signature must be bytes.") + utils._check_bytes("signature", signature) _check_signature_algorithm(signature_algorithm) _check_not_prehashed(signature_algorithm.algorithm) @@ -276,19 +287,62 @@ class _EllipticCurvePublicKey(object): curve=self._curve ) + def _encode_point(self, format): + if format is serialization.PublicFormat.CompressedPoint: + conversion = self._backend._lib.POINT_CONVERSION_COMPRESSED + else: + assert format is serialization.PublicFormat.UncompressedPoint + conversion = self._backend._lib.POINT_CONVERSION_UNCOMPRESSED + + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + self._backend.openssl_assert(group != self._backend._ffi.NULL) + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + with self._backend._tmp_bn_ctx() as bn_ctx: + buflen = self._backend._lib.EC_POINT_point2oct( + group, point, conversion, self._backend._ffi.NULL, 0, bn_ctx + ) + self._backend.openssl_assert(buflen > 0) + buf = self._backend._ffi.new("char[]", buflen) + res = self._backend._lib.EC_POINT_point2oct( + group, point, conversion, buf, buflen, bn_ctx + ) + self._backend.openssl_assert(buflen == res) + + return self._backend._ffi.buffer(buf)[:] + def public_bytes(self, encoding, format): if format is serialization.PublicFormat.PKCS1: raise ValueError( "EC public keys do not support PKCS1 serialization" ) - return self._backend._public_key_bytes( - encoding, - format, - self, - self._evp_pkey, - None - ) + if ( + encoding is serialization.Encoding.X962 or + format is serialization.PublicFormat.CompressedPoint or + format is serialization.PublicFormat.UncompressedPoint + ): + if ( + encoding is not serialization.Encoding.X962 or + format not in ( + serialization.PublicFormat.CompressedPoint, + serialization.PublicFormat.UncompressedPoint + ) + ): + raise ValueError( + "X962 encoding must be used with 
CompressedPoint or " + "UncompressedPoint format" + ) + + return self._encode_point(format) + else: + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) def verify(self, signature, data, signature_algorithm): _check_signature_algorithm(signature_algorithm) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ed25519.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ed25519.py new file mode 100644 index 0000000..f38f11d --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ed25519.py @@ -0,0 +1,151 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import exceptions, utils +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.ed25519 import ( + Ed25519PrivateKey, Ed25519PublicKey, _ED25519_KEY_SIZE, _ED25519_SIG_SIZE +) + + +@utils.register_interface(Ed25519PublicKey) +class _Ed25519PublicKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes(self, encoding, format): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw or + format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PublicFormat.SubjectPublicKeyInfo + ): + raise ValueError( + "format must be SubjectPublicKeyInfo when encoding is PEM or " + "DER" + ) + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self): + buf = self._backend._ffi.new("unsigned char []", _ED25519_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED25519_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED25519_KEY_SIZE)[:] + + def verify(self, signature, data): + evp_md_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestVerifyInit( + evp_md_ctx, self._backend._ffi.NULL, self._backend._ffi.NULL, + self._backend._ffi.NULL, self._evp_pkey + ) + self._backend.openssl_assert(res == 1) + res = self._backend._lib.EVP_DigestVerify( + evp_md_ctx, signature, len(signature), data, len(data) + ) + if res != 1: + self._backend._consume_errors() + raise exceptions.InvalidSignature + + +@utils.register_interface(Ed25519PrivateKey) +class _Ed25519PrivateKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self): + buf = self._backend._ffi.new("unsigned char []", _ED25519_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED25519_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + 
self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_KEY_SIZE) + public_bytes = self._backend._ffi.buffer(buf)[:] + return self._backend.ed25519_load_public_bytes(public_bytes) + + def sign(self, data): + evp_md_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestSignInit( + evp_md_ctx, self._backend._ffi.NULL, self._backend._ffi.NULL, + self._backend._ffi.NULL, self._evp_pkey + ) + self._backend.openssl_assert(res == 1) + buf = self._backend._ffi.new("unsigned char[]", _ED25519_SIG_SIZE) + buflen = self._backend._ffi.new("size_t *", len(buf)) + res = self._backend._lib.EVP_DigestSign( + evp_md_ctx, buf, buflen, data, len(data) + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_SIG_SIZE) + return self._backend._ffi.buffer(buf, buflen[0])[:] + + def private_bytes(self, encoding, format, encryption_algorithm): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + format is not serialization.PrivateFormat.Raw or + encoding is not serialization.Encoding.Raw or not + isinstance(encryption_algorithm, serialization.NoEncryption) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PrivateFormat.PKCS8 + ): + raise ValueError( + "format must be PKCS8 when encoding is PEM or DER" + ) + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self._evp_pkey, None + ) + + def _raw_private_bytes(self): + buf = self._backend._ffi.new("unsigned char []", _ED25519_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED25519_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_private_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED25519_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED25519_KEY_SIZE)[:] diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ed448.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ed448.py new file mode 100644 index 0000000..f541f05 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ed448.py @@ -0,0 +1,154 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
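Both new modules (ed25519.py above, ed448.py here) drive signing through the one-shot EVP_DigestSign/EVP_DigestVerify pair with a NULL EVP_MD, which is how OpenSSL expects pure EdDSA: the whole message goes in, no prehashing. From the public API:

    # Sketch: pure-EdDSA sign/verify, the flow the EVP_DigestSign code implements.
    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.primitives.asymmetric import ed25519

    key = ed25519.Ed25519PrivateKey.generate()
    sig = key.sign(b"attack at dawn")                # _ED25519_SIG_SIZE = 64
    assert len(sig) == 64
    key.public_key().verify(sig, b"attack at dawn")  # returns None on success
    try:
        key.public_key().verify(sig, b"attack at dusk")
    except InvalidSignature:
        pass  # any mutation of message or signature is rejected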
+ +from __future__ import absolute_import, division, print_function + +from cryptography import exceptions, utils +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.ed448 import ( + Ed448PrivateKey, Ed448PublicKey +) + +_ED448_KEY_SIZE = 57 +_ED448_SIG_SIZE = 114 + + +@utils.register_interface(Ed448PublicKey) +class _Ed448PublicKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes(self, encoding, format): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw or + format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PublicFormat.SubjectPublicKeyInfo + ): + raise ValueError( + "format must be SubjectPublicKeyInfo when encoding is PEM or " + "DER" + ) + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self): + buf = self._backend._ffi.new("unsigned char []", _ED448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED448_KEY_SIZE)[:] + + def verify(self, signature, data): + evp_md_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestVerifyInit( + evp_md_ctx, self._backend._ffi.NULL, self._backend._ffi.NULL, + self._backend._ffi.NULL, self._evp_pkey + ) + self._backend.openssl_assert(res == 1) + res = self._backend._lib.EVP_DigestVerify( + evp_md_ctx, signature, len(signature), data, len(data) + ) + if res != 1: + self._backend._consume_errors() + raise exceptions.InvalidSignature + + +@utils.register_interface(Ed448PrivateKey) +class _Ed448PrivateKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self): + buf = self._backend._ffi.new("unsigned char []", _ED448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_KEY_SIZE) + public_bytes = self._backend._ffi.buffer(buf)[:] + return self._backend.ed448_load_public_bytes(public_bytes) + + def sign(self, data): + evp_md_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + self._backend.openssl_assert(evp_md_ctx != self._backend._ffi.NULL) + evp_md_ctx = self._backend._ffi.gc( + evp_md_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestSignInit( + evp_md_ctx, self._backend._ffi.NULL, self._backend._ffi.NULL, + self._backend._ffi.NULL, self._evp_pkey + ) + self._backend.openssl_assert(res == 1) + buf = self._backend._ffi.new("unsigned char[]", _ED448_SIG_SIZE) + buflen = self._backend._ffi.new("size_t *", len(buf)) + res = self._backend._lib.EVP_DigestSign( + evp_md_ctx, buf, buflen, data, 
len(data) + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_SIG_SIZE) + return self._backend._ffi.buffer(buf, buflen[0])[:] + + def private_bytes(self, encoding, format, encryption_algorithm): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + format is not serialization.PrivateFormat.Raw or + encoding is not serialization.Encoding.Raw or not + isinstance(encryption_algorithm, serialization.NoEncryption) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PrivateFormat.PKCS8 + ): + raise ValueError( + "format must be PKCS8 when encoding is PEM or DER" + ) + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self._evp_pkey, None + ) + + def _raw_private_bytes(self): + buf = self._backend._ffi.new("unsigned char []", _ED448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _ED448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_private_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _ED448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _ED448_KEY_SIZE)[:] diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/encode_asn1.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/encode_asn1.py index 6ff1a9a..ca35f0e 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/encode_asn1.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/encode_asn1.py @@ -114,13 +114,15 @@ def _encode_sk_name_entry(backend, attributes): for attribute in attributes: name_entry = _encode_name_entry(backend, attribute) res = backend._lib.sk_X509_NAME_ENTRY_push(stack, name_entry) - backend.openssl_assert(res == 1) + backend.openssl_assert(res >= 1) return stack def _encode_name_entry(backend, attribute): if attribute._type is _ASN1Type.BMPString: value = attribute.value.encode('utf_16_be') + elif attribute._type is _ASN1Type.UniversalString: + value = attribute.value.encode('utf_32_be') else: value = attribute.value.encode('utf8') @@ -136,6 +138,28 @@ def _encode_crl_number_delta_crl_indicator(backend, ext): return _encode_asn1_int_gc(backend, ext.crl_number) +def _encode_issuing_dist_point(backend, ext): + idp = backend._lib.ISSUING_DIST_POINT_new() + backend.openssl_assert(idp != backend._ffi.NULL) + idp = backend._ffi.gc(idp, backend._lib.ISSUING_DIST_POINT_free) + idp.onlyuser = 255 if ext.only_contains_user_certs else 0 + idp.onlyCA = 255 if ext.only_contains_ca_certs else 0 + idp.indirectCRL = 255 if ext.indirect_crl else 0 + idp.onlyattr = 255 if ext.only_contains_attribute_certs else 0 + if ext.only_some_reasons: + idp.onlysomereasons = _encode_reasonflags( + backend, ext.only_some_reasons + ) + + if ext.full_name: + idp.distpoint = _encode_full_name(backend, ext.full_name) + + if ext.relative_name: + idp.distpoint = _encode_relative_name(backend, ext.relative_name) + + return idp + + def _encode_crl_reason(backend, crl_reason): asn1enum = backend._lib.ASN1_ENUMERATED_new() backend.openssl_assert(asn1enum != backend._ffi.NULL) @@ -323,16 +347,22 @@ def _encode_authority_information_access(backend, authority_info_access): aia = 
backend._lib.sk_ACCESS_DESCRIPTION_new_null() backend.openssl_assert(aia != backend._ffi.NULL) aia = backend._ffi.gc( - aia, backend._lib.sk_ACCESS_DESCRIPTION_free + aia, + lambda x: backend._lib.sk_ACCESS_DESCRIPTION_pop_free( + x, backend._ffi.addressof( + backend._lib._original_lib, "ACCESS_DESCRIPTION_free" + ) + ) ) for access_description in authority_info_access: ad = backend._lib.ACCESS_DESCRIPTION_new() method = _txt2obj( backend, access_description.access_method.dotted_string ) - gn = _encode_general_name(backend, access_description.access_location) + _encode_general_name_preallocated( + backend, access_description.access_location, ad.location + ) ad.method = method - ad.location = gn res = backend._lib.sk_ACCESS_DESCRIPTION_push(aia, ad) backend.openssl_assert(res >= 1) @@ -363,8 +393,13 @@ def _encode_subject_key_identifier(backend, ski): def _encode_general_name(backend, name): + gn = backend._lib.GENERAL_NAME_new() + _encode_general_name_preallocated(backend, name, gn) + return gn + + +def _encode_general_name_preallocated(backend, name, gn): if isinstance(name, x509.DNSName): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) gn.type = backend._lib.GEN_DNS @@ -378,7 +413,6 @@ def _encode_general_name(backend, name): backend.openssl_assert(res == 1) gn.d.dNSName = ia5 elif isinstance(name, x509.RegisteredID): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) gn.type = backend._lib.GEN_RID obj = backend._lib.OBJ_txt2obj( @@ -387,13 +421,11 @@ def _encode_general_name(backend, name): backend.openssl_assert(obj != backend._ffi.NULL) gn.d.registeredID = obj elif isinstance(name, x509.DirectoryName): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) dir_name = _encode_name(backend, name.value) gn.type = backend._lib.GEN_DIRNAME gn.d.directoryName = dir_name elif isinstance(name, x509.IPAddress): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) if isinstance(name.value, ipaddress.IPv4Network): packed = ( @@ -411,7 +443,6 @@ def _encode_general_name(backend, name): gn.type = backend._lib.GEN_IPADD gn.d.iPAddress = ipaddr elif isinstance(name, x509.OtherName): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) other_name = backend._lib.OTHERNAME_new() backend.openssl_assert(other_name != backend._ffi.NULL) @@ -434,7 +465,6 @@ def _encode_general_name(backend, name): gn.type = backend._lib.GEN_OTHERNAME gn.d.otherName = other_name elif isinstance(name, x509.RFC822Name): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) # ia5strings are supposed to be ITU T.50 but to allow round-tripping # of broken certs that encode utf8 we'll encode utf8 here too. @@ -443,7 +473,6 @@ def _encode_general_name(backend, name): gn.type = backend._lib.GEN_EMAIL gn.d.rfc822Name = asn1_str elif isinstance(name, x509.UniformResourceIdentifier): - gn = backend._lib.GENERAL_NAME_new() backend.openssl_assert(gn != backend._ffi.NULL) # ia5strings are supposed to be ITU T.50 but to allow round-tripping # of broken certs that encode utf8 we'll encode utf8 here too. 
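The `_encode_general_name_preallocated` refactor above lets the AIA encoder write into a `GENERAL_NAME` that the enclosing `ACCESS_DESCRIPTION` already owns, which is why the per-branch `GENERAL_NAME_new` calls are deleted. Each remaining branch corresponds to one of the public `GeneralName` classes in the x509 API. A minimal sketch of that public-API side, with illustrative values:

```python
# Illustrative only: each entry maps to a branch of
# _encode_general_name_preallocated (GEN_DNS, GEN_EMAIL, GEN_URI,
# GEN_IPADD, GEN_RID).
import ipaddress
from cryptography import x509
from cryptography.x509.oid import NameOID

san = x509.SubjectAlternativeName([
    x509.DNSName(u"example.com"),
    x509.RFC822Name(u"admin@example.com"),
    x509.UniformResourceIdentifier(u"https://example.com"),
    x509.IPAddress(ipaddress.ip_address(u"192.0.2.1")),
    x509.RegisteredID(NameOID.COMMON_NAME),
])
for general_name in san:  # the extension object is iterable
    print(type(general_name).__name__, general_name.value)
```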
@@ -453,11 +482,9 @@ def _encode_general_name(backend, name): gn.d.uniformResourceIdentifier = asn1_str else: raise ValueError( - "{0} is an unknown GeneralName type".format(name) + "{} is an unknown GeneralName type".format(name) ) - return gn - def _encode_extended_key_usage(backend, extended_key_usage): eku = backend._lib.sk_ASN1_OBJECT_new_null() @@ -482,6 +509,34 @@ _CRLREASONFLAGS = { } +def _encode_reasonflags(backend, reasons): + bitmask = backend._lib.ASN1_BIT_STRING_new() + backend.openssl_assert(bitmask != backend._ffi.NULL) + for reason in reasons: + res = backend._lib.ASN1_BIT_STRING_set_bit( + bitmask, _CRLREASONFLAGS[reason], 1 + ) + backend.openssl_assert(res == 1) + + return bitmask + + +def _encode_full_name(backend, full_name): + dpn = backend._lib.DIST_POINT_NAME_new() + backend.openssl_assert(dpn != backend._ffi.NULL) + dpn.type = _DISTPOINT_TYPE_FULLNAME + dpn.name.fullname = _encode_general_names(backend, full_name) + return dpn + + +def _encode_relative_name(backend, relative_name): + dpn = backend._lib.DIST_POINT_NAME_new() + backend.openssl_assert(dpn != backend._ffi.NULL) + dpn.type = _DISTPOINT_TYPE_RELATIVENAME + dpn.name.relativename = _encode_sk_name_entry(backend, relative_name) + return dpn + + def _encode_cdps_freshest_crl(backend, cdps): cdp = backend._lib.sk_DIST_POINT_new_null() cdp = backend._ffi.gc(cdp, backend._lib.sk_DIST_POINT_free) @@ -490,30 +545,13 @@ def _encode_cdps_freshest_crl(backend, cdps): backend.openssl_assert(dp != backend._ffi.NULL) if point.reasons: - bitmask = backend._lib.ASN1_BIT_STRING_new() - backend.openssl_assert(bitmask != backend._ffi.NULL) - dp.reasons = bitmask - for reason in point.reasons: - res = backend._lib.ASN1_BIT_STRING_set_bit( - bitmask, _CRLREASONFLAGS[reason], 1 - ) - backend.openssl_assert(res == 1) + dp.reasons = _encode_reasonflags(backend, point.reasons) if point.full_name: - dpn = backend._lib.DIST_POINT_NAME_new() - backend.openssl_assert(dpn != backend._ffi.NULL) - dpn.type = _DISTPOINT_TYPE_FULLNAME - dpn.name.fullname = _encode_general_names(backend, point.full_name) - dp.distpoint = dpn + dp.distpoint = _encode_full_name(backend, point.full_name) if point.relative_name: - dpn = backend._lib.DIST_POINT_NAME_new() - backend.openssl_assert(dpn != backend._ffi.NULL) - dpn.type = _DISTPOINT_TYPE_RELATIVENAME - relativename = _encode_sk_name_entry(backend, point.relative_name) - backend.openssl_assert(relativename != backend._ffi.NULL) - dpn.name.relativename = relativename - dp.distpoint = dpn + dp.distpoint = _encode_relative_name(backend, point.relative_name) if point.crl_issuer: dp.CRLissuer = _encode_general_names(backend, point.crl_issuer) @@ -603,6 +641,8 @@ _CRL_EXTENSION_ENCODE_HANDLERS = { ), ExtensionOID.CRL_NUMBER: _encode_crl_number_delta_crl_indicator, ExtensionOID.DELTA_CRL_INDICATOR: _encode_crl_number_delta_crl_indicator, + ExtensionOID.ISSUING_DISTRIBUTION_POINT: _encode_issuing_dist_point, + ExtensionOID.FRESHEST_CRL: _encode_cdps_freshest_crl, } _CRL_ENTRY_EXTENSION_ENCODE_HANDLERS = { diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hashes.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hashes.py index c39f57d..7f9d840 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hashes.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hashes.py @@ -25,7 +25,7 @@ class _HashContext(object): evp_md = self._backend._evp_md_from_algorithm(algorithm) if 
evp_md == self._backend._ffi.NULL: raise UnsupportedAlgorithm( - "{0} is not a supported hash on this backend.".format( + "{} is not a supported hash on this backend.".format( algorithm.name), _Reasons.UNSUPPORTED_HASH ) @@ -47,14 +47,32 @@ class _HashContext(object): return _HashContext(self._backend, self.algorithm, ctx=copied_ctx) def update(self, data): - res = self._backend._lib.EVP_DigestUpdate(self._ctx, data, len(data)) + data_ptr = self._backend._ffi.from_buffer(data) + res = self._backend._lib.EVP_DigestUpdate( + self._ctx, data_ptr, len(data) + ) self._backend.openssl_assert(res != 0) def finalize(self): + if isinstance(self.algorithm, hashes.ExtendableOutputFunction): + # extendable output functions use a different finalize + return self._finalize_xof() + else: + buf = self._backend._ffi.new("unsigned char[]", + self._backend._lib.EVP_MAX_MD_SIZE) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.EVP_DigestFinal_ex(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert( + outlen[0] == self.algorithm.digest_size + ) + return self._backend._ffi.buffer(buf)[:outlen[0]] + + def _finalize_xof(self): buf = self._backend._ffi.new("unsigned char[]", - self._backend._lib.EVP_MAX_MD_SIZE) - outlen = self._backend._ffi.new("unsigned int *") - res = self._backend._lib.EVP_DigestFinal_ex(self._ctx, buf, outlen) + self.algorithm.digest_size) + res = self._backend._lib.EVP_DigestFinalXOF( + self._ctx, buf, self.algorithm.digest_size + ) self._backend.openssl_assert(res != 0) - self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) - return self._backend._ffi.buffer(buf)[:outlen[0]] + return self._backend._ffi.buffer(buf)[:self.algorithm.digest_size] diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hmac.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hmac.py index 99c43f2..2e09cbc 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hmac.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/hmac.py @@ -9,10 +9,9 @@ from cryptography import utils from cryptography.exceptions import ( InvalidSignature, UnsupportedAlgorithm, _Reasons ) -from cryptography.hazmat.primitives import constant_time, hashes, mac +from cryptography.hazmat.primitives import constant_time, hashes -@utils.register_interface(mac.MACContext) @utils.register_interface(hashes.HashContext) class _HMACContext(object): def __init__(self, backend, key, algorithm, ctx=None): @@ -28,12 +27,13 @@ class _HMACContext(object): evp_md = self._backend._evp_md_from_algorithm(algorithm) if evp_md == self._backend._ffi.NULL: raise UnsupportedAlgorithm( - "{0} is not a supported hash on this backend".format( + "{} is not a supported hash on this backend".format( algorithm.name), _Reasons.UNSUPPORTED_HASH ) + key_ptr = self._backend._ffi.from_buffer(key) res = self._backend._lib.HMAC_Init_ex( - ctx, key, len(key), evp_md, self._backend._ffi.NULL + ctx, key_ptr, len(key), evp_md, self._backend._ffi.NULL ) self._backend.openssl_assert(res != 0) @@ -55,7 +55,8 @@ class _HMACContext(object): ) def update(self, data): - res = self._backend._lib.HMAC_Update(self._ctx, data, len(data)) + data_ptr = self._backend._ffi.from_buffer(data) + res = self._backend._lib.HMAC_Update(self._ctx, data_ptr, len(data)) self._backend.openssl_assert(res != 0) def finalize(self): diff --git 
a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ocsp.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ocsp.py index 32e26a0..7420f65 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ocsp.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/ocsp.py @@ -82,7 +82,7 @@ def _hash_algorithm(backend, cert_id): return _OIDS_TO_HASH[oid] except KeyError: raise UnsupportedAlgorithm( - "Signature algorithm OID: {0} not recognized".format(oid) + "Signature algorithm OID: {} not recognized".format(oid) ) @@ -126,6 +126,17 @@ class _OCSPResponse(object): oid = _obj2txt(self._backend, alg.algorithm) return x509.ObjectIdentifier(oid) + @property + @_requires_successful_response + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{} not recognized".format(oid) + ) + @property @_requires_successful_response def signature(self): diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/poly1305.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/poly1305.py new file mode 100644 index 0000000..25448dd --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/poly1305.py @@ -0,0 +1,60 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives import constant_time + + +_POLY1305_TAG_SIZE = 16 +_POLY1305_KEY_SIZE = 32 + + +class _Poly1305Context(object): + def __init__(self, backend, key): + self._backend = backend + + key_ptr = self._backend._ffi.from_buffer(key) + # This function copies the key into OpenSSL-owned memory so we don't + # need to retain it ourselves + evp_pkey = self._backend._lib.EVP_PKEY_new_raw_private_key( + self._backend._lib.NID_poly1305, + self._backend._ffi.NULL, key_ptr, len(key) + ) + self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL) + self._evp_pkey = self._backend._ffi.gc( + evp_pkey, self._backend._lib.EVP_PKEY_free + ) + ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + self._ctx = self._backend._ffi.gc( + ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_DigestSignInit( + self._ctx, self._backend._ffi.NULL, self._backend._ffi.NULL, + self._backend._ffi.NULL, self._evp_pkey + ) + self._backend.openssl_assert(res == 1) + + def update(self, data): + data_ptr = self._backend._ffi.from_buffer(data) + res = self._backend._lib.EVP_DigestSignUpdate( + self._ctx, data_ptr, len(data) + ) + self._backend.openssl_assert(res != 0) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", _POLY1305_TAG_SIZE) + outlen = self._backend._ffi.new("size_t *") + res = self._backend._lib.EVP_DigestSignFinal(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == _POLY1305_TAG_SIZE) + return self._backend._ffi.buffer(buf)[:outlen[0]] + + def verify(self, tag): + mac = self.finalize() + if not constant_time.bytes_eq(mac, tag): + raise InvalidSignature("Value did 
not match computed tag.") diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/rsa.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/rsa.py index 00f5e37..3e4c2fd 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/rsa.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/rsa.py @@ -59,7 +59,7 @@ def _enc_dec_rsa(backend, key, data, padding): else: raise UnsupportedAlgorithm( - "{0} is not supported by this backend.".format( + "{} is not supported by this backend.".format( padding.name ), _Reasons.UNSUPPORTED_PADDING @@ -127,10 +127,11 @@ def _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding): def _handle_rsa_enc_dec_error(backend, key): errors = backend._consume_errors() backend.openssl_assert(errors) - assert errors[0].lib == backend._lib.ERR_LIB_RSA + backend.openssl_assert(errors[0].lib == backend._lib.ERR_LIB_RSA) if isinstance(key, _RSAPublicKey): - assert (errors[0].reason == - backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE) + backend.openssl_assert( + errors[0].reason == backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE + ) raise ValueError( "Data too long for key size. Encrypt less data or use a " "larger key size." @@ -148,7 +149,7 @@ def _handle_rsa_enc_dec_error(backend, key): if backend._lib.Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR: decoding_errors.append(backend._lib.RSA_R_PKCS_DECODING_ERROR) - assert errors[0].reason in decoding_errors + backend.openssl_assert(errors[0].reason in decoding_errors) raise ValueError("Decryption failed.") @@ -177,7 +178,7 @@ def _rsa_sig_determine_padding(backend, key, padding, algorithm): padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING else: raise UnsupportedAlgorithm( - "{0} is not supported by this backend.".format(padding.name), + "{} is not supported by this backend.".format(padding.name), _Reasons.UNSUPPORTED_PADDING ) @@ -196,7 +197,7 @@ def _rsa_sig_setup(backend, padding, algorithm, key, data, init_func): if res == 0: backend._consume_errors() raise UnsupportedAlgorithm( - "{0} is not supported by this backend for RSA signing.".format( + "{} is not supported by this backend for RSA signing.".format( algorithm.name ), _Reasons.UNSUPPORTED_HASH @@ -236,17 +237,19 @@ def _rsa_sig_sign(backend, padding, algorithm, private_key, data): pkey_ctx, buf, buflen, data, len(data)) if res != 1: errors = backend._consume_errors() - assert errors[0].lib == backend._lib.ERR_LIB_RSA - reason = None - if (errors[0].reason == - backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE): + backend.openssl_assert(errors[0].lib == backend._lib.ERR_LIB_RSA) + if ( + errors[0].reason == + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE + ): reason = ("Salt length too long for key size. Try using " "MAX_LENGTH instead.") else: - assert (errors[0].reason == - backend._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY) + backend.openssl_assert( + errors[0].reason == + backend._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) reason = "Digest too large for key size. Use a larger key." 
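The new `poly1305.py` backend above wraps OpenSSL's `EVP_PKEY_POLY1305` MAC and is surfaced through the public `Poly1305` primitive. A hedged usage sketch, assuming an OpenSSL 1.1.1 build (the `NID_poly1305` names are conditional) and remembering that a Poly1305 key is single-use:

```python
import os
from cryptography.hazmat.primitives import poly1305

key = os.urandom(32)                  # _POLY1305_KEY_SIZE; never reuse a key
mac = poly1305.Poly1305(key)
mac.update(b"message to authenticate")
tag = mac.finalize()                  # 16 bytes, _POLY1305_TAG_SIZE

check = poly1305.Poly1305(key)
check.update(b"message to authenticate")
check.verify(tag)                     # raises InvalidSignature on mismatch
```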
- assert reason is not None raise ValueError(reason) return backend._ffi.buffer(buf)[:] @@ -434,8 +437,7 @@ class _RSAPublicKey(object): def verifier(self, signature, padding, algorithm): _warn_sign_verify_deprecated() - if not isinstance(signature, bytes): - raise TypeError("signature must be bytes.") + utils._check_bytes("signature", signature) _check_not_prehashed(algorithm) return _RSAVerificationContext( diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/utils.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/utils.py index 05d0fe5..ee472c0 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/utils.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/utils.py @@ -11,6 +11,30 @@ from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric.utils import Prehashed +def _evp_pkey_derive(backend, evp_pkey, peer_public_key): + ctx = backend._lib.EVP_PKEY_CTX_new(evp_pkey, backend._ffi.NULL) + backend.openssl_assert(ctx != backend._ffi.NULL) + ctx = backend._ffi.gc(ctx, backend._lib.EVP_PKEY_CTX_free) + res = backend._lib.EVP_PKEY_derive_init(ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_derive_set_peer( + ctx, peer_public_key._evp_pkey + ) + backend.openssl_assert(res == 1) + keylen = backend._ffi.new("size_t *") + res = backend._lib.EVP_PKEY_derive(ctx, backend._ffi.NULL, keylen) + backend.openssl_assert(res == 1) + backend.openssl_assert(keylen[0] > 0) + buf = backend._ffi.new("unsigned char[]", keylen[0]) + res = backend._lib.EVP_PKEY_derive(ctx, buf, keylen) + if res != 1: + raise ValueError( + "Null shared key derived from public/private pair." + ) + + return backend._ffi.buffer(buf, keylen[0])[:] + + def _calculate_digest_and_algorithm(backend, data, algorithm): if not isinstance(algorithm, Prehashed): hash_ctx = hashes.Hash(algorithm, backend) @@ -40,6 +64,6 @@ def _warn_sign_verify_deprecated(): warnings.warn( "signer and verifier have been deprecated. Please use sign " "and verify instead.", - utils.PersistentlyDeprecated, + utils.PersistentlyDeprecated2017, stacklevel=3 ) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x25519.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x25519.py index 983ece6..9aab25b 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x25519.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x25519.py @@ -4,19 +4,66 @@ from __future__ import absolute_import, division, print_function +import warnings + from cryptography import utils +from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive +from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric.x25519 import ( X25519PrivateKey, X25519PublicKey ) +_X25519_KEY_SIZE = 32 + + @utils.register_interface(X25519PublicKey) class _X25519PublicKey(object): def __init__(self, backend, evp_pkey): self._backend = backend self._evp_pkey = evp_pkey - def public_bytes(self): + def public_bytes(self, encoding=None, format=None): + if encoding is None or format is None: + if encoding is not None or format is not None: + raise ValueError("Both encoding and format are required") + else: + warnings.warn( + "public_bytes now requires encoding and format arguments. 
" + "Support for calling without arguments will be removed in " + "cryptography 2.7", + utils.DeprecatedIn25, + ) + encoding = serialization.Encoding.Raw + format = serialization.PublicFormat.Raw + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw or + format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PublicFormat.SubjectPublicKeyInfo + ): + raise ValueError( + "format must be SubjectPublicKeyInfo when encoding is PEM or " + "DER" + ) + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self): ucharpp = self._backend._ffi.new("unsigned char **") res = self._backend._lib.EVP_PKEY_get1_tls_encodedpoint( self._evp_pkey, ucharpp @@ -52,28 +99,51 @@ class _X25519PrivateKey(object): if not isinstance(peer_public_key, X25519PublicKey): raise TypeError("peer_public_key must be X25519PublicKey.") - ctx = self._backend._lib.EVP_PKEY_CTX_new( - self._evp_pkey, self._backend._ffi.NULL + return _evp_pkey_derive( + self._backend, self._evp_pkey, peer_public_key ) - self._backend.openssl_assert(ctx != self._backend._ffi.NULL) - ctx = self._backend._ffi.gc(ctx, self._backend._lib.EVP_PKEY_CTX_free) - res = self._backend._lib.EVP_PKEY_derive_init(ctx) - self._backend.openssl_assert(res == 1) - res = self._backend._lib.EVP_PKEY_derive_set_peer( - ctx, peer_public_key._evp_pkey - ) - self._backend.openssl_assert(res == 1) - keylen = self._backend._ffi.new("size_t *") - res = self._backend._lib.EVP_PKEY_derive( - ctx, self._backend._ffi.NULL, keylen - ) - self._backend.openssl_assert(res == 1) - self._backend.openssl_assert(keylen[0] > 0) - buf = self._backend._ffi.new("unsigned char[]", keylen[0]) - res = self._backend._lib.EVP_PKEY_derive(ctx, buf, keylen) - if res != 1: + + def private_bytes(self, encoding, format, encryption_algorithm): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + format is not serialization.PrivateFormat.Raw or + encoding is not serialization.Encoding.Raw or not + isinstance(encryption_algorithm, serialization.NoEncryption) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PrivateFormat.PKCS8 + ): raise ValueError( - "Null shared key derived from public/private pair." + "format must be PKCS8 when encoding is PEM or DER" ) - return self._backend._ffi.buffer(buf, keylen[0])[:] + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self._evp_pkey, None + ) + + def _raw_private_bytes(self): + # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can + # switch this to EVP_PKEY_new_raw_private_key + # The trick we use here is serializing to a PKCS8 key and just + # using the last 32 bytes, which is the key itself. 
+ bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_PKCS8PrivateKey_bio( + bio, self._evp_pkey, + self._backend._ffi.NULL, self._backend._ffi.NULL, + 0, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + pkcs8 = self._backend._read_mem_bio(bio) + self._backend.openssl_assert(len(pkcs8) == 48) + return pkcs8[-_X25519_KEY_SIZE:] diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x448.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x448.py new file mode 100644 index 0000000..fe0dcd9 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x448.py @@ -0,0 +1,123 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric.x448 import ( + X448PrivateKey, X448PublicKey +) + +_X448_KEY_SIZE = 56 + + +@utils.register_interface(X448PublicKey) +class _X448PublicKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes(self, encoding, format): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + encoding is not serialization.Encoding.Raw or + format is not serialization.PublicFormat.Raw + ): + raise ValueError( + "When using Raw both encoding and format must be Raw" + ) + + return self._raw_public_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PublicFormat.SubjectPublicKeyInfo + ): + raise ValueError( + "format must be SubjectPublicKeyInfo when encoding is PEM or " + "DER" + ) + + return self._backend._public_key_bytes( + encoding, format, self, self._evp_pkey, None + ) + + def _raw_public_bytes(self): + buf = self._backend._ffi.new("unsigned char []", _X448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _X448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _X448_KEY_SIZE)[:] + + +@utils.register_interface(X448PrivateKey) +class _X448PrivateKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self): + buf = self._backend._ffi.new("unsigned char []", _X448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _X448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_public_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE) + return self._backend.x448_load_public_bytes(buf) + + def exchange(self, peer_public_key): + if not isinstance(peer_public_key, X448PublicKey): + raise TypeError("peer_public_key must be X448PublicKey.") + + return _evp_pkey_derive( + self._backend, self._evp_pkey, peer_public_key + ) + + def private_bytes(self, encoding, format, encryption_algorithm): + if ( + encoding is serialization.Encoding.Raw or + format is serialization.PublicFormat.Raw + ): + if ( + format is not 
serialization.PrivateFormat.Raw or + encoding is not serialization.Encoding.Raw or not + isinstance(encryption_algorithm, serialization.NoEncryption) + ): + raise ValueError( + "When using Raw both encoding and format must be Raw " + "and encryption_algorithm must be NoEncryption()" + ) + + return self._raw_private_bytes() + + if ( + encoding in serialization._PEM_DER and + format is not serialization.PrivateFormat.PKCS8 + ): + raise ValueError( + "format must be PKCS8 when encoding is PEM or DER" + ) + + return self._backend._private_key_bytes( + encoding, format, encryption_algorithm, self._evp_pkey, None + ) + + def _raw_private_bytes(self): + buf = self._backend._ffi.new("unsigned char []", _X448_KEY_SIZE) + buflen = self._backend._ffi.new("size_t *", _X448_KEY_SIZE) + res = self._backend._lib.EVP_PKEY_get_raw_private_key( + self._evp_pkey, buf, buflen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE) + return self._backend._ffi.buffer(buf, _X448_KEY_SIZE)[:] diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x509.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x509.py index ac1838c..efbb179 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x509.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/backends/openssl/x509.py @@ -6,7 +6,6 @@ from __future__ import absolute_import, division, print_function import datetime import operator -import warnings from cryptography import utils, x509 from cryptography.exceptions import UnsupportedAlgorithm @@ -30,7 +29,7 @@ class _Certificate(object): self._x509 = x509 def __repr__(self): - return "".format(self.subject) + return "".format(self.subject) def __eq__(self, other): if not isinstance(other, x509.Certificate): @@ -59,18 +58,9 @@ class _Certificate(object): return x509.Version.v3 else: raise x509.InvalidVersion( - "{0} is not a valid X509 version".format(version), version + "{} is not a valid X509 version".format(version), version ) - @property - def serial(self): - warnings.warn( - "Certificate serial is deprecated, use serial_number instead.", - utils.PersistentlyDeprecated, - stacklevel=2 - ) - return self.serial_number - @property def serial_number(self): asn1_int = self._backend._lib.X509_get_serialNumber(self._x509) @@ -90,12 +80,12 @@ class _Certificate(object): @property def not_valid_before(self): - asn1_time = self._backend._lib.X509_get_notBefore(self._x509) + asn1_time = self._backend._lib.X509_getm_notBefore(self._x509) return _parse_asn1_time(self._backend, asn1_time) @property def not_valid_after(self): - asn1_time = self._backend._lib.X509_get_notAfter(self._x509) + asn1_time = self._backend._lib.X509_getm_notAfter(self._x509) return _parse_asn1_time(self._backend, asn1_time) @property @@ -117,7 +107,7 @@ class _Certificate(object): return x509._SIG_OIDS_TO_HASH[oid] except KeyError: raise UnsupportedAlgorithm( - "Signature algorithm OID:{0} not recognized".format(oid) + "Signature algorithm OID:{} not recognized".format(oid) ) @property @@ -271,7 +261,7 @@ class _CertificateRevocationList(object): return x509._SIG_OIDS_TO_HASH[oid] except KeyError: raise UnsupportedAlgorithm( - "Signature algorithm OID:{0} not recognized".format(oid) + "Signature algorithm OID:{} not recognized".format(oid) ) @property @@ -423,7 +413,7 @@ class _CertificateSigningRequest(object): return x509._SIG_OIDS_TO_HASH[oid] except KeyError: raise UnsupportedAlgorithm( - 
"Signature algorithm OID:{0} not recognized".format(oid) + "Signature algorithm OID:{} not recognized".format(oid) ) @property diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_constant_time.abi3.so b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_constant_time.abi3.so index 464be7e..4a658ac 100755 Binary files a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_constant_time.abi3.so and b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_constant_time.abi3.so differ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_openssl.abi3.so b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_openssl.abi3.so index 99b5aaf..46bac0b 100755 Binary files a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_openssl.abi3.so and b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_openssl.abi3.so differ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_padding.abi3.so b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_padding.abi3.so index e5304d2..7fe4749 100755 Binary files a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_padding.abi3.so and b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/_padding.abi3.so differ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/_conditional.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/_conditional.py index 219f163..a39bb66 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/_conditional.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/_conditional.py @@ -150,11 +150,6 @@ def cryptography_has_tls_st(): def cryptography_has_locking_callbacks(): return [ - "CRYPTO_LOCK", - "CRYPTO_UNLOCK", - "CRYPTO_READ", - "CRYPTO_LOCK_SSL", - "CRYPTO_lock", "Cryptography_setup_ssl_threads", ] @@ -224,9 +219,31 @@ def cryptography_has_x25519(): ] +def cryptography_has_x448(): + return [ + "EVP_PKEY_X448", + "NID_X448", + ] + + +def cryptography_has_ed448(): + return [ + "EVP_PKEY_ED448", + "NID_ED448", + ] + + def cryptography_has_ed25519(): return [ "NID_ED25519", + "EVP_PKEY_ED25519", + ] + + +def cryptography_has_poly1305(): + return [ + "NID_poly1305", + "EVP_PKEY_POLY1305", ] @@ -237,6 +254,12 @@ def cryptography_has_oneshot_evp_digest_sign_verify(): ] +def cryptography_has_evp_digestfinal_xof(): + return [ + "EVP_DigestFinalXOF", + ] + + def cryptography_has_evp_pkey_get_set_tls_encodedpoint(): return [ "EVP_PKEY_get1_tls_encodedpoint", @@ -246,7 +269,7 @@ def cryptography_has_evp_pkey_get_set_tls_encodedpoint(): def cryptography_has_fips(): return [ - "FIPS_set_mode", + "FIPS_mode_set", "FIPS_mode", ] @@ -298,6 +321,46 @@ def cryptography_has_tlsv13(): "SSL_verify_client_post_handshake", "SSL_CTX_set_post_handshake_auth", "SSL_set_post_handshake_auth", + "SSL_SESSION_get_max_early_data", + "SSL_write_early_data", + "SSL_read_early_data", + "SSL_CTX_set_max_early_data", + ] + + +def cryptography_has_raw_key(): + return [ + "EVP_PKEY_new_raw_private_key", + "EVP_PKEY_new_raw_public_key", + "EVP_PKEY_get_raw_private_key", + "EVP_PKEY_get_raw_public_key", + ] + + +def cryptography_has_evp_r_memory_limit_exceeded(): + return [ + "EVP_R_MEMORY_LIMIT_EXCEEDED", + ] + + +def cryptography_has_engine(): + return [ + "ENGINE_by_id", + "ENGINE_init", + "ENGINE_finish", + 
"ENGINE_get_default_RAND", + "ENGINE_set_default_RAND", + "ENGINE_unregister_RAND", + "ENGINE_ctrl_cmd", + "ENGINE_free", + "ENGINE_get_name", + "Cryptography_add_osrandom_engine", + ] + + +def cryptography_has_verified_chain(): + return [ + "SSL_get0_verified_chain", ] @@ -349,7 +412,10 @@ CONDITIONAL_NAMES = { cryptography_has_x509_store_ctx_get_issuer ), "Cryptography_HAS_X25519": cryptography_has_x25519, + "Cryptography_HAS_X448": cryptography_has_x448, + "Cryptography_HAS_ED448": cryptography_has_ed448, "Cryptography_HAS_ED25519": cryptography_has_ed25519, + "Cryptography_HAS_POLY1305": cryptography_has_poly1305, "Cryptography_HAS_ONESHOT_EVP_DIGEST_SIGN_VERIFY": ( cryptography_has_oneshot_evp_digest_sign_verify ), @@ -363,4 +429,13 @@ CONDITIONAL_NAMES = { "Cryptography_HAS_OPENSSL_CLEANUP": cryptography_has_openssl_cleanup, "Cryptography_HAS_CIPHER_DETAILS": cryptography_has_cipher_details, "Cryptography_HAS_TLSv1_3": cryptography_has_tlsv13, + "Cryptography_HAS_RAW_KEY": cryptography_has_raw_key, + "Cryptography_HAS_EVP_DIGESTFINAL_XOF": ( + cryptography_has_evp_digestfinal_xof + ), + "Cryptography_HAS_EVP_R_MEMORY_LIMIT_EXCEEDED": ( + cryptography_has_evp_r_memory_limit_exceeded + ), + "Cryptography_HAS_ENGINE": cryptography_has_engine, + "Cryptography_HAS_VERIFIED_CHAIN": cryptography_has_verified_chain, } diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/binding.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/binding.py index 0824ea8..9740516 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/binding.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/bindings/openssl/binding.py @@ -9,6 +9,7 @@ import threading import types import warnings +import cryptography from cryptography import utils from cryptography.exceptions import InternalError from cryptography.hazmat.bindings._openssl import ffi, lib @@ -114,10 +115,9 @@ class Binding(object): # reliably clear the error queue. Once we clear it here we will # error on any subsequent unexpected item in the stack. cls.lib.ERR_clear_error() - cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id - cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name - result = cls.lib.Cryptography_add_osrandom_engine() - _openssl_assert(cls.lib, result in (1, 2)) + if cls.lib.Cryptography_HAS_ENGINE: + result = cls.lib.Cryptography_add_osrandom_engine() + _openssl_assert(cls.lib, result in (1, 2)) @classmethod def _ensure_ffi_initialized(cls): @@ -158,12 +158,35 @@ def _verify_openssl_version(lib): ): warnings.warn( "OpenSSL version 1.0.1 is no longer supported by the OpenSSL " - "project, please upgrade. A future version of cryptography will " + "project, please upgrade. The next version of cryptography will " "drop support for it.", utils.CryptographyDeprecationWarning ) +def _verify_package_version(version): + # Occasionally we run into situations where the version of the Python + # package does not match the version of the shared object that is loaded. + # This may occur in environments where multiple versions of cryptography + # are installed and available in the python path. 
To avoid errors cropping + # up later this code checks that the currently imported package and the + # shared object that were loaded have the same version and raise an + # ImportError if they do not + so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION) + if version.encode("ascii") != so_package_version: + raise ImportError( + "The version of cryptography does not match the loaded " + "shared object. This can happen if you have multiple copies of " + "cryptography installed in your Python path. Please try creating " + "a new virtual environment to resolve this issue. " + "Loaded python version: {}, shared object version: {}".format( + version, so_package_version + ) + ) + + +_verify_package_version(cryptography.__version__) + # OpenSSL is not thread safe until the locks are initialized. We call this # method in module scope so that it executes with the import lock. On # Pythons < 3.4 this import lock is a global lock, which can prevent a race diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ec.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ec.py index 431ecb7..529391f 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ec.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ec.py @@ -5,6 +5,7 @@ from __future__ import absolute_import, division, print_function import abc +import warnings import six @@ -19,6 +20,19 @@ class EllipticCurveOID(object): SECP256R1 = ObjectIdentifier("1.2.840.10045.3.1.7") SECP384R1 = ObjectIdentifier("1.3.132.0.34") SECP521R1 = ObjectIdentifier("1.3.132.0.35") + BRAINPOOLP256R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.7") + BRAINPOOLP384R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.11") + BRAINPOOLP512R1 = ObjectIdentifier("1.3.36.3.3.2.8.1.1.13") + SECT163K1 = ObjectIdentifier("1.3.132.0.1") + SECT163R2 = ObjectIdentifier("1.3.132.0.15") + SECT233K1 = ObjectIdentifier("1.3.132.0.26") + SECT233R1 = ObjectIdentifier("1.3.132.0.27") + SECT283K1 = ObjectIdentifier("1.3.132.0.16") + SECT283R1 = ObjectIdentifier("1.3.132.0.17") + SECT409K1 = ObjectIdentifier("1.3.132.0.36") + SECT409R1 = ObjectIdentifier("1.3.132.0.37") + SECT571K1 = ObjectIdentifier("1.3.132.0.38") + SECT571R1 = ObjectIdentifier("1.3.132.0.39") @six.add_metaclass(abc.ABCMeta) @@ -138,6 +152,22 @@ class EllipticCurvePublicKey(object): Verifies the signature of the data. """ + @classmethod + def from_encoded_point(cls, curve, data): + utils._check_bytes("data", data) + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must be an EllipticCurve instance") + + if len(data) == 0: + raise ValueError("data must not be an empty byte string") + + if six.indexbytes(data, 0) not in [0x02, 0x03, 0x04]: + raise ValueError("Unsupported elliptic curve point type") + + from cryptography.hazmat.backends.openssl.backend import backend + return backend.load_elliptic_curve_public_bytes(curve, data) + EllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey @@ -329,6 +359,14 @@ class EllipticCurvePublicNumbers(object): return backend.load_elliptic_curve_public_numbers(self) def encode_point(self): + warnings.warn( + "encode_point has been deprecated on EllipticCurvePublicNumbers" + " and will be removed in a future version. Please use " + "EllipticCurvePublicKey.public_bytes to obtain both " + "compressed and uncompressed point encoding.", + utils.DeprecatedIn25, + stacklevel=2, + ) # key_size is in bits. 
Convert to bytes and round up byte_length = (self.curve.key_size + 7) // 8 return ( @@ -341,6 +379,14 @@ class EllipticCurvePublicNumbers(object): if not isinstance(curve, EllipticCurve): raise TypeError("curve must be an EllipticCurve instance") + warnings.warn( + "Support for unsafe construction of public numbers from " + "encoded data will be removed in a future version. " + "Please use EllipticCurvePublicKey.from_encoded_point", + utils.DeprecatedIn25, + stacklevel=2, + ) + if data.startswith(b'\x04'): # key_size is in bits. Convert to bytes and round up byte_length = (curve.key_size + 7) // 8 @@ -419,3 +465,36 @@ class EllipticCurvePrivateNumbers(object): class ECDH(object): pass + + +_OID_TO_CURVE = { + EllipticCurveOID.SECP192R1: SECP192R1, + EllipticCurveOID.SECP224R1: SECP224R1, + EllipticCurveOID.SECP256K1: SECP256K1, + EllipticCurveOID.SECP256R1: SECP256R1, + EllipticCurveOID.SECP384R1: SECP384R1, + EllipticCurveOID.SECP521R1: SECP521R1, + EllipticCurveOID.BRAINPOOLP256R1: BrainpoolP256R1, + EllipticCurveOID.BRAINPOOLP384R1: BrainpoolP384R1, + EllipticCurveOID.BRAINPOOLP512R1: BrainpoolP512R1, + EllipticCurveOID.SECT163K1: SECT163K1, + EllipticCurveOID.SECT163R2: SECT163R2, + EllipticCurveOID.SECT233K1: SECT233K1, + EllipticCurveOID.SECT233R1: SECT233R1, + EllipticCurveOID.SECT283K1: SECT283K1, + EllipticCurveOID.SECT283R1: SECT283R1, + EllipticCurveOID.SECT409K1: SECT409K1, + EllipticCurveOID.SECT409R1: SECT409R1, + EllipticCurveOID.SECT571K1: SECT571K1, + EllipticCurveOID.SECT571R1: SECT571R1, +} + + +def get_curve_for_oid(oid): + try: + return _OID_TO_CURVE[oid] + except KeyError: + raise LookupError( + "The provided object identifier has no matching elliptic " + "curve class" + ) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ed25519.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ed25519.py new file mode 100644 index 0000000..d89445f --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ed25519.py @@ -0,0 +1,84 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons + + +_ED25519_KEY_SIZE = 32 +_ED25519_SIG_SIZE = 64 + + +@six.add_metaclass(abc.ABCMeta) +class Ed25519PublicKey(object): + @classmethod + def from_public_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.ed25519_supported(): + raise UnsupportedAlgorithm( + "ed25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM + ) + + return backend.ed25519_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def verify(self, signature, data): + """ + Verify the signature. 
+ """ + + +@six.add_metaclass(abc.ABCMeta) +class Ed25519PrivateKey(object): + @classmethod + def generate(cls): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.ed25519_supported(): + raise UnsupportedAlgorithm( + "ed25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM + ) + + return backend.ed25519_generate_key() + + @classmethod + def from_private_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.ed25519_supported(): + raise UnsupportedAlgorithm( + "ed25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM + ) + + return backend.ed25519_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self): + """ + The Ed25519PublicKey derived from the private key. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + The serialized bytes of the private key. + """ + + @abc.abstractmethod + def sign(self, data): + """ + Signs the data. + """ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ed448.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ed448.py new file mode 100644 index 0000000..939157a --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/ed448.py @@ -0,0 +1,79 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons + + +@six.add_metaclass(abc.ABCMeta) +class Ed448PublicKey(object): + @classmethod + def from_public_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.ed448_supported(): + raise UnsupportedAlgorithm( + "ed448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM + ) + + return backend.ed448_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def verify(self, signature, data): + """ + Verify the signature. + """ + + +@six.add_metaclass(abc.ABCMeta) +class Ed448PrivateKey(object): + @classmethod + def generate(cls): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.ed448_supported(): + raise UnsupportedAlgorithm( + "ed448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM + ) + return backend.ed448_generate_key() + + @classmethod + def from_private_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.ed448_supported(): + raise UnsupportedAlgorithm( + "ed448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM + ) + + return backend.ed448_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self): + """ + The Ed448PublicKey derived from the private key. + """ + + @abc.abstractmethod + def sign(self, data): + """ + Signs the data. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + The serialized bytes of the private key. 
+ """ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/utils.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/utils.py index ef1e7eb..14d2abe 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/utils.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/utils.py @@ -4,49 +4,26 @@ from __future__ import absolute_import, division, print_function -import warnings - -from asn1crypto.algos import DSASignature - -import six - from cryptography import utils +from cryptography.hazmat._der import ( + DERReader, INTEGER, SEQUENCE, encode_der, encode_der_integer +) from cryptography.hazmat.primitives import hashes -def decode_rfc6979_signature(signature): - warnings.warn( - "decode_rfc6979_signature is deprecated and will " - "be removed in a future version, use decode_dss_signature instead.", - utils.PersistentlyDeprecated, - stacklevel=2 - ) - return decode_dss_signature(signature) - - def decode_dss_signature(signature): - data = DSASignature.load(signature, strict=True).native - return data['r'], data['s'] - - -def encode_rfc6979_signature(r, s): - warnings.warn( - "encode_rfc6979_signature is deprecated and will " - "be removed in a future version, use encode_dss_signature instead.", - utils.PersistentlyDeprecated, - stacklevel=2 - ) - return encode_dss_signature(r, s) + with DERReader(signature).read_single_element(SEQUENCE) as seq: + r = seq.read_element(INTEGER).as_integer() + s = seq.read_element(INTEGER).as_integer() + return r, s def encode_dss_signature(r, s): - if ( - not isinstance(r, six.integer_types) or - not isinstance(s, six.integer_types) - ): - raise ValueError("Both r and s must be integers") - - return DSASignature({'r': r, 's': s}).dump() + return encode_der( + SEQUENCE, + encode_der(INTEGER, encode_der_integer(r)), + encode_der(INTEGER, encode_der_integer(s)), + ) class Prehashed(object): diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x25519.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x25519.py index 5c4652a..4e8badf 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x25519.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x25519.py @@ -21,11 +21,14 @@ class X25519PublicKey(object): "X25519 is not supported by this version of OpenSSL.", _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM ) + return backend.x25519_load_public_bytes(data) @abc.abstractmethod - def public_bytes(self): - pass + def public_bytes(self, encoding=None, format=None): + """ + The serialized bytes of the public key. + """ @six.add_metaclass(abc.ABCMeta) @@ -41,14 +44,30 @@ class X25519PrivateKey(object): return backend.x25519_generate_key() @classmethod - def _from_private_bytes(cls, data): + def from_private_bytes(cls, data): from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + return backend.x25519_load_private_bytes(data) @abc.abstractmethod def public_key(self): - pass + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + The serialized bytes of the private key. 
+ """ @abc.abstractmethod def exchange(self, peer_public_key): - pass + """ + Performs a key exchange operation using the provided peer's public key. + """ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x448.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x448.py new file mode 100644 index 0000000..475e678 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/asymmetric/x448.py @@ -0,0 +1,73 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons + + +@six.add_metaclass(abc.ABCMeta) +class X448PublicKey(object): + @classmethod + def from_public_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x448_supported(): + raise UnsupportedAlgorithm( + "X448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + + return backend.x448_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + The serialized bytes of the public key. + """ + + +@six.add_metaclass(abc.ABCMeta) +class X448PrivateKey(object): + @classmethod + def generate(cls): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x448_supported(): + raise UnsupportedAlgorithm( + "X448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + return backend.x448_generate_key() + + @classmethod + def from_private_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x448_supported(): + raise UnsupportedAlgorithm( + "X448 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + + return backend.x448_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self): + """ + The serialized bytes of the public key. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + The serialized bytes of the private key. + """ + + @abc.abstractmethod + def exchange(self, peer_public_key): + """ + Performs a key exchange operation using the provided peer's public key. 
+ """ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/aead.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/aead.py index 16899d0..42e19ad 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/aead.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/aead.py @@ -20,7 +20,7 @@ class ChaCha20Poly1305(object): "ChaCha20Poly1305 is not supported by this version of OpenSSL", exceptions._Reasons.UNSUPPORTED_CIPHER ) - utils._check_bytes("key", key) + utils._check_byteslike("key", key) if len(key) != 32: raise ValueError("ChaCha20Poly1305 key must be 32 bytes.") @@ -56,7 +56,7 @@ class ChaCha20Poly1305(object): ) def _check_params(self, nonce, data, associated_data): - utils._check_bytes("nonce", nonce) + utils._check_byteslike("nonce", nonce) utils._check_bytes("data", data) utils._check_bytes("associated_data", associated_data) if len(nonce) != 12: @@ -67,7 +67,7 @@ class AESCCM(object): _MAX_SIZE = 2 ** 32 def __init__(self, key, tag_length=16): - utils._check_bytes("key", key) + utils._check_byteslike("key", key) if len(key) not in (16, 24, 32): raise ValueError("AESCCM key must be 128, 192, or 256 bits.") @@ -129,7 +129,7 @@ class AESCCM(object): raise ValueError("Nonce too long for data") def _check_params(self, nonce, data, associated_data): - utils._check_bytes("nonce", nonce) + utils._check_byteslike("nonce", nonce) utils._check_bytes("data", data) utils._check_bytes("associated_data", associated_data) if not 7 <= len(nonce) <= 13: @@ -140,7 +140,7 @@ class AESGCM(object): _MAX_SIZE = 2 ** 32 def __init__(self, key): - utils._check_bytes("key", key) + utils._check_byteslike("key", key) if len(key) not in (16, 24, 32): raise ValueError("AESGCM key must be 128, 192, or 256 bits.") @@ -181,7 +181,7 @@ class AESGCM(object): ) def _check_params(self, nonce, data, associated_data): - utils._check_bytes("nonce", nonce) + utils._check_byteslike("nonce", nonce) utils._check_bytes("data", data) utils._check_bytes("associated_data", associated_data) if len(nonce) == 0: diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/algorithms.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/algorithms.py index 68a9e33..f4d5160 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/algorithms.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/algorithms.py @@ -13,11 +13,11 @@ from cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce def _verify_key_size(algorithm, key): # Verify that the key is instance of bytes - utils._check_bytes("key", key) + utils._check_byteslike("key", key) # Verify that the key size matches the expected key size if len(key) * 8 not in algorithm.key_sizes: - raise ValueError("Invalid key size ({0}) for {1}.".format( + raise ValueError("Invalid key size ({}) for {}.".format( len(key) * 8, algorithm.name )) return key @@ -153,8 +153,7 @@ class ChaCha20(object): def __init__(self, key, nonce): self.key = _verify_key_size(self, key) - if not isinstance(nonce, bytes): - raise TypeError("nonce must be bytes") + utils._check_byteslike("nonce", nonce) if len(nonce) != 16: raise ValueError("nonce must be 128-bits (16 bytes)") diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/base.py 
b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/base.py index f857041..4d5f8d6 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/base.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/base.py @@ -179,7 +179,7 @@ class _AEADCipherContext(object): self._bytes_processed += data_size if self._bytes_processed > self._ctx._mode._MAX_ENCRYPTED_BYTES: raise ValueError( - "{0} has a maximum encrypted byte limit of {1}".format( + "{} has a maximum encrypted byte limit of {}".format( self._ctx._mode.name, self._ctx._mode._MAX_ENCRYPTED_BYTES ) ) @@ -217,7 +217,7 @@ class _AEADCipherContext(object): self._aad_bytes_processed += len(data) if self._aad_bytes_processed > self._ctx._mode._MAX_AAD_BYTES: raise ValueError( - "{0} has a maximum AAD byte limit of {1}".format( + "{} has a maximum AAD byte limit of {}".format( self._ctx._mode.name, self._ctx._mode._MAX_AAD_BYTES ) ) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/modes.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/modes.py index e82c1a8..78fa1c4 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/modes.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/ciphers/modes.py @@ -72,7 +72,7 @@ def _check_aes_key_length(self, algorithm): def _check_iv_length(self, algorithm): if len(self.initialization_vector) * 8 != algorithm.block_size: - raise ValueError("Invalid IV size ({0}) for {1}.".format( + raise ValueError("Invalid IV size ({}) for {}.".format( len(self.initialization_vector), self.name )) @@ -88,9 +88,7 @@ class CBC(object): name = "CBC" def __init__(self, initialization_vector): - if not isinstance(initialization_vector, bytes): - raise TypeError("initialization_vector must be bytes") - + utils._check_byteslike("initialization_vector", initialization_vector) self._initialization_vector = initialization_vector initialization_vector = utils.read_only_property("_initialization_vector") @@ -103,8 +101,7 @@ class XTS(object): name = "XTS" def __init__(self, tweak): - if not isinstance(tweak, bytes): - raise TypeError("tweak must be bytes") + utils._check_byteslike("tweak", tweak) if len(tweak) != 16: raise ValueError("tweak must be 128-bits (16 bytes)") @@ -134,9 +131,7 @@ class OFB(object): name = "OFB" def __init__(self, initialization_vector): - if not isinstance(initialization_vector, bytes): - raise TypeError("initialization_vector must be bytes") - + utils._check_byteslike("initialization_vector", initialization_vector) self._initialization_vector = initialization_vector initialization_vector = utils.read_only_property("_initialization_vector") @@ -149,9 +144,7 @@ class CFB(object): name = "CFB" def __init__(self, initialization_vector): - if not isinstance(initialization_vector, bytes): - raise TypeError("initialization_vector must be bytes") - + utils._check_byteslike("initialization_vector", initialization_vector) self._initialization_vector = initialization_vector initialization_vector = utils.read_only_property("_initialization_vector") @@ -164,9 +157,7 @@ class CFB8(object): name = "CFB8" def __init__(self, initialization_vector): - if not isinstance(initialization_vector, bytes): - raise TypeError("initialization_vector must be bytes") - + utils._check_byteslike("initialization_vector", initialization_vector) self._initialization_vector = initialization_vector 
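
The repeated _check_bytes to _check_byteslike swaps in this file and in aead.py/algorithms.py mean keys, nonces, IVs and tweaks now accept any object supporting the buffer protocol, not only bytes. A sketch of the widened inputs (assumes an OpenSSL build with ChaCha20-Poly1305 support, i.e. 1.1.0+):

    import os
    from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

    key = bytearray(os.urandom(32))       # bytes-like, no longer only bytes
    nonce = memoryview(os.urandom(12))    # also accepted after this change
    aead = ChaCha20Poly1305(key)
    ct = aead.encrypt(nonce, b"payload", b"aad")  # data/aad must stay bytes
    assert aead.decrypt(nonce, ct, b"aad") == b"payload"
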
initialization_vector = utils.read_only_property("_initialization_vector") @@ -179,9 +170,7 @@ class CTR(object): name = "CTR" def __init__(self, nonce): - if not isinstance(nonce, bytes): - raise TypeError("nonce must be bytes") - + utils._check_byteslike("nonce", nonce) self._nonce = nonce nonce = utils.read_only_property("_nonce") @@ -189,7 +178,7 @@ class CTR(object): def validate_for_algorithm(self, algorithm): _check_aes_key_length(self, algorithm) if len(self.nonce) * 8 != algorithm.block_size: - raise ValueError("Invalid nonce size ({0}) for {1}.".format( + raise ValueError("Invalid nonce size ({}) for {}.".format( len(self.nonce), self.name )) @@ -206,19 +195,17 @@ class GCM(object): # len(initialization_vector) must in [1, 2 ** 64), but it's impossible # to actually construct a bytes object that large, so we don't check # for it - if not isinstance(initialization_vector, bytes): - raise TypeError("initialization_vector must be bytes") + utils._check_byteslike("initialization_vector", initialization_vector) if len(initialization_vector) == 0: raise ValueError("initialization_vector must be at least 1 byte") self._initialization_vector = initialization_vector if tag is not None: - if not isinstance(tag, bytes): - raise TypeError("tag must be bytes or None") + utils._check_bytes("tag", tag) if min_tag_length < 4: raise ValueError("min_tag_length must be >= 4") if len(tag) < min_tag_length: raise ValueError( - "Authentication tag must be {0} bytes or longer.".format( + "Authentication tag must be {} bytes or longer.".format( min_tag_length) ) self._tag = tag diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/cmac.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/cmac.py index 77537f0..95a8d97 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/cmac.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/cmac.py @@ -9,10 +9,9 @@ from cryptography.exceptions import ( AlreadyFinalized, UnsupportedAlgorithm, _Reasons ) from cryptography.hazmat.backends.interfaces import CMACBackend -from cryptography.hazmat.primitives import ciphers, mac +from cryptography.hazmat.primitives import ciphers -@utils.register_interface(mac.MACContext) class CMAC(object): def __init__(self, algorithm, backend, ctx=None): if not isinstance(backend, CMACBackend): @@ -36,8 +35,8 @@ class CMAC(object): def update(self, data): if self._ctx is None: raise AlreadyFinalized("Context was already finalized.") - if not isinstance(data, bytes): - raise TypeError("data must be bytes.") + + utils._check_bytes("data", data) self._ctx.update(data) def finalize(self): @@ -48,8 +47,7 @@ class CMAC(object): return digest def verify(self, signature): - if not isinstance(signature, bytes): - raise TypeError("signature must be bytes.") + utils._check_bytes("signature", signature) if self._ctx is None: raise AlreadyFinalized("Context was already finalized.") diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/constant_time.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/constant_time.py index 0e987ea..35ceafe 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/constant_time.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/constant_time.py @@ -21,9 +21,9 @@ if hasattr(hmac, "compare_digest"): else: warnings.warn( "Support for your Python version is deprecated. 
The next version of " - "cryptography will remove support. Please upgrade to a 2.7.x " - "release that supports hmac.compare_digest as soon as possible.", - utils.DeprecatedIn23, + "cryptography will remove support. Please upgrade to a release " + "(2.7.7+) that supports hmac.compare_digest as soon as possible.", + utils.PersistentlyDeprecated2018, ) def bytes_eq(a, b): diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hashes.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hashes.py index 3f3aadd..9be2b60 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hashes.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hashes.py @@ -57,6 +57,13 @@ class HashContext(object): """ +@six.add_metaclass(abc.ABCMeta) +class ExtendableOutputFunction(object): + """ + An interface for extendable output functions. + """ + + @utils.register_interface(HashContext) class Hash(object): def __init__(self, algorithm, backend, ctx=None): @@ -82,8 +89,7 @@ class Hash(object): def update(self, data): if self._ctx is None: raise AlreadyFinalized("Context was already finalized.") - if not isinstance(data, bytes): - raise TypeError("data must be bytes.") + utils._check_byteslike("data", data) self._ctx.update(data) def copy(self): @@ -108,6 +114,20 @@ class SHA1(object): block_size = 64 +@utils.register_interface(HashAlgorithm) +class SHA512_224(object): # noqa: N801 + name = "sha512-224" + digest_size = 28 + block_size = 128 + + +@utils.register_interface(HashAlgorithm) +class SHA512_256(object): # noqa: N801 + name = "sha512-256" + digest_size = 32 + block_size = 128 + + @utils.register_interface(HashAlgorithm) class SHA224(object): name = "sha224" @@ -136,6 +156,64 @@ class SHA512(object): block_size = 128 +@utils.register_interface(HashAlgorithm) +class SHA3_224(object): # noqa: N801 + name = "sha3-224" + digest_size = 28 + + +@utils.register_interface(HashAlgorithm) +class SHA3_256(object): # noqa: N801 + name = "sha3-256" + digest_size = 32 + + +@utils.register_interface(HashAlgorithm) +class SHA3_384(object): # noqa: N801 + name = "sha3-384" + digest_size = 48 + + +@utils.register_interface(HashAlgorithm) +class SHA3_512(object): # noqa: N801 + name = "sha3-512" + digest_size = 64 + + +@utils.register_interface(HashAlgorithm) +@utils.register_interface(ExtendableOutputFunction) +class SHAKE128(object): + name = "shake128" + + def __init__(self, digest_size): + if not isinstance(digest_size, six.integer_types): + raise TypeError("digest_size must be an integer") + + if digest_size < 1: + raise ValueError("digest_size must be a positive integer") + + self._digest_size = digest_size + + digest_size = utils.read_only_property("_digest_size") + + +@utils.register_interface(HashAlgorithm) +@utils.register_interface(ExtendableOutputFunction) +class SHAKE256(object): + name = "shake256" + + def __init__(self, digest_size): + if not isinstance(digest_size, six.integer_types): + raise TypeError("digest_size must be an integer") + + if digest_size < 1: + raise ValueError("digest_size must be a positive integer") + + self._digest_size = digest_size + + digest_size = utils.read_only_property("_digest_size") + + @utils.register_interface(HashAlgorithm) class MD5(object): name = "md5" @@ -151,13 +229,9 @@ class BLAKE2b(object): block_size = 128 def __init__(self, digest_size): - if ( - digest_size > self._max_digest_size or - digest_size < self._min_digest_size - ): - raise ValueError("Digest size must be 
{0}-{1}".format( - self._min_digest_size, self._max_digest_size) - ) + + if digest_size != 64: + raise ValueError("Digest size must be 64") self._digest_size = digest_size @@ -172,13 +246,9 @@ class BLAKE2s(object): _min_digest_size = 1 def __init__(self, digest_size): - if ( - digest_size > self._max_digest_size or - digest_size < self._min_digest_size - ): - raise ValueError("Digest size must be {0}-{1}".format( - self._min_digest_size, self._max_digest_size) - ) + + if digest_size != 32: + raise ValueError("Digest size must be 32") self._digest_size = digest_size diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hmac.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hmac.py index 2e9a4e2..9eceeac 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hmac.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/hmac.py @@ -9,10 +9,9 @@ from cryptography.exceptions import ( AlreadyFinalized, UnsupportedAlgorithm, _Reasons ) from cryptography.hazmat.backends.interfaces import HMACBackend -from cryptography.hazmat.primitives import hashes, mac +from cryptography.hazmat.primitives import hashes -@utils.register_interface(mac.MACContext) @utils.register_interface(hashes.HashContext) class HMAC(object): def __init__(self, key, algorithm, backend, ctx=None): @@ -38,8 +37,7 @@ class HMAC(object): def update(self, data): if self._ctx is None: raise AlreadyFinalized("Context was already finalized.") - if not isinstance(data, bytes): - raise TypeError("data must be bytes.") + utils._check_byteslike("data", data) self._ctx.update(data) def copy(self): @@ -60,8 +58,7 @@ class HMAC(object): return digest def verify(self, signature): - if not isinstance(signature, bytes): - raise TypeError("signature must be bytes.") + utils._check_bytes("signature", signature) if self._ctx is None: raise AlreadyFinalized("Context was already finalized.") diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/concatkdf.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/concatkdf.py index c6399e4..7cb6385 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/concatkdf.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/concatkdf.py @@ -24,17 +24,15 @@ def _common_args_checks(algorithm, length, otherinfo): max_length = algorithm.digest_size * (2 ** 32 - 1) if length > max_length: raise ValueError( - "Can not derive keys larger than {0} bits.".format( + "Can not derive keys larger than {} bits.".format( max_length )) - if not (otherinfo is None or isinstance(otherinfo, bytes)): - raise TypeError("otherinfo must be bytes.") + if otherinfo is not None: + utils._check_bytes("otherinfo", otherinfo) def _concatkdf_derive(key_material, length, auxfn, otherinfo): - if not isinstance(key_material, bytes): - raise TypeError("key_material must be bytes.") - + utils._check_byteslike("key_material", key_material) output = [b""] outlen = 0 counter = 1 @@ -96,10 +94,11 @@ class ConcatKDFHMAC(object): if self._otherinfo is None: self._otherinfo = b"" - if not (salt is None or isinstance(salt, bytes)): - raise TypeError("salt must be bytes.") if salt is None: salt = b"\x00" * algorithm.block_size + else: + utils._check_bytes("salt", salt) + self._salt = salt if not isinstance(backend, HMACBackend): diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/hkdf.py 
b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/hkdf.py index 917b4e9..01f0f28 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/hkdf.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/hkdf.py @@ -26,11 +26,10 @@ class HKDF(object): self._algorithm = algorithm - if not (salt is None or isinstance(salt, bytes)): - raise TypeError("salt must be bytes.") - if salt is None: salt = b"\x00" * self._algorithm.digest_size + else: + utils._check_bytes("salt", salt) self._salt = salt @@ -44,9 +43,7 @@ class HKDF(object): return h.finalize() def derive(self, key_material): - if not isinstance(key_material, bytes): - raise TypeError("key_material must be bytes.") - + utils._check_byteslike("key_material", key_material) return self._hkdf_expand.derive(self._extract(key_material)) def verify(self, key_material, expected_key): @@ -71,17 +68,16 @@ class HKDFExpand(object): if length > max_length: raise ValueError( - "Can not derive keys larger than {0} octets.".format( + "Can not derive keys larger than {} octets.".format( max_length )) self._length = length - if not (info is None or isinstance(info, bytes)): - raise TypeError("info must be bytes.") - if info is None: info = b"" + else: + utils._check_bytes("info", info) self._info = info @@ -102,9 +98,7 @@ class HKDFExpand(object): return b"".join(output)[:self._length] def derive(self, key_material): - if not isinstance(key_material, bytes): - raise TypeError("key_material must be bytes.") - + utils._check_byteslike("key_material", key_material) if self._used: raise AlreadyFinalized diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/kbkdf.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/kbkdf.py index 14de56e..56783a8 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/kbkdf.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/kbkdf.py @@ -73,10 +73,8 @@ class KBKDFHMAC(object): if context is None: context = b'' - if (not isinstance(label, bytes) or - not isinstance(context, bytes)): - raise TypeError('label and context must be of type bytes') - + utils._check_bytes("label", label) + utils._check_bytes("context", context) self._algorithm = algorithm self._mode = mode self._length = length @@ -102,8 +100,7 @@ class KBKDFHMAC(object): if self._used: raise AlreadyFinalized - if not isinstance(key_material, bytes): - raise TypeError('key_material must be bytes') + utils._check_byteslike("key_material", key_material) self._used = True # inverse floor division (equivalent to ceiling) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/pbkdf2.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/pbkdf2.py index f8ce7a3..07d8ac6 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/pbkdf2.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/pbkdf2.py @@ -24,15 +24,14 @@ class PBKDF2HMAC(object): if not backend.pbkdf2_hmac_supported(algorithm): raise UnsupportedAlgorithm( - "{0} is not supported for PBKDF2 by this backend.".format( + "{} is not supported for PBKDF2 by this backend.".format( algorithm.name), _Reasons.UNSUPPORTED_HASH ) self._used = False self._algorithm = algorithm self._length = length - if not isinstance(salt, bytes): - raise TypeError("salt must be bytes.") + utils._check_bytes("salt", salt) 
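
The KDF constructors behave the same after the validator swap; only the accepted input types widen (salts and labels must still be bytes, key material may be bytes-like). For reference, a typical PBKDF2HMAC use with illustrative, not recommended, parameters:

    import os
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=os.urandom(16),          # salt must still be bytes
        iterations=100000,
        backend=default_backend(),
    )
    key = kdf.derive(bytearray(b"correct horse"))  # key material: bytes-like
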
self._salt = salt self._iterations = iterations self._backend = backend @@ -42,8 +41,7 @@ class PBKDF2HMAC(object): raise AlreadyFinalized("PBKDF2 instances can only be used once.") self._used = True - if not isinstance(key_material, bytes): - raise TypeError("key_material must be bytes.") + utils._check_byteslike("key_material", key_material) return self._backend.derive_pbkdf2_hmac( self._algorithm, self._length, diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/scrypt.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/scrypt.py index 77dcf9a..df9745e 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/scrypt.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/scrypt.py @@ -30,9 +30,7 @@ class Scrypt(object): ) self._length = length - if not isinstance(salt, bytes): - raise TypeError("salt must be bytes.") - + utils._check_bytes("salt", salt) if n < 2 or (n & (n - 1)) != 0: raise ValueError("n must be greater than 1 and be a power of 2.") @@ -54,8 +52,7 @@ class Scrypt(object): raise AlreadyFinalized("Scrypt instances can only be used once.") self._used = True - if not isinstance(key_material, bytes): - raise TypeError("key_material must be bytes.") + utils._check_byteslike("key_material", key_material) return self._backend.derive_scrypt( key_material, self._salt, self._length, self._n, self._r, self._p ) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/x963kdf.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/x963kdf.py index 83789b3..9eb50b0 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/x963kdf.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/kdf/x963kdf.py @@ -26,9 +26,10 @@ class X963KDF(object): max_len = algorithm.digest_size * (2 ** 32 - 1) if length > max_len: raise ValueError( - "Can not derive keys larger than {0} bits.".format(max_len)) - if not (sharedinfo is None or isinstance(sharedinfo, bytes)): - raise TypeError("sharedinfo must be bytes.") + "Can not derive keys larger than {} bits.".format(max_len)) + if sharedinfo is not None: + utils._check_bytes("sharedinfo", sharedinfo) + self._algorithm = algorithm self._length = length self._sharedinfo = sharedinfo @@ -45,10 +46,7 @@ class X963KDF(object): if self._used: raise AlreadyFinalized self._used = True - - if not isinstance(key_material, bytes): - raise TypeError("key_material must be bytes.") - + utils._check_byteslike("key_material", key_material) output = [b""] outlen = 0 counter = 1 diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/mac.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/mac.py deleted file mode 100644 index 4c95190..0000000 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/mac.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -from __future__ import absolute_import, division, print_function - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class MACContext(object): - @abc.abstractmethod - def update(self, data): - """ - Processes the provided bytes. - """ - - @abc.abstractmethod - def finalize(self): - """ - Returns the message authentication code as bytes. 
- """ - - @abc.abstractmethod - def copy(self): - """ - Return a MACContext that is a copy of the current context. - """ - - @abc.abstractmethod - def verify(self, signature): - """ - Checks if the generated message authentication code matches the - signature. - """ diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/padding.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/padding.py index a081976..170c802 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/padding.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/padding.py @@ -40,8 +40,7 @@ def _byte_padding_update(buffer_, data, block_size): if buffer_ is None: raise AlreadyFinalized("Context was already finalized.") - if not isinstance(data, bytes): - raise TypeError("data must be bytes.") + utils._check_bytes("data", data) buffer_ += data @@ -65,8 +64,7 @@ def _byte_unpadding_update(buffer_, data, block_size): if buffer_ is None: raise AlreadyFinalized("Context was already finalized.") - if not isinstance(data, bytes): - raise TypeError("data must be bytes.") + utils._check_bytes("data", data) buffer_ += data diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/poly1305.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/poly1305.py new file mode 100644 index 0000000..d92f62a --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/poly1305.py @@ -0,0 +1,55 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, UnsupportedAlgorithm, _Reasons +) + + +class Poly1305(object): + def __init__(self, key): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.poly1305_supported(): + raise UnsupportedAlgorithm( + "poly1305 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_MAC + ) + self._ctx = backend.create_poly1305_ctx(key) + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + utils._check_byteslike("data", data) + self._ctx.update(data) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + mac = self._ctx.finalize() + self._ctx = None + return mac + + def verify(self, tag): + utils._check_bytes("tag", tag) + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(tag) + + @classmethod + def generate_tag(cls, key, data): + p = Poly1305(key) + p.update(data) + return p.finalize() + + @classmethod + def verify_tag(cls, key, data, tag): + p = Poly1305(key) + p.update(data) + p.verify(tag) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/__init__.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/__init__.py new file mode 100644 index 0000000..f6d4ce9 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/__init__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography.hazmat.primitives.serialization.base import ( + BestAvailableEncryption, Encoding, KeySerializationEncryption, + NoEncryption, ParameterFormat, PrivateFormat, PublicFormat, + load_der_parameters, load_der_private_key, load_der_public_key, + load_pem_parameters, load_pem_private_key, load_pem_public_key, +) +from cryptography.hazmat.primitives.serialization.ssh import ( + load_ssh_public_key +) + + +_PEM_DER = (Encoding.PEM, Encoding.DER) + +__all__ = [ + "load_der_parameters", "load_der_private_key", "load_der_public_key", + "load_pem_parameters", "load_pem_private_key", "load_pem_public_key", + "load_ssh_public_key", "Encoding", "PrivateFormat", "PublicFormat", + "ParameterFormat", "KeySerializationEncryption", "BestAvailableEncryption", + "NoEncryption", +] diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/base.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/base.py new file mode 100644 index 0000000..4218ea8 --- /dev/null +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/base.py @@ -0,0 +1,82 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +from enum import Enum + +import six + +from cryptography import utils + + +def load_pem_private_key(data, password, backend): + return backend.load_pem_private_key(data, password) + + +def load_pem_public_key(data, backend): + return backend.load_pem_public_key(data) + + +def load_pem_parameters(data, backend): + return backend.load_pem_parameters(data) + + +def load_der_private_key(data, password, backend): + return backend.load_der_private_key(data, password) + + +def load_der_public_key(data, backend): + return backend.load_der_public_key(data) + + +def load_der_parameters(data, backend): + return backend.load_der_parameters(data) + + +class Encoding(Enum): + PEM = "PEM" + DER = "DER" + OpenSSH = "OpenSSH" + Raw = "Raw" + X962 = "ANSI X9.62" + + +class PrivateFormat(Enum): + PKCS8 = "PKCS8" + TraditionalOpenSSL = "TraditionalOpenSSL" + Raw = "Raw" + + +class PublicFormat(Enum): + SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1" + PKCS1 = "Raw PKCS#1" + OpenSSH = "OpenSSH" + Raw = "Raw" + CompressedPoint = "X9.62 Compressed Point" + UncompressedPoint = "X9.62 Uncompressed Point" + + +class ParameterFormat(Enum): + PKCS3 = "PKCS3" + + +@six.add_metaclass(abc.ABCMeta) +class KeySerializationEncryption(object): + pass + + +@utils.register_interface(KeySerializationEncryption) +class BestAvailableEncryption(object): + def __init__(self, password): + if not isinstance(password, bytes) or len(password) == 0: + raise ValueError("Password must be 1 or more bytes.") + + self.password = password + + +@utils.register_interface(KeySerializationEncryption) +class NoEncryption(object): + pass diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/pkcs12.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/pkcs12.py new file mode 100644 index 0000000..98161d5 --- /dev/null +++ 
b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/pkcs12.py @@ -0,0 +1,9 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +def load_key_and_certificates(data, password, backend): + return backend.load_key_and_certificates_from_pkcs12(data, password) diff --git a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization.py b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/ssh.py similarity index 70% rename from server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization.py rename to server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/ssh.py index bd09e6e..a1d6c8c 100644 --- a/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization.py +++ b/server/www/packages/packages-linux/x64/cryptography/hazmat/primitives/serialization/ssh.py @@ -4,40 +4,14 @@ from __future__ import absolute_import, division, print_function -import abc import base64 import struct -from enum import Enum import six from cryptography import utils from cryptography.exceptions import UnsupportedAlgorithm -from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa - - -def load_pem_private_key(data, password, backend): - return backend.load_pem_private_key(data, password) - - -def load_pem_public_key(data, backend): - return backend.load_pem_public_key(data) - - -def load_pem_parameters(data, backend): - return backend.load_pem_parameters(data) - - -def load_der_private_key(data, password, backend): - return backend.load_der_private_key(data, password) - - -def load_der_public_key(data, backend): - return backend.load_der_public_key(data) - - -def load_der_parameters(data, backend): - return backend.load_der_parameters(data) +from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed25519, rsa def load_ssh_public_key(data, backend): @@ -57,6 +31,8 @@ def load_ssh_public_key(data, backend): b'ecdsa-sha2-nistp256', b'ecdsa-sha2-nistp384', b'ecdsa-sha2-nistp521', ]: loader = _load_ssh_ecdsa_public_key + elif key_type == b'ssh-ed25519': + loader = _load_ssh_ed25519_public_key else: raise UnsupportedAlgorithm('Key type is not supported.') @@ -125,8 +101,16 @@ def _load_ssh_ecdsa_public_key(expected_key_type, decoded_data, backend): "Compressed elliptic curve points are not supported" ) - numbers = ec.EllipticCurvePublicNumbers.from_encoded_point(curve, data) - return numbers.public_key(backend) + return ec.EllipticCurvePublicKey.from_encoded_point(curve, data) + + +def _load_ssh_ed25519_public_key(expected_key_type, decoded_data, backend): + data, rest = _ssh_read_next_string(decoded_data) + + if rest: + raise ValueError('Key body contains extra bytes.') + + return ed25519.Ed25519PublicKey.from_public_bytes(data) def _ssh_read_next_string(data): @@ -167,43 +151,3 @@ def _ssh_write_mpint(value): if six.indexbytes(data, 0) & 0x80: data = b"\x00" + data return _ssh_write_string(data) - - -class Encoding(Enum): - PEM = "PEM" - DER = "DER" - OpenSSH = "OpenSSH" - - -class PrivateFormat(Enum): - PKCS8 = "PKCS8" - TraditionalOpenSSL = "TraditionalOpenSSL" - - -class PublicFormat(Enum): - SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1" - PKCS1 = "Raw PKCS#1" - OpenSSH = "OpenSSH" - - -class ParameterFormat(Enum): - PKCS3 = 
"PKCS3" - - -@six.add_metaclass(abc.ABCMeta) -class KeySerializationEncryption(object): - pass - - -@utils.register_interface(KeySerializationEncryption) -class BestAvailableEncryption(object): - def __init__(self, password): - if not isinstance(password, bytes) or len(password) == 0: - raise ValueError("Password must be 1 or more bytes.") - - self.password = password - - -@utils.register_interface(KeySerializationEncryption) -class NoEncryption(object): - pass diff --git a/server/www/packages/packages-linux/x64/cryptography/utils.py b/server/www/packages/packages-linux/x64/cryptography/utils.py index 3d45a77..0b36f63 100644 --- a/server/www/packages/packages-linux/x64/cryptography/utils.py +++ b/server/www/packages/packages-linux/x64/cryptography/utils.py @@ -20,14 +20,22 @@ class CryptographyDeprecationWarning(UserWarning): # Several APIs were deprecated with no specific end-of-life date because of the # ubiquity of their use. They should not be removed until we agree on when that # cycle ends. -PersistentlyDeprecated = CryptographyDeprecationWarning -DeprecatedIn21 = CryptographyDeprecationWarning -DeprecatedIn23 = CryptographyDeprecationWarning +PersistentlyDeprecated2017 = CryptographyDeprecationWarning +PersistentlyDeprecated2018 = CryptographyDeprecationWarning +DeprecatedIn25 = CryptographyDeprecationWarning +DeprecatedIn27 = CryptographyDeprecationWarning def _check_bytes(name, value): if not isinstance(value, bytes): - raise TypeError("{0} must be bytes".format(name)) + raise TypeError("{} must be bytes".format(name)) + + +def _check_byteslike(name, value): + try: + memoryview(value) + except TypeError: + raise TypeError("{} must be bytes-like".format(name)) def read_only_property(name): @@ -90,7 +98,7 @@ def verify_interface(iface, klass): for method in iface.__abstractmethods__: if not hasattr(klass, method): raise InterfaceNotImplemented( - "{0} is missing a {1!r} method".format(klass, method) + "{} is missing a {!r} method".format(klass, method) ) if isinstance(getattr(iface, method), abc.abstractproperty): # Can't properly verify these yet. @@ -99,8 +107,8 @@ def verify_interface(iface, klass): actual = signature(getattr(klass, method)) if sig != actual: raise InterfaceNotImplemented( - "{0}.{1}'s signature differs from the expected. Expected: " - "{2!r}. Received: {3!r}".format( + "{}.{}'s signature differs from the expected. Expected: " + "{!r}. 
Received: {!r}".format( klass, method, sig, actual ) ) @@ -152,7 +160,7 @@ def deprecated(value, module_name, message, warning_class): def cached_property(func): - cached_name = "_cached_{0}".format(func) + cached_name = "_cached_{}".format(func) sentinel = object() def inner(instance): diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/__init__.py b/server/www/packages/packages-linux/x64/cryptography/x509/__init__.py index fd01945..b761e26 100644 --- a/server/www/packages/packages-linux/x64/cryptography/x509/__init__.py +++ b/server/www/packages/packages-linux/x64/cryptography/x509/__init__.py @@ -21,9 +21,9 @@ from cryptography.x509.extensions import ( DeltaCRLIndicator, DistributionPoint, DuplicateExtension, ExtendedKeyUsage, Extension, ExtensionNotFound, ExtensionType, Extensions, FreshestCRL, GeneralNames, InhibitAnyPolicy, InvalidityDate, IssuerAlternativeName, - KeyUsage, NameConstraints, NoticeReference, OCSPNoCheck, OCSPNonce, - PolicyConstraints, PolicyInformation, PrecertPoison, - PrecertificateSignedCertificateTimestamps, ReasonFlags, + IssuingDistributionPoint, KeyUsage, NameConstraints, NoticeReference, + OCSPNoCheck, OCSPNonce, PolicyConstraints, PolicyInformation, + PrecertPoison, PrecertificateSignedCertificateTimestamps, ReasonFlags, SubjectAlternativeName, SubjectKeyIdentifier, TLSFeature, TLSFeatureType, UnrecognizedExtension, UserNotice ) @@ -134,6 +134,7 @@ __all__ = [ "Extension", "ExtendedKeyUsage", "FreshestCRL", + "IssuingDistributionPoint", "TLSFeature", "TLSFeatureType", "OCSPNoCheck", diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/base.py b/server/www/packages/packages-linux/x64/cryptography/x509/base.py index a3b334a..3983c9b 100644 --- a/server/www/packages/packages-linux/x64/cryptography/x509/base.py +++ b/server/www/packages/packages-linux/x64/cryptography/x509/base.py @@ -12,12 +12,14 @@ from enum import Enum import six from cryptography import utils -from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa +from cryptography.hazmat.primitives.asymmetric import ( + dsa, ec, ed25519, ed448, rsa +) from cryptography.x509.extensions import Extension, ExtensionType from cryptography.x509.name import Name -_UNIX_EPOCH = datetime.datetime(1970, 1, 1) +_EARLIEST_UTC_TIME = datetime.datetime(1950, 1, 1) def _reject_duplicate_extension(extension, extensions): @@ -474,9 +476,12 @@ class CertificateBuilder(object): Sets the requestor's public key (as found in the signing request). 
""" if not isinstance(key, (dsa.DSAPublicKey, rsa.RSAPublicKey, - ec.EllipticCurvePublicKey)): + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey)): raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey,' - ' or EllipticCurvePublicKey.') + ' EllipticCurvePublicKey, Ed25519PublicKey or' + ' Ed448PublicKey.') if self._public_key is not None: raise ValueError('The public key may only be set once.') return CertificateBuilder( @@ -516,9 +521,9 @@ class CertificateBuilder(object): if self._not_valid_before is not None: raise ValueError('The not valid before may only be set once.') time = _convert_to_naive_utc_time(time) - if time <= _UNIX_EPOCH: - raise ValueError('The not valid before date must be after the unix' - ' epoch (1970 January 1).') + if time < _EARLIEST_UTC_TIME: + raise ValueError('The not valid before date must be on or after' + ' 1950 January 1).') if self._not_valid_after is not None and time > self._not_valid_after: raise ValueError( 'The not valid before date must be before the not valid after ' @@ -539,9 +544,9 @@ class CertificateBuilder(object): if self._not_valid_after is not None: raise ValueError('The not valid after may only be set once.') time = _convert_to_naive_utc_time(time) - if time <= _UNIX_EPOCH: - raise ValueError('The not valid after date must be after the unix' - ' epoch (1970 January 1).') + if time < _EARLIEST_UTC_TIME: + raise ValueError('The not valid after date must be on or after' + ' 1950 January 1.') if (self._not_valid_before is not None and time < self._not_valid_before): raise ValueError( @@ -620,9 +625,9 @@ class CertificateRevocationListBuilder(object): if self._last_update is not None: raise ValueError('Last update may only be set once.') last_update = _convert_to_naive_utc_time(last_update) - if last_update <= _UNIX_EPOCH: - raise ValueError('The last update date must be after the unix' - ' epoch (1970 January 1).') + if last_update < _EARLIEST_UTC_TIME: + raise ValueError('The last update date must be on or after' + ' 1950 January 1.') if self._next_update is not None and last_update > self._next_update: raise ValueError( 'The last update date must be before the next update date.' @@ -638,9 +643,9 @@ class CertificateRevocationListBuilder(object): if self._next_update is not None: raise ValueError('Last update may only be set once.') next_update = _convert_to_naive_utc_time(next_update) - if next_update <= _UNIX_EPOCH: - raise ValueError('The last update date must be after the unix' - ' epoch (1970 January 1).') + if next_update < _EARLIEST_UTC_TIME: + raise ValueError('The last update date must be on or after' + ' 1950 January 1.') if self._last_update is not None and next_update < self._last_update: raise ValueError( 'The next update date must be after the last update date.' 
@@ -720,9 +725,9 @@ class RevokedCertificateBuilder(object): if self._revocation_date is not None: raise ValueError('The revocation date may only be set once.') time = _convert_to_naive_utc_time(time) - if time <= _UNIX_EPOCH: - raise ValueError('The revocation date must be after the unix' - ' epoch (1970 January 1).') + if time < _EARLIEST_UTC_TIME: + raise ValueError('The revocation date must be on or after' + ' 1950 January 1.') return RevokedCertificateBuilder( self._serial_number, time, self._extensions ) diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/extensions.py b/server/www/packages/packages-linux/x64/cryptography/x509/extensions.py index 6301af5..f60075a 100644 --- a/server/www/packages/packages-linux/x64/cryptography/x509/extensions.py +++ b/server/www/packages/packages-linux/x64/cryptography/x509/extensions.py @@ -8,13 +8,15 @@ import abc import datetime import hashlib import ipaddress +import warnings from enum import Enum -from asn1crypto.keys import PublicKeyInfo - import six from cryptography import utils +from cryptography.hazmat._der import ( + BIT_STRING, DERReader, OBJECT_IDENTIFIER, SEQUENCE +) from cryptography.hazmat.primitives import constant_time, serialization from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey @@ -35,7 +37,10 @@ def _key_identifier_from_public_key(public_key): serialization.PublicFormat.PKCS1, ) elif isinstance(public_key, EllipticCurvePublicKey): - data = public_key.public_numbers().encode_point() + data = public_key.public_bytes( + serialization.Encoding.X962, + serialization.PublicFormat.UncompressedPoint + ) else: # This is a very slow way to do this. serialized = public_key.public_bytes( @@ -43,11 +48,41 @@ def _key_identifier_from_public_key(public_key): serialization.PublicFormat.SubjectPublicKeyInfo ) - data = six.binary_type(PublicKeyInfo.load(serialized)['public_key']) + reader = DERReader(serialized) + with reader.read_single_element(SEQUENCE) as public_key_info: + algorithm = public_key_info.read_element(SEQUENCE) + public_key = public_key_info.read_element(BIT_STRING) + + # Double-check the algorithm structure. + with algorithm: + algorithm.read_element(OBJECT_IDENTIFIER) + if not algorithm.is_empty(): + # Skip the optional parameters field. + algorithm.read_any_element() + + # BIT STRING contents begin with the number of padding bytes added. It + # must be zero for SubjectPublicKeyInfo structures. 
+ if public_key.read_byte() != 0: + raise ValueError('Invalid public key encoding') + + data = public_key.data return hashlib.sha1(data).digest() +def _make_sequence_methods(field_name): + def len_method(self): + return len(getattr(self, field_name)) + + def iter_method(self): + return iter(getattr(self, field_name)) + + def getitem_method(self, idx): + return getattr(self, field_name)[idx] + + return len_method, iter_method, getitem_method + + class DuplicateExtension(Exception): def __init__(self, msg, oid): super(DuplicateExtension, self).__init__(msg) @@ -78,7 +113,7 @@ class Extensions(object): if ext.oid == oid: return ext - raise ExtensionNotFound("No {0} extension was found".format(oid), oid) + raise ExtensionNotFound("No {} extension was found".format(oid), oid) def get_extension_for_class(self, extclass): if extclass is UnrecognizedExtension: @@ -93,21 +128,14 @@ class Extensions(object): return ext raise ExtensionNotFound( - "No {0} extension was found".format(extclass), extclass.oid + "No {} extension was found".format(extclass), extclass.oid ) - def __iter__(self): - return iter(self._extensions) - - def __len__(self): - return len(self._extensions) - - def __getitem__(self, idx): - return self._extensions[idx] + __len__, __iter__, __getitem__ = _make_sequence_methods("_extensions") def __repr__(self): return ( - "<Extensions({0})>".format(self._extensions) + "<Extensions({})>".format(self._extensions) ) @@ -134,7 +162,7 @@ class CRLNumber(object): return hash(self.crl_number) def __repr__(self): - return "<CRLNumber({0})>".format(self.crl_number) + return "<CRLNumber({})>".format(self.crl_number) crl_number = utils.read_only_property("_crl_number") @@ -185,8 +213,21 @@ class AuthorityKeyIdentifier(object): @classmethod def from_issuer_subject_key_identifier(cls, ski): + if isinstance(ski, SubjectKeyIdentifier): + digest = ski.digest + else: + digest = ski.value.digest + warnings.warn( + "Extension objects are deprecated as arguments to " + "from_issuer_subject_key_identifier and support will be " + "removed soon. Please migrate to passing a " + "SubjectKeyIdentifier directly.", + utils.DeprecatedIn27, + stacklevel=2, + ) + return cls( - key_identifier=ski.value.digest, + key_identifier=digest, authority_cert_issuer=None, authority_cert_serial_number=None )
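
The new _make_sequence_methods helper collapses the hand-written __len__/__iter__/__getitem__ triples that the extension classes below used to repeat. A standalone illustration of the pattern, with the helper copied from the hunk above and a hypothetical container class:

    def _make_sequence_methods(field_name):  # as added in extensions.py above
        def len_method(self):
            return len(getattr(self, field_name))

        def iter_method(self):
            return iter(getattr(self, field_name))

        def getitem_method(self, idx):
            return getattr(self, field_name)[idx]

        return len_method, iter_method, getitem_method


    class _Bag(object):
        def __init__(self, items):
            self._items = list(items)

        # one assignment replaces three boilerplate dunder methods
        __len__, __iter__, __getitem__ = _make_sequence_methods("_items")


    bag = _Bag(["a", "b"])
    assert len(bag) == 2 and bag[1] == "b" and list(bag) == ["a", "b"]
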
@@ -272,14 +313,10 @@ class AuthorityInformationAccess(object): self._descriptions = descriptions - def __iter__(self): - return iter(self._descriptions) - - def __len__(self): - return len(self._descriptions) + __len__, __iter__, __getitem__ = _make_sequence_methods("_descriptions") def __repr__(self): - return "<AuthorityInformationAccess({0})>".format(self._descriptions) + return "<AuthorityInformationAccess({})>".format(self._descriptions) def __eq__(self, other): if not isinstance(other, AuthorityInformationAccess): @@ -290,9 +327,6 @@ def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._descriptions[idx] - def __hash__(self): return hash(tuple(self._descriptions)) @@ -419,14 +453,12 @@ class CRLDistributionPoints(object): self._distribution_points = distribution_points - def __iter__(self): - return iter(self._distribution_points) - - def __len__(self): - return len(self._distribution_points) + __len__, __iter__, __getitem__ = _make_sequence_methods( + "_distribution_points" + ) def __repr__(self): - return "<CRLDistributionPoints({0})>".format(self._distribution_points) + return "<CRLDistributionPoints({})>".format(self._distribution_points) def __eq__(self, other): if not isinstance(other, CRLDistributionPoints): @@ -437,9 +469,6 @@ def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._distribution_points[idx] - def __hash__(self): return hash(tuple(self._distribution_points)) @@ -460,14 +489,12 @@ class FreshestCRL(object): self._distribution_points = distribution_points - def __iter__(self): - return iter(self._distribution_points) - - def __len__(self): - return len(self._distribution_points) + __len__, __iter__, __getitem__ = _make_sequence_methods( + "_distribution_points" + ) def __repr__(self): - return "<FreshestCRL({0})>".format(self._distribution_points) + return "<FreshestCRL({})>".format(self._distribution_points) def __eq__(self, other): if not isinstance(other, FreshestCRL): @@ -478,9 +505,6 @@ def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._distribution_points[idx] - def __hash__(self): return hash(tuple(self._distribution_points)) @@ -541,8 +565,8 @@ class DistributionPoint(object): def __repr__(self): return ( "<DistributionPoint(full_name={0.full_name}, relative_name={0.rela" - "tive_name}, reasons={0.reasons}, crl_issuer={0.crl_issuer})>".format(self) + "tive_name}, reasons={0.reasons}, crl_issuer={0.crl_issuer})>" + .format(self) ) def __eq__(self, other): @@ -666,14 +690,10 @@ class CertificatePolicies(object): self._policies = policies - def __iter__(self): - return iter(self._policies) - - def __len__(self): - return len(self._policies) + __len__, __iter__, __getitem__ = _make_sequence_methods("_policies") def __repr__(self): - return "<CertificatePolicies({0})>".format(self._policies) + return "<CertificatePolicies({})>".format(self._policies) def __eq__(self, other): if not isinstance(other, CertificatePolicies): @@ -684,9 +704,6 @@ def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._policies[idx] - def __hash__(self): return hash(tuple(self._policies))
"".format(self._usages) + return "".format(self._usages) def __eq__(self, other): if not isinstance(other, ExtendedKeyUsage): @@ -853,11 +866,41 @@ class ExtendedKeyUsage(object): class OCSPNoCheck(object): oid = ExtensionOID.OCSP_NO_CHECK + def __eq__(self, other): + if not isinstance(other, OCSPNoCheck): + return NotImplemented + + return True + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(OCSPNoCheck) + + def __repr__(self): + return "" + @utils.register_interface(ExtensionType) class PrecertPoison(object): oid = ExtensionOID.PRECERT_POISON + def __eq__(self, other): + if not isinstance(other, PrecertPoison): + return NotImplemented + + return True + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(PrecertPoison) + + def __repr__(self): + return "" + @utils.register_interface(ExtensionType) class TLSFeature(object): @@ -876,11 +919,7 @@ class TLSFeature(object): self._features = features - def __iter__(self): - return iter(self._features) - - def __len__(self): - return len(self._features) + __len__, __iter__, __getitem__ = _make_sequence_methods("_features") def __repr__(self): return "".format(self) @@ -891,9 +930,6 @@ class TLSFeature(object): return self._features == other._features - def __getitem__(self, idx): - return self._features[idx] - def __ne__(self, other): return not self == other @@ -1172,12 +1208,7 @@ class GeneralNames(object): ) self._general_names = general_names - - def __iter__(self): - return iter(self._general_names) - - def __len__(self): - return len(self._general_names) + __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names") def get_values_for_type(self, type): # Return the value of each GeneralName, except for OtherName instances @@ -1189,7 +1220,7 @@ class GeneralNames(object): return list(objs) def __repr__(self): - return "".format(self._general_names) + return "".format(self._general_names) def __eq__(self, other): if not isinstance(other, GeneralNames): @@ -1200,9 +1231,6 @@ class GeneralNames(object): def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._general_names[idx] - def __hash__(self): return hash(tuple(self._general_names)) @@ -1214,17 +1242,13 @@ class SubjectAlternativeName(object): def __init__(self, general_names): self._general_names = GeneralNames(general_names) - def __iter__(self): - return iter(self._general_names) - - def __len__(self): - return len(self._general_names) + __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names") def get_values_for_type(self, type): return self._general_names.get_values_for_type(type) def __repr__(self): - return "".format(self._general_names) + return "".format(self._general_names) def __eq__(self, other): if not isinstance(other, SubjectAlternativeName): @@ -1232,9 +1256,6 @@ class SubjectAlternativeName(object): return self._general_names == other._general_names - def __getitem__(self, idx): - return self._general_names[idx] - def __ne__(self, other): return not self == other @@ -1249,17 +1270,13 @@ class IssuerAlternativeName(object): def __init__(self, general_names): self._general_names = GeneralNames(general_names) - def __iter__(self): - return iter(self._general_names) - - def __len__(self): - return len(self._general_names) + __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names") def get_values_for_type(self, type): return self._general_names.get_values_for_type(type) def __repr__(self): - return 
"".format(self._general_names) + return "".format(self._general_names) def __eq__(self, other): if not isinstance(other, IssuerAlternativeName): @@ -1270,9 +1287,6 @@ class IssuerAlternativeName(object): def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._general_names[idx] - def __hash__(self): return hash(self._general_names) @@ -1284,17 +1298,13 @@ class CertificateIssuer(object): def __init__(self, general_names): self._general_names = GeneralNames(general_names) - def __iter__(self): - return iter(self._general_names) - - def __len__(self): - return len(self._general_names) + __len__, __iter__, __getitem__ = _make_sequence_methods("_general_names") def get_values_for_type(self, type): return self._general_names.get_values_for_type(type) def __repr__(self): - return "".format(self._general_names) + return "".format(self._general_names) def __eq__(self, other): if not isinstance(other, CertificateIssuer): @@ -1305,9 +1315,6 @@ class CertificateIssuer(object): def __ne__(self, other): return not self == other - def __getitem__(self, idx): - return self._general_names[idx] - def __hash__(self): return hash(self._general_names) @@ -1323,7 +1330,7 @@ class CRLReason(object): self._reason = reason def __repr__(self): - return "".format(self._reason) + return "".format(self._reason) def __eq__(self, other): if not isinstance(other, CRLReason): @@ -1351,7 +1358,7 @@ class InvalidityDate(object): self._invalidity_date = invalidity_date def __repr__(self): - return "".format( + return "".format( self._invalidity_date ) @@ -1386,18 +1393,13 @@ class PrecertificateSignedCertificateTimestamps(object): ) self._signed_certificate_timestamps = signed_certificate_timestamps - def __iter__(self): - return iter(self._signed_certificate_timestamps) - - def __len__(self): - return len(self._signed_certificate_timestamps) - - def __getitem__(self, idx): - return self._signed_certificate_timestamps[idx] + __len__, __iter__, __getitem__ = _make_sequence_methods( + "_signed_certificate_timestamps" + ) def __repr__(self): return ( - "".format( + "".format( list(self) ) ) @@ -1446,6 +1448,136 @@ class OCSPNonce(object): nonce = utils.read_only_property("_nonce") +@utils.register_interface(ExtensionType) +class IssuingDistributionPoint(object): + oid = ExtensionOID.ISSUING_DISTRIBUTION_POINT + + def __init__(self, full_name, relative_name, only_contains_user_certs, + only_contains_ca_certs, only_some_reasons, indirect_crl, + only_contains_attribute_certs): + if ( + only_some_reasons and ( + not isinstance(only_some_reasons, frozenset) or not all( + isinstance(x, ReasonFlags) for x in only_some_reasons + ) + ) + ): + raise TypeError( + "only_some_reasons must be None or frozenset of ReasonFlags" + ) + + if only_some_reasons and ( + ReasonFlags.unspecified in only_some_reasons or + ReasonFlags.remove_from_crl in only_some_reasons + ): + raise ValueError( + "unspecified and remove_from_crl are not valid reasons in an " + "IssuingDistributionPoint" + ) + + if not ( + isinstance(only_contains_user_certs, bool) and + isinstance(only_contains_ca_certs, bool) and + isinstance(indirect_crl, bool) and + isinstance(only_contains_attribute_certs, bool) + ): + raise TypeError( + "only_contains_user_certs, only_contains_ca_certs, " + "indirect_crl and only_contains_attribute_certs " + "must all be boolean." 
+            )
+
+        crl_constraints = [
+            only_contains_user_certs, only_contains_ca_certs,
+            indirect_crl, only_contains_attribute_certs
+        ]
diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/general_name.py b/server/www/packages/packages-linux/x64/cryptography/x509/general_name.py
index 26f389a..1233841 100644
--- a/server/www/packages/packages-linux/x64/cryptography/x509/general_name.py
+++ b/server/www/packages/packages-linux/x64/cryptography/x509/general_name.py
@@ -9,8 +9,6 @@ import ipaddress
 import warnings
 from email.utils import parseaddr
 
-import idna
-
 import six
 from six.moves import urllib_parse
 
@@ -32,6 +30,20 @@ _GENERAL_NAMES = {
 }
 
 
+def _lazy_import_idna():
+    # Import idna lazily because it allocates a decent amount of memory, and
+    # we're only using it in deprecated paths.
+    try:
+        import idna
+        return idna
+    except ImportError:
+        raise ImportError(
+            "idna is not installed, but a deprecated feature that requires it"
+            " was used. See: https://cryptography.io/en/latest/faq/#importe"
+            "rror-idna-is-not-installed"
+        )
+
+
 class UnsupportedGeneralNameType(Exception):
     def __init__(self, msg, type):
         super(UnsupportedGeneralNameType, self).__init__(msg)
@@ -60,7 +72,7 @@ class RFC822Name(object):
                 "This means unicode characters should be encoded via "
                 "idna. Support for passing unicode strings (aka U-label) "
                 "will be removed in a future version.",
-                utils.DeprecatedIn21,
+                utils.PersistentlyDeprecated2017,
                 stacklevel=2,
             )
         else:
@@ -83,6 +95,7 @@ class RFC822Name(object):
         return instance
 
     def _idna_encode(self, value):
+        idna = _lazy_import_idna()
         _, address = parseaddr(value)
         parts = address.split(u"@")
         return parts[0] + "@" + idna.encode(parts[1]).decode("ascii")
@@ -104,6 +117,7 @@
 
 def _idna_encode(value):
+    idna = _lazy_import_idna()
     # Retain prefixes '*.' for common/alt names and '.' for name constraints
     for prefix in ['*.', '.']:
         if value.startswith(prefix):
@@ -125,7 +139,7 @@ class DNSName(object):
                 "This means unicode characters should be encoded via "
                 "idna. Support for passing unicode strings (aka U-label) "
                 "will be removed in a future version.",
-                utils.DeprecatedIn21,
+                utils.PersistentlyDeprecated2017,
                 stacklevel=2,
             )
         else:
@@ -170,7 +184,7 @@ class UniformResourceIdentifier(object):
                 "This means unicode characters should be encoded via "
                 "idna. Support for passing unicode strings (aka U-label) "
                 " will be removed in a future version.",
-                utils.DeprecatedIn21,
+                utils.PersistentlyDeprecated2017,
                 stacklevel=2,
             )
         else:
@@ -187,11 +201,12 @@
         return instance
 
     def _idna_encode(self, value):
+        idna = _lazy_import_idna()
         parsed = urllib_parse.urlparse(value)
         if parsed.port:
             netloc = (
                 idna.encode(parsed.hostname) +
-                ":{0}".format(parsed.port).encode("ascii")
+                ":{}".format(parsed.port).encode("ascii")
             ).decode("ascii")
         else:
             netloc = idna.encode(parsed.hostname).decode("ascii")
@@ -235,7 +250,7 @@ class DirectoryName(object):
     value = utils.read_only_property("_value")
 
     def __repr__(self):
-        return "<DirectoryName(value={0})>".format(self.value)
+        return "<DirectoryName(value={})>".format(self.value)
 
     def __eq__(self, other):
         if not isinstance(other, DirectoryName):
@@ -261,7 +276,7 @@ class RegisteredID(object):
     value = utils.read_only_property("_value")
 
     def __repr__(self):
-        return "<RegisteredID(value={0})>".format(self.value)
+        return "<RegisteredID(value={})>".format(self.value)
 
     def __eq__(self, other):
         if not isinstance(other, RegisteredID):
@@ -299,7 +314,7 @@ class IPAddress(object):
     value = utils.read_only_property("_value")
 
     def __repr__(self):
-        return "<IPAddress(value={0})>".format(self.value)
+        return "<IPAddress(value={})>".format(self.value)
 
     def __eq__(self, other):
         if not isinstance(other, IPAddress):
@@ -329,7 +344,7 @@ class OtherName(object):
     value = utils.read_only_property("_value")
 
     def __repr__(self):
-        return "<OtherName(type_id={0}, value={1})>".format(
+        return "<OtherName(type_id={}, value={})>".format(
             self.type_id, self.value)
 
     def __eq__(self, other):
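# [editor's note] With _lazy_import_idna() above, the idna package is only imported
# when one of the deprecated U-label code paths actually runs. The supported pattern
# is to IDNA-encode up front ("idna" is the same third-party package the module now
# imports lazily):
import idna
from cryptography.x509 import DNSName

dns = DNSName(idna.encode(u"bücher.example").decode("ascii"))
# dns.value == "xn--bcher-kva.example"; no deprecation warning, no eager idna import.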
diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/name.py b/server/www/packages/packages-linux/x64/cryptography/x509/name.py
index 5548eda..ca2a175 100644
--- a/server/www/packages/packages-linux/x64/cryptography/x509/name.py
+++ b/server/www/packages/packages-linux/x64/cryptography/x509/name.py
@@ -36,6 +36,41 @@ _NAMEOID_DEFAULT_TYPE = {
     NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,
 }
 
+#: Short attribute names from RFC 4514:
+#: https://tools.ietf.org/html/rfc4514#page-7
+_NAMEOID_TO_NAME = {
+    NameOID.COMMON_NAME: 'CN',
+    NameOID.LOCALITY_NAME: 'L',
+    NameOID.STATE_OR_PROVINCE_NAME: 'ST',
+    NameOID.ORGANIZATION_NAME: 'O',
+    NameOID.ORGANIZATIONAL_UNIT_NAME: 'OU',
+    NameOID.COUNTRY_NAME: 'C',
+    NameOID.STREET_ADDRESS: 'STREET',
+    NameOID.DOMAIN_COMPONENT: 'DC',
+    NameOID.USER_ID: 'UID',
+}
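# [editor's note] OIDs that have no entry in _NAMEOID_TO_NAME fall back to their
# dotted string in the rfc4514_string() methods added below, e.g. serialNumber:
from cryptography.x509 import NameAttribute
from cryptography.x509.oid import NameOID

print(NameAttribute(NameOID.SERIAL_NUMBER, u"42").rfc4514_string())
# -> 2.5.4.5=42  (no RFC 4514 short name is registered for serialNumber)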
+
+
+def _escape_dn_value(val):
+    """Escape special characters in RFC4514 Distinguished Name value."""
+
+    # See https://tools.ietf.org/html/rfc4514#section-2.4
+    val = val.replace('\\', '\\\\')
+    val = val.replace('"', '\\"')
+    val = val.replace('+', '\\+')
+    val = val.replace(',', '\\,')
+    val = val.replace(';', '\\;')
+    val = val.replace('<', '\\<')
+    val = val.replace('>', '\\>')
+    val = val.replace('\0', '\\00')
+
+    if val[0] in ('#', ' '):
+        val = '\\' + val
+    if val[-1] == ' ':
+        val = val[:-1] + '\\ '
+
+    return val
+
 
 class NameAttribute(object):
     def __init__(self, oid, value, _type=_SENTINEL):
@@ -80,6 +115,16 @@ class NameAttribute(object):
     oid = utils.read_only_property("_oid")
     value = utils.read_only_property("_value")
 
+    def rfc4514_string(self):
+        """
+        Format as RFC4514 Distinguished Name string.
+
+        Use short attribute name if available, otherwise fall back to OID
+        dotted string.
+        """
+        key = _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)
+        return '%s=%s' % (key, _escape_dn_value(self.value))
+
     def __eq__(self, other):
         if not isinstance(other, NameAttribute):
             return NotImplemented
@@ -117,6 +162,15 @@ class RelativeDistinguishedName(object):
     def get_attributes_for_oid(self, oid):
         return [i for i in self if i.oid == oid]
 
+    def rfc4514_string(self):
+        """
+        Format as RFC4514 Distinguished Name string.
+
+        Within each RDN, attributes are joined by '+', although that is rarely
+        used in certificates.
+        """
+        return '+'.join(attr.rfc4514_string() for attr in self._attributes)
+
     def __eq__(self, other):
         if not isinstance(other, RelativeDistinguishedName):
             return NotImplemented
@@ -136,7 +190,7 @@
         return len(self._attributes)
 
     def __repr__(self):
-        return "<RelativeDistinguishedName({0})>".format(list(self))
+        return "<RelativeDistinguishedName({})>".format(self.rfc4514_string())
 
 
 class Name(object):
@@ -154,6 +208,18 @@ class Name(object):
             " or a list RelativeDistinguishedName"
             )
 
+    def rfc4514_string(self):
+        """
+        Format as RFC4514 Distinguished Name string.
+        For example 'CN=foobar.com,O=Foo Corp,C=US'
+
+        An X.509 name is a two-level structure: a list of sets of attributes.
+        Each list element is separated by ',' and within each list element, set
+        elements are separated by '+'. The latter is almost never used in
+        real world certificates.
+        """
+        return ','.join(attr.rfc4514_string() for attr in self._attributes)
+
     def get_attributes_for_oid(self, oid):
         return [i for i in self if i.oid == oid]
 
@@ -187,4 +253,7 @@
         return sum(len(rdn) for rdn in self._attributes)
 
     def __repr__(self):
-        return "<Name({0})>".format(list(self))
+        if six.PY2:
+            return "<Name({})>".format(self.rfc4514_string().encode('utf8'))
+        else:
+            return "<Name({})>".format(self.rfc4514_string())
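# [editor's note] End-to-end behaviour of the rfc4514_string() additions above,
# including _escape_dn_value escaping (',' becomes '\,'). Note that this version
# emits RDNs in the order they are stored in the Name:
from cryptography.x509 import Name, NameAttribute
from cryptography.x509.oid import NameOID

name = Name([
    NameAttribute(NameOID.COMMON_NAME, u"foobar.com"),
    NameAttribute(NameOID.ORGANIZATION_NAME, u"Foo, Inc."),
    NameAttribute(NameOID.COUNTRY_NAME, u"US"),
])
print(name.rfc4514_string())  # -> CN=foobar.com,O=Foo\, Inc.,C=US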
+ """ + return ','.join(attr.rfc4514_string() for attr in self._attributes) + def get_attributes_for_oid(self, oid): return [i for i in self if i.oid == oid] @@ -187,4 +253,7 @@ class Name(object): return sum(len(rdn) for rdn in self._attributes) def __repr__(self): - return "".format(list(self)) + if six.PY2: + return "".format(self.rfc4514_string().encode('utf8')) + else: + return "".format(self.rfc4514_string()) diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/ocsp.py b/server/www/packages/packages-linux/x64/cryptography/x509/ocsp.py index 2b0b1dc..b15063d 100644 --- a/server/www/packages/packages-linux/x64/cryptography/x509/ocsp.py +++ b/server/www/packages/packages-linux/x64/cryptography/x509/ocsp.py @@ -12,8 +12,9 @@ import six from cryptography import x509 from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import ed25519, ed448 from cryptography.x509.base import ( - _UNIX_EPOCH, _convert_to_naive_utc_time, _reject_duplicate_extension + _EARLIEST_UTC_TIME, _convert_to_naive_utc_time, _reject_duplicate_extension ) @@ -154,9 +155,9 @@ class _SingleResponse(object): raise TypeError("revocation_time must be a datetime object") revocation_time = _convert_to_naive_utc_time(revocation_time) - if revocation_time <= _UNIX_EPOCH: - raise ValueError('The revocation_time must be after the unix' - ' epoch (1970 January 1).') + if revocation_time < _EARLIEST_UTC_TIME: + raise ValueError('The revocation_time must be on or after' + ' 1950 January 1.') if ( revocation_reason is not None and @@ -241,7 +242,13 @@ class OCSPResponseBuilder(object): if self._responder_id is None: raise ValueError("You must add a responder_id before signing") - if not isinstance(algorithm, hashes.HashAlgorithm): + if isinstance(private_key, + (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)): + if algorithm is not None: + raise ValueError( + "algorithm must be None when signing via ed25519 or ed448" + ) + elif not isinstance(algorithm, hashes.HashAlgorithm): raise TypeError("Algorithm must be a registered hash algorithm.") return backend.create_ocsp_response( @@ -314,6 +321,12 @@ class OCSPResponse(object): The ObjectIdentifier of the signature algorithm """ + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + """ + @abc.abstractproperty def signature(self): """ diff --git a/server/www/packages/packages-linux/x64/cryptography/x509/oid.py b/server/www/packages/packages-linux/x64/cryptography/x509/oid.py index ec19007..c1e5dc5 100644 --- a/server/www/packages/packages-linux/x64/cryptography/x509/oid.py +++ b/server/www/packages/packages-linux/x64/cryptography/x509/oid.py @@ -96,6 +96,8 @@ class SignatureAlgorithmOID(object): DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3") DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1") DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2") + ED25519 = ObjectIdentifier("1.3.101.112") + ED448 = ObjectIdentifier("1.3.101.113") _SIG_OIDS_TO_HASH = { @@ -113,7 +115,9 @@ _SIG_OIDS_TO_HASH = { SignatureAlgorithmOID.ECDSA_WITH_SHA512: hashes.SHA512(), SignatureAlgorithmOID.DSA_WITH_SHA1: hashes.SHA1(), SignatureAlgorithmOID.DSA_WITH_SHA224: hashes.SHA224(), - SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256() + SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.ED25519: None, + SignatureAlgorithmOID.ED448: None, } @@ -181,6 +185,8 @@ _OID_NAMES = { SignatureAlgorithmOID.DSA_WITH_SHA1: 
"dsa-with-sha1", SignatureAlgorithmOID.DSA_WITH_SHA224: "dsa-with-sha224", SignatureAlgorithmOID.DSA_WITH_SHA256: "dsa-with-sha256", + SignatureAlgorithmOID.ED25519: "ed25519", + SignatureAlgorithmOID.ED448: "ed448", ExtendedKeyUsageOID.SERVER_AUTH: "serverAuth", ExtendedKeyUsageOID.CLIENT_AUTH: "clientAuth", ExtendedKeyUsageOID.CODE_SIGNING: "codeSigning", @@ -196,6 +202,7 @@ _OID_NAMES = { ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: ( "signedCertificateTimestampList" ), + ExtensionOID.PRECERT_POISON: "ctPoison", CRLEntryExtensionOID.CRL_REASON: "cRLReason", CRLEntryExtensionOID.INVALIDITY_DATE: "invalidityDate", CRLEntryExtensionOID.CERTIFICATE_ISSUER: "certificateIssuer", diff --git a/server/www/packages/packages-linux/x64/ldap3/__init__.py b/server/www/packages/packages-linux/x64/ldap3/__init__.py index f6f1666..c352994 100644 --- a/server/www/packages/packages-linux/x64/ldap3/__init__.py +++ b/server/www/packages/packages-linux/x64/ldap3/__init__.py @@ -89,6 +89,7 @@ SCHEMA = 'SCHEMA' ALL = 'ALL' OFFLINE_EDIR_8_8_8 = 'EDIR_8_8_8' +OFFLINE_EDIR_9_1_4 = 'EDIR_9_1_4' OFFLINE_AD_2012_R2 = 'AD_2012_R2' OFFLINE_SLAPD_2_4 = 'SLAPD_2_4' OFFLINE_DS389_1_3_3 = 'DS389_1_3_3' diff --git a/server/www/packages/packages-linux/x64/ldap3/abstract/__init__.py b/server/www/packages/packages-linux/x64/ldap3/abstract/__init__.py index c40f838..22ea8c9 100644 --- a/server/www/packages/packages-linux/x64/ldap3/abstract/__init__.py +++ b/server/www/packages/packages-linux/x64/ldap3/abstract/__init__.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/abstract/attrDef.py b/server/www/packages/packages-linux/x64/ldap3/abstract/attrDef.py index d954e25..21d8f3e 100644 --- a/server/www/packages/packages-linux/x64/ldap3/abstract/attrDef.py +++ b/server/www/packages/packages-linux/x64/ldap3/abstract/attrDef.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. diff --git a/server/www/packages/packages-linux/x64/ldap3/abstract/attribute.py b/server/www/packages/packages-linux/x64/ldap3/abstract/attribute.py index 24f682c..434ee12 100644 --- a/server/www/packages/packages-linux/x64/ldap3/abstract/attribute.py +++ b/server/www/packages/packages-linux/x64/ldap3/abstract/attribute.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. 
# @@ -174,7 +174,7 @@ class WritableAttribute(Attribute): log(PROTOCOL, 'adding %r to <%s> attribute in <%s> entry', values, self.key, self.entry.entry_dn) # new value for attribute to commit with a MODIFY_ADD if self.entry._state._initial_status == STATUS_VIRTUAL: - error_message = 'cannot add an attribute value in a new entry' + error_message = 'cannot perform a modify operation in a new entry' if log_enabled(ERROR): log(ERROR, '%s for <%s>', error_message, self) raise LDAPCursorError(error_message) diff --git a/server/www/packages/packages-linux/x64/ldap3/abstract/cursor.py b/server/www/packages/packages-linux/x64/ldap3/abstract/cursor.py index 275a384..ad774a1 100644 --- a/server/www/packages/packages-linux/x64/ldap3/abstract/cursor.py +++ b/server/www/packages/packages-linux/x64/ldap3/abstract/cursor.py @@ -1,904 +1,906 @@ -""" -""" - -# Created on 2014.01.06 -# -# Author: Giovanni Cannata -# -# Copyright 2014 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . -from collections import namedtuple -from copy import deepcopy -from datetime import datetime -from os import linesep -from time import sleep - -from . import STATUS_VIRTUAL, STATUS_READ, STATUS_WRITABLE -from .. 
import SUBTREE, LEVEL, DEREF_ALWAYS, DEREF_NEVER, BASE, SEQUENCE_TYPES, STRING_TYPES, get_config_parameter -from ..abstract import STATUS_PENDING_CHANGES -from .attribute import Attribute, OperationalAttribute, WritableAttribute -from .attrDef import AttrDef -from .objectDef import ObjectDef -from .entry import Entry, WritableEntry -from ..core.exceptions import LDAPCursorError, LDAPObjectDereferenceError -from ..core.results import RESULT_SUCCESS -from ..utils.ciDict import CaseInsensitiveWithAliasDict -from ..utils.dn import safe_dn, safe_rdn -from ..utils.conv import to_raw -from ..utils.config import get_config_parameter -from ..utils.log import log, log_enabled, ERROR, BASIC, PROTOCOL, EXTENDED -from ..protocol.oid import ATTRIBUTE_DIRECTORY_OPERATION, ATTRIBUTE_DISTRIBUTED_OPERATION, ATTRIBUTE_DSA_OPERATION, CLASS_AUXILIARY - -Operation = namedtuple('Operation', ('request', 'result', 'response')) - - -def _ret_search_value(value): - return value[0] + '=' + value[1:] if value[0] in '<>~' and value[1] != '=' else value - - -def _create_query_dict(query_text): - """ - Create a dictionary with query key:value definitions - query_text is a comma delimited key:value sequence - """ - query_dict = dict() - if query_text: - for arg_value_str in query_text.split(','): - if ':' in arg_value_str: - arg_value_list = arg_value_str.split(':') - query_dict[arg_value_list[0].strip()] = arg_value_list[1].strip() - - return query_dict - - -class Cursor(object): - # entry_class and attribute_class define the type of entry and attribute used by the cursor - # entry_initial_status defines the initial status of a entry - # entry_class = Entry, must be defined in subclasses - # attribute_class = Attribute, must be defined in subclasses - # entry_initial_status = STATUS, must be defined in subclasses - - def __init__(self, connection, object_def, get_operational_attributes=False, attributes=None, controls=None, auxiliary_class=None): - conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')] - self.connection = connection - self.get_operational_attributes = get_operational_attributes - if connection._deferred_bind or connection._deferred_open: # probably a lazy connection, tries to bind - connection._fire_deferred() - - if isinstance(object_def, (STRING_TYPES, SEQUENCE_TYPES)): - object_def = ObjectDef(object_def, connection.server.schema, auxiliary_class=auxiliary_class) - self.definition = object_def - if attributes: # checks if requested attributes are defined in ObjectDef - not_defined_attributes = [] - if isinstance(attributes, STRING_TYPES): - attributes = [attributes] - - for attribute in attributes: - if attribute not in self.definition._attributes and attribute.lower() not in conf_attributes_excluded_from_object_def: - not_defined_attributes.append(attribute) - - if not_defined_attributes: - error_message = 'Attributes \'%s\' non in definition' % ', '.join(not_defined_attributes) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - self.attributes = set(attributes) if attributes else set([attr.name for attr in self.definition]) - self.controls = controls - self.execution_time = None - self.entries = [] - self.schema = self.connection.server.schema - self._do_not_reset = False # used for refreshing entry in entry_refresh() without removing all entries from the Cursor - self._operation_history = list() # a list storing all the requests, results and responses for the last 
cursor operation - - def __repr__(self): - r = 'CURSOR : ' + self.__class__.__name__ + linesep - r += 'CONN : ' + str(self.connection) + linesep - r += 'DEFS : ' + ', '.join(self.definition._object_class) - if self.definition._auxiliary_class: - r += ' [AUX: ' + ', '.join(self.definition._auxiliary_class) + ']' - r += linesep - # for attr_def in sorted(self.definition): - # r += (attr_def.key if attr_def.key == attr_def.name else (attr_def.key + ' <' + attr_def.name + '>')) + ', ' - # if r[-2] == ',': - # r = r[:-2] - # r += ']' + linesep - if hasattr(self, 'attributes'): - r += 'ATTRS : ' + repr(sorted(self.attributes)) + (' [OPERATIONAL]' if self.get_operational_attributes else '') + linesep - if isinstance(self, Reader): - if hasattr(self, 'base'): - r += 'BASE : ' + repr(self.base) + (' [SUB]' if self.sub_tree else ' [LEVEL]') + linesep - if hasattr(self, '_query') and self._query: - r += 'QUERY : ' + repr(self._query) + ('' if '(' in self._query else (' [AND]' if self.components_in_and else ' [OR]')) + linesep - if hasattr(self, 'validated_query') and self.validated_query: - r += 'PARSED : ' + repr(self.validated_query) + ('' if '(' in self._query else (' [AND]' if self.components_in_and else ' [OR]')) + linesep - if hasattr(self, 'query_filter') and self.query_filter: - r += 'FILTER : ' + repr(self.query_filter) + linesep - - if hasattr(self, 'execution_time') and self.execution_time: - r += 'ENTRIES: ' + str(len(self.entries)) - r += ' [executed at: ' + str(self.execution_time.isoformat()) + ']' + linesep - - if self.failed: - r += 'LAST OPERATION FAILED [' + str(len(self.errors)) + ' failure' + ('s' if len(self.errors) > 1 else '') + ' at operation' + ('s ' if len(self.errors) > 1 else ' ') + ', '.join([str(i) for i, error in enumerate(self.operations) if error.result['result'] != RESULT_SUCCESS]) + ']' - - return r - - def __str__(self): - return self.__repr__() - - def __iter__(self): - return self.entries.__iter__() - - def __getitem__(self, item): - """Return indexed item, if index is not found then try to sequentially search in DN of entries. - If only one entry is found return it else raise a KeyError exception. The exception message - includes the number of entries that matches, if less than 10 entries match then show the DNs - in the exception message. - """ - try: - return self.entries[item] - except TypeError: - pass - - if isinstance(item, STRING_TYPES): - found = self.match_dn(item) - - if len(found) == 1: - return found[0] - elif len(found) > 1: - error_message = 'Multiple entries found: %d entries match the text in dn' % len(found) + ('' if len(found) > 10 else (' [' + '; '.join([e.entry_dn for e in found]) + ']')) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise KeyError(error_message) - - error_message = 'no entry found' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise KeyError(error_message) - - def __len__(self): - return len(self.entries) - - if str is not bytes: # Python 3 - def __bool__(self): # needed to make the cursor appears as existing in "if cursor:" even if there are no entries - return True - else: # Python 2 - def __nonzero__(self): - return True - - def _get_attributes(self, response, attr_defs, entry): - """Assign the result of the LDAP query to the Entry object dictionary. - - If the optional 'post_query' callable is present in the AttrDef it is called with each value of the attribute and the callable result is stored in the attribute. 
- - Returns the default value for missing attributes. - If the 'dereference_dn' in AttrDef is a ObjectDef then the attribute values are treated as distinguished name and the relevant entry is retrieved and stored in the attribute value. - - """ - conf_operational_attribute_prefix = get_config_parameter('ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX') - conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')] - attributes = CaseInsensitiveWithAliasDict() - used_attribute_names = set() - for attr in attr_defs: - attr_def = attr_defs[attr] - attribute_name = None - for attr_name in response['attributes']: - if attr_def.name.lower() == attr_name.lower(): - attribute_name = attr_name - break - - if attribute_name or attr_def.default is not NotImplemented: # attribute value found in result or default value present - NotImplemented allows use of None as default - attribute = self.attribute_class(attr_def, entry, self) - attribute.response = response - attribute.raw_values = response['raw_attributes'][attribute_name] if attribute_name else None - if attr_def.post_query and attr_def.name in response['attributes'] and response['raw_attributes'] != list(): - attribute.values = attr_def.post_query(attr_def.key, response['attributes'][attribute_name]) - else: - if attr_def.default is NotImplemented or (attribute_name and response['raw_attributes'][attribute_name] != list()): - attribute.values = response['attributes'][attribute_name] - else: - attribute.values = attr_def.default if isinstance(attr_def.default, SEQUENCE_TYPES) else [attr_def.default] - if not isinstance(attribute.values, list): # force attribute values to list (if attribute is single-valued) - attribute.values = [attribute.values] - if attr_def.dereference_dn: # try to get object referenced in value - if attribute.values: - temp_reader = Reader(self.connection, attr_def.dereference_dn, base='', get_operational_attributes=self.get_operational_attributes, controls=self.controls) - temp_values = [] - for element in attribute.values: - if entry.entry_dn != element: - temp_values.append(temp_reader.search_object(element)) - else: - error_message = 'object %s is referencing itself in the \'%s\' attribute' % (entry.entry_dn, attribute.definition.name) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPObjectDereferenceError(error_message) - del temp_reader # remove the temporary Reader - attribute.values = temp_values - attributes[attribute.key] = attribute - if attribute.other_names: - attributes.set_alias(attribute.key, attribute.other_names) - if attr_def.other_names: - attributes.set_alias(attribute.key, attr_def.other_names) - used_attribute_names.add(attribute_name) - - if self.attributes: - used_attribute_names.update(self.attributes) - - for attribute_name in response['attributes']: - if attribute_name not in used_attribute_names: - operational_attribute = False - # check if the type is an operational attribute - if attribute_name in self.schema.attribute_types: - if self.schema.attribute_types[attribute_name].no_user_modification or self.schema.attribute_types[attribute_name].usage in [ATTRIBUTE_DIRECTORY_OPERATION, ATTRIBUTE_DISTRIBUTED_OPERATION, ATTRIBUTE_DSA_OPERATION]: - operational_attribute = True - else: - operational_attribute = True - if not operational_attribute and attribute_name not in attr_defs and attribute_name.lower() not in conf_attributes_excluded_from_object_def: - error_message = 'attribute \'%s\' not in object class 
\'%s\' for entry %s' % (attribute_name, ', '.join(entry.entry_definition._object_class), entry.entry_dn) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - attribute = OperationalAttribute(AttrDef(conf_operational_attribute_prefix + attribute_name), entry, self) - attribute.raw_values = response['raw_attributes'][attribute_name] - attribute.values = response['attributes'][attribute_name] if isinstance(response['attributes'][attribute_name], SEQUENCE_TYPES) else [response['attributes'][attribute_name]] - if (conf_operational_attribute_prefix + attribute_name) not in attributes: - attributes[conf_operational_attribute_prefix + attribute_name] = attribute - - return attributes - - def match_dn(self, dn): - """Return entries with text in DN""" - matched = [] - for entry in self.entries: - if dn.lower() in entry.entry_dn.lower(): - matched.append(entry) - return matched - - def match(self, attributes, value): - """Return entries with text in one of the specified attributes""" - matched = [] - if not isinstance(attributes, SEQUENCE_TYPES): - attributes = [attributes] - - for entry in self.entries: - found = False - for attribute in attributes: - if attribute in entry: - for attr_value in entry[attribute].values: - if hasattr(attr_value, 'lower') and hasattr(value, 'lower') and value.lower() in attr_value.lower(): - found = True - elif value == attr_value: - found = True - if found: - matched.append(entry) - break - if found: - break - # checks raw values, tries to convert value to byte - raw_value = to_raw(value) - if isinstance(raw_value, (bytes, bytearray)): - for attr_value in entry[attribute].raw_values: - if hasattr(attr_value, 'lower') and hasattr(raw_value, 'lower') and raw_value.lower() in attr_value.lower(): - found = True - elif raw_value == attr_value: - found = True - if found: - matched.append(entry) - break - if found: - break - return matched - - def _create_entry(self, response): - if not response['type'] == 'searchResEntry': - return None - - entry = self.entry_class(response['dn'], self) # define an Entry (writable or readonly), as specified in the cursor definition - entry._state.attributes = self._get_attributes(response, self.definition._attributes, entry) - entry._state.entry_raw_attributes = deepcopy(response['raw_attributes']) - - entry._state.response = response - entry._state.read_time = datetime.now() - entry._state.set_status(self.entry_initial_status) - for attr in entry: # returns the whole attribute object - entry.__dict__[attr.key] = attr - - return entry - - def _execute_query(self, query_scope, attributes): - if not self.connection: - error_message = 'no connection established' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - old_query_filter = None - if query_scope == BASE: # requesting a single object so an always-valid filter is set - if hasattr(self, 'query_filter'): # only Reader has a query filter - old_query_filter = self.query_filter - self.query_filter = '(objectclass=*)' - else: - self._create_query_filter() - if log_enabled(PROTOCOL): - log(PROTOCOL, 'executing query - base: %s - filter: %s - scope: %s for <%s>', self.base, self.query_filter, query_scope, self) - with self.connection: - result = self.connection.search(search_base=self.base, - search_filter=self.query_filter, - search_scope=query_scope, - dereference_aliases=self.dereference_aliases, - attributes=attributes if attributes else list(self.attributes), - 
get_operational_attributes=self.get_operational_attributes, - controls=self.controls) - if not self.connection.strategy.sync: - response, result, request = self.connection.get_response(result, get_request=True) - else: - response = self.connection.response - result = self.connection.result - request = self.connection.request - - self._store_operation_in_history(request, result, response) - - if self._do_not_reset: # trick to not remove entries when using _refresh() - return self._create_entry(response[0]) - - self.entries = [] - for r in response: - entry = self._create_entry(r) - if entry is not None: - self.entries.append(entry) - if 'objectClass' in entry: - for object_class in entry.objectClass: - if self.schema.object_classes[object_class].kind == CLASS_AUXILIARY and object_class not in self.definition._auxiliary_class: - # add auxiliary class to object definition - self.definition._auxiliary_class.append(object_class) - self.definition._populate_attr_defs(object_class) - self.execution_time = datetime.now() - - if old_query_filter: # requesting a single object so an always-valid filter is set - self.query_filter = old_query_filter - - def remove(self, entry): - if log_enabled(PROTOCOL): - log(PROTOCOL, 'removing entry <%s> in <%s>', entry, self) - self.entries.remove(entry) - - def _reset_history(self): - self._operation_history = list() - - def _store_operation_in_history(self, request, result, response): - self._operation_history.append(Operation(request, result, response)) - - @property - def operations(self): - return self._operation_history - - @property - def errors(self): - return [error for error in self._operation_history if error.result['result'] != RESULT_SUCCESS] - - @property - def failed(self): - if hasattr(self, '_operation_history'): - return any([error.result['result'] != RESULT_SUCCESS for error in self._operation_history]) - - -class Reader(Cursor): - """Reader object to perform searches: - - :param connection: the LDAP connection object to use - :type connection: LDAPConnection - :param object_def: the ObjectDef of the LDAP object returned - :type object_def: ObjectDef - :param query: the simplified query (will be transformed in an LDAP filter) - :type query: str - :param base: starting base of the search - :type base: str - :param components_in_and: specify if assertions in the query must all be satisfied or not (AND/OR) - :type components_in_and: bool - :param sub_tree: specify if the search must be performed ad Single Level (False) or Whole SubTree (True) - :type sub_tree: bool - :param get_operational_attributes: specify if operational attributes are returned or not - :type get_operational_attributes: bool - :param controls: controls to be used in search - :type controls: tuple - - """ - entry_class = Entry # entries are read_only - attribute_class = Attribute # attributes are read_only - entry_initial_status = STATUS_READ - - def __init__(self, connection, object_def, base, query='', components_in_and=True, sub_tree=True, get_operational_attributes=False, attributes=None, controls=None, auxiliary_class=None): - Cursor.__init__(self, connection, object_def, get_operational_attributes, attributes, controls, auxiliary_class) - self._components_in_and = components_in_and - self.sub_tree = sub_tree - self._query = query - self.base = base - self.dereference_aliases = DEREF_ALWAYS - self.validated_query = None - self._query_dict = dict() - self._validated_query_dict = dict() - self.query_filter = None - self.reset() - - if log_enabled(BASIC): - log(BASIC, 
'instantiated Reader Cursor: <%r>', self) - - @property - def query(self): - return self._query - - @query.setter - def query(self, value): - self._query = value - self.reset() - - @property - def components_in_and(self): - return self._components_in_and - - @components_in_and.setter - def components_in_and(self, value): - self._components_in_and = value - self.reset() - - def clear(self): - """Clear the Reader search parameters - - """ - self.dereference_aliases = DEREF_ALWAYS - self._reset_history() - - def reset(self): - """Clear all the Reader parameters - - """ - self.clear() - self.validated_query = None - self._query_dict = dict() - self._validated_query_dict = dict() - self.execution_time = None - self.query_filter = None - self.entries = [] - self._create_query_filter() - - def _validate_query(self): - """Processes the text query and verifies that the requested friendly names are in the Reader dictionary - If the AttrDef has a 'validate' property the callable is executed and if it returns False an Exception is raised - - """ - if not self._query_dict: - self._query_dict = _create_query_dict(self._query) - - query = '' - for d in sorted(self._query_dict): - attr = d[1:] if d[0] in '&|' else d - for attr_def in self.definition: - if ''.join(attr.split()).lower() == attr_def.key.lower(): - attr = attr_def.key - break - if attr in self.definition: - vals = sorted(self._query_dict[d].split(';')) - - query += (d[0] + attr if d[0] in '&|' else attr) + ': ' - for val in vals: - val = val.strip() - val_not = True if val[0] == '!' else False - val_search_operator = '=' # default - if val_not: - if val[1:].lstrip()[0] not in '=<>~': - value = val[1:].lstrip() - else: - val_search_operator = val[1:].lstrip()[0] - value = val[1:].lstrip()[1:] - else: - if val[0] not in '=<>~': - value = val.lstrip() - else: - val_search_operator = val[0] - value = val[1:].lstrip() - - if self.definition[attr].validate: - validated = self.definition[attr].validate(value) # returns True, False or a value to substitute to the actual values - if validated is False: - error_message = 'validation failed for attribute %s and value %s' % (d, val) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - elif validated is not True: # a valid LDAP value equivalent to the actual values - value = validated - if val_not: - query += '!' 
+ val_search_operator + str(value) - else: - query += val_search_operator + str(value) - - query += ';' - query = query[:-1] + ', ' - else: - error_message = 'attribute \'%s\' not in definition' % attr - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - self.validated_query = query[:-2] - self._validated_query_dict = _create_query_dict(self.validated_query) - - def _create_query_filter(self): - """Converts the query dictionary to the filter text""" - self.query_filter = '' - - if self.definition._object_class: - self.query_filter += '(&' - if isinstance(self.definition._object_class, SEQUENCE_TYPES) and len(self.definition._object_class) == 1: - self.query_filter += '(objectClass=' + self.definition._object_class[0] + ')' - elif isinstance(self.definition._object_class, SEQUENCE_TYPES): - self.query_filter += '(&' - for object_class in self.definition._object_class: - self.query_filter += '(objectClass=' + object_class + ')' - self.query_filter += ')' - else: - error_message = 'object class must be a string or a list' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - if self._query and self._query.startswith('(') and self._query.endswith(')'): # query is already an LDAP filter - if 'objectclass' not in self._query.lower(): - self.query_filter += self._query + ')' # if objectclass not in filter adds from definition - else: - self.query_filter = self._query - return - elif self._query: # if a simplified filter is present - if not self.components_in_and: - self.query_filter += '(|' - elif not self.definition._object_class: - self.query_filter += '(&' - - self._validate_query() - - attr_counter = 0 - for attr in sorted(self._validated_query_dict): - attr_counter += 1 - multi = True if ';' in self._validated_query_dict[attr] else False - vals = sorted(self._validated_query_dict[attr].split(';')) - attr_def = self.definition[attr[1:]] if attr[0] in '&|' else self.definition[attr] - if attr_def.pre_query: - modvals = [] - for val in vals: - modvals.append(val[0] + attr_def.pre_query(attr_def.key, val[1:])) - vals = modvals - if multi: - if attr[0] in '&|': - self.query_filter += '(' + attr[0] - else: - self.query_filter += '(|' - - for val in vals: - if val[0] == '!': - self.query_filter += '(!(' + attr_def.name + _ret_search_value(val[1:]) + '))' - else: - self.query_filter += '(' + attr_def.name + _ret_search_value(val) + ')' - if multi: - self.query_filter += ')' - - if not self.components_in_and: - self.query_filter += '))' - else: - self.query_filter += ')' - - if not self.definition._object_class and attr_counter == 1: # removes unneeded starting filter - self.query_filter = self.query_filter[2: -1] - - if self.query_filter == '(|)' or self.query_filter == '(&)': # removes empty filter - self.query_filter = '' - else: # no query, remove unneeded leading (& - self.query_filter = self.query_filter[2:] - - def search(self, attributes=None): - """Perform the LDAP search - - :return: Entries found in search - - """ - self.clear() - query_scope = SUBTREE if self.sub_tree else LEVEL - if log_enabled(PROTOCOL): - log(PROTOCOL, 'performing search in <%s>', self) - self._execute_query(query_scope, attributes) - - return self.entries - - def search_object(self, entry_dn=None, attributes=None): # base must be a single dn - """Perform the LDAP search operation SINGLE_OBJECT scope - - :return: Entry found in search - - """ - if log_enabled(PROTOCOL): - log(PROTOCOL, 
'performing object search in <%s>', self) - self.clear() - if entry_dn: - old_base = self.base - self.base = entry_dn - self._execute_query(BASE, attributes) - self.base = old_base - else: - self._execute_query(BASE, attributes) - - return self.entries[0] if len(self.entries) > 0 else None - - def search_level(self, attributes=None): - """Perform the LDAP search operation with SINGLE_LEVEL scope - - :return: Entries found in search - - """ - if log_enabled(PROTOCOL): - log(PROTOCOL, 'performing single level search in <%s>', self) - self.clear() - self._execute_query(LEVEL, attributes) - - return self.entries - - def search_subtree(self, attributes=None): - """Perform the LDAP search operation WHOLE_SUBTREE scope - - :return: Entries found in search - - """ - if log_enabled(PROTOCOL): - log(PROTOCOL, 'performing whole subtree search in <%s>', self) - self.clear() - self._execute_query(SUBTREE, attributes) - - return self.entries - - def _entries_generator(self, responses): - for response in responses: - yield self._create_entry(response) - - def search_paged(self, paged_size, paged_criticality=True, generator=True, attributes=None): - """Perform a paged search, can be called as an Iterator - - :param attributes: optional attributes to search - :param paged_size: number of entries returned in each search - :type paged_size: int - :param paged_criticality: specify if server must not execute the search if it is not capable of paging searches - :type paged_criticality: bool - :param generator: if True the paged searches are executed while generating the entries, - if False all the paged searches are execute before returning the generator - :type generator: bool - :return: Entries found in search - - """ - if log_enabled(PROTOCOL): - log(PROTOCOL, 'performing paged search in <%s> with paged size %s', self, str(paged_size)) - if not self.connection: - error_message = 'no connection established' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - self.clear() - self._create_query_filter() - self.entries = [] - self.execution_time = datetime.now() - response = self.connection.extend.standard.paged_search(search_base=self.base, - search_filter=self.query_filter, - search_scope=SUBTREE if self.sub_tree else LEVEL, - dereference_aliases=self.dereference_aliases, - attributes=attributes if attributes else self.attributes, - get_operational_attributes=self.get_operational_attributes, - controls=self.controls, - paged_size=paged_size, - paged_criticality=paged_criticality, - generator=generator) - if generator: - return self._entries_generator(response) - else: - return list(self._entries_generator(response)) - - -class Writer(Cursor): - entry_class = WritableEntry - attribute_class = WritableAttribute - entry_initial_status = STATUS_WRITABLE - - @staticmethod - def from_cursor(cursor, connection=None, object_def=None, custom_validator=None): - if connection is None: - connection = cursor.connection - if object_def is None: - object_def = cursor.definition - writer = Writer(connection, object_def, attributes=cursor.attributes) - for entry in cursor.entries: - if isinstance(cursor, Reader): - entry.entry_writable(object_def, writer, custom_validator=custom_validator) - elif isinstance(cursor, Writer): - pass - else: - error_message = 'unknown cursor type %s' % str(type(cursor)) - if log_enabled(ERROR): - log(ERROR, '%s', error_message) - raise LDAPCursorError(error_message) - writer.execution_time = cursor.execution_time - if 
log_enabled(BASIC): - log(BASIC, 'instantiated Writer Cursor <%r> from cursor <%r>', writer, cursor) - return writer - - @staticmethod - def from_response(connection, object_def, response=None): - if response is None: - if not connection.strategy.sync: - error_message = 'with asynchronous strategies response must be specified' - if log_enabled(ERROR): - log(ERROR, '%s', error_message) - raise LDAPCursorError(error_message) - elif connection.response: - response = connection.response - else: - error_message = 'response not present' - if log_enabled(ERROR): - log(ERROR, '%s', error_message) - raise LDAPCursorError(error_message) - writer = Writer(connection, object_def) - - for resp in response: - if resp['type'] == 'searchResEntry': - entry = writer._create_entry(resp) - writer.entries.append(entry) - if log_enabled(BASIC): - log(BASIC, 'instantiated Writer Cursor <%r> from response', writer) - return writer - - def __init__(self, connection, object_def, get_operational_attributes=False, attributes=None, controls=None, auxiliary_class=None): - Cursor.__init__(self, connection, object_def, get_operational_attributes, attributes, controls, auxiliary_class) - self.dereference_aliases = DEREF_NEVER - - if log_enabled(BASIC): - log(BASIC, 'instantiated Writer Cursor: <%r>', self) - - def commit(self, refresh=True): - if log_enabled(PROTOCOL): - log(PROTOCOL, 'committed changes for <%s>', self) - self._reset_history() - successful = True - for entry in self.entries: - if not entry.entry_commit_changes(refresh=refresh, controls=self.controls, clear_history=False): - successful = False - - self.execution_time = datetime.now() - - return successful - - def discard(self): - if log_enabled(PROTOCOL): - log(PROTOCOL, 'discarded changes for <%s>', self) - for entry in self.entries: - entry.entry_discard_changes() - - def _refresh_object(self, entry_dn, attributes=None, tries=4, seconds=2, controls=None): # base must be a single dn - """Performs the LDAP search operation SINGLE_OBJECT scope - - :return: Entry found in search - - """ - if log_enabled(PROTOCOL): - log(PROTOCOL, 'refreshing object <%s> for <%s>', entry_dn, self) - if not self.connection: - error_message = 'no connection established' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - response = [] - with self.connection: - counter = 0 - while counter < tries: - result = self.connection.search(search_base=entry_dn, - search_filter='(objectclass=*)', - search_scope=BASE, - dereference_aliases=DEREF_NEVER, - attributes=attributes if attributes else self.attributes, - get_operational_attributes=self.get_operational_attributes, - controls=controls) - if not self.connection.strategy.sync: - response, result, request = self.connection.get_response(result, get_request=True) - else: - response = self.connection.response - result = self.connection.result - request = self.connection.request - - if result['result'] in [RESULT_SUCCESS]: - break - sleep(seconds) - counter += 1 - self._store_operation_in_history(request, result, response) - - if len(response) == 1: - return self._create_entry(response[0]) - elif len(response) == 0: - return None - - error_message = 'more than 1 entry returned for a single object search' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - def new(self, dn): - if log_enabled(BASIC): - log(BASIC, 'creating new entry <%s> for <%s>', dn, self) - dn = safe_dn(dn) - for entry in self.entries: # checks 
if dn is already used in an cursor entry - if entry.entry_dn == dn: - error_message = 'dn already present in cursor' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - rdns = safe_rdn(dn, decompose=True) - entry = self.entry_class(dn, self) # defines a new empty Entry - for attr in entry.entry_mandatory_attributes: # defines all mandatory attributes as virtual - entry._state.attributes[attr] = self.attribute_class(entry._state.definition[attr], entry, self) - entry.__dict__[attr] = entry._state.attributes[attr] - entry.objectclass.set(self.definition._object_class) - for rdn in rdns: # adds virtual attributes from rdns in entry name (should be more than one with + syntax) - if rdn[0] in entry._state.definition._attributes: - rdn_name = entry._state.definition._attributes[rdn[0]].name # normalize case folding - if rdn_name not in entry._state.attributes: - entry._state.attributes[rdn_name] = self.attribute_class(entry._state.definition[rdn_name], entry, self) - entry.__dict__[rdn_name] = entry._state.attributes[rdn_name] - entry.__dict__[rdn_name].set(rdn[1]) - else: - error_message = 'rdn type \'%s\' not in object class definition' % rdn[0] - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - entry._state.set_status(STATUS_VIRTUAL) # set intial status - entry._state.set_status(STATUS_PENDING_CHANGES) # tries to change status to PENDING_CHANGES. If mandatory attributes are missing status is reverted to MANDATORY_MISSING - self.entries.append(entry) - return entry - - def refresh_entry(self, entry, tries=4, seconds=2): - conf_operational_attribute_prefix = get_config_parameter('ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX') - - self._do_not_reset = True - attr_list = [] - if log_enabled(PROTOCOL): - log(PROTOCOL, 'refreshing entry <%s> for <%s>', entry, self) - for attr in entry._state.attributes: # check friendly attribute name in AttrDef, do not check operational attributes - if attr.lower().startswith(conf_operational_attribute_prefix.lower()): - continue - if entry._state.definition[attr].name: - attr_list.append(entry._state.definition[attr].name) - else: - attr_list.append(entry._state.definition[attr].key) - - temp_entry = self._refresh_object(entry.entry_dn, attr_list, tries, seconds=seconds) # if any attributes is added adds only to the entry not to the definition - self._do_not_reset = False - if temp_entry: - temp_entry._state.origin = entry._state.origin - entry.__dict__.clear() - entry.__dict__['_state'] = temp_entry._state - for attr in entry._state.attributes: # returns the attribute key - entry.__dict__[attr] = entry._state.attributes[attr] - - for attr in entry.entry_attributes: # if any attribute of the class was deleted makes it virtual - if attr not in entry._state.attributes and attr in entry.entry_definition._attributes: - entry._state.attributes[attr] = WritableAttribute(entry.entry_definition[attr], entry, self) - entry.__dict__[attr] = entry._state.attributes[attr] - entry._state.set_status(entry._state._initial_status) - return True - return False +""" +""" + +# Created on 2014.01.06 +# +# Author: Giovanni Cannata +# +# Copyright 2014 - 2019 Giovanni Cannata +# +# This file is part of ldap3. +# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . +from collections import namedtuple +from copy import deepcopy +from datetime import datetime +from os import linesep +from time import sleep + +from . import STATUS_VIRTUAL, STATUS_READ, STATUS_WRITABLE +from .. import SUBTREE, LEVEL, DEREF_ALWAYS, DEREF_NEVER, BASE, SEQUENCE_TYPES, STRING_TYPES, get_config_parameter +from ..abstract import STATUS_PENDING_CHANGES +from .attribute import Attribute, OperationalAttribute, WritableAttribute +from .attrDef import AttrDef +from .objectDef import ObjectDef +from .entry import Entry, WritableEntry +from ..core.exceptions import LDAPCursorError, LDAPObjectDereferenceError +from ..core.results import RESULT_SUCCESS +from ..utils.ciDict import CaseInsensitiveWithAliasDict +from ..utils.dn import safe_dn, safe_rdn +from ..utils.conv import to_raw +from ..utils.config import get_config_parameter +from ..utils.log import log, log_enabled, ERROR, BASIC, PROTOCOL, EXTENDED +from ..protocol.oid import ATTRIBUTE_DIRECTORY_OPERATION, ATTRIBUTE_DISTRIBUTED_OPERATION, ATTRIBUTE_DSA_OPERATION, CLASS_AUXILIARY + +Operation = namedtuple('Operation', ('request', 'result', 'response')) + + +def _ret_search_value(value): + return value[0] + '=' + value[1:] if value[0] in '<>~' and value[1] != '=' else value + + +def _create_query_dict(query_text): + """ + Create a dictionary with query key:value definitions + query_text is a comma delimited key:value sequence + """ + query_dict = dict() + if query_text: + for arg_value_str in query_text.split(','): + if ':' in arg_value_str: + arg_value_list = arg_value_str.split(':') + query_dict[arg_value_list[0].strip()] = arg_value_list[1].strip() + + return query_dict + + +class Cursor(object): + # entry_class and attribute_class define the type of entry and attribute used by the cursor + # entry_initial_status defines the initial status of a entry + # entry_class = Entry, must be defined in subclasses + # attribute_class = Attribute, must be defined in subclasses + # entry_initial_status = STATUS, must be defined in subclasses + + def __init__(self, connection, object_def, get_operational_attributes=False, attributes=None, controls=None, auxiliary_class=None): + conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')] + self.connection = connection + self.get_operational_attributes = get_operational_attributes + if connection._deferred_bind or connection._deferred_open: # probably a lazy connection, tries to bind + connection._fire_deferred() + + if isinstance(object_def, (STRING_TYPES, SEQUENCE_TYPES)): + if connection.closed: # try to open connection if closed to read schema + connection.bind() + object_def = ObjectDef(object_def, connection.server.schema, auxiliary_class=auxiliary_class) + self.definition = object_def + if attributes: # checks if requested attributes are defined in ObjectDef + not_defined_attributes = [] + if isinstance(attributes, STRING_TYPES): + attributes = [attributes] + + for attribute in attributes: + if attribute not in self.definition._attributes and attribute.lower() not in conf_attributes_excluded_from_object_def: + 
not_defined_attributes.append(attribute) + + if not_defined_attributes: + error_message = 'Attributes \'%s\' non in definition' % ', '.join(not_defined_attributes) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + self.attributes = set(attributes) if attributes else set([attr.name for attr in self.definition]) + self.controls = controls + self.execution_time = None + self.entries = [] + self.schema = self.connection.server.schema + self._do_not_reset = False # used for refreshing entry in entry_refresh() without removing all entries from the Cursor + self._operation_history = list() # a list storing all the requests, results and responses for the last cursor operation + + def __repr__(self): + r = 'CURSOR : ' + self.__class__.__name__ + linesep + r += 'CONN : ' + str(self.connection) + linesep + r += 'DEFS : ' + ', '.join(self.definition._object_class) + if self.definition._auxiliary_class: + r += ' [AUX: ' + ', '.join(self.definition._auxiliary_class) + ']' + r += linesep + # for attr_def in sorted(self.definition): + # r += (attr_def.key if attr_def.key == attr_def.name else (attr_def.key + ' <' + attr_def.name + '>')) + ', ' + # if r[-2] == ',': + # r = r[:-2] + # r += ']' + linesep + if hasattr(self, 'attributes'): + r += 'ATTRS : ' + repr(sorted(self.attributes)) + (' [OPERATIONAL]' if self.get_operational_attributes else '') + linesep + if isinstance(self, Reader): + if hasattr(self, 'base'): + r += 'BASE : ' + repr(self.base) + (' [SUB]' if self.sub_tree else ' [LEVEL]') + linesep + if hasattr(self, '_query') and self._query: + r += 'QUERY : ' + repr(self._query) + ('' if '(' in self._query else (' [AND]' if self.components_in_and else ' [OR]')) + linesep + if hasattr(self, 'validated_query') and self.validated_query: + r += 'PARSED : ' + repr(self.validated_query) + ('' if '(' in self._query else (' [AND]' if self.components_in_and else ' [OR]')) + linesep + if hasattr(self, 'query_filter') and self.query_filter: + r += 'FILTER : ' + repr(self.query_filter) + linesep + + if hasattr(self, 'execution_time') and self.execution_time: + r += 'ENTRIES: ' + str(len(self.entries)) + r += ' [executed at: ' + str(self.execution_time.isoformat()) + ']' + linesep + + if self.failed: + r += 'LAST OPERATION FAILED [' + str(len(self.errors)) + ' failure' + ('s' if len(self.errors) > 1 else '') + ' at operation' + ('s ' if len(self.errors) > 1 else ' ') + ', '.join([str(i) for i, error in enumerate(self.operations) if error.result['result'] != RESULT_SUCCESS]) + ']' + + return r + + def __str__(self): + return self.__repr__() + + def __iter__(self): + return self.entries.__iter__() + + def __getitem__(self, item): + """Return indexed item, if index is not found then try to sequentially search in DN of entries. + If only one entry is found return it else raise a KeyError exception. The exception message + includes the number of entries that matches, if less than 10 entries match then show the DNs + in the exception message. 
+ """ + try: + return self.entries[item] + except TypeError: + pass + + if isinstance(item, STRING_TYPES): + found = self.match_dn(item) + + if len(found) == 1: + return found[0] + elif len(found) > 1: + error_message = 'Multiple entries found: %d entries match the text in dn' % len(found) + ('' if len(found) > 10 else (' [' + '; '.join([e.entry_dn for e in found]) + ']')) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise KeyError(error_message) + + error_message = 'no entry found' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise KeyError(error_message) + + def __len__(self): + return len(self.entries) + + if str is not bytes: # Python 3 + def __bool__(self): # needed to make the cursor appears as existing in "if cursor:" even if there are no entries + return True + else: # Python 2 + def __nonzero__(self): + return True + + def _get_attributes(self, response, attr_defs, entry): + """Assign the result of the LDAP query to the Entry object dictionary. + + If the optional 'post_query' callable is present in the AttrDef it is called with each value of the attribute and the callable result is stored in the attribute. + + Returns the default value for missing attributes. + If the 'dereference_dn' in AttrDef is a ObjectDef then the attribute values are treated as distinguished name and the relevant entry is retrieved and stored in the attribute value. + + """ + conf_operational_attribute_prefix = get_config_parameter('ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX') + conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')] + attributes = CaseInsensitiveWithAliasDict() + used_attribute_names = set() + for attr in attr_defs: + attr_def = attr_defs[attr] + attribute_name = None + for attr_name in response['attributes']: + if attr_def.name.lower() == attr_name.lower(): + attribute_name = attr_name + break + + if attribute_name or attr_def.default is not NotImplemented: # attribute value found in result or default value present - NotImplemented allows use of None as default + attribute = self.attribute_class(attr_def, entry, self) + attribute.response = response + attribute.raw_values = response['raw_attributes'][attribute_name] if attribute_name else None + if attr_def.post_query and attr_def.name in response['attributes'] and response['raw_attributes'] != list(): + attribute.values = attr_def.post_query(attr_def.key, response['attributes'][attribute_name]) + else: + if attr_def.default is NotImplemented or (attribute_name and response['raw_attributes'][attribute_name] != list()): + attribute.values = response['attributes'][attribute_name] + else: + attribute.values = attr_def.default if isinstance(attr_def.default, SEQUENCE_TYPES) else [attr_def.default] + if not isinstance(attribute.values, list): # force attribute values to list (if attribute is single-valued) + attribute.values = [attribute.values] + if attr_def.dereference_dn: # try to get object referenced in value + if attribute.values: + temp_reader = Reader(self.connection, attr_def.dereference_dn, base='', get_operational_attributes=self.get_operational_attributes, controls=self.controls) + temp_values = [] + for element in attribute.values: + if entry.entry_dn != element: + temp_values.append(temp_reader.search_object(element)) + else: + error_message = 'object %s is referencing itself in the \'%s\' attribute' % (entry.entry_dn, attribute.definition.name) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', 
error_message, self) + raise LDAPObjectDereferenceError(error_message) + del temp_reader # remove the temporary Reader + attribute.values = temp_values + attributes[attribute.key] = attribute + if attribute.other_names: + attributes.set_alias(attribute.key, attribute.other_names) + if attr_def.other_names: + attributes.set_alias(attribute.key, attr_def.other_names) + used_attribute_names.add(attribute_name) + + if self.attributes: + used_attribute_names.update(self.attributes) + + for attribute_name in response['attributes']: + if attribute_name not in used_attribute_names: + operational_attribute = False + # check if the type is an operational attribute + if attribute_name in self.schema.attribute_types: + if self.schema.attribute_types[attribute_name].no_user_modification or self.schema.attribute_types[attribute_name].usage in [ATTRIBUTE_DIRECTORY_OPERATION, ATTRIBUTE_DISTRIBUTED_OPERATION, ATTRIBUTE_DSA_OPERATION]: + operational_attribute = True + else: + operational_attribute = True + if not operational_attribute and attribute_name not in attr_defs and attribute_name.lower() not in conf_attributes_excluded_from_object_def: + error_message = 'attribute \'%s\' not in object class \'%s\' for entry %s' % (attribute_name, ', '.join(entry.entry_definition._object_class), entry.entry_dn) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + attribute = OperationalAttribute(AttrDef(conf_operational_attribute_prefix + attribute_name), entry, self) + attribute.raw_values = response['raw_attributes'][attribute_name] + attribute.values = response['attributes'][attribute_name] if isinstance(response['attributes'][attribute_name], SEQUENCE_TYPES) else [response['attributes'][attribute_name]] + if (conf_operational_attribute_prefix + attribute_name) not in attributes: + attributes[conf_operational_attribute_prefix + attribute_name] = attribute + + return attributes + + def match_dn(self, dn): + """Return entries with text in DN""" + matched = [] + for entry in self.entries: + if dn.lower() in entry.entry_dn.lower(): + matched.append(entry) + return matched + + def match(self, attributes, value): + """Return entries with text in one of the specified attributes""" + matched = [] + if not isinstance(attributes, SEQUENCE_TYPES): + attributes = [attributes] + + for entry in self.entries: + found = False + for attribute in attributes: + if attribute in entry: + for attr_value in entry[attribute].values: + if hasattr(attr_value, 'lower') and hasattr(value, 'lower') and value.lower() in attr_value.lower(): + found = True + elif value == attr_value: + found = True + if found: + matched.append(entry) + break + if found: + break + # checks raw values, tries to convert value to byte + raw_value = to_raw(value) + if isinstance(raw_value, (bytes, bytearray)): + for attr_value in entry[attribute].raw_values: + if hasattr(attr_value, 'lower') and hasattr(raw_value, 'lower') and raw_value.lower() in attr_value.lower(): + found = True + elif raw_value == attr_value: + found = True + if found: + matched.append(entry) + break + if found: + break + return matched + + def _create_entry(self, response): + if not response['type'] == 'searchResEntry': + return None + + entry = self.entry_class(response['dn'], self) # define an Entry (writable or readonly), as specified in the cursor definition + entry._state.attributes = self._get_attributes(response, self.definition._attributes, entry) + entry._state.raw_attributes = deepcopy(response['raw_attributes']) + + 
entry._state.response = response + entry._state.read_time = datetime.now() + entry._state.set_status(self.entry_initial_status) + for attr in entry: # returns the whole attribute object + entry.__dict__[attr.key] = attr + + return entry + + def _execute_query(self, query_scope, attributes): + if not self.connection: + error_message = 'no connection established' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + old_query_filter = None + if query_scope == BASE: # requesting a single object so an always-valid filter is set + if hasattr(self, 'query_filter'): # only Reader has a query filter + old_query_filter = self.query_filter + self.query_filter = '(objectclass=*)' + else: + self._create_query_filter() + if log_enabled(PROTOCOL): + log(PROTOCOL, 'executing query - base: %s - filter: %s - scope: %s for <%s>', self.base, self.query_filter, query_scope, self) + with self.connection: + result = self.connection.search(search_base=self.base, + search_filter=self.query_filter, + search_scope=query_scope, + dereference_aliases=self.dereference_aliases, + attributes=attributes if attributes else list(self.attributes), + get_operational_attributes=self.get_operational_attributes, + controls=self.controls) + if not self.connection.strategy.sync: + response, result, request = self.connection.get_response(result, get_request=True) + else: + response = self.connection.response + result = self.connection.result + request = self.connection.request + + self._store_operation_in_history(request, result, response) + + if self._do_not_reset: # trick to not remove entries when using _refresh() + return self._create_entry(response[0]) + + self.entries = [] + for r in response: + entry = self._create_entry(r) + if entry is not None: + self.entries.append(entry) + if 'objectClass' in entry: + for object_class in entry.objectClass: + if self.schema and self.schema.object_classes[object_class].kind == CLASS_AUXILIARY and object_class not in self.definition._auxiliary_class: + # add auxiliary class to object definition + self.definition._auxiliary_class.append(object_class) + self.definition._populate_attr_defs(object_class) + self.execution_time = datetime.now() + + if old_query_filter: # requesting a single object so an always-valid filter is set + self.query_filter = old_query_filter + + def remove(self, entry): + if log_enabled(PROTOCOL): + log(PROTOCOL, 'removing entry <%s> in <%s>', entry, self) + self.entries.remove(entry) + + def _reset_history(self): + self._operation_history = list() + + def _store_operation_in_history(self, request, result, response): + self._operation_history.append(Operation(request, result, response)) + + @property + def operations(self): + return self._operation_history + + @property + def errors(self): + return [error for error in self._operation_history if error.result['result'] != RESULT_SUCCESS] + + @property + def failed(self): + if hasattr(self, '_operation_history'): + return any([error.result['result'] != RESULT_SUCCESS for error in self._operation_history]) + + +class Reader(Cursor): + """Reader object to perform searches: + + :param connection: the LDAP connection object to use + :type connection: LDAPConnection + :param object_def: the ObjectDef of the LDAP object returned + :type object_def: ObjectDef + :param query: the simplified query (will be transformed in an LDAP filter) + :type query: str + :param base: starting base of the search + :type base: str + :param components_in_and: specify if assertions in 
the query must all be satisfied or not (AND/OR) + :type components_in_and: bool + :param sub_tree: specify if the search must be performed at Single Level (False) or Whole SubTree (True) + :type sub_tree: bool + :param get_operational_attributes: specify if operational attributes are returned or not + :type get_operational_attributes: bool + :param controls: controls to be used in search + :type controls: tuple + + """ + entry_class = Entry # entries are read_only + attribute_class = Attribute # attributes are read_only + entry_initial_status = STATUS_READ + + def __init__(self, connection, object_def, base, query='', components_in_and=True, sub_tree=True, get_operational_attributes=False, attributes=None, controls=None, auxiliary_class=None): + Cursor.__init__(self, connection, object_def, get_operational_attributes, attributes, controls, auxiliary_class) + self._components_in_and = components_in_and + self.sub_tree = sub_tree + self._query = query + self.base = base + self.dereference_aliases = DEREF_ALWAYS + self.validated_query = None + self._query_dict = dict() + self._validated_query_dict = dict() + self.query_filter = None + self.reset() + + if log_enabled(BASIC): + log(BASIC, 'instantiated Reader Cursor: <%r>', self) + + @property + def query(self): + return self._query + + @query.setter + def query(self, value): + self._query = value + self.reset() + + @property + def components_in_and(self): + return self._components_in_and + + @components_in_and.setter + def components_in_and(self, value): + self._components_in_and = value + self.reset() + + def clear(self): + """Clear the Reader search parameters + + """ + self.dereference_aliases = DEREF_ALWAYS + self._reset_history() + + def reset(self): + """Clear all the Reader parameters + + """ + self.clear() + self.validated_query = None + self._query_dict = dict() + self._validated_query_dict = dict() + self.execution_time = None + self.query_filter = None + self.entries = [] + self._create_query_filter() + + def _validate_query(self): + """Processes the text query and verifies that the requested friendly names are in the Reader dictionary. + If the AttrDef has a 'validate' property the callable is executed and if it returns False an Exception is raised. + + """ + if not self._query_dict: + self._query_dict = _create_query_dict(self._query) + + query = '' + for d in sorted(self._query_dict): + attr = d[1:] if d[0] in '&|' else d + for attr_def in self.definition: + if ''.join(attr.split()).lower() == attr_def.key.lower(): + attr = attr_def.key + break + if attr in self.definition: + vals = sorted(self._query_dict[d].split(';')) + + query += (d[0] + attr if d[0] in '&|' else attr) + ': ' + for val in vals: + val = val.strip() + val_not = True if val[0] == '!' 
else False + val_search_operator = '=' # default + if val_not: + if val[1:].lstrip()[0] not in '=<>~': + value = val[1:].lstrip() + else: + val_search_operator = val[1:].lstrip()[0] + value = val[1:].lstrip()[1:] + else: + if val[0] not in '=<>~': + value = val.lstrip() + else: + val_search_operator = val[0] + value = val[1:].lstrip() + + if self.definition[attr].validate: + validated = self.definition[attr].validate(value) # returns True, False or a value to substitute to the actual values + if validated is False: + error_message = 'validation failed for attribute %s and value %s' % (d, val) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + elif validated is not True: # a valid LDAP value equivalent to the actual values + value = validated + if val_not: + query += '!' + val_search_operator + str(value) + else: + query += val_search_operator + str(value) + + query += ';' + query = query[:-1] + ', ' + else: + error_message = 'attribute \'%s\' not in definition' % attr + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + self.validated_query = query[:-2] + self._validated_query_dict = _create_query_dict(self.validated_query) + + def _create_query_filter(self): + """Converts the query dictionary to the filter text""" + self.query_filter = '' + + if self.definition._object_class: + self.query_filter += '(&' + if isinstance(self.definition._object_class, SEQUENCE_TYPES) and len(self.definition._object_class) == 1: + self.query_filter += '(objectClass=' + self.definition._object_class[0] + ')' + elif isinstance(self.definition._object_class, SEQUENCE_TYPES): + self.query_filter += '(&' + for object_class in self.definition._object_class: + self.query_filter += '(objectClass=' + object_class + ')' + self.query_filter += ')' + else: + error_message = 'object class must be a string or a list' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + if self._query and self._query.startswith('(') and self._query.endswith(')'): # query is already an LDAP filter + if 'objectclass' not in self._query.lower(): + self.query_filter += self._query + ')' # if objectclass not in filter adds from definition + else: + self.query_filter = self._query + return + elif self._query: # if a simplified filter is present + if not self.components_in_and: + self.query_filter += '(|' + elif not self.definition._object_class: + self.query_filter += '(&' + + self._validate_query() + + attr_counter = 0 + for attr in sorted(self._validated_query_dict): + attr_counter += 1 + multi = True if ';' in self._validated_query_dict[attr] else False + vals = sorted(self._validated_query_dict[attr].split(';')) + attr_def = self.definition[attr[1:]] if attr[0] in '&|' else self.definition[attr] + if attr_def.pre_query: + modvals = [] + for val in vals: + modvals.append(val[0] + attr_def.pre_query(attr_def.key, val[1:])) + vals = modvals + if multi: + if attr[0] in '&|': + self.query_filter += '(' + attr[0] + else: + self.query_filter += '(|' + + for val in vals: + if val[0] == '!': + self.query_filter += '(!(' + attr_def.name + _ret_search_value(val[1:]) + '))' + else: + self.query_filter += '(' + attr_def.name + _ret_search_value(val) + ')' + if multi: + self.query_filter += ')' + + if not self.components_in_and: + self.query_filter += '))' + else: + self.query_filter += ')' + + if not self.definition._object_class and attr_counter == 1: # removes 
unneeded starting filter + self.query_filter = self.query_filter[2: -1] + + if self.query_filter == '(|)' or self.query_filter == '(&)': # removes empty filter + self.query_filter = '' + else: # no query, remove unneeded leading (& + self.query_filter = self.query_filter[2:] + + def search(self, attributes=None): + """Perform the LDAP search + + :return: Entries found in search + + """ + self.clear() + query_scope = SUBTREE if self.sub_tree else LEVEL + if log_enabled(PROTOCOL): + log(PROTOCOL, 'performing search in <%s>', self) + self._execute_query(query_scope, attributes) + + return self.entries + + def search_object(self, entry_dn=None, attributes=None): # base must be a single dn + """Perform the LDAP search operation with SINGLE_OBJECT scope + + :return: Entry found in search + + """ + if log_enabled(PROTOCOL): + log(PROTOCOL, 'performing object search in <%s>', self) + self.clear() + if entry_dn: + old_base = self.base + self.base = entry_dn + self._execute_query(BASE, attributes) + self.base = old_base + else: + self._execute_query(BASE, attributes) + + return self.entries[0] if len(self.entries) > 0 else None + + def search_level(self, attributes=None): + """Perform the LDAP search operation with SINGLE_LEVEL scope + + :return: Entries found in search + + """ + if log_enabled(PROTOCOL): + log(PROTOCOL, 'performing single level search in <%s>', self) + self.clear() + self._execute_query(LEVEL, attributes) + + return self.entries + + def search_subtree(self, attributes=None): + """Perform the LDAP search operation with WHOLE_SUBTREE scope + + :return: Entries found in search + + """ + if log_enabled(PROTOCOL): + log(PROTOCOL, 'performing whole subtree search in <%s>', self) + self.clear() + self._execute_query(SUBTREE, attributes) + + return self.entries + + def _entries_generator(self, responses): + for response in responses: + yield self._create_entry(response) + + def search_paged(self, paged_size, paged_criticality=True, generator=True, attributes=None): + """Perform a paged search, can be called as an iterator + + :param attributes: optional attributes to search + :param paged_size: number of entries returned in each search + :type paged_size: int + :param paged_criticality: specify if server must not execute the search if it is not capable of paging searches + :type paged_criticality: bool + :param generator: if True the paged searches are executed while generating the entries, + if False all the paged searches are executed before returning the generator + :type generator: bool + :return: Entries found in search + + """ + if log_enabled(PROTOCOL): + log(PROTOCOL, 'performing paged search in <%s> with paged size %s', self, str(paged_size)) + if not self.connection: + error_message = 'no connection established' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + self.clear() + self._create_query_filter() + self.entries = [] + self.execution_time = datetime.now() + response = self.connection.extend.standard.paged_search(search_base=self.base, + search_filter=self.query_filter, + search_scope=SUBTREE if self.sub_tree else LEVEL, + dereference_aliases=self.dereference_aliases, + attributes=attributes if attributes else self.attributes, + get_operational_attributes=self.get_operational_attributes, + controls=self.controls, + paged_size=paged_size, + paged_criticality=paged_criticality, + generator=generator) + if generator: + return self._entries_generator(response) + else: + return list(self._entries_generator(response)) + + 
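The Reader above translates its simplified 'attr: value' query syntax into a standard LDAP filter before searching, while the Writer below stages attribute changes on its entries until commit() sends them. A minimal usage sketch of this abstraction layer, assuming a reachable directory server and the usual top-level ldap3 exports; the server address, credentials, base DN and attribute values below are illustrative only:

    from ldap3 import Server, Connection, ObjectDef, Reader, Writer

    # bind to a directory server (hypothetical address and credentials)
    conn = Connection(Server('ldap://ldap.example.com'),
                      user='cn=admin,dc=example,dc=com', password='secret',
                      auto_bind=True)

    # build the object definition from the server schema
    person = ObjectDef('inetOrgPerson', conn)

    # the simplified query 'cn: john*' is expanded by _create_query_filter()
    # into '(&(objectClass=inetOrgPerson)(cn=john*))'
    reader = Reader(conn, person, 'ou=people,dc=example,dc=com', 'cn: john*')
    entries = reader.search()

    # writable copies of the entries just read; assignments are only staged
    # in the entry's pending changes until commit() sends them to the server
    writer = Writer.from_cursor(reader)
    if writer.entries:
        writer[0].sn = 'Smith'   # stages a replacement of the sn value
        writer.commit()          # entry_commit_changes() on every pending entry

Because the filter is rebuilt from the validated query on every search, assigning a new reader.query or toggling reader.components_in_and simply calls reset() and the next search reflects the new parameters.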
+class Writer(Cursor): + entry_class = WritableEntry + attribute_class = WritableAttribute + entry_initial_status = STATUS_WRITABLE + + @staticmethod + def from_cursor(cursor, connection=None, object_def=None, custom_validator=None): + if connection is None: + connection = cursor.connection + if object_def is None: + object_def = cursor.definition + writer = Writer(connection, object_def, attributes=cursor.attributes) + for entry in cursor.entries: + if isinstance(cursor, Reader): + entry.entry_writable(object_def, writer, custom_validator=custom_validator) + elif isinstance(cursor, Writer): + pass + else: + error_message = 'unknown cursor type %s' % str(type(cursor)) + if log_enabled(ERROR): + log(ERROR, '%s', error_message) + raise LDAPCursorError(error_message) + writer.execution_time = cursor.execution_time + if log_enabled(BASIC): + log(BASIC, 'instantiated Writer Cursor <%r> from cursor <%r>', writer, cursor) + return writer + + @staticmethod + def from_response(connection, object_def, response=None): + if response is None: + if not connection.strategy.sync: + error_message = 'with asynchronous strategies response must be specified' + if log_enabled(ERROR): + log(ERROR, '%s', error_message) + raise LDAPCursorError(error_message) + elif connection.response: + response = connection.response + else: + error_message = 'response not present' + if log_enabled(ERROR): + log(ERROR, '%s', error_message) + raise LDAPCursorError(error_message) + writer = Writer(connection, object_def) + + for resp in response: + if resp['type'] == 'searchResEntry': + entry = writer._create_entry(resp) + writer.entries.append(entry) + if log_enabled(BASIC): + log(BASIC, 'instantiated Writer Cursor <%r> from response', writer) + return writer + + def __init__(self, connection, object_def, get_operational_attributes=False, attributes=None, controls=None, auxiliary_class=None): + Cursor.__init__(self, connection, object_def, get_operational_attributes, attributes, controls, auxiliary_class) + self.dereference_aliases = DEREF_NEVER + + if log_enabled(BASIC): + log(BASIC, 'instantiated Writer Cursor: <%r>', self) + + def commit(self, refresh=True): + if log_enabled(PROTOCOL): + log(PROTOCOL, 'committed changes for <%s>', self) + self._reset_history() + successful = True + for entry in self.entries: + if not entry.entry_commit_changes(refresh=refresh, controls=self.controls, clear_history=False): + successful = False + + self.execution_time = datetime.now() + + return successful + + def discard(self): + if log_enabled(PROTOCOL): + log(PROTOCOL, 'discarded changes for <%s>', self) + for entry in self.entries: + entry.entry_discard_changes() + + def _refresh_object(self, entry_dn, attributes=None, tries=4, seconds=2, controls=None): # base must be a single dn + """Performs the LDAP search operation SINGLE_OBJECT scope + + :return: Entry found in search + + """ + if log_enabled(PROTOCOL): + log(PROTOCOL, 'refreshing object <%s> for <%s>', entry_dn, self) + if not self.connection: + error_message = 'no connection established' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + response = [] + with self.connection: + counter = 0 + while counter < tries: + result = self.connection.search(search_base=entry_dn, + search_filter='(objectclass=*)', + search_scope=BASE, + dereference_aliases=DEREF_NEVER, + attributes=attributes if attributes else self.attributes, + get_operational_attributes=self.get_operational_attributes, + controls=controls) + if not 
self.connection.strategy.sync: + response, result, request = self.connection.get_response(result, get_request=True) + else: + response = self.connection.response + result = self.connection.result + request = self.connection.request + + if result['result'] in [RESULT_SUCCESS]: + break + sleep(seconds) + counter += 1 + self._store_operation_in_history(request, result, response) + + if len(response) == 1: + return self._create_entry(response[0]) + elif len(response) == 0: + return None + + error_message = 'more than 1 entry returned for a single object search' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + def new(self, dn): + if log_enabled(BASIC): + log(BASIC, 'creating new entry <%s> for <%s>', dn, self) + dn = safe_dn(dn) + for entry in self.entries: # checks if dn is already used in a cursor entry + if entry.entry_dn == dn: + error_message = 'dn already present in cursor' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + rdns = safe_rdn(dn, decompose=True) + entry = self.entry_class(dn, self) # defines a new empty Entry + for attr in entry.entry_mandatory_attributes: # defines all mandatory attributes as virtual + entry._state.attributes[attr] = self.attribute_class(entry._state.definition[attr], entry, self) + entry.__dict__[attr] = entry._state.attributes[attr] + entry.objectclass.set(self.definition._object_class) + for rdn in rdns: # adds virtual attributes from rdns in entry name (there can be more than one, with + syntax) + if rdn[0] in entry._state.definition._attributes: + rdn_name = entry._state.definition._attributes[rdn[0]].name # normalize case folding + if rdn_name not in entry._state.attributes: + entry._state.attributes[rdn_name] = self.attribute_class(entry._state.definition[rdn_name], entry, self) + entry.__dict__[rdn_name] = entry._state.attributes[rdn_name] + entry.__dict__[rdn_name].set(rdn[1]) + else: + error_message = 'rdn type \'%s\' not in object class definition' % rdn[0] + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + entry._state.set_status(STATUS_VIRTUAL) # set initial status + entry._state.set_status(STATUS_PENDING_CHANGES) # tries to change status to PENDING_CHANGES. 
If mandatory attributes are missing status is reverted to MANDATORY_MISSING + self.entries.append(entry) + return entry + + def refresh_entry(self, entry, tries=4, seconds=2): + conf_operational_attribute_prefix = get_config_parameter('ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX') + + self._do_not_reset = True + attr_list = [] + if log_enabled(PROTOCOL): + log(PROTOCOL, 'refreshing entry <%s> for <%s>', entry, self) + for attr in entry._state.attributes: # check friendly attribute name in AttrDef, do not check operational attributes + if attr.lower().startswith(conf_operational_attribute_prefix.lower()): + continue + if entry._state.definition[attr].name: + attr_list.append(entry._state.definition[attr].name) + else: + attr_list.append(entry._state.definition[attr].key) + + temp_entry = self._refresh_object(entry.entry_dn, attr_list, tries, seconds=seconds) # if any attributes is added adds only to the entry not to the definition + self._do_not_reset = False + if temp_entry: + temp_entry._state.origin = entry._state.origin + entry.__dict__.clear() + entry.__dict__['_state'] = temp_entry._state + for attr in entry._state.attributes: # returns the attribute key + entry.__dict__[attr] = entry._state.attributes[attr] + + for attr in entry.entry_attributes: # if any attribute of the class was deleted makes it virtual + if attr not in entry._state.attributes and attr in entry.entry_definition._attributes: + entry._state.attributes[attr] = WritableAttribute(entry.entry_definition[attr], entry, self) + entry.__dict__[attr] = entry._state.attributes[attr] + entry._state.set_status(entry._state._initial_status) + return True + return False diff --git a/server/www/packages/packages-linux/x64/ldap3/abstract/entry.py b/server/www/packages/packages-linux/x64/ldap3/abstract/entry.py index 18c0420..ee8bf20 100644 --- a/server/www/packages/packages-linux/x64/ldap3/abstract/entry.py +++ b/server/www/packages/packages-linux/x64/ldap3/abstract/entry.py @@ -1,671 +1,675 @@ -""" -""" - -# Created on 2016.08.19 -# -# Author: Giovanni Cannata -# -# Copyright 2016 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . - - -import json -try: - from collections import OrderedDict -except ImportError: - from ..utils.ordDict import OrderedDict # for Python 2.6 - -from os import linesep - -from .. import STRING_TYPES, SEQUENCE_TYPES, MODIFY_ADD, MODIFY_REPLACE -from .attribute import WritableAttribute -from .objectDef import ObjectDef -from .attrDef import AttrDef -from ..core.exceptions import LDAPKeyError, LDAPCursorError -from ..utils.conv import check_json_dict, format_json, prepare_for_stream -from ..protocol.rfc2849 import operation_to_ldif, add_ldif_header -from ..utils.dn import safe_dn, safe_rdn, to_dn -from ..utils.repr import to_stdout_encoding -from ..utils.ciDict import CaseInsensitiveWithAliasDict -from ..utils.config import get_config_parameter -from . 
import STATUS_VIRTUAL, STATUS_WRITABLE, STATUS_PENDING_CHANGES, STATUS_COMMITTED, STATUS_DELETED,\ - STATUS_INIT, STATUS_READY_FOR_DELETION, STATUS_READY_FOR_MOVING, STATUS_READY_FOR_RENAMING, STATUS_MANDATORY_MISSING, STATUSES, INITIAL_STATUSES -from ..core.results import RESULT_SUCCESS -from ..utils.log import log, log_enabled, ERROR, BASIC, PROTOCOL, EXTENDED - - -class EntryState(object): - """Contains data on the status of the entry. Does not pollute the Entry __dict__. - - """ - - def __init__(self, dn, cursor): - self.dn = dn - self._initial_status = None - self._to = None # used for move and rename - self.status = STATUS_INIT - self.attributes = CaseInsensitiveWithAliasDict() - self.raw_attributes = CaseInsensitiveWithAliasDict() - self.response = None - self.cursor = cursor - self.origin = None # reference to the original read-only entry (set when made writable). Needed to update attributes in read-only when modified (only if both refer the same server) - self.read_time = None - self.changes = OrderedDict() # includes changes to commit in a writable entry - if cursor.definition: - self.definition = cursor.definition - else: - self.definition = None - - def __repr__(self): - if self.__dict__ and self.dn is not None: - r = 'DN: ' + to_stdout_encoding(self.dn) + ' - STATUS: ' + ((self._initial_status + ', ') if self._initial_status != self.status else '') + self.status + ' - READ TIME: ' + (self.read_time.isoformat() if self.read_time else '') + linesep - r += 'attributes: ' + ', '.join(sorted(self.attributes.keys())) + linesep - r += 'object def: ' + (', '.join(sorted(self.definition._object_class)) if self.definition._object_class else '') + linesep - r += 'attr defs: ' + ', '.join(sorted(self.definition._attributes.keys())) + linesep - r += 'response: ' + ('present' if self.response else '') + linesep - r += 'cursor: ' + (self.cursor.__class__.__name__ if self.cursor else '') + linesep - return r - else: - return object.__repr__(self) - - def __str__(self): - return self.__repr__() - - def set_status(self, status): - conf_ignored_mandatory_attributes_in_object_def = [v.lower() for v in get_config_parameter('IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF')] - if status not in STATUSES: - error_message = 'invalid entry status ' + str(status) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - if status in INITIAL_STATUSES: - self._initial_status = status - self.status = status - if status == STATUS_DELETED: - self._initial_status = STATUS_VIRTUAL - if status == STATUS_COMMITTED: - self._initial_status = STATUS_WRITABLE - if self.status == STATUS_VIRTUAL or (self.status == STATUS_PENDING_CHANGES and self._initial_status == STATUS_VIRTUAL): # checks if all mandatory attributes are present in new entries - for attr in self.definition._attributes: - if self.definition._attributes[attr].mandatory and attr.lower() not in conf_ignored_mandatory_attributes_in_object_def: - if (attr not in self.attributes or self.attributes[attr].virtual) and attr not in self.changes: - self.status = STATUS_MANDATORY_MISSING - break - - -class EntryBase(object): - """The Entry object contains a single LDAP entry. - Attributes can be accessed either by sequence, by assignment - or as dictionary keys. Keys are not case sensitive. 
- - The Entry object is read only - - - The DN is retrieved by entry_dn - - The cursor reference is in _cursor - - Raw attributes values are retrieved with _raw_attributes and the _raw_attribute() methods - """ - - def __init__(self, dn, cursor): - self.__dict__['_state'] = EntryState(dn, cursor) - - def __repr__(self): - if self.__dict__ and self.entry_dn is not None: - r = 'DN: ' + to_stdout_encoding(self.entry_dn) + ' - STATUS: ' + ((self._state._initial_status + ', ') if self._state._initial_status != self.entry_status else '') + self.entry_status + ' - READ TIME: ' + (self.entry_read_time.isoformat() if self.entry_read_time else '') + linesep - if self._state.attributes: - for attr in sorted(self._state.attributes): - if self._state.attributes[attr] or (hasattr(self._state.attributes[attr], 'changes') and self._state.attributes[attr].changes): - r += ' ' + repr(self._state.attributes[attr]) + linesep - return r - else: - return object.__repr__(self) - - def __str__(self): - return self.__repr__() - - def __iter__(self): - for attribute in self._state.attributes: - yield self._state.attributes[attribute] - # raise StopIteration # deprecated in PEP 479 - return - - def __contains__(self, item): - try: - self.__getitem__(item) - return True - except LDAPKeyError: - return False - - def __getattr__(self, item): - if isinstance(item, STRING_TYPES): - if item == '_state': - return self.__dict__['_state'] - item = ''.join(item.split()).lower() - attr_found = None - for attr in self._state.attributes.keys(): - if item == attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.aliases(): - if item == attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.keys(): - if item + ';binary' == attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.aliases(): - if item + ';binary' == attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.keys(): - if item + ';range' in attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.aliases(): - if item + ';range' in attr.lower(): - attr_found = attr - break - if not attr_found: - error_message = 'attribute \'%s\' not found' % item - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - return self._state.attributes[attr] - error_message = 'attribute name must be a string' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - def __setattr__(self, item, value): - if item in self._state.attributes: - error_message = 'attribute \'%s\' is read only' % item - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - else: - error_message = 'entry is read only, cannot add \'%s\'' % item - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - def __getitem__(self, item): - if isinstance(item, STRING_TYPES): - item = ''.join(item.split()).lower() - attr_found = None - for attr in self._state.attributes.keys(): - if item == attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.aliases(): - if item == attr.lower(): - attr_found = attr - break - if not attr_found: - for attr in self._state.attributes.keys(): - if item + ';binary' == attr.lower(): - attr_found = attr - 
break - if not attr_found: - for attr in self._state.attributes.aliases(): - if item + ';binary' == attr.lower(): - attr_found = attr - break - if not attr_found: - error_message = 'key \'%s\' not found' % item - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPKeyError(error_message) - return self._state.attributes[attr] - - error_message = 'key must be a string' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPKeyError(error_message) - - def __eq__(self, other): - if isinstance(other, EntryBase): - return self.entry_dn == other.entry_dn - - return False - - def __lt__(self, other): - if isinstance(other, EntryBase): - return self.entry_dn <= other.entry_dn - - return False - - @property - def entry_dn(self): - return self._state.dn - - @property - def entry_cursor(self): - return self._state.cursor - - @property - def entry_status(self): - return self._state.status - - @property - def entry_definition(self): - return self._state.definition - - @property - def entry_raw_attributes(self): - return self._state.entry_raw_attributes - - def entry_raw_attribute(self, name): - """ - - :param name: name of the attribute - :return: raw (unencoded) value of the attribute, None if attribute is not found - """ - return self._state.entry_raw_attributes[name] if name in self._state.entry_raw_attributes else None - - @property - def entry_mandatory_attributes(self): - return [attribute for attribute in self.entry_definition._attributes if self.entry_definition._attributes[attribute].mandatory] - - @property - def entry_attributes(self): - return list(self._state.attributes.keys()) - - @property - def entry_attributes_as_dict(self): - return dict((attribute_key, attribute_value.values) for (attribute_key, attribute_value) in self._state.attributes.items()) - - @property - def entry_read_time(self): - return self._state.read_time - - @property - def _changes(self): - return self._state.changes - - def entry_to_json(self, raw=False, indent=4, sort=True, stream=None, checked_attributes=True, include_empty=True): - json_entry = dict() - json_entry['dn'] = self.entry_dn - if checked_attributes: - if not include_empty: - # needed for python 2.6 compatibility - json_entry['attributes'] = dict((key, self.entry_attributes_as_dict[key]) for key in self.entry_attributes_as_dict if self.entry_attributes_as_dict[key]) - else: - json_entry['attributes'] = self.entry_attributes_as_dict - if raw: - if not include_empty: - # needed for python 2.6 compatibility - json_entry['raw'] = dict((key, self.entry_raw_attributes[key]) for key in self.entry_raw_attributes if self.entry_raw_attributes[key]) - else: - json_entry['raw'] = dict(self.entry_raw_attributes) - - if str is bytes: # Python 2 - check_json_dict(json_entry) - - json_output = json.dumps(json_entry, - ensure_ascii=True, - sort_keys=sort, - indent=indent, - check_circular=True, - default=format_json, - separators=(',', ': ')) - - if stream: - stream.write(json_output) - - return json_output - - def entry_to_ldif(self, all_base64=False, line_separator=None, sort_order=None, stream=None): - ldif_lines = operation_to_ldif('searchResponse', [self._state.response], all_base64, sort_order=sort_order) - ldif_lines = add_ldif_header(ldif_lines) - line_separator = line_separator or linesep - ldif_output = line_separator.join(ldif_lines) - if stream: - if stream.tell() == 0: - header = add_ldif_header(['-'])[0] - stream.write(prepare_for_stream(header + line_separator + line_separator)) - 
stream.write(prepare_for_stream(ldif_output + line_separator + line_separator)) - return ldif_output - - -class Entry(EntryBase): - """The Entry object contains a single LDAP entry. - Attributes can be accessed either by sequence, by assignment - or as dictionary keys. Keys are not case sensitive. - - The Entry object is read only - - - The DN is retrieved by entry_dn - - The Reader reference is in _cursor() - - Raw attributes values are retrieved by the _ra_attributes and - _raw_attribute() methods - - """ - def entry_writable(self, object_def=None, writer_cursor=None, attributes=None, custom_validator=None, auxiliary_class=None): - if not self.entry_cursor.schema: - error_message = 'schema must be available to make an entry writable' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - # returns a new WritableEntry and its Writer cursor - if object_def is None: - if self.entry_cursor.definition._object_class: - object_def = self.entry_definition._object_class - auxiliary_class = self.entry_definition._auxiliary_class + (auxiliary_class if isinstance(auxiliary_class, SEQUENCE_TYPES) else []) - elif 'objectclass' in self: - object_def = self.objectclass.values - - if not object_def: - error_message = 'object class must be specified to make an entry writable' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - if not isinstance(object_def, ObjectDef): - object_def = ObjectDef(object_def, self.entry_cursor.schema, custom_validator, auxiliary_class) - - if attributes: - if isinstance(attributes, STRING_TYPES): - attributes = [attributes] - - if isinstance(attributes, SEQUENCE_TYPES): - for attribute in attributes: - if attribute not in object_def._attributes: - error_message = 'attribute \'%s\' not in schema for \'%s\'' % (attribute, object_def) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - else: - attributes = [] - - if not writer_cursor: - from .cursor import Writer # local import to avoid circular reference in import at startup - writable_cursor = Writer(self.entry_cursor.connection, object_def) - else: - writable_cursor = writer_cursor - - if attributes: # force reading of attributes - writable_entry = writable_cursor._refresh_object(self.entry_dn, list(attributes) + self.entry_attributes) - else: - writable_entry = writable_cursor._create_entry(self._state.response) - writable_cursor.entries.append(writable_entry) - writable_entry._state.read_time = self.entry_read_time - writable_entry._state.origin = self # reference to the original read-only entry - # checks original entry for custom definitions in AttrDefs - for attr in writable_entry._state.origin.entry_definition._attributes: - original_attr = writable_entry._state.origin.entry_definition._attributes[attr] - if attr != original_attr.name and attr not in writable_entry._state.attributes: - old_attr_def = writable_entry.entry_definition._attributes[original_attr.name] - new_attr_def = AttrDef(original_attr.name, - key=attr, - validate=original_attr.validate, - pre_query=original_attr.pre_query, - post_query=original_attr.post_query, - default=original_attr.default, - dereference_dn=original_attr.dereference_dn, - description=original_attr.description, - mandatory=old_attr_def.mandatory, # keeps value read from schema - single_value=old_attr_def.single_value, # keeps value read from schema - alias=original_attr.other_names) - object_def = 
writable_entry.entry_definition - object_def -= old_attr_def - object_def += new_attr_def - # updates attribute name in entry attributes - new_attr = WritableAttribute(new_attr_def, writable_entry, writable_cursor) - if original_attr.name in writable_entry._state.attributes: - new_attr.other_names = writable_entry._state.attributes[original_attr.name].other_names - new_attr.raw_values = writable_entry._state.attributes[original_attr.name].raw_values - new_attr.values = writable_entry._state.attributes[original_attr.name].values - new_attr.response = writable_entry._state.attributes[original_attr.name].response - writable_entry._state.attributes[attr] = new_attr - # writable_entry._state.attributes.set_alias(attr, new_attr.other_names) - del writable_entry._state.attributes[original_attr.name] - - writable_entry._state.set_status(STATUS_WRITABLE) - return writable_entry - - -class WritableEntry(EntryBase): - def __setitem__(self, key, value): - if value is not Ellipsis: # hack for using implicit operators in writable attributes - self.__setattr__(key, value) - - def __setattr__(self, item, value): - conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')] - if item == '_state' and isinstance(value, EntryState): - self.__dict__['_state'] = value - return - - if value is not Ellipsis: # hack for using implicit operators in writable attributes - # checks if using an alias - if item in self.entry_cursor.definition._attributes or item.lower() in conf_attributes_excluded_from_object_def: - if item not in self._state.attributes: # setting value to an attribute still without values - new_attribute = WritableAttribute(self.entry_cursor.definition._attributes[item], self, cursor=self.entry_cursor) - self._state.attributes[str(item)] = new_attribute # force item to a string for key in attributes dict - self._state.attributes[item].set(value) # try to add to new_values - else: - error_message = 'attribute \'%s\' not defined' % item - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - def __getattr__(self, item): - if isinstance(item, STRING_TYPES): - if item == '_state': - return self.__dict__['_state'] - item = ''.join(item.split()).lower() - for attr in self._state.attributes.keys(): - if item == attr.lower(): - return self._state.attributes[attr] - for attr in self._state.attributes.aliases(): - if item == attr.lower(): - return self._state.attributes[attr] - if item in self.entry_definition._attributes: # item is a new attribute to commit, creates the AttrDef and add to the attributes to retrive - self._state.attributes[item] = WritableAttribute(self.entry_definition._attributes[item], self, self.entry_cursor) - self.entry_cursor.attributes.add(item) - return self._state.attributes[item] - error_message = 'attribute \'%s\' not defined' % item - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - else: - error_message = 'attribute name must be a string' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - - @property - def entry_virtual_attributes(self): - return [attr for attr in self.entry_attributes if self[attr].virtual] - - def entry_commit_changes(self, refresh=True, controls=None, clear_history=True): - if clear_history: - self.entry_cursor._reset_history() - - if self.entry_status == STATUS_READY_FOR_DELETION: - result = 
self.entry_cursor.connection.delete(self.entry_dn, controls) - if not self.entry_cursor.connection.strategy.sync: - response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) - else: - response = self.entry_cursor.connection.response - result = self.entry_cursor.connection.result - request = self.entry_cursor.connection.request - self.entry_cursor._store_operation_in_history(request, result, response) - if result['result'] == RESULT_SUCCESS: - dn = self.entry_dn - if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # deletes original read-only Entry - cursor = self._state.origin.entry_cursor - self._state.origin.__dict__.clear() - self._state.origin.__dict__['_state'] = EntryState(dn, cursor) - self._state.origin._state.set_status(STATUS_DELETED) - cursor = self.entry_cursor - self.__dict__.clear() - self._state = EntryState(dn, cursor) - self._state.set_status(STATUS_DELETED) - return True - return False - elif self.entry_status == STATUS_READY_FOR_MOVING: - result = self.entry_cursor.connection.modify_dn(self.entry_dn, '+'.join(safe_rdn(self.entry_dn)), new_superior=self._state._to) - if not self.entry_cursor.connection.strategy.sync: - response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) - else: - response = self.entry_cursor.connection.response - result = self.entry_cursor.connection.result - request = self.entry_cursor.connection.request - self.entry_cursor._store_operation_in_history(request, result, response) - if result['result'] == RESULT_SUCCESS: - self._state.dn = safe_dn('+'.join(safe_rdn(self.entry_dn)) + ',' + self._state._to) - if refresh: - if self.entry_refresh(): - if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # refresh dn of origin - self._state.origin._state.dn = self.entry_dn - self._state.set_status(STATUS_COMMITTED) - self._state._to = None - return True - return False - elif self.entry_status == STATUS_READY_FOR_RENAMING: - rdn = '+'.join(safe_rdn(self._state._to)) - result = self.entry_cursor.connection.modify_dn(self.entry_dn, rdn) - if not self.entry_cursor.connection.strategy.sync: - response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) - else: - response = self.entry_cursor.connection.response - result = self.entry_cursor.connection.result - request = self.entry_cursor.connection.request - self.entry_cursor._store_operation_in_history(request, result, response) - if result['result'] == RESULT_SUCCESS: - self._state.dn = rdn + ',' + ','.join(to_dn(self.entry_dn)[1:]) - if refresh: - if self.entry_refresh(): - if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # refresh dn of origin - self._state.origin._state.dn = self.entry_dn - self._state.set_status(STATUS_COMMITTED) - self._state._to = None - return True - return False - elif self.entry_status in [STATUS_VIRTUAL, STATUS_MANDATORY_MISSING]: - missing_attributes = [] - for attr in self.entry_mandatory_attributes: - if (attr not in self._state.attributes or self._state.attributes[attr].virtual) and attr not in self._changes: - missing_attributes.append('\'' + attr + '\'') - error_message = 'mandatory attributes %s missing in entry %s' % (', '.join(missing_attributes), self.entry_dn) - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - 
elif self.entry_status == STATUS_PENDING_CHANGES: - if self._changes: - if self.entry_definition._auxiliary_class: # checks if an attribute is from an auxiliary class and adds it to the objectClass attribute if not present - for attr in self._changes: - # checks schema to see if attribute is defined in one of the already present object classes - attr_classes = self.entry_cursor.schema.attribute_types[attr].mandatory_in + self.entry_cursor.schema.attribute_types[attr].optional_in - for object_class in self.objectclass: - if object_class in attr_classes: - break - else: # executed only if the attribute class is not present in the objectClass attribute - # checks if attribute is defined in one of the possible auxiliary classes - for aux_class in self.entry_definition._auxiliary_class: - if aux_class in attr_classes: - if self._state._initial_status == STATUS_VIRTUAL: # entry is new, there must be a pending objectClass MODIFY_REPLACE - self._changes['objectClass'][0][1].append(aux_class) - else: - self.objectclass += aux_class - if self._state._initial_status == STATUS_VIRTUAL: - new_attributes = dict() - for attr in self._changes: - new_attributes[attr] = self._changes[attr][0][1] - result = self.entry_cursor.connection.add(self.entry_dn, None, new_attributes, controls) - else: - result = self.entry_cursor.connection.modify(self.entry_dn, self._changes, controls) - - if not self.entry_cursor.connection.strategy.sync: # asynchronous request - response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) - else: - response = self.entry_cursor.connection.response - result = self.entry_cursor.connection.result - request = self.entry_cursor.connection.request - self.entry_cursor._store_operation_in_history(request, result, response) - - if result['result'] == RESULT_SUCCESS: - if refresh: - if self.entry_refresh(): - if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # updates original read-only entry if present - for attr in self: # adds AttrDefs from writable entry to origin entry definition if some is missing - if attr.key in self.entry_definition._attributes and attr.key not in self._state.origin.entry_definition._attributes: - self._state.origin.entry_cursor.definition.add_attribute(self.entry_cursor.definition._attributes[attr.key]) # adds AttrDef from writable entry to original entry if missing - temp_entry = self._state.origin.entry_cursor._create_entry(self._state.response) - self._state.origin.__dict__.clear() - self._state.origin.__dict__['_state'] = temp_entry._state - for attr in self: # returns the whole attribute object - if not attr.virtual: - self._state.origin.__dict__[attr.key] = self._state.origin._state.attributes[attr.key] - self._state.origin._state.read_time = self.entry_read_time - else: - self.entry_discard_changes() # if not refreshed remove committed changes - self._state.set_status(STATUS_COMMITTED) - return True - return False - - def entry_discard_changes(self): - self._changes.clear() - self._state.set_status(self._state._initial_status) - - def entry_delete(self): - if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_DELETION]: - error_message = 'cannot delete entry, invalid status: ' + self.entry_status - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - self._state.set_status(STATUS_READY_FOR_DELETION) - - def entry_refresh(self, tries=4, seconds=2): - """ - - Refreshes the 
entry from the LDAP Server - """ - if self.entry_cursor.connection: - if self.entry_cursor.refresh_entry(self, tries, seconds): - return True - - return False - - def entry_move(self, destination_dn): - if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_MOVING]: - error_message = 'cannot move entry, invalid status: ' + self.entry_status - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - self._state._to = safe_dn(destination_dn) - self._state.set_status(STATUS_READY_FOR_MOVING) - - def entry_rename(self, new_name): - if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_RENAMING]: - error_message = 'cannot rename entry, invalid status: ' + self.entry_status - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', error_message, self) - raise LDAPCursorError(error_message) - self._state._to = new_name - self._state.set_status(STATUS_READY_FOR_RENAMING) - - @property - def entry_changes(self): - return self._changes +""" +""" + +# Created on 2016.08.19 +# +# Author: Giovanni Cannata +# +# Copyright 2016 - 2018 Giovanni Cannata +# +# This file is part of ldap3. +# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . + + +import json +try: + from collections import OrderedDict +except ImportError: + from ..utils.ordDict import OrderedDict # for Python 2.6 + +from os import linesep + +from .. import STRING_TYPES, SEQUENCE_TYPES, MODIFY_ADD, MODIFY_REPLACE +from .attribute import WritableAttribute +from .objectDef import ObjectDef +from .attrDef import AttrDef +from ..core.exceptions import LDAPKeyError, LDAPCursorError +from ..utils.conv import check_json_dict, format_json, prepare_for_stream +from ..protocol.rfc2849 import operation_to_ldif, add_ldif_header +from ..utils.dn import safe_dn, safe_rdn, to_dn +from ..utils.repr import to_stdout_encoding +from ..utils.ciDict import CaseInsensitiveWithAliasDict +from ..utils.config import get_config_parameter +from . import STATUS_VIRTUAL, STATUS_WRITABLE, STATUS_PENDING_CHANGES, STATUS_COMMITTED, STATUS_DELETED,\ + STATUS_INIT, STATUS_READY_FOR_DELETION, STATUS_READY_FOR_MOVING, STATUS_READY_FOR_RENAMING, STATUS_MANDATORY_MISSING, STATUSES, INITIAL_STATUSES +from ..core.results import RESULT_SUCCESS +from ..utils.log import log, log_enabled, ERROR, BASIC, PROTOCOL, EXTENDED + + +class EntryState(object): + """Contains data on the status of the entry. Does not pollute the Entry __dict__. + + """ + + def __init__(self, dn, cursor): + self.dn = dn + self._initial_status = None + self._to = None # used for move and rename + self.status = STATUS_INIT + self.attributes = CaseInsensitiveWithAliasDict() + self.raw_attributes = CaseInsensitiveWithAliasDict() + self.response = None + self.cursor = cursor + self.origin = None # reference to the original read-only entry (set when made writable). 
Needed to update attributes in read-only when modified (only if both refer the same server) + self.read_time = None + self.changes = OrderedDict() # includes changes to commit in a writable entry + if cursor.definition: + self.definition = cursor.definition + else: + self.definition = None + + def __repr__(self): + if self.__dict__ and self.dn is not None: + r = 'DN: ' + to_stdout_encoding(self.dn) + ' - STATUS: ' + ((self._initial_status + ', ') if self._initial_status != self.status else '') + self.status + ' - READ TIME: ' + (self.read_time.isoformat() if self.read_time else '') + linesep + r += 'attributes: ' + ', '.join(sorted(self.attributes.keys())) + linesep + r += 'object def: ' + (', '.join(sorted(self.definition._object_class)) if self.definition._object_class else '') + linesep + r += 'attr defs: ' + ', '.join(sorted(self.definition._attributes.keys())) + linesep + r += 'response: ' + ('present' if self.response else '') + linesep + r += 'cursor: ' + (self.cursor.__class__.__name__ if self.cursor else '') + linesep + return r + else: + return object.__repr__(self) + + def __str__(self): + return self.__repr__() + + def set_status(self, status): + conf_ignored_mandatory_attributes_in_object_def = [v.lower() for v in get_config_parameter('IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF')] + if status not in STATUSES: + error_message = 'invalid entry status ' + str(status) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + if status in INITIAL_STATUSES: + self._initial_status = status + self.status = status + if status == STATUS_DELETED: + self._initial_status = STATUS_VIRTUAL + if status == STATUS_COMMITTED: + self._initial_status = STATUS_WRITABLE + if self.status == STATUS_VIRTUAL or (self.status == STATUS_PENDING_CHANGES and self._initial_status == STATUS_VIRTUAL): # checks if all mandatory attributes are present in new entries + for attr in self.definition._attributes: + if self.definition._attributes[attr].mandatory and attr.lower() not in conf_ignored_mandatory_attributes_in_object_def: + if (attr not in self.attributes or self.attributes[attr].virtual) and attr not in self.changes: + self.status = STATUS_MANDATORY_MISSING + break + + @property + def entry_raw_attributes(self): + return self.raw_attributes + + +class EntryBase(object): + """The Entry object contains a single LDAP entry. + Attributes can be accessed either by sequence, by assignment + or as dictionary keys. Keys are not case sensitive. 
+ + The Entry object is read only + + - The DN is retrieved by entry_dn + - The cursor reference is in _cursor + - Raw attributes values are retrieved with _raw_attributes and the _raw_attribute() methods + """ + + def __init__(self, dn, cursor): + self.__dict__['_state'] = EntryState(dn, cursor) + + def __repr__(self): + if self.__dict__ and self.entry_dn is not None: + r = 'DN: ' + to_stdout_encoding(self.entry_dn) + ' - STATUS: ' + ((self._state._initial_status + ', ') if self._state._initial_status != self.entry_status else '') + self.entry_status + ' - READ TIME: ' + (self.entry_read_time.isoformat() if self.entry_read_time else '') + linesep + if self._state.attributes: + for attr in sorted(self._state.attributes): + if self._state.attributes[attr] or (hasattr(self._state.attributes[attr], 'changes') and self._state.attributes[attr].changes): + r += ' ' + repr(self._state.attributes[attr]) + linesep + return r + else: + return object.__repr__(self) + + def __str__(self): + return self.__repr__() + + def __iter__(self): + for attribute in self._state.attributes: + yield self._state.attributes[attribute] + # raise StopIteration # deprecated in PEP 479 + return + + def __contains__(self, item): + try: + self.__getitem__(item) + return True + except LDAPKeyError: + return False + + def __getattr__(self, item): + if isinstance(item, STRING_TYPES): + if item == '_state': + return self.__dict__['_state'] + item = ''.join(item.split()).lower() + attr_found = None + for attr in self._state.attributes.keys(): + if item == attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.aliases(): + if item == attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.keys(): + if item + ';binary' == attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.aliases(): + if item + ';binary' == attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.keys(): + if item + ';range' in attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.aliases(): + if item + ';range' in attr.lower(): + attr_found = attr + break + if not attr_found: + error_message = 'attribute \'%s\' not found' % item + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + return self._state.attributes[attr] + error_message = 'attribute name must be a string' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + def __setattr__(self, item, value): + if item in self._state.attributes: + error_message = 'attribute \'%s\' is read only' % item + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + else: + error_message = 'entry is read only, cannot add \'%s\'' % item + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + def __getitem__(self, item): + if isinstance(item, STRING_TYPES): + item = ''.join(item.split()).lower() + attr_found = None + for attr in self._state.attributes.keys(): + if item == attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.aliases(): + if item == attr.lower(): + attr_found = attr + break + if not attr_found: + for attr in self._state.attributes.keys(): + if item + ';binary' == attr.lower(): + attr_found = attr + 
break + if not attr_found: + for attr in self._state.attributes.aliases(): + if item + ';binary' == attr.lower(): + attr_found = attr + break + if not attr_found: + error_message = 'key \'%s\' not found' % item + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPKeyError(error_message) + return self._state.attributes[attr] + + error_message = 'key must be a string' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPKeyError(error_message) + + def __eq__(self, other): + if isinstance(other, EntryBase): + return self.entry_dn == other.entry_dn + + return False + + def __lt__(self, other): + if isinstance(other, EntryBase): + return self.entry_dn <= other.entry_dn + + return False + + @property + def entry_dn(self): + return self._state.dn + + @property + def entry_cursor(self): + return self._state.cursor + + @property + def entry_status(self): + return self._state.status + + @property + def entry_definition(self): + return self._state.definition + + @property + def entry_raw_attributes(self): + return self._state.raw_attributes + + def entry_raw_attribute(self, name): + """ + + :param name: name of the attribute + :return: raw (unencoded) value of the attribute, None if attribute is not found + """ + return self._state.raw_attributes[name] if name in self._state.raw_attributes else None + + @property + def entry_mandatory_attributes(self): + return [attribute for attribute in self.entry_definition._attributes if self.entry_definition._attributes[attribute].mandatory] + + @property + def entry_attributes(self): + return list(self._state.attributes.keys()) + + @property + def entry_attributes_as_dict(self): + return dict((attribute_key, attribute_value.values) for (attribute_key, attribute_value) in self._state.attributes.items()) + + @property + def entry_read_time(self): + return self._state.read_time + + @property + def _changes(self): + return self._state.changes + + def entry_to_json(self, raw=False, indent=4, sort=True, stream=None, checked_attributes=True, include_empty=True): + json_entry = dict() + json_entry['dn'] = self.entry_dn + if checked_attributes: + if not include_empty: + # needed for python 2.6 compatibility + json_entry['attributes'] = dict((key, self.entry_attributes_as_dict[key]) for key in self.entry_attributes_as_dict if self.entry_attributes_as_dict[key]) + else: + json_entry['attributes'] = self.entry_attributes_as_dict + if raw: + if not include_empty: + # needed for python 2.6 compatibility + json_entry['raw'] = dict((key, self.entry_raw_attributes[key]) for key in self.entry_raw_attributes if self.entry_raw_attributes[key]) + else: + json_entry['raw'] = dict(self.entry_raw_attributes) + + if str is bytes: # Python 2 + check_json_dict(json_entry) + + json_output = json.dumps(json_entry, + ensure_ascii=True, + sort_keys=sort, + indent=indent, + check_circular=True, + default=format_json, + separators=(',', ': ')) + + if stream: + stream.write(json_output) + + return json_output + + def entry_to_ldif(self, all_base64=False, line_separator=None, sort_order=None, stream=None): + ldif_lines = operation_to_ldif('searchResponse', [self._state.response], all_base64, sort_order=sort_order) + ldif_lines = add_ldif_header(ldif_lines) + line_separator = line_separator or linesep + ldif_output = line_separator.join(ldif_lines) + if stream: + if stream.tell() == 0: + header = add_ldif_header(['-'])[0] + stream.write(prepare_for_stream(header + line_separator + line_separator)) + 
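+            # the LDIF version header is written only while the stream is still
+            # empty, so repeated calls append their blocks under a single header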
stream.write(prepare_for_stream(ldif_output + line_separator + line_separator)) + return ldif_output + + +class Entry(EntryBase): + """The Entry object contains a single LDAP entry. + Attributes can be accessed either by sequence, by assignment + or as dictionary keys. Keys are not case sensitive. + + The Entry object is read only + + - The DN is retrieved by entry_dn + - The Reader reference is in _cursor() + - Raw attributes values are retrieved by the _ra_attributes and + _raw_attribute() methods + + """ + def entry_writable(self, object_def=None, writer_cursor=None, attributes=None, custom_validator=None, auxiliary_class=None): + if not self.entry_cursor.schema: + error_message = 'schema must be available to make an entry writable' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + # returns a new WritableEntry and its Writer cursor + if object_def is None: + if self.entry_cursor.definition._object_class: + object_def = self.entry_definition._object_class + auxiliary_class = self.entry_definition._auxiliary_class + (auxiliary_class if isinstance(auxiliary_class, SEQUENCE_TYPES) else []) + elif 'objectclass' in self: + object_def = self.objectclass.values + + if not object_def: + error_message = 'object class must be specified to make an entry writable' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + if not isinstance(object_def, ObjectDef): + object_def = ObjectDef(object_def, self.entry_cursor.schema, custom_validator, auxiliary_class) + + if attributes: + if isinstance(attributes, STRING_TYPES): + attributes = [attributes] + + if isinstance(attributes, SEQUENCE_TYPES): + for attribute in attributes: + if attribute not in object_def._attributes: + error_message = 'attribute \'%s\' not in schema for \'%s\'' % (attribute, object_def) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + else: + attributes = [] + + if not writer_cursor: + from .cursor import Writer # local import to avoid circular reference in import at startup + writable_cursor = Writer(self.entry_cursor.connection, object_def) + else: + writable_cursor = writer_cursor + + if attributes: # force reading of attributes + writable_entry = writable_cursor._refresh_object(self.entry_dn, list(attributes) + self.entry_attributes) + else: + writable_entry = writable_cursor._create_entry(self._state.response) + writable_cursor.entries.append(writable_entry) + writable_entry._state.read_time = self.entry_read_time + writable_entry._state.origin = self # reference to the original read-only entry + # checks original entry for custom definitions in AttrDefs + for attr in writable_entry._state.origin.entry_definition._attributes: + original_attr = writable_entry._state.origin.entry_definition._attributes[attr] + if attr != original_attr.name and attr not in writable_entry._state.attributes: + old_attr_def = writable_entry.entry_definition._attributes[original_attr.name] + new_attr_def = AttrDef(original_attr.name, + key=attr, + validate=original_attr.validate, + pre_query=original_attr.pre_query, + post_query=original_attr.post_query, + default=original_attr.default, + dereference_dn=original_attr.dereference_dn, + description=original_attr.description, + mandatory=old_attr_def.mandatory, # keeps value read from schema + single_value=old_attr_def.single_value, # keeps value read from schema + alias=original_attr.other_names) + object_def = 
writable_entry.entry_definition + object_def -= old_attr_def + object_def += new_attr_def + # updates attribute name in entry attributes + new_attr = WritableAttribute(new_attr_def, writable_entry, writable_cursor) + if original_attr.name in writable_entry._state.attributes: + new_attr.other_names = writable_entry._state.attributes[original_attr.name].other_names + new_attr.raw_values = writable_entry._state.attributes[original_attr.name].raw_values + new_attr.values = writable_entry._state.attributes[original_attr.name].values + new_attr.response = writable_entry._state.attributes[original_attr.name].response + writable_entry._state.attributes[attr] = new_attr + # writable_entry._state.attributes.set_alias(attr, new_attr.other_names) + del writable_entry._state.attributes[original_attr.name] + + writable_entry._state.set_status(STATUS_WRITABLE) + return writable_entry + + +class WritableEntry(EntryBase): + def __setitem__(self, key, value): + if value is not Ellipsis: # hack for using implicit operators in writable attributes + self.__setattr__(key, value) + + def __setattr__(self, item, value): + conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')] + if item == '_state' and isinstance(value, EntryState): + self.__dict__['_state'] = value + return + + if value is not Ellipsis: # hack for using implicit operators in writable attributes + # checks if using an alias + if item in self.entry_cursor.definition._attributes or item.lower() in conf_attributes_excluded_from_object_def: + if item not in self._state.attributes: # setting value to an attribute still without values + new_attribute = WritableAttribute(self.entry_cursor.definition._attributes[item], self, cursor=self.entry_cursor) + self._state.attributes[str(item)] = new_attribute # force item to a string for key in attributes dict + self._state.attributes[item].set(value) # try to add to new_values + else: + error_message = 'attribute \'%s\' not defined' % item + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + def __getattr__(self, item): + if isinstance(item, STRING_TYPES): + if item == '_state': + return self.__dict__['_state'] + item = ''.join(item.split()).lower() + for attr in self._state.attributes.keys(): + if item == attr.lower(): + return self._state.attributes[attr] + for attr in self._state.attributes.aliases(): + if item == attr.lower(): + return self._state.attributes[attr] + if item in self.entry_definition._attributes: # item is a new attribute to commit, creates the AttrDef and add to the attributes to retrive + self._state.attributes[item] = WritableAttribute(self.entry_definition._attributes[item], self, self.entry_cursor) + self.entry_cursor.attributes.add(item) + return self._state.attributes[item] + error_message = 'attribute \'%s\' not defined' % item + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + else: + error_message = 'attribute name must be a string' + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + + @property + def entry_virtual_attributes(self): + return [attr for attr in self.entry_attributes if self[attr].virtual] + + def entry_commit_changes(self, refresh=True, controls=None, clear_history=True): + if clear_history: + self.entry_cursor._reset_history() + + if self.entry_status == STATUS_READY_FOR_DELETION: + result = 
self.entry_cursor.connection.delete(self.entry_dn, controls) + if not self.entry_cursor.connection.strategy.sync: + response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) + else: + response = self.entry_cursor.connection.response + result = self.entry_cursor.connection.result + request = self.entry_cursor.connection.request + self.entry_cursor._store_operation_in_history(request, result, response) + if result['result'] == RESULT_SUCCESS: + dn = self.entry_dn + if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # deletes original read-only Entry + cursor = self._state.origin.entry_cursor + self._state.origin.__dict__.clear() + self._state.origin.__dict__['_state'] = EntryState(dn, cursor) + self._state.origin._state.set_status(STATUS_DELETED) + cursor = self.entry_cursor + self.__dict__.clear() + self._state = EntryState(dn, cursor) + self._state.set_status(STATUS_DELETED) + return True + return False + elif self.entry_status == STATUS_READY_FOR_MOVING: + result = self.entry_cursor.connection.modify_dn(self.entry_dn, '+'.join(safe_rdn(self.entry_dn)), new_superior=self._state._to) + if not self.entry_cursor.connection.strategy.sync: + response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) + else: + response = self.entry_cursor.connection.response + result = self.entry_cursor.connection.result + request = self.entry_cursor.connection.request + self.entry_cursor._store_operation_in_history(request, result, response) + if result['result'] == RESULT_SUCCESS: + self._state.dn = safe_dn('+'.join(safe_rdn(self.entry_dn)) + ',' + self._state._to) + if refresh: + if self.entry_refresh(): + if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # refresh dn of origin + self._state.origin._state.dn = self.entry_dn + self._state.set_status(STATUS_COMMITTED) + self._state._to = None + return True + return False + elif self.entry_status == STATUS_READY_FOR_RENAMING: + rdn = '+'.join(safe_rdn(self._state._to)) + result = self.entry_cursor.connection.modify_dn(self.entry_dn, rdn) + if not self.entry_cursor.connection.strategy.sync: + response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) + else: + response = self.entry_cursor.connection.response + result = self.entry_cursor.connection.result + request = self.entry_cursor.connection.request + self.entry_cursor._store_operation_in_history(request, result, response) + if result['result'] == RESULT_SUCCESS: + self._state.dn = rdn + ',' + ','.join(to_dn(self.entry_dn)[1:]) + if refresh: + if self.entry_refresh(): + if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # refresh dn of origin + self._state.origin._state.dn = self.entry_dn + self._state.set_status(STATUS_COMMITTED) + self._state._to = None + return True + return False + elif self.entry_status in [STATUS_VIRTUAL, STATUS_MANDATORY_MISSING]: + missing_attributes = [] + for attr in self.entry_mandatory_attributes: + if (attr not in self._state.attributes or self._state.attributes[attr].virtual) and attr not in self._changes: + missing_attributes.append('\'' + attr + '\'') + error_message = 'mandatory attributes %s missing in entry %s' % (', '.join(missing_attributes), self.entry_dn) + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + 
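+        # the next branch commits pending attribute changes: a new (virtual) entry
+        # is sent as an Add request built from the accumulated change list, an
+        # existing entry as a Modify request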
elif self.entry_status == STATUS_PENDING_CHANGES: + if self._changes: + if self.entry_definition._auxiliary_class: # checks if an attribute is from an auxiliary class and adds it to the objectClass attribute if not present + for attr in self._changes: + # checks schema to see if attribute is defined in one of the already present object classes + attr_classes = self.entry_cursor.schema.attribute_types[attr].mandatory_in + self.entry_cursor.schema.attribute_types[attr].optional_in + for object_class in self.objectclass: + if object_class in attr_classes: + break + else: # executed only if the attribute class is not present in the objectClass attribute + # checks if attribute is defined in one of the possible auxiliary classes + for aux_class in self.entry_definition._auxiliary_class: + if aux_class in attr_classes: + if self._state._initial_status == STATUS_VIRTUAL: # entry is new, there must be a pending objectClass MODIFY_REPLACE + self._changes['objectClass'][0][1].append(aux_class) + else: + self.objectclass += aux_class + if self._state._initial_status == STATUS_VIRTUAL: + new_attributes = dict() + for attr in self._changes: + new_attributes[attr] = self._changes[attr][0][1] + result = self.entry_cursor.connection.add(self.entry_dn, None, new_attributes, controls) + else: + result = self.entry_cursor.connection.modify(self.entry_dn, self._changes, controls) + + if not self.entry_cursor.connection.strategy.sync: # asynchronous request + response, result, request = self.entry_cursor.connection.get_response(result, get_request=True) + else: + response = self.entry_cursor.connection.response + result = self.entry_cursor.connection.result + request = self.entry_cursor.connection.request + self.entry_cursor._store_operation_in_history(request, result, response) + + if result['result'] == RESULT_SUCCESS: + if refresh: + if self.entry_refresh(): + if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # updates original read-only entry if present + for attr in self: # adds AttrDefs from writable entry to origin entry definition if some is missing + if attr.key in self.entry_definition._attributes and attr.key not in self._state.origin.entry_definition._attributes: + self._state.origin.entry_cursor.definition.add_attribute(self.entry_cursor.definition._attributes[attr.key]) # adds AttrDef from writable entry to original entry if missing + temp_entry = self._state.origin.entry_cursor._create_entry(self._state.response) + self._state.origin.__dict__.clear() + self._state.origin.__dict__['_state'] = temp_entry._state + for attr in self: # returns the whole attribute object + if not attr.virtual: + self._state.origin.__dict__[attr.key] = self._state.origin._state.attributes[attr.key] + self._state.origin._state.read_time = self.entry_read_time + else: + self.entry_discard_changes() # if not refreshed remove committed changes + self._state.set_status(STATUS_COMMITTED) + return True + return False + + def entry_discard_changes(self): + self._changes.clear() + self._state.set_status(self._state._initial_status) + + def entry_delete(self): + if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_DELETION]: + error_message = 'cannot delete entry, invalid status: ' + self.entry_status + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + self._state.set_status(STATUS_READY_FOR_DELETION) + + def entry_refresh(self, tries=4, seconds=2): + """ + + Refreshes the 
entry from the LDAP Server + """ + if self.entry_cursor.connection: + if self.entry_cursor.refresh_entry(self, tries, seconds): + return True + + return False + + def entry_move(self, destination_dn): + if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_MOVING]: + error_message = 'cannot move entry, invalid status: ' + self.entry_status + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + self._state._to = safe_dn(destination_dn) + self._state.set_status(STATUS_READY_FOR_MOVING) + + def entry_rename(self, new_name): + if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_RENAMING]: + error_message = 'cannot rename entry, invalid status: ' + self.entry_status + if log_enabled(ERROR): + log(ERROR, '%s for <%s>', error_message, self) + raise LDAPCursorError(error_message) + self._state._to = new_name + self._state.set_status(STATUS_READY_FOR_RENAMING) + + @property + def entry_changes(self): + return self._changes diff --git a/server/www/packages/packages-linux/x64/ldap3/abstract/objectDef.py b/server/www/packages/packages-linux/x64/ldap3/abstract/objectDef.py index 5af64d5..7fdc481 100644 --- a/server/www/packages/packages-linux/x64/ldap3/abstract/objectDef.py +++ b/server/www/packages/packages-linux/x64/ldap3/abstract/objectDef.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/core/connection.py b/server/www/packages/packages-linux/x64/ldap3/core/connection.py index b8ed002..9d9211a 100644 --- a/server/www/packages/packages-linux/x64/ldap3/core/connection.py +++ b/server/www/packages/packages-linux/x64/ldap3/core/connection.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -30,8 +30,9 @@ import json from .. 
import ANONYMOUS, SIMPLE, SASL, MODIFY_ADD, MODIFY_DELETE, MODIFY_REPLACE, get_config_parameter, DEREF_ALWAYS, \ SUBTREE, ASYNC, SYNC, NO_ATTRIBUTES, ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, MODIFY_INCREMENT, LDIF, ASYNC_STREAM, \ - RESTARTABLE, ROUND_ROBIN, REUSABLE, AUTO_BIND_DEFAULT, AUTO_BIND_NONE, AUTO_BIND_TLS_BEFORE_BIND, AUTO_BIND_TLS_AFTER_BIND, AUTO_BIND_NO_TLS, \ - STRING_TYPES, SEQUENCE_TYPES, MOCK_SYNC, MOCK_ASYNC, NTLM, EXTERNAL, DIGEST_MD5, GSSAPI, PLAIN + RESTARTABLE, ROUND_ROBIN, REUSABLE, AUTO_BIND_DEFAULT, AUTO_BIND_NONE, AUTO_BIND_TLS_BEFORE_BIND,\ + AUTO_BIND_TLS_AFTER_BIND, AUTO_BIND_NO_TLS, STRING_TYPES, SEQUENCE_TYPES, MOCK_SYNC, MOCK_ASYNC, NTLM, EXTERNAL,\ + DIGEST_MD5, GSSAPI, PLAIN from .results import RESULT_SUCCESS, RESULT_COMPARE_TRUE, RESULT_COMPARE_FALSE from ..extend import ExtendedOperationsRoot @@ -163,6 +164,8 @@ class Connection(object): :type pool_size: int :param pool_lifetime: pool lifetime for pooled strategies :type pool_lifetime: int + :param cred_store: credential store for gssapi + :type cred_store: dict :param use_referral_cache: keep referral connections open and reuse them :type use_referral_cache: bool :param auto_escape: automatic escaping of filter values @@ -190,6 +193,7 @@ class Connection(object): pool_name=None, pool_size=None, pool_lifetime=None, + cred_store=None, fast_decoder=True, receive_timeout=None, return_empty_attributes=True, @@ -254,6 +258,7 @@ class Connection(object): self.lazy = lazy self.pool_name = pool_name if pool_name else conf_default_pool_name self.pool_size = pool_size + self.cred_store = cred_store self.pool_lifetime = pool_lifetime self.pool_keepalive = pool_keepalive self.starting_tls = False @@ -333,7 +338,7 @@ class Connection(object): if log_enabled(BASIC): log(BASIC, 'performing automatic bind for <%s>', self) if self.closed: - self.open(read_server_info=False) + self.open(read_server_info=False) if self.auto_bind == AUTO_BIND_NO_TLS: self.bind(read_server_info=True) elif self.auto_bind == AUTO_BIND_TLS_BEFORE_BIND: @@ -387,6 +392,7 @@ class Connection(object): r += '' if self.pool_size is None else ', pool_size={0.pool_size!r}'.format(self) r += '' if self.pool_lifetime is None else ', pool_lifetime={0.pool_lifetime!r}'.format(self) r += '' if self.pool_keepalive is None else ', pool_keepalive={0.pool_keepalive!r}'.format(self) + r += '' if self.cred_store is None else (', cred_store=' + repr(self.cred_store)) r += '' if self.fast_decoder is None else (', fast_decoder=' + ('True' if self.fast_decoder else 'False')) r += '' if self.auto_range is None else (', auto_range=' + ('True' if self.auto_range else 'False')) r += '' if self.receive_timeout is None else ', receive_timeout={0.receive_timeout!r}'.format(self) @@ -425,6 +431,7 @@ class Connection(object): r += '' if self.pool_size is None else ', pool_size={0.pool_size!r}'.format(self) r += '' if self.pool_lifetime is None else ', pool_lifetime={0.pool_lifetime!r}'.format(self) r += '' if self.pool_keepalive is None else ', pool_keepalive={0.pool_keepalive!r}'.format(self) + r += '' if self.cred_store is None else (', cred_store=' + repr(self.cred_store)) r += '' if self.fast_decoder is None else (', fast_decoder=' + 'True' if self.fast_decoder else 'False') r += '' if self.auto_range is None else (', auto_range=' + ('True' if self.auto_range else 'False')) r += '' if self.receive_timeout is None else ', receive_timeout={0.receive_timeout!r}'.format(self) @@ -1031,6 +1038,7 @@ class Connection(object): log(ERROR, '%s for <%s>', self.last_error, self) 
raise LDAPChangeError(self.last_error) + changelist = dict() for attribute_name in changes: if self.server and self.server.schema and self.check_names: if ';' in attribute_name: # remove tags for checking @@ -1048,7 +1056,7 @@ class Connection(object): log(ERROR, '%s for <%s>', self.last_error, self) raise LDAPChangeError(self.last_error) - changes[attribute_name] = [change] # insert change in a tuple + changelist[attribute_name] = [change] # insert change in a list else: for change_operation in change: if len(change_operation) != 2 or change_operation[0] not in [MODIFY_ADD, MODIFY_DELETE, MODIFY_REPLACE, MODIFY_INCREMENT, 0, 1, 2, 3]: @@ -1056,7 +1064,8 @@ class Connection(object): if log_enabled(ERROR): log(ERROR, '%s for <%s>', self.last_error, self) raise LDAPChangeError(self.last_error) - request = modify_operation(dn, changes, self.auto_encode, self.server.schema if self.server else None, validator=self.server.custom_validator if self.server else None, check_names=self.check_names) + changelist[attribute_name] = change + request = modify_operation(dn, changelist, self.auto_encode, self.server.schema if self.server else None, validator=self.server.custom_validator if self.server else None, check_names=self.check_names) if log_enabled(PROTOCOL): log(PROTOCOL, 'MODIFY request <%s> sent via <%s>', modify_request_to_dict(request), self) response = self.post_send_single_response(self.send('modifyRequest', request, controls)) @@ -1107,11 +1116,11 @@ class Connection(object): log(ERROR, '%s for <%s>', self.last_error, self) raise LDAPConnectionIsReadOnlyError(self.last_error) - if new_superior and not dn.startswith(relative_dn): # as per RFC4511 (4.9) - self.last_error = 'DN cannot change while performing moving' - if log_enabled(ERROR): - log(ERROR, '%s for <%s>', self.last_error, self) - raise LDAPChangeError(self.last_error) + # if new_superior and not dn.startswith(relative_dn): # as per RFC4511 (4.9) + # self.last_error = 'DN cannot change while performing moving' + # if log_enabled(ERROR): + # log(ERROR, '%s for <%s>', self.last_error, self) + # raise LDAPChangeError(self.last_error) request = modify_dn_operation(dn, relative_dn, delete_old_dn, new_superior) if log_enabled(PROTOCOL): @@ -1220,6 +1229,8 @@ class Connection(object): log(BASIC, 'deferring START TLS for <%s>', self) else: self._deferred_start_tls = False + if self.closed: + self.open() if self.server.tls.start_tls(self) and self.strategy.sync: # for asynchronous connections _start_tls is run by the strategy if read_server_info: self.refresh_server_info() # refresh server info as per RFC4515 (3.1.5) @@ -1269,54 +1280,58 @@ class Connection(object): result = None if not self.sasl_in_progress: self.sasl_in_progress = True # ntlm is same of sasl authentication - # additional import for NTLM - from ..utils.ntlm import NtlmClient - domain_name, user_name = self.user.split('\\', 1) - ntlm_client = NtlmClient(user_name=user_name, domain=domain_name, password=self.password) + try: + # additional import for NTLM + from ..utils.ntlm import NtlmClient + domain_name, user_name = self.user.split('\\', 1) + ntlm_client = NtlmClient(user_name=user_name, domain=domain_name, password=self.password) - # as per https://msdn.microsoft.com/en-us/library/cc223501.aspx - # send a sicilyPackageDiscovery request (in the bindRequest) - request = bind_operation(self.version, 'SICILY_PACKAGE_DISCOVERY', ntlm_client) - if log_enabled(PROTOCOL): - log(PROTOCOL, 'NTLM SICILY PACKAGE DISCOVERY request sent via <%s>', self) - response = 
self.post_send_single_response(self.send('bindRequest', request, controls)) - if not self.strategy.sync: - _, result = self.get_response(response) - else: - result = response[0] - if 'server_creds' in result: - sicily_packages = result['server_creds'].decode('ascii').split(';') - if 'NTLM' in sicily_packages: # NTLM available on server - request = bind_operation(self.version, 'SICILY_NEGOTIATE_NTLM', ntlm_client) - if log_enabled(PROTOCOL): - log(PROTOCOL, 'NTLM SICILY NEGOTIATE request sent via <%s>', self) - response = self.post_send_single_response(self.send('bindRequest', request, controls)) - if not self.strategy.sync: - _, result = self.get_response(response) - else: + # as per https://msdn.microsoft.com/en-us/library/cc223501.aspx + # send a sicilyPackageDiscovery request (in the bindRequest) + request = bind_operation(self.version, 'SICILY_PACKAGE_DISCOVERY', ntlm_client) + if log_enabled(PROTOCOL): + log(PROTOCOL, 'NTLM SICILY PACKAGE DISCOVERY request sent via <%s>', self) + response = self.post_send_single_response(self.send('bindRequest', request, controls)) + if not self.strategy.sync: + _, result = self.get_response(response) + else: + result = response[0] + if 'server_creds' in result: + sicily_packages = result['server_creds'].decode('ascii').split(';') + if 'NTLM' in sicily_packages: # NTLM available on server + request = bind_operation(self.version, 'SICILY_NEGOTIATE_NTLM', ntlm_client) if log_enabled(PROTOCOL): - log(PROTOCOL, 'NTLM SICILY NEGOTIATE response <%s> received via <%s>', response[0], self) - result = response[0] - - if result['result'] == RESULT_SUCCESS: - request = bind_operation(self.version, 'SICILY_RESPONSE_NTLM', ntlm_client, result['server_creds']) - if log_enabled(PROTOCOL): - log(PROTOCOL, 'NTLM SICILY RESPONSE NTLM request sent via <%s>', self) + log(PROTOCOL, 'NTLM SICILY NEGOTIATE request sent via <%s>', self) response = self.post_send_single_response(self.send('bindRequest', request, controls)) if not self.strategy.sync: _, result = self.get_response(response) else: if log_enabled(PROTOCOL): - log(PROTOCOL, 'NTLM BIND response <%s> received via <%s>', response[0], self) + log(PROTOCOL, 'NTLM SICILY NEGOTIATE response <%s> received via <%s>', response[0], + self) result = response[0] - else: - result = None - self.sasl_in_progress = False - if log_enabled(BASIC): - log(BASIC, 'done SASL NTLM operation, result <%s>', result) + if result['result'] == RESULT_SUCCESS: + request = bind_operation(self.version, 'SICILY_RESPONSE_NTLM', ntlm_client, + result['server_creds']) + if log_enabled(PROTOCOL): + log(PROTOCOL, 'NTLM SICILY RESPONSE NTLM request sent via <%s>', self) + response = self.post_send_single_response(self.send('bindRequest', request, controls)) + if not self.strategy.sync: + _, result = self.get_response(response) + else: + if log_enabled(PROTOCOL): + log(PROTOCOL, 'NTLM BIND response <%s> received via <%s>', response[0], self) + result = response[0] + else: + result = None + finally: + self.sasl_in_progress = False - return result + if log_enabled(BASIC): + log(BASIC, 'done SASL NTLM operation, result <%s>', result) + + return result def refresh_server_info(self): # if self.strategy.no_real_dsa: # do not refresh for mock strategies diff --git a/server/www/packages/packages-linux/x64/ldap3/core/exceptions.py b/server/www/packages/packages-linux/x64/ldap3/core/exceptions.py index cfefb6d..b5d22b9 100644 --- a/server/www/packages/packages-linux/x64/ldap3/core/exceptions.py +++ b/server/www/packages/packages-linux/x64/ldap3/core/exceptions.py 
@@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -376,9 +376,11 @@ class LDAPAttributeError(LDAPExceptionError, ValueError, TypeError): class LDAPCursorError(LDAPExceptionError): pass + class LDAPObjectDereferenceError(LDAPExceptionError): pass + # security exceptions class LDAPSSLNotSupportedError(LDAPExceptionError, ImportError): pass @@ -505,6 +507,10 @@ class LDAPTransactionError(LDAPExceptionError): pass +class LDAPInfoError(LDAPExceptionError): + pass + + # communication exceptions class LDAPCommunicationError(LDAPExceptionError): pass diff --git a/server/www/packages/packages-linux/x64/ldap3/core/pooling.py b/server/www/packages/packages-linux/x64/ldap3/core/pooling.py index 66a0bbd..890023c 100644 --- a/server/www/packages/packages-linux/x64/ldap3/core/pooling.py +++ b/server/www/packages/packages-linux/x64/ldap3/core/pooling.py @@ -1,306 +1,329 @@ -""" -""" - -# Created on 2014.03.14 -# -# Author: Giovanni Cannata -# -# Copyright 2014 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see <http://www.gnu.org/licenses/>. - -from datetime import datetime, MINYEAR -from os import linesep -from random import randint -from time import sleep - -from ..
import FIRST, ROUND_ROBIN, RANDOM, SEQUENCE_TYPES, STRING_TYPES, get_config_parameter -from .exceptions import LDAPUnknownStrategyError, LDAPServerPoolError, LDAPServerPoolExhaustedError -from .server import Server -from ..utils.log import log, log_enabled, ERROR, BASIC, NETWORK - -POOLING_STRATEGIES = [FIRST, ROUND_ROBIN, RANDOM] - - -class ServerPoolState(object): - def __init__(self, server_pool): - self.servers = [] # each element is a list: [server, last_checked_time, available] - self.strategy = server_pool.strategy - self.server_pool = server_pool - self.last_used_server = 0 - self.refresh() - self.initialize_time = datetime.now() - - if log_enabled(BASIC): - log(BASIC, 'instantiated ServerPoolState: <%r>', self) - - def __str__(self): - s = 'servers: ' + linesep - if self.servers: - for server in self.servers: - s += str(server[0]) + linesep - else: - s += 'None' + linesep - s += 'Pool strategy: ' + str(self.strategy) + linesep - s += ' - Last used server: ' + ('None' if self.last_used_server == -1 else str(self.servers[self.last_used_server][0])) - - return s - - def refresh(self): - self.servers = [] - for server in self.server_pool.servers: - self.servers.append([server, datetime(MINYEAR, 1, 1), True]) # server, smallest date ever, supposed available - self.last_used_server = randint(0, len(self.servers) - 1) - - def get_current_server(self): - return self.servers[self.last_used_server][0] - - def get_server(self): - if self.servers: - if self.server_pool.strategy == FIRST: - if self.server_pool.active: - # returns the first active server - self.last_used_server = self.find_active_server(starting=0) - else: - # returns always the first server - no pooling - self.last_used_server = 0 - elif self.server_pool.strategy == ROUND_ROBIN: - if self.server_pool.active: - # returns the next active server in a circular range - self.last_used_server = self.find_active_server(self.last_used_server + 1) - else: - # returns the next server in a circular range - self.last_used_server = self.last_used_server + 1 if (self.last_used_server + 1) < len(self.servers) else 0 - elif self.server_pool.strategy == RANDOM: - if self.server_pool.active: - self.last_used_server = self.find_active_random_server() - else: - # returns a random server in the pool - self.last_used_server = randint(0, len(self.servers) - 1) - else: - if log_enabled(ERROR): - log(ERROR, 'unknown server pooling strategy <%s>', self.server_pool.strategy) - raise LDAPUnknownStrategyError('unknown server pooling strategy') - if log_enabled(BASIC): - log(BASIC, 'server returned from Server Pool: <%s>', self.last_used_server) - return self.servers[self.last_used_server][0] - else: - if log_enabled(ERROR): - log(ERROR, 'no servers in Server Pool <%s>', self) - raise LDAPServerPoolError('no servers in server pool') - - def find_active_random_server(self): - counter = self.server_pool.active # can be True for "forever" or the number of cycles to try - while counter: - if log_enabled(NETWORK): - log(NETWORK, 'entering loop for finding active server in pool <%s>', self) - temp_list = self.servers[:] # copy - while temp_list: - # pops a random server from a temp list and checks its - # availability, if not available tries another one - server = temp_list.pop(randint(0, len(temp_list) - 1)) - if not server[2]: # server is offline - if (isinstance(self.server_pool.exhaust, bool) and self.server_pool.exhaust) or (datetime.now() - server[1]).seconds < self.server_pool.exhaust: # keeps server offline - if log_enabled(NETWORK): - log(NETWORK, 
'server <%s> excluded from checking because it is offline', server[0]) - continue - if log_enabled(NETWORK): - log(NETWORK, 'server <%s> reinserted in pool', server[0]) - server[1] = datetime.now() - if log_enabled(NETWORK): - log(NETWORK, 'checking server <%s> for availability', server[0]) - if server[0].check_availability(): - # returns a random active server in the pool - server[2] = True - return self.servers.index(server) - else: - server[2] = False - if not isinstance(self.server_pool.active, bool): - counter -= 1 - if log_enabled(ERROR): - log(ERROR, 'no random active server available in Server Pool <%s> after maximum number of tries', self) - raise LDAPServerPoolExhaustedError('no random active server available in server pool after maximum number of tries') - - def find_active_server(self, starting): - conf_pool_timeout = get_config_parameter('POOLING_LOOP_TIMEOUT') - counter = self.server_pool.active # can be True for "forever" or the number of cycles to try - if starting >= len(self.servers): - starting = 0 - - while counter: - if log_enabled(NETWORK): - log(NETWORK, 'entering loop number <%s> for finding active server in pool <%s>', counter, self) - index = -1 - pool_size = len(self.servers) - while index < pool_size - 1: - index += 1 - offset = index + starting if index + starting < pool_size else index + starting - pool_size - if not self.servers[offset][2]: # server is offline - if (isinstance(self.server_pool.exhaust, bool) and self.server_pool.exhaust) or (datetime.now() - self.servers[offset][1]).seconds < self.server_pool.exhaust: # keeps server offline - if log_enabled(NETWORK): - if isinstance(self.server_pool.exhaust, bool): - log(NETWORK, 'server <%s> excluded from checking because is offline', self.servers[offset][0]) - else: - log(NETWORK, 'server <%s> excluded from checking because is offline for %d seconds', self.servers[offset][0], (self.server_pool.exhaust - (datetime.now() - self.servers[offset][1]).seconds)) - continue - if log_enabled(NETWORK): - log(NETWORK, 'server <%s> reinserted in pool', self.servers[offset][0]) - self.servers[offset][1] = datetime.now() - if log_enabled(NETWORK): - log(NETWORK, 'checking server <%s> for availability', self.servers[offset][0]) - if self.servers[offset][0].check_availability(): - self.servers[offset][2] = True - return offset - else: - self.servers[offset][2] = False # sets server offline - - if not isinstance(self.server_pool.active, bool): - counter -= 1 - if log_enabled(NETWORK): - log(NETWORK, 'waiting for %d seconds before retrying pool servers cycle', conf_pool_timeout) - sleep(conf_pool_timeout) - - if log_enabled(ERROR): - log(ERROR, 'no active server available in Server Pool <%s> after maximum number of tries', self) - raise LDAPServerPoolExhaustedError('no active server available in server pool after maximum number of tries') - - def __len__(self): - return len(self.servers) - - -class ServerPool(object): - def __init__(self, - servers=None, - pool_strategy=ROUND_ROBIN, - active=True, - exhaust=False): - - if pool_strategy not in POOLING_STRATEGIES: - if log_enabled(ERROR): - log(ERROR, 'unknown pooling strategy <%s>', pool_strategy) - raise LDAPUnknownStrategyError('unknown pooling strategy') - if exhaust and not active: - if log_enabled(ERROR): - log(ERROR, 'cannot instantiate pool with exhaust and not active') - raise LDAPServerPoolError('pools can be exhausted only when checking for active servers') - self.servers = [] - self.pool_states = dict() - self.active = active - self.exhaust = exhaust - if 
isinstance(servers, SEQUENCE_TYPES + (Server, )): - self.add(servers) - elif isinstance(servers, STRING_TYPES): - self.add(Server(servers)) - self.strategy = pool_strategy - - if log_enabled(BASIC): - log(BASIC, 'instantiated ServerPool: <%r>', self) - - def __str__(self): - s = 'servers: ' + linesep - if self.servers: - for server in self.servers: - s += str(server) + linesep - else: - s += 'None' + linesep - s += 'Pool strategy: ' + str(self.strategy) - s += ' - ' + 'active: ' + (str(self.active) if self.active else 'False') - s += ' - ' + 'exhaust pool: ' + (str(self.exhaust) if self.exhaust else 'False') - return s - - def __repr__(self): - r = 'ServerPool(servers=' - if self.servers: - r += '[' - for server in self.servers: - r += server.__repr__() + ', ' - r = r[:-2] + ']' - else: - r += 'None' - r += ', pool_strategy={0.strategy!r}'.format(self) - r += ', active={0.active!r}'.format(self) - r += ', exhaust={0.exhaust!r}'.format(self) - r += ')' - - return r - - def __len__(self): - return len(self.servers) - - def __getitem__(self, item): - return self.servers[item] - - def __iter__(self): - return self.servers.__iter__() - - def add(self, servers): - if isinstance(servers, Server): - if servers not in self.servers: - self.servers.append(servers) - elif isinstance(servers, STRING_TYPES): - self.servers.append(Server(servers)) - elif isinstance(servers, SEQUENCE_TYPES): - for server in servers: - if isinstance(server, Server): - self.servers.append(server) - elif isinstance(server, STRING_TYPES): - self.servers.append(Server(server)) - else: - if log_enabled(ERROR): - log(ERROR, 'element must be a server in Server Pool <%s>', self) - raise LDAPServerPoolError('server in ServerPool must be a Server') - else: - if log_enabled(ERROR): - log(ERROR, 'server must be a Server of a list of Servers when adding to Server Pool <%s>', self) - raise LDAPServerPoolError('server must be a Server or a list of Server') - - for connection in self.pool_states: - # notifies connections using this pool to refresh - self.pool_states[connection].refresh() - - def remove(self, server): - if server in self.servers: - self.servers.remove(server) - else: - if log_enabled(ERROR): - log(ERROR, 'server %s to be removed not in Server Pool <%s>', server, self) - raise LDAPServerPoolError('server not in server pool') - - for connection in self.pool_states: - # notifies connections using this pool to refresh - self.pool_states[connection].refresh() - - def initialize(self, connection): - pool_state = ServerPoolState(self) - # registers pool_state in ServerPool object - self.pool_states[connection] = pool_state - - def get_server(self, connection): - if connection in self.pool_states: - return self.pool_states[connection].get_server() - else: - if log_enabled(ERROR): - log(ERROR, 'connection <%s> not in Server Pool State <%s>', connection, self) - raise LDAPServerPoolError('connection not in ServerPoolState') - - def get_current_server(self, connection): - if connection in self.pool_states: - return self.pool_states[connection].get_current_server() - else: - if log_enabled(ERROR): - log(ERROR, 'connection <%s> not in Server Pool State <%s>', connection, self) - raise LDAPServerPoolError('connection not in ServerPoolState') +""" +""" + +# Created on 2014.03.14 +# +# Author: Giovanni Cannata +# +# Copyright 2014 - 2019 Giovanni Cannata +# +# This file is part of ldap3. 
+# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . + +from datetime import datetime, MINYEAR +from os import linesep +from random import randint +from time import sleep + +from .. import FIRST, ROUND_ROBIN, RANDOM, SEQUENCE_TYPES, STRING_TYPES, get_config_parameter +from .exceptions import LDAPUnknownStrategyError, LDAPServerPoolError, LDAPServerPoolExhaustedError +from .server import Server +from ..utils.log import log, log_enabled, ERROR, BASIC, NETWORK + +POOLING_STRATEGIES = [FIRST, ROUND_ROBIN, RANDOM] + + +class ServerState(object): + def __init__(self, server, last_checked_time, available): + self.server = server + self.last_checked_time = last_checked_time + self.available = available + + +class ServerPoolState(object): + def __init__(self, server_pool): + self.server_states = [] # each element is a ServerState + self.strategy = server_pool.strategy + self.server_pool = server_pool + self.last_used_server = 0 + self.refresh() + self.initialize_time = datetime.now() + + if log_enabled(BASIC): + log(BASIC, 'instantiated ServerPoolState: <%r>', self) + + def __str__(self): + s = 'servers: ' + linesep + if self.server_states: + for state in self.server_states: + s += str(state.server) + linesep + else: + s += 'None' + linesep + s += 'Pool strategy: ' + str(self.strategy) + linesep + s += ' - Last used server: ' + ('None' if self.last_used_server == -1 else str(self.server_states[self.last_used_server].server)) + + return s + + def refresh(self): + self.server_states = [] + for server in self.server_pool.servers: + self.server_states.append(ServerState(server, datetime(MINYEAR, 1, 1), True)) # server, smallest date ever, supposed available + self.last_used_server = randint(0, len(self.server_states) - 1) + + def get_current_server(self): + return self.server_states[self.last_used_server].server + + def get_server(self): + if self.server_states: + if self.server_pool.strategy == FIRST: + if self.server_pool.active: + # returns the first active server + self.last_used_server = self.find_active_server(starting=0) + else: + # returns always the first server - no pooling + self.last_used_server = 0 + elif self.server_pool.strategy == ROUND_ROBIN: + if self.server_pool.active: + # returns the next active server in a circular range + self.last_used_server = self.find_active_server(self.last_used_server + 1) + else: + # returns the next server in a circular range + self.last_used_server = self.last_used_server + 1 if (self.last_used_server + 1) < len(self.server_states) else 0 + elif self.server_pool.strategy == RANDOM: + if self.server_pool.active: + self.last_used_server = self.find_active_random_server() + else: + # returns a random server in the pool + self.last_used_server = randint(0, len(self.server_states) - 1) + else: + if log_enabled(ERROR): + log(ERROR, 'unknown server pooling strategy <%s>', self.server_pool.strategy) + raise LDAPUnknownStrategyError('unknown server pooling strategy') 
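+            # at this point last_used_server holds the index selected by the
+            # FIRST, ROUND_ROBIN or RANDOM strategy above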
+ if log_enabled(BASIC): + log(BASIC, 'server returned from Server Pool: <%s>', self.last_used_server) + return self.server_states[self.last_used_server].server + else: + if log_enabled(ERROR): + log(ERROR, 'no servers in Server Pool <%s>', self) + raise LDAPServerPoolError('no servers in server pool') + + def find_active_random_server(self): + counter = self.server_pool.active # can be True for "forever" or the number of cycles to try + while counter: + if log_enabled(NETWORK): + log(NETWORK, 'entering loop for finding active server in pool <%s>', self) + temp_list = self.server_states[:] # copy + while temp_list: + # pops a random server from a temp list and checks its + # availability, if not available tries another one + server_state = temp_list.pop(randint(0, len(temp_list) - 1)) + if not server_state.available: # server is offline + if (isinstance(self.server_pool.exhaust, bool) and self.server_pool.exhaust) or (datetime.now() - server_state.last_checked_time).seconds < self.server_pool.exhaust: # keeps server offline + if log_enabled(NETWORK): + log(NETWORK, 'server <%s> excluded from checking because it is offline', server_state.server) + continue + if log_enabled(NETWORK): + log(NETWORK, 'server <%s> reinserted in pool', server_state.server) + server_state.last_checked_time = datetime.now() + if log_enabled(NETWORK): + log(NETWORK, 'checking server <%s> for availability', server_state.server) + if server_state.server.check_availability(): + # returns a random active server in the pool + server_state.available = True + return self.server_states.index(server_state) + else: + server_state.available = False + if not isinstance(self.server_pool.active, bool): + counter -= 1 + if log_enabled(ERROR): + log(ERROR, 'no random active server available in Server Pool <%s> after maximum number of tries', self) + raise LDAPServerPoolExhaustedError('no random active server available in server pool after maximum number of tries') + + def find_active_server(self, starting): + conf_pool_timeout = get_config_parameter('POOLING_LOOP_TIMEOUT') + counter = self.server_pool.active # can be True for "forever" or the number of cycles to try + if starting >= len(self.server_states): + starting = 0 + + while counter: + if log_enabled(NETWORK): + log(NETWORK, 'entering loop number <%s> for finding active server in pool <%s>', counter, self) + index = -1 + pool_size = len(self.server_states) + while index < pool_size - 1: + index += 1 + offset = index + starting if index + starting < pool_size else index + starting - pool_size + server_state = self.server_states[offset] + if not server_state.available: # server is offline + if (isinstance(self.server_pool.exhaust, bool) and self.server_pool.exhaust) or (datetime.now() - server_state.last_checked_time).seconds < self.server_pool.exhaust: # keeps server offline + if log_enabled(NETWORK): + if isinstance(self.server_pool.exhaust, bool): + log(NETWORK, 'server <%s> excluded from checking because is offline', server_state.server) + else: + log(NETWORK, 'server <%s> excluded from checking because is offline for %d seconds', server_state.server, (self.server_pool.exhaust - (datetime.now() - server_state.last_checked_time).seconds)) + continue + if log_enabled(NETWORK): + log(NETWORK, 'server <%s> reinserted in pool', server_state.server) + server_state.last_checked_time = datetime.now() + if log_enabled(NETWORK): + log(NETWORK, 'checking server <%s> for availability', server_state.server) + if server_state.server.check_availability(): + server_state.available = True 
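+                        # the first reachable server wins: it is marked available
+                        # and its offset is returned to the caller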
+ return offset + else: + server_state.available = False # sets server offline + + if not isinstance(self.server_pool.active, bool): + counter -= 1 + if log_enabled(NETWORK): + log(NETWORK, 'waiting for %d seconds before retrying pool servers cycle', conf_pool_timeout) + sleep(conf_pool_timeout) + + if log_enabled(ERROR): + log(ERROR, 'no active server available in Server Pool <%s> after maximum number of tries', self) + raise LDAPServerPoolExhaustedError('no active server available in server pool after maximum number of tries') + + def __len__(self): + return len(self.server_states) + + +class ServerPool(object): + def __init__(self, + servers=None, + pool_strategy=ROUND_ROBIN, + active=True, + exhaust=False, + single_state=True): + + if pool_strategy not in POOLING_STRATEGIES: + if log_enabled(ERROR): + log(ERROR, 'unknown pooling strategy <%s>', pool_strategy) + raise LDAPUnknownStrategyError('unknown pooling strategy') + if exhaust and not active: + if log_enabled(ERROR): + log(ERROR, 'cannot instantiate pool with exhaust and not active') + raise LDAPServerPoolError('pools can be exhausted only when checking for active servers') + self.servers = [] + self.pool_states = dict() + self.active = active + self.exhaust = exhaust + self.single = single_state + self._pool_state = None # used for storing the global state of the pool + if isinstance(servers, SEQUENCE_TYPES + (Server, )): + self.add(servers) + elif isinstance(servers, STRING_TYPES): + self.add(Server(servers)) + self.strategy = pool_strategy + + if log_enabled(BASIC): + log(BASIC, 'instantiated ServerPool: <%r>', self) + + def __str__(self): + s = 'servers: ' + linesep + if self.servers: + for server in self.servers: + s += str(server) + linesep + else: + s += 'None' + linesep + s += 'Pool strategy: ' + str(self.strategy) + s += ' - ' + 'active: ' + (str(self.active) if self.active else 'False') + s += ' - ' + 'exhaust pool: ' + (str(self.exhaust) if self.exhaust else 'False') + return s + + def __repr__(self): + r = 'ServerPool(servers=' + if self.servers: + r += '[' + for server in self.servers: + r += server.__repr__() + ', ' + r = r[:-2] + ']' + else: + r += 'None' + r += ', pool_strategy={0.strategy!r}'.format(self) + r += ', active={0.active!r}'.format(self) + r += ', exhaust={0.exhaust!r}'.format(self) + r += ')' + + return r + + def __len__(self): + return len(self.servers) + + def __getitem__(self, item): + return self.servers[item] + + def __iter__(self): + return self.servers.__iter__() + + def add(self, servers): + if isinstance(servers, Server): + if servers not in self.servers: + self.servers.append(servers) + elif isinstance(servers, STRING_TYPES): + self.servers.append(Server(servers)) + elif isinstance(servers, SEQUENCE_TYPES): + for server in servers: + if isinstance(server, Server): + self.servers.append(server) + elif isinstance(server, STRING_TYPES): + self.servers.append(Server(server)) + else: + if log_enabled(ERROR): + log(ERROR, 'element must be a server in Server Pool <%s>', self) + raise LDAPServerPoolError('server in ServerPool must be a Server') + else: + if log_enabled(ERROR): + log(ERROR, 'server must be a Server of a list of Servers when adding to Server Pool <%s>', self) + raise LDAPServerPoolError('server must be a Server or a list of Server') + + if self.single: + if self._pool_state: + self._pool_state.refresh() + else: + for connection in self.pool_states: + # notifies connections using this pool to refresh + self.pool_states[connection].refresh() + + def remove(self, server): + if server in 
self.servers: + self.servers.remove(server) + else: + if log_enabled(ERROR): + log(ERROR, 'server %s to be removed not in Server Pool <%s>', server, self) + raise LDAPServerPoolError('server not in server pool') + + if self.single: + if self._pool_state: + self._pool_state.refresh() + else: + for connection in self.pool_states: + # notifies connections using this pool to refresh + self.pool_states[connection].refresh() + + def initialize(self, connection): + # registers pool_state in ServerPool object + if self.single: + if not self._pool_state: + self._pool_state = ServerPoolState(self) + self.pool_states[connection] = self._pool_state + else: + self.pool_states[connection] = ServerPoolState(self) + + def get_server(self, connection): + if connection in self.pool_states: + return self.pool_states[connection].get_server() + else: + if log_enabled(ERROR): + log(ERROR, 'connection <%s> not in Server Pool State <%s>', connection, self) + raise LDAPServerPoolError('connection not in ServerPoolState') + + def get_current_server(self, connection): + if connection in self.pool_states: + return self.pool_states[connection].get_current_server() + else: + if log_enabled(ERROR): + log(ERROR, 'connection <%s> not in Server Pool State <%s>', connection, self) + raise LDAPServerPoolError('connection not in ServerPoolState') diff --git a/server/www/packages/packages-linux/x64/ldap3/core/results.py b/server/www/packages/packages-linux/x64/ldap3/core/results.py index 6f10643..4c08172 100644 --- a/server/www/packages/packages-linux/x64/ldap3/core/results.py +++ b/server/www/packages/packages-linux/x64/ldap3/core/results.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -134,4 +134,4 @@ RESULT_CODES = { } # do not raise exception for (in raise_exceptions connection mode) -DO_NOT_RAISE_EXCEPTIONS = [RESULT_SUCCESS, RESULT_COMPARE_FALSE, RESULT_COMPARE_TRUE, RESULT_REFERRAL, RESULT_SASL_BIND_IN_PROGRESS] +DO_NOT_RAISE_EXCEPTIONS = [RESULT_SUCCESS, RESULT_COMPARE_FALSE, RESULT_COMPARE_TRUE, RESULT_REFERRAL, RESULT_SASL_BIND_IN_PROGRESS, RESULT_SIZE_LIMIT_EXCEEDED, RESULT_TIME_LIMIT_EXCEEDED] diff --git a/server/www/packages/packages-linux/x64/ldap3/core/server.py b/server/www/packages/packages-linux/x64/ldap3/core/server.py index 36c782b..c266f1c 100644 --- a/server/www/packages/packages-linux/x64/ldap3/core/server.py +++ b/server/www/packages/packages-linux/x64/ldap3/core/server.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -27,8 +27,8 @@ import socket from threading import Lock from datetime import datetime, MINYEAR -from .. import DSA, SCHEMA, ALL, BASE, get_config_parameter, OFFLINE_EDIR_8_8_8, OFFLINE_AD_2012_R2, OFFLINE_SLAPD_2_4, OFFLINE_DS389_1_3_3, SEQUENCE_TYPES, IP_SYSTEM_DEFAULT, IP_V4_ONLY, IP_V6_ONLY, IP_V4_PREFERRED, IP_V6_PREFERRED, STRING_TYPES -from .exceptions import LDAPInvalidServerError, LDAPDefinitionError, LDAPInvalidPortError, LDAPInvalidTlsSpecificationError, LDAPSocketOpenError +from .. 
import DSA, SCHEMA, ALL, BASE, get_config_parameter, OFFLINE_EDIR_8_8_8, OFFLINE_EDIR_9_1_4, OFFLINE_AD_2012_R2, OFFLINE_SLAPD_2_4, OFFLINE_DS389_1_3_3, SEQUENCE_TYPES, IP_SYSTEM_DEFAULT, IP_V4_ONLY, IP_V6_ONLY, IP_V4_PREFERRED, IP_V6_PREFERRED, STRING_TYPES +from .exceptions import LDAPInvalidServerError, LDAPDefinitionError, LDAPInvalidPortError, LDAPInvalidTlsSpecificationError, LDAPSocketOpenError, LDAPInfoError from ..protocol.formatters.standard import format_attribute_values from ..protocol.rfc4511 import LDAP_MAX_INT from ..protocol.rfc4512 import SchemaInfo, DsaInfo @@ -69,7 +69,6 @@ class Server(object): _message_counter = 0 _message_id_lock = Lock() # global lock for message_id shared by all Server objects - def __init__(self, host, port=None, @@ -452,6 +451,10 @@ class Server(object): from ..protocol.schemas.edir888 import edir_8_8_8_schema, edir_8_8_8_dsa_info self.attach_schema_info(SchemaInfo.from_json(edir_8_8_8_schema)) self.attach_dsa_info(DsaInfo.from_json(edir_8_8_8_dsa_info)) + elif self.get_info == OFFLINE_EDIR_9_1_4: + from ..protocol.schemas.edir914 import edir_9_1_4_schema, edir_9_1_4_dsa_info + self.attach_schema_info(SchemaInfo.from_json(edir_9_1_4_schema)) + self.attach_dsa_info(DsaInfo.from_json(edir_9_1_4_dsa_info)) elif self.get_info == OFFLINE_AD_2012_R2: from ..protocol.schemas.ad2012R2 import ad_2012_r2_schema, ad_2012_r2_dsa_info self.attach_schema_info(SchemaInfo.from_json(ad_2012_r2_schema)) @@ -570,3 +573,34 @@ class Server(object): for candidate in candidates: log(BASIC, 'obtained candidate address for <%s>: <%r> with mode %s', self, candidate[:-2], self.mode) return candidates + + def _check_info_property(self, kind, name): + if not self._dsa_info: + raise LDAPInfoError('server info not loaded') + + if kind == 'control': + properties = self.info.supported_controls + elif kind == 'extension': + properties = self.info.supported_extensions + elif kind == 'feature': + properties = self.info.supported_features + else: + raise LDAPInfoError('invalid info category') + + for prop in properties: + if name == prop[0] or (prop[2] and name.lower() == prop[2].lower()): # checks oid and description + return True + + return False + + def has_control(self, control): + return self._check_info_property('control', control) + + def has_extension(self, extension): + return self._check_info_property('extension', extension) + + def has_feature(self, feature): + return self._check_info_property('feature', feature) + + + diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/__init__.py b/server/www/packages/packages-linux/x64/ldap3/extend/__init__.py index 24f426e..28e5f64 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/__init__.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/__init__.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. 
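Two of the additions above are caller-visible: ServerPool's active/exhaust options drive the retry loops in ServerPoolState, and the new Server.has_control/has_extension/has_feature helpers look a capability up in the loaded DSA info by OID or by description. A minimal usage sketch, assuming reachable test servers (the host names and the checked OID are illustrative, not part of the patch):

from ldap3 import Server, ServerPool, Connection, ROUND_ROBIN, ALL

# exhaust=60 keeps an unreachable server out of the rotation for 60 seconds;
# active=3 raises LDAPServerPoolExhaustedError after three full pool cycles.
pool = ServerPool([Server('ldap1.example.com', get_info=ALL),
                   Server('ldap2.example.com', get_info=ALL)],
                  ROUND_ROBIN, active=3, exhaust=60)
conn = Connection(pool, auto_bind=True)  # binds to the first available server

# has_extension matches either the OID or the description (case-insensitive);
# per _check_info_property it raises LDAPInfoError if no DSA info was loaded.
print(conn.server.has_extension('1.3.6.1.4.1.4203.1.11.1'))  # Password Modify, RFC 3062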
# @@ -169,6 +169,36 @@ class StandardExtendedOperations(ExtendedOperationContainer): streaming, callback) + def funnel_search(self, + search_base='', + search_filter='', + search_scope=SUBTREE, + dereference_aliases=DEREF_NEVER, + attributes=ALL_ATTRIBUTES, + size_limit=0, + time_limit=0, + controls=None, + streaming=True, + callback=None, + queue_size=8 + ): + if callback: + streaming = False + return PersistentSearch(self._connection, + search_base, + search_filter, + search_scope, + dereference_aliases, + attributes, + size_limit, + time_limit, + controls, + None, + None, + None, + streaming, + callback) + class NovellExtendedOperations(ExtendedOperationContainer): def get_bind_dn(self, controls=None): diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/addMembersToGroups.py b/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/addMembersToGroups.py index 28c409f..d7938fd 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/addMembersToGroups.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/addMembersToGroups.py @@ -1,81 +1,93 @@ -""" -""" - -# Created on 2016.12.26 -# -# Author: Giovanni Cannata -# -# Copyright 2016 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . -from ...core.exceptions import LDAPInvalidDnError -from ... import SEQUENCE_TYPES, MODIFY_ADD, BASE, DEREF_NEVER - - -def ad_add_members_to_groups(connection, - members_dn, - groups_dn, - fix=True): - """ - :param connection: a bound Connection object - :param members_dn: the list of members to add to groups - :param groups_dn: the list of groups where members are to be added - :param fix: checks for group existence and already assigned members - :return: a boolean where True means that the operation was successful and False means an error has happened - Establishes users-groups relations following the Active Directory rules: users are added to the member attribute of groups. - Raises LDAPInvalidDnError if members or groups are not found in the DIT. 
- """ - - if not isinstance(members_dn, SEQUENCE_TYPES): - members_dn = [members_dn] - - if not isinstance(groups_dn, SEQUENCE_TYPES): - groups_dn = [groups_dn] - - error = False - for group in groups_dn: - if fix: # checks for existance of group and for already assigned members - result = connection.search(group, '(objectclass=*)', BASE, dereference_aliases=DEREF_NEVER, attributes=['member']) - - if not connection.strategy.sync: - response, result = connection.get_response(result) - else: - response, result = connection.response, connection.result - - if not result['description'] == 'success': - raise LDAPInvalidDnError(group + ' not found') - - existing_members = response[0]['attributes']['member'] if 'member' in response[0]['attributes'] else [] - existing_members = [element.lower() for element in existing_members] - else: - existing_members = [] - - changes = dict() - member_to_add = [element for element in members_dn if element.lower() not in existing_members] - if member_to_add: - changes['member'] = (MODIFY_ADD, member_to_add) - if changes: - result = connection.modify(group, changes) - if not connection.strategy.sync: - _, result = connection.get_response(result) - else: - result = connection.result - if result['description'] != 'success': - error = True - break - - return not error # returns True if no error is raised in the LDAP operations +""" +""" + +# Created on 2016.12.26 +# +# Author: Giovanni Cannata +# +# Copyright 2016 - 2018 Giovanni Cannata +# +# This file is part of ldap3. +# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . + +from ... import SEQUENCE_TYPES, MODIFY_ADD, BASE, DEREF_NEVER +from ...core.exceptions import LDAPInvalidDnError, LDAPOperationsErrorResult +from ...utils.dn import safe_dn + + +def ad_add_members_to_groups(connection, + members_dn, + groups_dn, + fix=True, + raise_error=False): + """ + :param connection: a bound Connection object + :param members_dn: the list of members to add to groups + :param groups_dn: the list of groups where members are to be added + :param fix: checks for group existence and already assigned members + :param raise_error: If the operation fails it raises an error instead of returning False + :return: a boolean where True means that the operation was successful and False means an error has happened + Establishes users-groups relations following the Active Directory rules: users are added to the member attribute of groups. + Raises LDAPInvalidDnError if members or groups are not found in the DIT. 
+ """ + + if not isinstance(members_dn, SEQUENCE_TYPES): + members_dn = [members_dn] + + if not isinstance(groups_dn, SEQUENCE_TYPES): + groups_dn = [groups_dn] + + if connection.check_names: # builds new lists with sanitized dn + members_dn = [safe_dn(member_dn) for member_dn in members_dn] + groups_dn = [safe_dn(group_dn) for group_dn in groups_dn] + + error = False + for group in groups_dn: + if fix: # checks for existance of group and for already assigned members + result = connection.search(group, '(objectclass=*)', BASE, dereference_aliases=DEREF_NEVER, + attributes=['member']) + + if not connection.strategy.sync: + response, result = connection.get_response(result) + else: + response, result = connection.response, connection.result + + if not result['description'] == 'success': + raise LDAPInvalidDnError(group + ' not found') + + existing_members = response[0]['attributes']['member'] if 'member' in response[0]['attributes'] else [] + existing_members = [element.lower() for element in existing_members] + else: + existing_members = [] + + changes = dict() + member_to_add = [element for element in members_dn if element.lower() not in existing_members] + if member_to_add: + changes['member'] = (MODIFY_ADD, member_to_add) + if changes: + result = connection.modify(group, changes) + if not connection.strategy.sync: + _, result = connection.get_response(result) + else: + result = connection.result + if result['description'] != 'success': + error = True + result_error_params = ['result', 'description', 'dn', 'message'] + if raise_error: + raise LDAPOperationsErrorResult([(k, v) for k, v in result.items() if k in result_error_params]) + break + + return not error # returns True if no error is raised in the LDAP operations diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/removeMembersFromGroups.py b/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/removeMembersFromGroups.py index 1b7feb3..e415188 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/removeMembersFromGroups.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/microsoft/removeMembersFromGroups.py @@ -1,93 +1,92 @@ -""" -""" - -# Created on 2016.12.26 -# -# Author: Giovanni Cannata -# -# Copyright 2016 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . -from ...core.exceptions import LDAPInvalidDnError -from ... 
import SEQUENCE_TYPES, MODIFY_DELETE, BASE, DEREF_NEVER -from ...utils.dn import safe_dn - - -def ad_remove_members_from_groups(connection, - members_dn, - groups_dn, - fix): - """ - :param connection: a bound Connection object - :param members_dn: the list of members to remove from groups - :param groups_dn: the list of groups where members are to be removed - :param fix: checks for group existence and existing members - :return: a boolean where True means that the operation was successful and False means an error has happened - Removes users-groups relations following the Activwe Directory rules: users are removed from groups' member attribute - - """ - if not isinstance(members_dn, SEQUENCE_TYPES): - members_dn = [members_dn] - - if not isinstance(groups_dn, SEQUENCE_TYPES): - groups_dn = [groups_dn] - - if connection.check_names: # builds new lists with sanitized dn - safe_members_dn = [] - safe_groups_dn = [] - for member_dn in members_dn: - safe_members_dn.append(safe_dn(member_dn)) - for group_dn in groups_dn: - safe_groups_dn.append(safe_dn(group_dn)) - - members_dn = safe_members_dn - groups_dn = safe_groups_dn - - error = False - - for group in groups_dn: - if fix: # checks for existance of group and for already assigned members - result = connection.search(group, '(objectclass=*)', BASE, dereference_aliases=DEREF_NEVER, attributes=['member']) - - if not connection.strategy.sync: - response, result = connection.get_response(result) - else: - response, result = connection.response, connection.result - - if not result['description'] == 'success': - raise LDAPInvalidDnError(group + ' not found') - - existing_members = response[0]['attributes']['member'] if 'member' in response[0]['attributes'] else [] - else: - existing_members = members_dn - - existing_members = [element.lower() for element in existing_members] - changes = dict() - member_to_remove = [element for element in members_dn if element.lower() in existing_members] - if member_to_remove: - changes['member'] = (MODIFY_DELETE, member_to_remove) - if changes: - result = connection.modify(group, changes) - if not connection.strategy.sync: - _, result = connection.get_response(result) - else: - result = connection.result - if result['description'] != 'success': - error = True - break - - return not error +""" +""" + +# Created on 2016.12.26 +# +# Author: Giovanni Cannata +# +# Copyright 2016 - 2018 Giovanni Cannata +# +# This file is part of ldap3. +# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . + +from ...core.exceptions import LDAPInvalidDnError, LDAPOperationsErrorResult +from ... 
import SEQUENCE_TYPES, MODIFY_DELETE, BASE, DEREF_NEVER +from ...utils.dn import safe_dn + + +def ad_remove_members_from_groups(connection, + members_dn, + groups_dn, + fix, + raise_error=False): + """ + :param connection: a bound Connection object + :param members_dn: the list of members to remove from groups + :param groups_dn: the list of groups where members are to be removed + :param fix: checks for group existence and existing members + :param raise_error: If the operation fails it raises an error instead of returning False + :return: a boolean where True means that the operation was successful and False means an error has happened + Removes users-groups relations following the Active Directory rules: users are removed from groups' member attribute + + """ + if not isinstance(members_dn, SEQUENCE_TYPES): + members_dn = [members_dn] + + if not isinstance(groups_dn, SEQUENCE_TYPES): + groups_dn = [groups_dn] + + if connection.check_names: # builds new lists with sanitized dn + members_dn = [safe_dn(member_dn) for member_dn in members_dn] + groups_dn = [safe_dn(group_dn) for group_dn in groups_dn] + + error = False + + for group in groups_dn: + if fix: # checks for existence of group and for already assigned members + result = connection.search(group, '(objectclass=*)', BASE, dereference_aliases=DEREF_NEVER, attributes=['member']) + + if not connection.strategy.sync: + response, result = connection.get_response(result) + else: + response, result = connection.response, connection.result + + if not result['description'] == 'success': + raise LDAPInvalidDnError(group + ' not found') + + existing_members = response[0]['attributes']['member'] if 'member' in response[0]['attributes'] else [] + else: + existing_members = members_dn + + existing_members = [element.lower() for element in existing_members] + changes = dict() + member_to_remove = [element for element in members_dn if element.lower() in existing_members] + if member_to_remove: + changes['member'] = (MODIFY_DELETE, member_to_remove) + if changes: + result = connection.modify(group, changes) + if not connection.strategy.sync: + _, result = connection.get_response(result) + else: + result = connection.result + if result['description'] != 'success': + error = True + result_error_params = ['result', 'description', 'dn', 'message'] + if raise_error: + raise LDAPOperationsErrorResult([(k, v) for k, v in result.items() if k in result_error_params]) + break + + return not error diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/novell/getBindDn.py b/server/www/packages/packages-linux/x64/ldap3/extend/novell/getBindDn.py index 39fae2b..2bc4deb 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/novell/getBindDn.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/novell/getBindDn.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/novell/listReplicas.py b/server/www/packages/packages-linux/x64/ldap3/extend/novell/listReplicas.py index fdc6d08..804e8fe 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/novell/listReplicas.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/novell/listReplicas.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3.
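The rewritten ad_add_members_to_groups and ad_remove_members_from_groups above now sanitize DNs through safe_dn when connection.check_names is set, and the new raise_error flag turns a failed modify into an LDAPOperationsErrorResult instead of a plain False return. A hedged usage sketch (server, credentials, and DNs are illustrative placeholders):

from ldap3 import Server, Connection
from ldap3.extend.microsoft.addMembersToGroups import ad_add_members_to_groups
from ldap3.extend.microsoft.removeMembersFromGroups import ad_remove_members_from_groups

conn = Connection(Server('ad.example.com'), user='EXAMPLE\\admin',
                  password='secret', auto_bind=True)

user_dn = 'CN=Alice,OU=Users,DC=example,DC=com'
group_dn = 'CN=Staff,OU=Groups,DC=example,DC=com'

# fix=True first reads the group and skips members that are already present;
# raise_error=True raises LDAPOperationsErrorResult on a failed modify.
ad_add_members_to_groups(conn, user_dn, group_dn, fix=True, raise_error=True)
ad_remove_members_from_groups(conn, user_dn, group_dn, fix=True, raise_error=True)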
# @@ -45,6 +45,6 @@ class ListReplicas(ExtendedOperation): def populate_result(self): try: - self.result['replicas'] = str(self.decoded_response['replicaList']) if self.decoded_response['replicaList'] else None + self.result['replicas'] = [str(replica) for replica in self.decoded_response] if self.decoded_response else None except TypeError: self.result['replicas'] = None diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasGetUniversalPassword.py b/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasGetUniversalPassword.py index b8b045b..a16b79f 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasGetUniversalPassword.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasGetUniversalPassword.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasSetUniversalPassword.py b/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasSetUniversalPassword.py index 65ea0d6..ee243ba 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasSetUniversalPassword.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/novell/nmasSetUniversalPassword.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/novell/partition_entry_count.py b/server/www/packages/packages-linux/x64/ldap3/extend/novell/partition_entry_count.py index 8218aea..185fa4d 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/novell/partition_entry_count.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/novell/partition_entry_count.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/novell/replicaInfo.py b/server/www/packages/packages-linux/x64/ldap3/extend/novell/replicaInfo.py index 45bd0e9..e331686 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/novell/replicaInfo.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/novell/replicaInfo.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/operation.py b/server/www/packages/packages-linux/x64/ldap3/extend/operation.py index 9906885..2998af3 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/operation.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/operation.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. 
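The PagedSearch changes in the next hunk disable auto_referrals inside paged_search_generator, chase referrals manually through create_referral_connection, and restore (or cache) the original connection once the generator is drained; exceptions are now raised only when the connection was created with raise_exceptions. The public entry point is unchanged; a minimal sketch (host and base DN are placeholders):

from ldap3 import Server, Connection, SUBTREE

conn = Connection(Server('ldap.example.com'), auto_bind=True)

# paged_search wraps paged_search_generator; pages are fetched with the
# Simple Paged Results control (OID 1.2.840.113556.1.4.319) seen below.
for entry in conn.extend.standard.paged_search('dc=example,dc=com',
                                               '(objectClass=person)',
                                               search_scope=SUBTREE,
                                               attributes=['cn'],
                                               paged_size=100,
                                               generator=True):
    print(entry['dn'])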
# diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/standard/PagedSearch.py b/server/www/packages/packages-linux/x64/ldap3/extend/standard/PagedSearch.py index 1b5df49..30838df 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/standard/PagedSearch.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/standard/PagedSearch.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -47,7 +47,11 @@ def paged_search_generator(connection, search_base = safe_dn(search_base) responses = [] - cookie = True # performs search at least one time + original_connection = None + original_auto_referrals = connection.auto_referrals + connection.auto_referrals = False # disable auto referrals because it cannot handle paged searches + cookie = True # performs search operation at least one time + cachekey = None # for referrals cache while cookie: result = connection.search(search_base, search_filter, @@ -69,13 +73,19 @@ def paged_search_generator(connection, response = connection.response result = connection.result + if result['referrals'] and original_auto_referrals: # if referrals are returned, restart the loop with a new connection to the referral + if not original_connection: + original_connection = connection + _, connection, cachekey = connection.strategy.create_referral_connection(result['referrals']) # switch the connection to a valid referral + continue + responses.extend(response) try: cookie = result['controls']['1.2.840.113556.1.4.319']['value']['cookie'] except KeyError: cookie = None - if result and result['result'] not in DO_NOT_RAISE_EXCEPTIONS: + if connection.raise_exceptions and result and result['result'] not in DO_NOT_RAISE_EXCEPTIONS: if log_enabled(PROTOCOL): log(PROTOCOL, 'paged search operation result <%s> for <%s>', result, connection) if result['result'] == RESULT_SIZE_LIMIT_EXCEEDED: @@ -86,6 +96,14 @@ while responses: yield responses.pop() + if original_connection: + connection = original_connection + if connection.use_referral_cache and cachekey: + connection.strategy.referral_cache[cachekey] = connection + else: + connection.unbind() + + connection.auto_referrals = original_auto_referrals connection.response = None diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/standard/PersistentSearch.py b/server/www/packages/packages-linux/x64/ldap3/extend/standard/PersistentSearch.py index 62286e1..1c73d4e 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/standard/PersistentSearch.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/standard/PersistentSearch.py @@ -80,7 +80,8 @@ class PersistentSearch(object): else: self.controls = controls - self.controls.append(persistent_search_control(events_type, changes_only, notifications)) + if events_type and changes_only and notifications: + self.controls.append(persistent_search_control(events_type, changes_only, notifications)) self.start() def start(self): @@ -101,9 +102,10 @@ class PersistentSearch(object): controls=self.controls) self.connection.strategy.persistent_search_message_id = self.message_id - def stop(self): + def stop(self, unbind=True): self.connection.abandon(self.message_id) - self.connection.unbind() + if unbind: + self.connection.unbind() if self.message_id in self.connection.strategy._responses: del self.connection.strategy._responses[self.message_id] if hasattr(self.connection.strategy, '_requests')
and self.message_id in self.connection.strategy._requests: # asynchronous strategy has a dict of requests that could be returned by get_response() @@ -111,11 +113,25 @@ self.connection.strategy.persistent_search_message_id = None self.message_id = None - def next(self): + def next(self, block=False, timeout=None): if not self.connection.strategy.streaming and not self.connection.strategy.callback: try: - return self.connection.strategy.events.get_nowait() + return self.connection.strategy.events.get(block, timeout) except Empty: return None raise LDAPExtensionError('Persistent search is not accumulating events in queue') + + def funnel(self, block=False, timeout=None): + esci = False + while not esci: + try: + entry = self.connection.strategy.events.get(block, timeout) + except Empty: + yield None + continue # 'entry' is not bound when the queue was empty + if entry['type'] == 'searchResEntry': + yield entry + else: + esci = True + + yield entry diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/standard/modifyPassword.py b/server/www/packages/packages-linux/x64/ldap3/extend/standard/modifyPassword.py index 167816e..0ec4fd3 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/standard/modifyPassword.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/standard/modifyPassword.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -67,6 +67,6 @@ class ModifyPassword(ExtendedOperation): self.result[self.response_attribute] = True else: # change was not successful, raises exception if raise_exception = True in connection or returns the operation result, error code is in result['result'] self.result[self.response_attribute] = False - if not self.connection.raise_exceptions: + if self.connection.raise_exceptions: from ...core.exceptions import LDAPOperationResult raise LDAPOperationResult(result=self.result['result'], description=self.result['description'], dn=self.result['dn'], message=self.result['message'], response_type=self.result['type']) diff --git a/server/www/packages/packages-linux/x64/ldap3/extend/standard/whoAmI.py b/server/www/packages/packages-linux/x64/ldap3/extend/standard/whoAmI.py index 0eda5c4..cb9235e 100644 --- a/server/www/packages/packages-linux/x64/ldap3/extend/standard/whoAmI.py +++ b/server/www/packages/packages-linux/x64/ldap3/extend/standard/whoAmI.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -24,11 +24,9 @@ # If not, see .
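The PersistentSearch changes above make next() optionally blocking (the block/timeout pair is passed straight to Queue.get()) and let stop() abandon the search without tearing down the connection. A hedged sketch, assuming a server that supports the persistent search control and the ASYNC_STREAM client strategy (host and base DN are placeholders):

from ldap3 import Server, Connection, ASYNC_STREAM

conn = Connection(Server('ldap.example.com'), client_strategy=ASYNC_STREAM, auto_bind=True)

# streaming=False queues events so they can be pulled with next()
monitor = conn.extend.standard.persistent_search('dc=example,dc=com', streaming=False)

event = monitor.next(block=True, timeout=5)  # None if nothing arrived within 5 seconds
print(event)

monitor.stop(unbind=False)  # abandon the search but keep the connection bound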
# implements RFC4532 -from pyasn1.type.univ import NoValue - from ...extend.operation import ExtendedOperation from ...utils.conv import to_unicode -from ...protocol.rfc4511 import OctetString + class WhoAmI(ExtendedOperation): def config(self): diff --git a/server/www/packages/packages-linux/x64/ldap3/operation/bind.py b/server/www/packages/packages-linux/x64/ldap3/operation/bind.py index 0eecc4e..78eeca8 100644 --- a/server/www/packages/packages-linux/x64/ldap3/operation/bind.py +++ b/server/www/packages/packages-linux/x64/ldap3/operation/bind.py @@ -122,7 +122,7 @@ def bind_response_to_dict(response): 'description': ResultCode().getNamedValues().getName(response['resultCode']), 'dn': str(response['matchedDN']), 'message': str(response['diagnosticMessage']), - 'referrals': referrals_to_list(response['referral']), + 'referrals': referrals_to_list(response['referral']) if response['referral'] is not None and response['referral'].hasValue() else [], 'saslCreds': bytes(response['serverSaslCreds']) if response['serverSaslCreds'] is not None and response['serverSaslCreds'].hasValue() else None} diff --git a/server/www/packages/packages-linux/x64/ldap3/operation/search.py b/server/www/packages/packages-linux/x64/ldap3/operation/search.py index 7cf2fb3..70088d6 100644 --- a/server/www/packages/packages-linux/x64/ldap3/operation/search.py +++ b/server/www/packages/packages-linux/x64/ldap3/operation/search.py @@ -525,10 +525,11 @@ def search_result_entry_response_to_dict(response, schema, custom_formatter, che entry = dict() # entry['dn'] = str(response['object']) if response['object']: - entry['raw_dn'] = to_raw(response['object']) if isinstance(response['object'], STRING_TYPES): # mock strategies return string not a PyAsn1 object + entry['raw_dn'] = to_raw(response['object']) entry['dn'] = to_unicode(response['object']) else: + entry['raw_dn'] = str(response['object']) entry['dn'] = to_unicode(bytes(response['object']), from_server=True) else: entry['raw_dn'] = b'' @@ -555,6 +556,8 @@ def search_result_done_response_to_dict(response): result['controls'][control[0]] = control[1] return result + + def search_result_reference_response_to_dict(response): return {'uri': search_refs_to_list(response)} diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/convert.py b/server/www/packages/packages-linux/x64/ldap3/protocol/convert.py index 319f36d..e3a94f3 100644 --- a/server/www/packages/packages-linux/x64/ldap3/protocol/convert.py +++ b/server/www/packages/packages-linux/x64/ldap3/protocol/convert.py @@ -37,6 +37,7 @@ def attribute_to_dict(attribute): except PyAsn1Error: # invalid encoding, return bytes value return {'type': str(attribute['type']), 'values': [bytes(val) for val in attribute['vals']]} + def attributes_to_dict(attributes): attributes_dict = dict() for attribute in attributes: @@ -46,7 +47,10 @@ def attributes_to_dict(attributes): def referrals_to_list(referrals): - return [str(referral) for referral in referrals if referral] if referrals else None + if isinstance(referrals, list): + return [str(referral) for referral in referrals if referral] if referrals else None + else: + return [str(referral) for referral in referrals if referral] if referrals is not None and referrals.hasValue() else None def search_refs_to_list(search_refs): @@ -93,6 +97,7 @@ def ava_to_dict(ava): except Exception: return {'attribute': str(ava['attributeDesc']), 'value': bytes(ava['assertionValue'])} + def substring_to_dict(substring): return {'initial': substring['initial'] if substring['initial'] 
else '', 'any': [middle for middle in substring['any']] if substring['any'] else '', 'final': substring['final'] if substring['final'] else ''} diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/formatters.py b/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/formatters.py index 36cd9c8..f7bfe0c 100644 --- a/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/formatters.py +++ b/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/formatters.py @@ -1,407 +1,436 @@ -""" -""" - -# Created on 2014.10.28 -# -# Author: Giovanni Cannata -# -# Copyright 2014 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . - -import re - -from binascii import hexlify -from uuid import UUID -from datetime import datetime, timedelta -from ...utils.conv import to_unicode - -from ...core.timezone import OffsetTzInfo - -def format_unicode(raw_value): - try: - if str is not bytes: # Python 3 - return str(raw_value, 'utf-8', errors='strict') - else: # Python 2 - return unicode(raw_value, 'utf-8', errors='strict') - except (TypeError, UnicodeDecodeError): - pass - - return raw_value - - -def format_integer(raw_value): - try: - return int(raw_value) - except (TypeError, ValueError): # expected exceptions - pass - except Exception: # any other exception should be investigated, anyway the formatter return the raw_value - pass - - return raw_value - - -def format_binary(raw_value): - try: - return bytes(raw_value) - except TypeError: # expected exceptions - pass - except Exception: # any other exception should be investigated, anyway the formatter return the raw_value - pass - - return raw_value - - -def format_uuid(raw_value): - try: - return str(UUID(bytes=raw_value)) - except (TypeError, ValueError): - return format_unicode(raw_value) - except Exception: # any other exception should be investigated, anyway the formatter return the raw_value - pass - - return raw_value - - -def format_uuid_le(raw_value): - try: - return '{' + str(UUID(bytes_le=raw_value)) + '}' - except (TypeError, ValueError): - return format_unicode(raw_value) - except Exception: # any other exception should be investigated, anyway the formatter return the raw_value - pass - - return raw_value - - -def format_boolean(raw_value): - if raw_value in [b'TRUE', b'true', b'True']: - return True - if raw_value in [b'FALSE', b'false', b'False']: - return False - - return raw_value - - -def format_ad_timestamp(raw_value): - """ - Active Directory stores date/time values as the number of 100-nanosecond intervals - that have elapsed since the 0 hour on January 1, 1601 till the date/time that is being stored. - The time is always stored in Greenwich Mean Time (GMT) in the Active Directory. 
- """ - if raw_value == b'9223372036854775807': # max value to be stored in a 64 bit signed int - return datetime.max # returns datetime.datetime(9999, 12, 31, 23, 59, 59, 999999) - try: - timestamp = int(raw_value) - if timestamp < 0: # ad timestamp cannot be negative - return raw_value - except Exception: - return raw_value - - try: - return datetime.fromtimestamp(timestamp / 10000000.0 - 11644473600, tz=OffsetTzInfo(0, 'UTC')) # forces true division in python 2 - except (OSError, OverflowError, ValueError): # on Windows backwards timestamps are not allowed - try: - unix_epoch = datetime.fromtimestamp(0, tz=OffsetTzInfo(0, 'UTC')) - diff_seconds = timedelta(seconds=timestamp/10000000.0 - 11644473600) - return unix_epoch + diff_seconds - except Exception: - pass - except Exception: - pass - - return raw_value - - -try: # uses regular expressions and the timezone class (python3.2 and later) - from datetime import timezone - time_format = re.compile( - r''' - ^ - (?P[0-9]{4}) - (?P0[1-9]|1[0-2]) - (?P0[1-9]|[12][0-9]|3[01]) - (?P[01][0-9]|2[0-3]) - (?: - (?P[0-5][0-9]) - (?P[0-5][0-9]|60)? - )? - (?: - [.,] - (?P[0-9]+) - )? - (?: - Z - | - (?: - (?P[+-]) - (?P[01][0-9]|2[0-3]) - (?P[0-5][0-9])? - ) - ) - $ - ''', - re.VERBOSE - ) - - def format_time(raw_value): - try: - match = time_format.fullmatch(to_unicode(raw_value)) - if match is None: - return raw_value - matches = match.groupdict() - - offset = timedelta( - hours=int(matches['OffHour'] or 0), - minutes=int(matches['OffMinute'] or 0) - ) - - if matches['Offset'] == '-': - offset *= -1 - - # Python does not support leap second in datetime (!) - if matches['Second'] == '60': - matches['Second'] = '59' - - # According to RFC, fraction may be applied to an Hour/Minute (!) - fraction = float('0.' + (matches['Fraction'] or '0')) - - if matches['Minute'] is None: - fraction *= 60 - minute = int(fraction) - fraction -= minute - else: - minute = int(matches['Minute']) - - if matches['Second'] is None: - fraction *= 60 - second = int(fraction) - fraction -= second - else: - second = int(matches['Second']) - - microseconds = int(fraction * 1000000) - - return datetime( - int(matches['Year']), - int(matches['Month']), - int(matches['Day']), - int(matches['Hour']), - minute, - second, - microseconds, - timezone(offset), - ) - except Exception: # exceptions should be investigated, anyway the formatter return the raw_value - pass - return raw_value - -except ImportError: - def format_time(raw_value): - """ - From RFC4517: - A value of the Generalized Time syntax is a character string - representing a date and time. 
The LDAP-specific encoding of a value - of this syntax is a restriction of the format defined in [ISO8601], - and is described by the following ABNF: - - GeneralizedTime = century year month day hour - [ minute [ second / leap-second ] ] - [ fraction ] - g-time-zone - - century = 2(%x30-39) ; "00" to "99" - year = 2(%x30-39) ; "00" to "99" - month = ( %x30 %x31-39 ) ; "01" (January) to "09" - / ( %x31 %x30-32 ) ; "10" to "12" - day = ( %x30 %x31-39 ) ; "01" to "09" - / ( %x31-32 %x30-39 ) ; "10" to "29" - / ( %x33 %x30-31 ) ; "30" to "31" - hour = ( %x30-31 %x30-39 ) / ( %x32 %x30-33 ) ; "00" to "23" - minute = %x30-35 %x30-39 ; "00" to "59" - second = ( %x30-35 %x30-39 ) ; "00" to "59" - leap-second = ( %x36 %x30 ) ; "60" - fraction = ( DOT / COMMA ) 1*(%x30-39) - g-time-zone = %x5A ; "Z" - / g-differential - g-differential = ( MINUS / PLUS ) hour [ minute ] - MINUS = %x2D ; minus sign ("-") - """ - - if len(raw_value) < 10 or not all((c in b'0123456789+-,.Z' for c in raw_value)) or (b'Z' in raw_value and not raw_value.endswith(b'Z')): # first ten characters are mandatory and must be numeric or timezone or fraction - return raw_value - - # sets position for fixed values - year = int(raw_value[0: 4]) - month = int(raw_value[4: 6]) - day = int(raw_value[6: 8]) - hour = int(raw_value[8: 10]) - minute = 0 - second = 0 - microsecond = 0 - - remain = raw_value[10:] - if remain and remain.endswith(b'Z'): # uppercase 'Z' - sep = b'Z' - elif b'+' in remain: # timezone can be specified with +hh[mm] or -hh[mm] - sep = b'+' - elif b'-' in remain: - sep = b'-' - else: # timezone not specified - return raw_value - - time, _, offset = remain.partition(sep) - - if time and (b'.' in time or b',' in time): - # fraction time - if time[0] in b',.': - minute = 6 * int(time[1] if str is bytes else chr(time[1])) # Python 2 / Python 3 - elif time[2] in b',.': - minute = int(raw_value[10: 12]) - second = 6 * int(time[3] if str is bytes else chr(time[3])) # Python 2 / Python 3 - elif time[4] in b',.': - minute = int(raw_value[10: 12]) - second = int(raw_value[12: 14]) - microsecond = 100000 * int(time[5] if str is bytes else chr(time[5])) # Python 2 / Python 3 - elif len(time) == 2: # mmZ format - minute = int(raw_value[10: 12]) - elif len(time) == 0: # Z format - pass - elif len(time) == 4: # mmssZ - minute = int(raw_value[10: 12]) - second = int(raw_value[12: 14]) - else: - return raw_value - - if sep == b'Z': # UTC - timezone = OffsetTzInfo(0, 'UTC') - else: # build timezone - try: - if len(offset) == 2: - timezone_hour = int(offset[:2]) - timezone_minute = 0 - elif len(offset) == 4: - timezone_hour = int(offset[:2]) - timezone_minute = int(offset[2:4]) - else: # malformed timezone - raise ValueError - except ValueError: - return raw_value - if timezone_hour > 23 or timezone_minute > 59: # invalid timezone - return raw_value - - if str is not bytes: # Python 3 - timezone = OffsetTzInfo((timezone_hour * 60 + timezone_minute) * (1 if sep == b'+' else -1), 'UTC' + str(sep + offset, encoding='utf-8')) - else: # Python 2 - timezone = OffsetTzInfo((timezone_hour * 60 + timezone_minute) * (1 if sep == b'+' else -1), unicode('UTC' + sep + offset, encoding='utf-8')) - - try: - return datetime(year=year, - month=month, - day=day, - hour=hour, - minute=minute, - second=second, - microsecond=microsecond, - tzinfo=timezone) - except (TypeError, ValueError): - pass - - return raw_value - - -def format_time_with_0_year(raw_value): - try: - if raw_value.startswith(b'0000'): - return raw_value - except Exception: - try: - if 
raw_value.startswith('0000'): - return raw_value - except Exception: - pass - - return format_time(raw_value) - - -def format_sid(raw_value): - """ - SID= "S-1-" IdentifierAuthority 1*SubAuthority - IdentifierAuthority= IdentifierAuthorityDec / IdentifierAuthorityHex - ; If the identifier authority is < 2^32, the - ; identifier authority is represented as a decimal - ; number - ; If the identifier authority is >= 2^32, - ; the identifier authority is represented in - ; hexadecimal - IdentifierAuthorityDec = 1*10DIGIT - ; IdentifierAuthorityDec, top level authority of a - ; security identifier is represented as a decimal number - IdentifierAuthorityHex = "0x" 12HEXDIG - ; IdentifierAuthorityHex, the top-level authority of a - ; security identifier is represented as a hexadecimal number - SubAuthority= "-" 1*10DIGIT - ; Sub-Authority is always represented as a decimal number - ; No leading "0" characters are allowed when IdentifierAuthority - ; or SubAuthority is represented as a decimal number - ; All hexadecimal digits must be output in string format, - ; pre-pended by "0x" - - Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. - SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. - IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. - SubAuthority (variable): A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. - """ - try: - if raw_value.startswith(b'S-1-'): - return raw_value - except Exception: - try: - if raw_value.startswith('S-1-'): - return raw_value - except Exception: - pass - try: - if str is not bytes: # Python 3 - revision = int(raw_value[0]) - sub_authority_count = int(raw_value[1]) - identifier_authority = int.from_bytes(raw_value[2:8], byteorder='big') - if identifier_authority >= 4294967296: # 2 ^ 32 - identifier_authority = hex(identifier_authority) - - sub_authority = '' - i = 0 - while i < sub_authority_count: - sub_authority += '-' + str(int.from_bytes(raw_value[8 + (i * 4): 12 + (i * 4)], byteorder='little')) # little endian - i += 1 - else: # Python 2 - revision = int(ord(raw_value[0])) - sub_authority_count = int(ord(raw_value[1])) - identifier_authority = int(hexlify(raw_value[2:8]), 16) - if identifier_authority >= 4294967296: # 2 ^ 32 - identifier_authority = hex(identifier_authority) - - sub_authority = '' - i = 0 - while i < sub_authority_count: - sub_authority += '-' + str(int(hexlify(raw_value[11 + (i * 4): 7 + (i * 4): -1]), 16)) # little endian - i += 1 - return 'S-' + str(revision) + '-' + str(identifier_authority) + sub_authority - except Exception: # any exception should be investigated, anyway the formatter return the raw_value - pass - - return raw_value +""" +""" + +# Created on 2014.10.28 +# +# Author: Giovanni Cannata +# +# Copyright 2014 - 2019 Giovanni Cannata +# +# This file is part of ldap3. 
+# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . + +import re + +from binascii import hexlify +from uuid import UUID +from datetime import datetime, timedelta +from ...utils.conv import to_unicode + +from ...core.timezone import OffsetTzInfo + + +def format_unicode(raw_value): + try: + if str is not bytes: # Python 3 + return str(raw_value, 'utf-8', errors='strict') + else: # Python 2 + return unicode(raw_value, 'utf-8', errors='strict') + except (TypeError, UnicodeDecodeError): + pass + + return raw_value + + +def format_integer(raw_value): + try: + return int(raw_value) + except (TypeError, ValueError): # expected exceptions + pass + except Exception: # any other exception should be investigated, anyway the formatter return the raw_value + pass + + return raw_value + + +def format_binary(raw_value): + try: + return bytes(raw_value) + except TypeError: # expected exceptions + pass + except Exception: # any other exception should be investigated, anyway the formatter return the raw_value + pass + + return raw_value + + +def format_uuid(raw_value): + try: + return str(UUID(bytes=raw_value)) + except (TypeError, ValueError): + return format_unicode(raw_value) + except Exception: # any other exception should be investigated, anyway the formatter return the raw_value + pass + + return raw_value + + +def format_uuid_le(raw_value): + try: + return '{' + str(UUID(bytes_le=raw_value)) + '}' + except (TypeError, ValueError): + return format_unicode(raw_value) + except Exception: # any other exception should be investigated, anyway the formatter return the raw_value + pass + + return raw_value + + +def format_boolean(raw_value): + if raw_value in [b'TRUE', b'true', b'True']: + return True + if raw_value in [b'FALSE', b'false', b'False']: + return False + + return raw_value + + +def format_ad_timestamp(raw_value): + """ + Active Directory stores date/time values as the number of 100-nanosecond intervals + that have elapsed since the 0 hour on January 1, 1601 till the date/time that is being stored. + The time is always stored in Greenwich Mean Time (GMT) in the Active Directory. 
+ """ + utc_timezone = OffsetTzInfo(0, 'UTC') + if raw_value == b'9223372036854775807': # max value to be stored in a 64 bit signed int + return datetime.max.replace(tzinfo=utc_timezone) # returns datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=OffsetTzInfo(offset=0, name='UTC')) + try: + timestamp = int(raw_value) + if timestamp < 0: # ad timestamp cannot be negative + timestamp = timestamp * -1 + except Exception: + return raw_value + + try: + return datetime.fromtimestamp(timestamp / 10000000.0 - 11644473600, + tz=utc_timezone) # forces true division in python 2 + except (OSError, OverflowError, ValueError): # on Windows backwards timestamps are not allowed + try: + unix_epoch = datetime.fromtimestamp(0, tz=utc_timezone) + diff_seconds = timedelta(seconds=timestamp / 10000000.0 - 11644473600) + return unix_epoch + diff_seconds + except Exception: + pass + except Exception: + pass + + return raw_value + + +try: # uses regular expressions and the timezone class (python3.2 and later) + from datetime import timezone + + time_format = re.compile( + r''' + ^ + (?P[0-9]{4}) + (?P0[1-9]|1[0-2]) + (?P0[1-9]|[12][0-9]|3[01]) + (?P[01][0-9]|2[0-3]) + (?: + (?P[0-5][0-9]) + (?P[0-5][0-9]|60)? + )? + (?: + [.,] + (?P[0-9]+) + )? + (?: + Z + | + (?: + (?P[+-]) + (?P[01][0-9]|2[0-3]) + (?P[0-5][0-9])? + ) + ) + $ + ''', + re.VERBOSE + ) + + + def format_time(raw_value): + try: + match = time_format.fullmatch(to_unicode(raw_value)) + if match is None: + return raw_value + matches = match.groupdict() + + offset = timedelta( + hours=int(matches['OffHour'] or 0), + minutes=int(matches['OffMinute'] or 0) + ) + + if matches['Offset'] == '-': + offset *= -1 + + # Python does not support leap second in datetime (!) + if matches['Second'] == '60': + matches['Second'] = '59' + + # According to RFC, fraction may be applied to an Hour/Minute (!) + fraction = float('0.' + (matches['Fraction'] or '0')) + + if matches['Minute'] is None: + fraction *= 60 + minute = int(fraction) + fraction -= minute + else: + minute = int(matches['Minute']) + + if matches['Second'] is None: + fraction *= 60 + second = int(fraction) + fraction -= second + else: + second = int(matches['Second']) + + microseconds = int(fraction * 1000000) + + return datetime( + int(matches['Year']), + int(matches['Month']), + int(matches['Day']), + int(matches['Hour']), + minute, + second, + microseconds, + timezone(offset), + ) + except Exception: # exceptions should be investigated, anyway the formatter return the raw_value + pass + return raw_value + +except ImportError: + def format_time(raw_value): + """ + From RFC4517: + A value of the Generalized Time syntax is a character string + representing a date and time. 
The LDAP-specific encoding of a value + of this syntax is a restriction of the format defined in [ISO8601], + and is described by the following ABNF: + + GeneralizedTime = century year month day hour + [ minute [ second / leap-second ] ] + [ fraction ] + g-time-zone + + century = 2(%x30-39) ; "00" to "99" + year = 2(%x30-39) ; "00" to "99" + month = ( %x30 %x31-39 ) ; "01" (January) to "09" + / ( %x31 %x30-32 ) ; "10" to "12" + day = ( %x30 %x31-39 ) ; "01" to "09" + / ( %x31-32 %x30-39 ) ; "10" to "29" + / ( %x33 %x30-31 ) ; "30" to "31" + hour = ( %x30-31 %x30-39 ) / ( %x32 %x30-33 ) ; "00" to "23" + minute = %x30-35 %x30-39 ; "00" to "59" + second = ( %x30-35 %x30-39 ) ; "00" to "59" + leap-second = ( %x36 %x30 ) ; "60" + fraction = ( DOT / COMMA ) 1*(%x30-39) + g-time-zone = %x5A ; "Z" + / g-differential + g-differential = ( MINUS / PLUS ) hour [ minute ] + MINUS = %x2D ; minus sign ("-") + """ + + if len(raw_value) < 10 or not all((c in b'0123456789+-,.Z' for c in raw_value)) or ( + b'Z' in raw_value and not raw_value.endswith( + b'Z')): # first ten characters are mandatory and must be numeric or timezone or fraction + return raw_value + + # sets position for fixed values + year = int(raw_value[0: 4]) + month = int(raw_value[4: 6]) + day = int(raw_value[6: 8]) + hour = int(raw_value[8: 10]) + minute = 0 + second = 0 + microsecond = 0 + + remain = raw_value[10:] + if remain and remain.endswith(b'Z'): # uppercase 'Z' + sep = b'Z' + elif b'+' in remain: # timezone can be specified with +hh[mm] or -hh[mm] + sep = b'+' + elif b'-' in remain: + sep = b'-' + else: # timezone not specified + return raw_value + + time, _, offset = remain.partition(sep) + + if time and (b'.' in time or b',' in time): + # fraction time + if time[0] in b',.': + minute = 6 * int(time[1] if str is bytes else chr(time[1])) # Python 2 / Python 3 + elif time[2] in b',.': + minute = int(raw_value[10: 12]) + second = 6 * int(time[3] if str is bytes else chr(time[3])) # Python 2 / Python 3 + elif time[4] in b',.': + minute = int(raw_value[10: 12]) + second = int(raw_value[12: 14]) + microsecond = 100000 * int(time[5] if str is bytes else chr(time[5])) # Python 2 / Python 3 + elif len(time) == 2: # mmZ format + minute = int(raw_value[10: 12]) + elif len(time) == 0: # Z format + pass + elif len(time) == 4: # mmssZ + minute = int(raw_value[10: 12]) + second = int(raw_value[12: 14]) + else: + return raw_value + + if sep == b'Z': # UTC + timezone = OffsetTzInfo(0, 'UTC') + else: # build timezone + try: + if len(offset) == 2: + timezone_hour = int(offset[:2]) + timezone_minute = 0 + elif len(offset) == 4: + timezone_hour = int(offset[:2]) + timezone_minute = int(offset[2:4]) + else: # malformed timezone + raise ValueError + except ValueError: + return raw_value + if timezone_hour > 23 or timezone_minute > 59: # invalid timezone + return raw_value + + if str is not bytes: # Python 3 + timezone = OffsetTzInfo((timezone_hour * 60 + timezone_minute) * (1 if sep == b'+' else -1), + 'UTC' + str(sep + offset, encoding='utf-8')) + else: # Python 2 + timezone = OffsetTzInfo((timezone_hour * 60 + timezone_minute) * (1 if sep == b'+' else -1), + unicode('UTC' + sep + offset, encoding='utf-8')) + + try: + return datetime(year=year, + month=month, + day=day, + hour=hour, + minute=minute, + second=second, + microsecond=microsecond, + tzinfo=timezone) + except (TypeError, ValueError): + pass + + return raw_value + + +def format_ad_timedelta(raw_value): + """ + Convert a negative filetime value to a timedelta. 
+ """ + # Active Directory stores attributes like "minPwdAge" as a negative + # "filetime" timestamp, which is the number of 100-nanosecond intervals that + # have elapsed since the 0 hour on January 1, 1601. + # + # Handle the minimum value that can be stored in a 64 bit signed integer. + # See https://docs.microsoft.com/en-us/dotnet/api/system.int64.minvalue + # In attributes like "maxPwdAge", this signifies never. + if raw_value == b'-9223372036854775808': + return timedelta.max + # We can reuse format_ad_timestamp to get a datetime object from the + # timestamp. Afterwards, we can subtract a datetime representing 0 hour on + # January 1, 1601 from the returned datetime to get the timedelta. + return format_ad_timestamp(raw_value) - format_ad_timestamp(0) + + +def format_time_with_0_year(raw_value): + try: + if raw_value.startswith(b'0000'): + return raw_value + except Exception: + try: + if raw_value.startswith('0000'): + return raw_value + except Exception: + pass + + return format_time(raw_value) + + +def format_sid(raw_value): + """ + SID= "S-1-" IdentifierAuthority 1*SubAuthority + IdentifierAuthority= IdentifierAuthorityDec / IdentifierAuthorityHex + ; If the identifier authority is < 2^32, the + ; identifier authority is represented as a decimal + ; number + ; If the identifier authority is >= 2^32, + ; the identifier authority is represented in + ; hexadecimal + IdentifierAuthorityDec = 1*10DIGIT + ; IdentifierAuthorityDec, top level authority of a + ; security identifier is represented as a decimal number + IdentifierAuthorityHex = "0x" 12HEXDIG + ; IdentifierAuthorityHex, the top-level authority of a + ; security identifier is represented as a hexadecimal number + SubAuthority= "-" 1*10DIGIT + ; Sub-Authority is always represented as a decimal number + ; No leading "0" characters are allowed when IdentifierAuthority + ; or SubAuthority is represented as a decimal number + ; All hexadecimal digits must be output in string format, + ; pre-pended by "0x" + + Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. + SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. + IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. + SubAuthority (variable): A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. 
+    """
+    try:
+        if raw_value.startswith(b'S-1-'):
+            return raw_value
+    except Exception:
+        try:
+            if raw_value.startswith('S-1-'):
+                return raw_value
+        except Exception:
+            pass
+    try:
+        if str is not bytes:  # Python 3
+            revision = int(raw_value[0])
+            sub_authority_count = int(raw_value[1])
+            identifier_authority = int.from_bytes(raw_value[2:8], byteorder='big')
+            if identifier_authority >= 4294967296:  # 2 ^ 32
+                identifier_authority = hex(identifier_authority)
+
+            sub_authority = ''
+            i = 0
+            while i < sub_authority_count:
+                sub_authority += '-' + str(
+                    int.from_bytes(raw_value[8 + (i * 4): 12 + (i * 4)], byteorder='little'))  # little endian
+                i += 1
+        else:  # Python 2
+            revision = int(ord(raw_value[0]))
+            sub_authority_count = int(ord(raw_value[1]))
+            identifier_authority = int(hexlify(raw_value[2:8]), 16)
+            if identifier_authority >= 4294967296:  # 2 ^ 32
+                identifier_authority = hex(identifier_authority)
+
+            sub_authority = ''
+            i = 0
+            while i < sub_authority_count:
+                sub_authority += '-' + str(int(hexlify(raw_value[11 + (i * 4): 7 + (i * 4): -1]), 16))  # little endian
+                i += 1
+        return 'S-' + str(revision) + '-' + str(identifier_authority) + sub_authority
+    except Exception:  # any exception should be investigated; in any case the formatter returns the raw_value
+        pass
+
+    return raw_value
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/standard.py b/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/standard.py
index 77f7b2e..51187f1 100644
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/standard.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/standard.py
@@ -1,232 +1,238 @@
-"""
-"""
-
-# Created on 2014.10.28
-#
-# Author: Giovanni Cannata
-#
-# Copyright 2014 - 2018 Giovanni Cannata
-#
-# This file is part of ldap3.
-#
-# ldap3 is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published
-# by the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ldap3 is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with ldap3 in the COPYING and COPYING.LESSER files.
-# If not, see <http://www.gnu.org/licenses/>.
-
-from ...
import SEQUENCE_TYPES -from .formatters import format_ad_timestamp, format_binary, format_boolean,\ - format_integer, format_sid, format_time, format_unicode, format_uuid, format_uuid_le, format_time_with_0_year -from .validators import validate_integer, validate_time, always_valid,\ - validate_generic_single_value, validate_boolean, validate_ad_timestamp, validate_sid,\ - validate_uuid_le, validate_uuid, validate_zero_and_minus_one_and_positive_int, validate_guid, validate_time_with_0_year - -# for each syntax can be specified a format function and a input validation function - -standard_formatter = { - '1.2.840.113556.1.4.903': (format_binary, None), # Object (DN-binary) - Microsoft - '1.2.840.113556.1.4.904': (format_unicode, None), # Object (DN-string) - Microsoft - '1.2.840.113556.1.4.905': (format_unicode, None), # String (Teletex) - Microsoft - '1.2.840.113556.1.4.906': (format_integer, validate_integer), # Large integer - Microsoft - '1.2.840.113556.1.4.907': (format_binary, None), # String (NT-sec-desc) - Microsoft - '1.2.840.113556.1.4.1221': (format_binary, None), # Object (OR-name) - Microsoft - '1.2.840.113556.1.4.1362': (format_unicode, None), # String (Case) - Microsoft - '1.3.6.1.4.1.1466.115.121.1.1': (format_binary, None), # ACI item [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.2': (format_binary, None), # Access point [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.3': (format_unicode, None), # Attribute type description - '1.3.6.1.4.1.1466.115.121.1.4': (format_binary, None), # Audio [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.5': (format_binary, None), # Binary [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.6': (format_unicode, None), # Bit String - '1.3.6.1.4.1.1466.115.121.1.7': (format_boolean, validate_boolean), # Boolean - '1.3.6.1.4.1.1466.115.121.1.8': (format_binary, None), # Certificate [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.9': (format_binary, None), # Certificate List [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.10': (format_binary, None), # Certificate Pair [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.11': (format_unicode, None), # Country String - '1.3.6.1.4.1.1466.115.121.1.12': (format_unicode, None), # Distinguished name (DN) - '1.3.6.1.4.1.1466.115.121.1.13': (format_binary, None), # Data Quality Syntax [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.14': (format_unicode, None), # Delivery method - '1.3.6.1.4.1.1466.115.121.1.15': (format_unicode, None), # Directory string - '1.3.6.1.4.1.1466.115.121.1.16': (format_unicode, None), # DIT Content Rule Description - '1.3.6.1.4.1.1466.115.121.1.17': (format_unicode, None), # DIT Structure Rule Description - '1.3.6.1.4.1.1466.115.121.1.18': (format_binary, None), # DL Submit Permission [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.19': (format_binary, None), # DSA Quality Syntax [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.20': (format_binary, None), # DSE Type [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.21': (format_binary, None), # Enhanced Guide - '1.3.6.1.4.1.1466.115.121.1.22': (format_unicode, None), # Facsimile Telephone Number - '1.3.6.1.4.1.1466.115.121.1.23': (format_binary, None), # Fax - '1.3.6.1.4.1.1466.115.121.1.24': (format_time, validate_time), # Generalized time - '1.3.6.1.4.1.1466.115.121.1.25': (format_binary, None), # Guide [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.26': (format_unicode, None), # IA5 string - '1.3.6.1.4.1.1466.115.121.1.27': (format_integer, validate_integer), # Integer - '1.3.6.1.4.1.1466.115.121.1.28': (format_binary, None), # JPEG - '1.3.6.1.4.1.1466.115.121.1.29': (format_binary, None), # Master and Shadow 
Access Points [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.30': (format_unicode, None), # Matching rule description - '1.3.6.1.4.1.1466.115.121.1.31': (format_unicode, None), # Matching rule use description - '1.3.6.1.4.1.1466.115.121.1.32': (format_unicode, None), # Mail Preference [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.33': (format_unicode, None), # MHS OR Address [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.34': (format_unicode, None), # Name and optional UID - '1.3.6.1.4.1.1466.115.121.1.35': (format_unicode, None), # Name form description - '1.3.6.1.4.1.1466.115.121.1.36': (format_unicode, None), # Numeric string - '1.3.6.1.4.1.1466.115.121.1.37': (format_unicode, None), # Object class description - '1.3.6.1.4.1.1466.115.121.1.38': (format_unicode, None), # OID - '1.3.6.1.4.1.1466.115.121.1.39': (format_unicode, None), # Other mailbox - '1.3.6.1.4.1.1466.115.121.1.40': (format_binary, None), # Octet string - '1.3.6.1.4.1.1466.115.121.1.41': (format_unicode, None), # Postal address - '1.3.6.1.4.1.1466.115.121.1.42': (format_binary, None), # Protocol Information [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.43': (format_binary, None), # Presentation Address [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.44': (format_unicode, None), # Printable string - '1.3.6.1.4.1.1466.115.121.1.45': (format_binary, None), # Subtree specification [OBSOLETE - '1.3.6.1.4.1.1466.115.121.1.46': (format_binary, None), # Supplier Information [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.47': (format_binary, None), # Supplier Or Consumer [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.48': (format_binary, None), # Supplier And Consumer [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.49': (format_binary, None), # Supported Algorithm [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.50': (format_unicode, None), # Telephone number - '1.3.6.1.4.1.1466.115.121.1.51': (format_unicode, None), # Teletex terminal identifier - '1.3.6.1.4.1.1466.115.121.1.52': (format_unicode, None), # Teletex number - '1.3.6.1.4.1.1466.115.121.1.53': (format_time, validate_time), # Utc time (deprecated) - '1.3.6.1.4.1.1466.115.121.1.54': (format_unicode, None), # LDAP syntax description - '1.3.6.1.4.1.1466.115.121.1.55': (format_binary, None), # Modify rights [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.56': (format_binary, None), # LDAP Schema Definition [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.57': (format_unicode, None), # LDAP Schema Description [OBSOLETE] - '1.3.6.1.4.1.1466.115.121.1.58': (format_unicode, None), # Substring assertion - '1.3.6.1.1.16.1': (format_uuid, validate_uuid), # UUID - '1.3.6.1.1.16.4': (format_uuid, validate_uuid), # entryUUID (RFC 4530) - '2.16.840.1.113719.1.1.4.1.501': (format_uuid, validate_guid), # GUID (Novell) - '2.16.840.1.113719.1.1.5.1.0': (format_binary, None), # Unknown (Novell) - '2.16.840.1.113719.1.1.5.1.6': (format_unicode, None), # Case Ignore List (Novell) - '2.16.840.1.113719.1.1.5.1.12': (format_binary, None), # Tagged Data (Novell) - '2.16.840.1.113719.1.1.5.1.13': (format_binary, None), # Octet List (Novell) - '2.16.840.1.113719.1.1.5.1.14': (format_unicode, None), # Tagged String (Novell) - '2.16.840.1.113719.1.1.5.1.15': (format_unicode, None), # Tagged Name And String (Novell) - '2.16.840.1.113719.1.1.5.1.16': (format_binary, None), # NDS Replica Pointer (Novell) - '2.16.840.1.113719.1.1.5.1.17': (format_unicode, None), # NDS ACL (Novell) - '2.16.840.1.113719.1.1.5.1.19': (format_time, validate_time), # NDS Timestamp (Novell) - '2.16.840.1.113719.1.1.5.1.22': (format_integer, validate_integer), # Counter (Novell) - 
'2.16.840.1.113719.1.1.5.1.23': (format_unicode, None), # Tagged Name (Novell) - '2.16.840.1.113719.1.1.5.1.25': (format_unicode, None), # Typed Name (Novell) - 'supportedldapversion': (format_integer, None), # supportedLdapVersion (Microsoft) - 'octetstring': (format_binary, validate_uuid_le), # octect string (Microsoft) - '1.2.840.113556.1.4.2': (format_uuid_le, validate_uuid_le), # object guid (Microsoft) - '1.2.840.113556.1.4.13': (format_ad_timestamp, validate_ad_timestamp), # builtinCreationTime (Microsoft) - '1.2.840.113556.1.4.26': (format_ad_timestamp, validate_ad_timestamp), # creationTime (Microsoft) - '1.2.840.113556.1.4.49': (format_ad_timestamp, validate_ad_timestamp), # badPasswordTime (Microsoft) - '1.2.840.113556.1.4.51': (format_ad_timestamp, validate_ad_timestamp), # lastLogoff (Microsoft) - '1.2.840.113556.1.4.52': (format_ad_timestamp, validate_ad_timestamp), # lastLogon (Microsoft) - '1.2.840.113556.1.4.96': (format_ad_timestamp, validate_zero_and_minus_one_and_positive_int), # pwdLastSet (Microsoft, can be set to -1 only) - '1.2.840.113556.1.4.146': (format_sid, validate_sid), # objectSid (Microsoft) - '1.2.840.113556.1.4.159': (format_ad_timestamp, validate_ad_timestamp), # accountExpires (Microsoft) - '1.2.840.113556.1.4.662': (format_ad_timestamp, validate_ad_timestamp), # lockoutTime (Microsoft) - '1.2.840.113556.1.4.1696': (format_ad_timestamp, validate_ad_timestamp), # lastLogonTimestamp (Microsoft) - '1.3.6.1.4.1.42.2.27.8.1.17': (format_time_with_0_year, validate_time_with_0_year) # pwdAccountLockedTime (Novell) -} - - -def find_attribute_helpers(attr_type, name, custom_formatter): - """ - Tries to format following the OIDs info and format_helper specification. - Search for attribute oid, then attribute name (can be multiple), then attribute syntax - Precedence is: - 1. attribute name - 2. attribute oid(from schema) - 3. attribute names (from oid_info) - 4. attribute syntax (from schema) - Custom formatters can be defined in Server object and have precedence over the standard_formatters - If no formatter is found the raw_value is returned as bytes. 
- Attributes defined as SINGLE_VALUE in schema are returned as a single object, otherwise are returned as a list of object - Formatter functions can return any kind of object - return a tuple (formatter, validator) - """ - formatter = None - if custom_formatter and isinstance(custom_formatter, dict): # if custom formatters are defined they have precedence over the standard formatters - if name in custom_formatter: # search for attribute name, as returned by the search operation - formatter = custom_formatter[name] - - if not formatter and attr_type and attr_type.oid in custom_formatter: # search for attribute oid as returned by schema - formatter = custom_formatter[attr_type.oid] - if not formatter and attr_type and attr_type.oid_info: - if isinstance(attr_type.oid_info[2], SEQUENCE_TYPES): # search for multiple names defined in oid_info - for attr_name in attr_type.oid_info[2]: - if attr_name in custom_formatter: - formatter = custom_formatter[attr_name] - break - elif attr_type.oid_info[2] in custom_formatter: # search for name defined in oid_info - formatter = custom_formatter[attr_type.oid_info[2]] - - if not formatter and attr_type and attr_type.syntax in custom_formatter: # search for syntax defined in schema - formatter = custom_formatter[attr_type.syntax] - - if not formatter and name in standard_formatter: # search for attribute name, as returned by the search operation - formatter = standard_formatter[name] - - if not formatter and attr_type and attr_type.oid in standard_formatter: # search for attribute oid as returned by schema - formatter = standard_formatter[attr_type.oid] - - if not formatter and attr_type and attr_type.oid_info: - if isinstance(attr_type.oid_info[2], SEQUENCE_TYPES): # search for multiple names defined in oid_info - for attr_name in attr_type.oid_info[2]: - if attr_name in standard_formatter: - formatter = standard_formatter[attr_name] - break - elif attr_type.oid_info[2] in standard_formatter: # search for name defined in oid_info - formatter = standard_formatter[attr_type.oid_info[2]] - if not formatter and attr_type and attr_type.syntax in standard_formatter: # search for syntax defined in schema - formatter = standard_formatter[attr_type.syntax] - - if formatter is None: - return None, None - - return formatter - - -def format_attribute_values(schema, name, values, custom_formatter): - if not values: # RFCs states that attributes must always have values, but a flaky server returns empty values too - return [] - - if not isinstance(values, SEQUENCE_TYPES): - values = [values] - - if schema and schema.attribute_types and name in schema.attribute_types: - attr_type = schema.attribute_types[name] - else: - attr_type = None - - attribute_helpers = find_attribute_helpers(attr_type, name, custom_formatter) - if not isinstance(attribute_helpers, tuple): # custom formatter - formatter = attribute_helpers - else: - formatter = format_unicode if not attribute_helpers[0] else attribute_helpers[0] - - formatted_values = [formatter(raw_value) for raw_value in values] # executes formatter - if formatted_values: - return formatted_values[0] if (attr_type and attr_type.single_value) else formatted_values - else: # RFCs states that attributes must always have values, but AD return empty values in DirSync - return [] - - -def find_attribute_validator(schema, name, custom_validator): - if schema and schema.attribute_types and name in schema.attribute_types: - attr_type = schema.attribute_types[name] - else: - attr_type = None - - attribute_helpers = 
find_attribute_helpers(attr_type, name, custom_validator) - if not isinstance(attribute_helpers, tuple): # custom validator - validator = attribute_helpers - else: - if not attribute_helpers[1]: - if attr_type and attr_type.single_value: - validator = validate_generic_single_value # validate only single value - else: - validator = always_valid # unknown syntax, accepts single and multi value - else: - validator = attribute_helpers[1] - return validator +""" +""" + +# Created on 2014.10.28 +# +# Author: Giovanni Cannata +# +# Copyright 2014 - 2019 Giovanni Cannata +# +# This file is part of ldap3. +# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with ldap3 in the COPYING and COPYING.LESSER files. +# If not, see . + +from ... import SEQUENCE_TYPES +from .formatters import format_ad_timestamp, format_binary, format_boolean,\ + format_integer, format_sid, format_time, format_unicode, format_uuid, format_uuid_le, format_time_with_0_year,\ + format_ad_timedelta +from .validators import validate_integer, validate_time, always_valid,\ + validate_generic_single_value, validate_boolean, validate_ad_timestamp, validate_sid,\ + validate_uuid_le, validate_uuid, validate_zero_and_minus_one_and_positive_int, validate_guid, validate_time_with_0_year,\ + validate_ad_timedelta + +# for each syntax can be specified a format function and a input validation function + +standard_formatter = { + '1.2.840.113556.1.4.903': (format_binary, None), # Object (DN-binary) - Microsoft + '1.2.840.113556.1.4.904': (format_unicode, None), # Object (DN-string) - Microsoft + '1.2.840.113556.1.4.905': (format_unicode, None), # String (Teletex) - Microsoft + '1.2.840.113556.1.4.906': (format_integer, validate_integer), # Large integer - Microsoft + '1.2.840.113556.1.4.907': (format_binary, None), # String (NT-sec-desc) - Microsoft + '1.2.840.113556.1.4.1221': (format_binary, None), # Object (OR-name) - Microsoft + '1.2.840.113556.1.4.1362': (format_unicode, None), # String (Case) - Microsoft + '1.3.6.1.4.1.1466.115.121.1.1': (format_binary, None), # ACI item [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.2': (format_binary, None), # Access point [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.3': (format_unicode, None), # Attribute type description + '1.3.6.1.4.1.1466.115.121.1.4': (format_binary, None), # Audio [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.5': (format_binary, None), # Binary [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.6': (format_unicode, None), # Bit String + '1.3.6.1.4.1.1466.115.121.1.7': (format_boolean, validate_boolean), # Boolean + '1.3.6.1.4.1.1466.115.121.1.8': (format_binary, None), # Certificate [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.9': (format_binary, None), # Certificate List [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.10': (format_binary, None), # Certificate Pair [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.11': (format_unicode, None), # Country String + '1.3.6.1.4.1.1466.115.121.1.12': (format_unicode, None), # Distinguished name (DN) + '1.3.6.1.4.1.1466.115.121.1.13': (format_binary, 
None), # Data Quality Syntax [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.14': (format_unicode, None), # Delivery method + '1.3.6.1.4.1.1466.115.121.1.15': (format_unicode, None), # Directory string + '1.3.6.1.4.1.1466.115.121.1.16': (format_unicode, None), # DIT Content Rule Description + '1.3.6.1.4.1.1466.115.121.1.17': (format_unicode, None), # DIT Structure Rule Description + '1.3.6.1.4.1.1466.115.121.1.18': (format_binary, None), # DL Submit Permission [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.19': (format_binary, None), # DSA Quality Syntax [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.20': (format_binary, None), # DSE Type [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.21': (format_binary, None), # Enhanced Guide + '1.3.6.1.4.1.1466.115.121.1.22': (format_unicode, None), # Facsimile Telephone Number + '1.3.6.1.4.1.1466.115.121.1.23': (format_binary, None), # Fax + '1.3.6.1.4.1.1466.115.121.1.24': (format_time, validate_time), # Generalized time + '1.3.6.1.4.1.1466.115.121.1.25': (format_binary, None), # Guide [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.26': (format_unicode, None), # IA5 string + '1.3.6.1.4.1.1466.115.121.1.27': (format_integer, validate_integer), # Integer + '1.3.6.1.4.1.1466.115.121.1.28': (format_binary, None), # JPEG + '1.3.6.1.4.1.1466.115.121.1.29': (format_binary, None), # Master and Shadow Access Points [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.30': (format_unicode, None), # Matching rule description + '1.3.6.1.4.1.1466.115.121.1.31': (format_unicode, None), # Matching rule use description + '1.3.6.1.4.1.1466.115.121.1.32': (format_unicode, None), # Mail Preference [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.33': (format_unicode, None), # MHS OR Address [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.34': (format_unicode, None), # Name and optional UID + '1.3.6.1.4.1.1466.115.121.1.35': (format_unicode, None), # Name form description + '1.3.6.1.4.1.1466.115.121.1.36': (format_unicode, None), # Numeric string + '1.3.6.1.4.1.1466.115.121.1.37': (format_unicode, None), # Object class description + '1.3.6.1.4.1.1466.115.121.1.38': (format_unicode, None), # OID + '1.3.6.1.4.1.1466.115.121.1.39': (format_unicode, None), # Other mailbox + '1.3.6.1.4.1.1466.115.121.1.40': (format_binary, None), # Octet string + '1.3.6.1.4.1.1466.115.121.1.41': (format_unicode, None), # Postal address + '1.3.6.1.4.1.1466.115.121.1.42': (format_binary, None), # Protocol Information [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.43': (format_binary, None), # Presentation Address [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.44': (format_unicode, None), # Printable string + '1.3.6.1.4.1.1466.115.121.1.45': (format_binary, None), # Subtree specification [OBSOLETE + '1.3.6.1.4.1.1466.115.121.1.46': (format_binary, None), # Supplier Information [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.47': (format_binary, None), # Supplier Or Consumer [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.48': (format_binary, None), # Supplier And Consumer [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.49': (format_binary, None), # Supported Algorithm [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.50': (format_unicode, None), # Telephone number + '1.3.6.1.4.1.1466.115.121.1.51': (format_unicode, None), # Teletex terminal identifier + '1.3.6.1.4.1.1466.115.121.1.52': (format_unicode, None), # Teletex number + '1.3.6.1.4.1.1466.115.121.1.53': (format_time, validate_time), # Utc time (deprecated) + '1.3.6.1.4.1.1466.115.121.1.54': (format_unicode, None), # LDAP syntax description + '1.3.6.1.4.1.1466.115.121.1.55': (format_binary, None), # Modify rights [OBSOLETE] + 
'1.3.6.1.4.1.1466.115.121.1.56': (format_binary, None), # LDAP Schema Definition [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.57': (format_unicode, None), # LDAP Schema Description [OBSOLETE] + '1.3.6.1.4.1.1466.115.121.1.58': (format_unicode, None), # Substring assertion + '1.3.6.1.1.16.1': (format_uuid, validate_uuid), # UUID + '1.3.6.1.1.16.4': (format_uuid, validate_uuid), # entryUUID (RFC 4530) + '2.16.840.1.113719.1.1.4.1.501': (format_uuid, validate_guid), # GUID (Novell) + '2.16.840.1.113719.1.1.5.1.0': (format_binary, None), # Unknown (Novell) + '2.16.840.1.113719.1.1.5.1.6': (format_unicode, None), # Case Ignore List (Novell) + '2.16.840.1.113719.1.1.5.1.12': (format_binary, None), # Tagged Data (Novell) + '2.16.840.1.113719.1.1.5.1.13': (format_binary, None), # Octet List (Novell) + '2.16.840.1.113719.1.1.5.1.14': (format_unicode, None), # Tagged String (Novell) + '2.16.840.1.113719.1.1.5.1.15': (format_unicode, None), # Tagged Name And String (Novell) + '2.16.840.1.113719.1.1.5.1.16': (format_binary, None), # NDS Replica Pointer (Novell) + '2.16.840.1.113719.1.1.5.1.17': (format_unicode, None), # NDS ACL (Novell) + '2.16.840.1.113719.1.1.5.1.19': (format_time, validate_time), # NDS Timestamp (Novell) + '2.16.840.1.113719.1.1.5.1.22': (format_integer, validate_integer), # Counter (Novell) + '2.16.840.1.113719.1.1.5.1.23': (format_unicode, None), # Tagged Name (Novell) + '2.16.840.1.113719.1.1.5.1.25': (format_unicode, None), # Typed Name (Novell) + 'supportedldapversion': (format_integer, None), # supportedLdapVersion (Microsoft) + 'octetstring': (format_binary, validate_uuid_le), # octect string (Microsoft) + '1.2.840.113556.1.4.2': (format_uuid_le, validate_uuid_le), # object guid (Microsoft) + '1.2.840.113556.1.4.13': (format_ad_timestamp, validate_ad_timestamp), # builtinCreationTime (Microsoft) + '1.2.840.113556.1.4.26': (format_ad_timestamp, validate_ad_timestamp), # creationTime (Microsoft) + '1.2.840.113556.1.4.49': (format_ad_timestamp, validate_ad_timestamp), # badPasswordTime (Microsoft) + '1.2.840.113556.1.4.51': (format_ad_timestamp, validate_ad_timestamp), # lastLogoff (Microsoft) + '1.2.840.113556.1.4.52': (format_ad_timestamp, validate_ad_timestamp), # lastLogon (Microsoft) + '1.2.840.113556.1.4.60': (format_ad_timedelta, validate_ad_timedelta), # lockoutDuration (Microsoft) + '1.2.840.113556.1.4.61': (format_ad_timedelta, validate_ad_timedelta), # lockOutObservationWindow (Microsoft) + '1.2.840.113556.1.4.74': (format_ad_timedelta, validate_ad_timedelta), # maxPwdAge (Microsoft) + '1.2.840.113556.1.4.78': (format_ad_timedelta, validate_ad_timedelta), # minPwdAge (Microsoft) + '1.2.840.113556.1.4.96': (format_ad_timestamp, validate_zero_and_minus_one_and_positive_int), # pwdLastSet (Microsoft, can be set to -1 only) + '1.2.840.113556.1.4.146': (format_sid, validate_sid), # objectSid (Microsoft) + '1.2.840.113556.1.4.159': (format_ad_timestamp, validate_ad_timestamp), # accountExpires (Microsoft) + '1.2.840.113556.1.4.662': (format_ad_timestamp, validate_ad_timestamp), # lockoutTime (Microsoft) + '1.2.840.113556.1.4.1696': (format_ad_timestamp, validate_ad_timestamp), # lastLogonTimestamp (Microsoft) + '1.3.6.1.4.1.42.2.27.8.1.17': (format_time_with_0_year, validate_time_with_0_year) # pwdAccountLockedTime (Novell) +} + + +def find_attribute_helpers(attr_type, name, custom_formatter): + """ + Tries to format following the OIDs info and format_helper specification. 
+ Search for attribute oid, then attribute name (can be multiple), then attribute syntax + Precedence is: + 1. attribute name + 2. attribute oid(from schema) + 3. attribute names (from oid_info) + 4. attribute syntax (from schema) + Custom formatters can be defined in Server object and have precedence over the standard_formatters + If no formatter is found the raw_value is returned as bytes. + Attributes defined as SINGLE_VALUE in schema are returned as a single object, otherwise are returned as a list of object + Formatter functions can return any kind of object + return a tuple (formatter, validator) + """ + formatter = None + if custom_formatter and isinstance(custom_formatter, dict): # if custom formatters are defined they have precedence over the standard formatters + if name in custom_formatter: # search for attribute name, as returned by the search operation + formatter = custom_formatter[name] + + if not formatter and attr_type and attr_type.oid in custom_formatter: # search for attribute oid as returned by schema + formatter = custom_formatter[attr_type.oid] + if not formatter and attr_type and attr_type.oid_info: + if isinstance(attr_type.oid_info[2], SEQUENCE_TYPES): # search for multiple names defined in oid_info + for attr_name in attr_type.oid_info[2]: + if attr_name in custom_formatter: + formatter = custom_formatter[attr_name] + break + elif attr_type.oid_info[2] in custom_formatter: # search for name defined in oid_info + formatter = custom_formatter[attr_type.oid_info[2]] + + if not formatter and attr_type and attr_type.syntax in custom_formatter: # search for syntax defined in schema + formatter = custom_formatter[attr_type.syntax] + + if not formatter and name in standard_formatter: # search for attribute name, as returned by the search operation + formatter = standard_formatter[name] + + if not formatter and attr_type and attr_type.oid in standard_formatter: # search for attribute oid as returned by schema + formatter = standard_formatter[attr_type.oid] + + if not formatter and attr_type and attr_type.oid_info: + if isinstance(attr_type.oid_info[2], SEQUENCE_TYPES): # search for multiple names defined in oid_info + for attr_name in attr_type.oid_info[2]: + if attr_name in standard_formatter: + formatter = standard_formatter[attr_name] + break + elif attr_type.oid_info[2] in standard_formatter: # search for name defined in oid_info + formatter = standard_formatter[attr_type.oid_info[2]] + if not formatter and attr_type and attr_type.syntax in standard_formatter: # search for syntax defined in schema + formatter = standard_formatter[attr_type.syntax] + + if formatter is None: + return None, None + + return formatter + + +def format_attribute_values(schema, name, values, custom_formatter): + if not values: # RFCs states that attributes must always have values, but a flaky server returns empty values too + return [] + + if not isinstance(values, SEQUENCE_TYPES): + values = [values] + + if schema and schema.attribute_types and name in schema.attribute_types: + attr_type = schema.attribute_types[name] + else: + attr_type = None + + attribute_helpers = find_attribute_helpers(attr_type, name, custom_formatter) + if not isinstance(attribute_helpers, tuple): # custom formatter + formatter = attribute_helpers + else: + formatter = format_unicode if not attribute_helpers[0] else attribute_helpers[0] + + formatted_values = [formatter(raw_value) for raw_value in values] # executes formatter + if formatted_values: + return formatted_values[0] if (attr_type and attr_type.single_value) 
else formatted_values
+    else:  # RFCs state that attributes must always have values, but AD returns empty values in DirSync
+        return []
+
+
+def find_attribute_validator(schema, name, custom_validator):
+    if schema and schema.attribute_types and name in schema.attribute_types:
+        attr_type = schema.attribute_types[name]
+    else:
+        attr_type = None
+
+    attribute_helpers = find_attribute_helpers(attr_type, name, custom_validator)
+    if not isinstance(attribute_helpers, tuple):  # custom validator
+        validator = attribute_helpers
+    else:
+        if not attribute_helpers[1]:
+            if attr_type and attr_type.single_value:
+                validator = validate_generic_single_value  # validate only single value
+            else:
+                validator = always_valid  # unknown syntax, accepts single and multi value
+        else:
+            validator = attribute_helpers[1]
+    return validator
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/validators.py b/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/validators.py
index fff2198..d5511b7 100644
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/validators.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/formatters/validators.py
@@ -1,461 +1,489 @@
-"""
-"""
-
-# Created on 2016.08.09
-#
-# Author: Giovanni Cannata
-#
-# Copyright 2016 - 2018 Giovanni Cannata
-#
-# This file is part of ldap3.
-#
-# ldap3 is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published
-# by the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ldap3 is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with ldap3 in the COPYING and COPYING.LESSER files.
-# If not, see <http://www.gnu.org/licenses/>.
-from binascii import a2b_hex
-from datetime import datetime
-from calendar import timegm
-from uuid import UUID
-from struct import pack
-
-
-from ...
import SEQUENCE_TYPES, STRING_TYPES, NUMERIC_TYPES, INTEGER_TYPES -from .formatters import format_time, format_ad_timestamp -from ...utils.conv import to_raw, to_unicode, ldap_escape_to_bytes - -# Validators return True if value is valid, False if value is not valid, -# or a value different from True and False that is a valid value to substitute to the input value - - -def check_type(input_value, value_type): - if isinstance(input_value, value_type): - return True - - if isinstance(input_value, SEQUENCE_TYPES): - for value in input_value: - if not isinstance(value, value_type): - return False - return True - - return False - - -# noinspection PyUnusedLocal -def always_valid(input_value): - return True - - -def validate_generic_single_value(input_value): - if not isinstance(input_value, SEQUENCE_TYPES): - return True - - try: # object couldn't have a __len__ method - if len(input_value) == 1: - return True - except Exception: - pass - - return False - - -def validate_zero_and_minus_one_and_positive_int(input_value): - """Accept -1 only (used by pwdLastSet in AD) - """ - if not isinstance(input_value, SEQUENCE_TYPES): - if isinstance(input_value, NUMERIC_TYPES) or isinstance(input_value, STRING_TYPES): - return True if int(input_value) >= -1 else False - return False - else: - if len(input_value) == 1 and (isinstance(input_value[0], NUMERIC_TYPES) or isinstance(input_value[0], STRING_TYPES)): - return True if int(input_value[0]) >= -1 else False - - return False - - -def validate_integer(input_value): - if check_type(input_value, (float, bool)): - return False - if check_type(input_value, INTEGER_TYPES): - return True - - if not isinstance(input_value, SEQUENCE_TYPES): - sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] # builds a list of valid int values - from decimal import Decimal, InvalidOperation - for element in input_value: - try: # try to convert any type to int, an invalid conversion raise TypeError or ValueError, doublecheck with Decimal type, if both are valid and equal then then int() value is used - value = to_unicode(element) if isinstance(element, bytes) else element - decimal_value = Decimal(value) - int_value = int(value) - if decimal_value == int_value: - valid_values.append(int_value) - else: - return False - except (ValueError, TypeError, InvalidOperation): - return False - - if sequence: - return valid_values - else: - return valid_values[0] - - -def validate_bytes(input_value): - return check_type(input_value, bytes) - - -def validate_boolean(input_value): - # it could be a real bool or the string TRUE or FALSE, # only a single valued is allowed - if validate_generic_single_value(input_value): # valid only if a single value or a sequence with a single element - if isinstance(input_value, SEQUENCE_TYPES): - input_value = input_value[0] - if isinstance(input_value, bool): - if input_value: - return 'TRUE' - else: - return 'FALSE' - if str is not bytes and isinstance(input_value, bytes): # python3 try to converts bytes to string - input_value = to_unicode(input_value) - if isinstance(input_value, STRING_TYPES): - if input_value.lower() == 'true': - return 'TRUE' - elif input_value.lower() == 'false': - return 'FALSE' - return False - - -def validate_time_with_0_year(input_value): - # validates generalized time but accept a 0000 year too - # if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC - if not isinstance(input_value, SEQUENCE_TYPES): - 
sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] - changed = False - for element in input_value: - if str is not bytes and isinstance(element, bytes): # python3 try to converts bytes to string - element = to_unicode(element) - if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time - if element.startswith('0000') or isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string - valid_values.append(element) - else: - return False - elif isinstance(element, datetime): - changed = True - if element.tzinfo: # a datetime with a timezone - valid_values.append(element.strftime('%Y%m%d%H%M%S%z')) - else: # datetime without timezone, assumed local and adjusted to UTC - offset = datetime.now() - datetime.utcnow() - valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ')) - else: - return False - - if changed: - if sequence: - return valid_values - else: - return valid_values[0] - else: - return True - - -def validate_time(input_value): - # if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC - if not isinstance(input_value, SEQUENCE_TYPES): - sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] - changed = False - for element in input_value: - if str is not bytes and isinstance(element, bytes): # python3 try to converts bytes to string - element = to_unicode(element) - if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time - if isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string - valid_values.append(element) - else: - return False - elif isinstance(element, datetime): - changed = True - if element.tzinfo: # a datetime with a timezone - valid_values.append(element.strftime('%Y%m%d%H%M%S%z')) - else: # datetime without timezone, assumed local and adjusted to UTC - offset = datetime.now() - datetime.utcnow() - valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ')) - else: - return False - - if changed: - if sequence: - return valid_values - else: - return valid_values[0] - else: - return True - - -def validate_ad_timestamp(input_value): - """ - Active Directory stores date/time values as the number of 100-nanosecond intervals - that have elapsed since the 0 hour on January 1, 1601 till the date/time that is being stored. - The time is always stored in Greenwich Mean Time (GMT) in the Active Directory. 
- """ - if not isinstance(input_value, SEQUENCE_TYPES): - sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] - changed = False - for element in input_value: - if str is not bytes and isinstance(element, bytes): # python3 try to converts bytes to string - element = to_unicode(element) - if isinstance(element, NUMERIC_TYPES): - if 0 <= element <= 9223372036854775807: # min and max for the AD timestamp starting from 12:00 AM January 1, 1601 - valid_values.append(element) - else: - return False - elif isinstance(element, STRING_TYPES): # tries to check if it is already be a AD timestamp - if isinstance(format_ad_timestamp(to_raw(element)), datetime): # valid Generalized Time string - valid_values.append(element) - else: - return False - elif isinstance(element, datetime): - changed = True - if element.tzinfo: # a datetime with a timezone - valid_values.append(to_raw((timegm(element.utctimetuple()) + 11644473600) * 10000000, encoding='ascii')) - else: # datetime without timezone, assumed local and adjusted to UTC - offset = datetime.now() - datetime.utcnow() - valid_values.append(to_raw((timegm((element - offset).timetuple()) + 11644473600) * 10000000, encoding='ascii')) - else: - return False - - if changed: - if sequence: - return valid_values - else: - return valid_values[0] - else: - return True - - -def validate_guid(input_value): - """ - object guid in uuid format (Novell eDirectory) - """ - if not isinstance(input_value, SEQUENCE_TYPES): - sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] - changed = False - for element in input_value: - if isinstance(element, STRING_TYPES): - try: - valid_values.append(UUID(element).bytes) - changed = True - except ValueError: # try if the value is an escaped byte sequence - try: - valid_values.append(UUID(element.replace('\\', '')).bytes) - changed = True - continue - except ValueError: - if str is not bytes: # python 3 - pass - else: - valid_values.append(element) - continue - return False - elif isinstance(element, (bytes, bytearray)): # assumes bytes are valid - valid_values.append(element) - else: - return False - - if changed: - if sequence: - return valid_values - else: - return valid_values[0] - else: - return True - -def validate_uuid(input_value): - """ - object entryUUID in uuid format - """ - if not isinstance(input_value, SEQUENCE_TYPES): - sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] - changed = False - for element in input_value: - if isinstance(element, STRING_TYPES): - try: - valid_values.append(str(UUID(element))) - changed = True - except ValueError: # try if the value is an escaped byte sequence - try: - valid_values.append(str(UUID(element.replace('\\', '')))) - changed = True - continue - except ValueError: - if str is not bytes: # python 3 - pass - else: - valid_values.append(element) - continue - return False - elif isinstance(element, (bytes, bytearray)): # assumes bytes are valid - valid_values.append(element) - else: - return False - - if changed: - if sequence: - return valid_values - else: - return valid_values[0] - else: - return True - - -def validate_uuid_le(input_value): - """ - Active Directory stores objectGUID in uuid_le format, follows RFC4122 and MS-DTYP: - "{07039e68-4373-264d-a0a7-07039e684373}": string representation big endian, converted to little 
endian (with or without brace curles) - "689e030773434d26a7a007039e684373": packet representation, already in little endian - "\68\9e\03\07\73\43\4d\26\a7\a0\07\03\9e\68\43\73": bytes representation, already in little endian - byte sequence: already in little endian - - """ - if not isinstance(input_value, SEQUENCE_TYPES): - sequence = False - input_value = [input_value] - else: - sequence = True # indicates if a sequence must be returned - - valid_values = [] - changed = False - for element in input_value: - if isinstance(element, STRING_TYPES): - if element[0] == '{' and element[-1] == '}': - valid_values.append(UUID(hex=element).bytes_le) # string representation, value in big endian, converts to little endian - changed = True - elif '-' in element: - valid_values.append(UUID(hex=element).bytes_le) # string representation, value in big endian, converts to little endian - changed = True - elif '\\' in element: - valid_values.append(UUID(bytes_le=ldap_escape_to_bytes(element)).bytes_le) # byte representation, value in little endian - changed = True - elif '-' not in element: # value in little endian - valid_values.append(UUID(bytes_le=a2b_hex(element)).bytes_le) # packet representation, value in little endian, converts to little endian - changed = True - elif isinstance(element, (bytes, bytearray)): # assumes bytes are valid uuid - valid_values.append(element) # value is untouched, must be in little endian - else: - return False - - if changed: - if sequence: - return valid_values - else: - return valid_values[0] - else: - return True - - -def validate_sid(input_value): - """ - SID= "S-1-" IdentifierAuthority 1*SubAuthority - IdentifierAuthority= IdentifierAuthorityDec / IdentifierAuthorityHex - ; If the identifier authority is < 2^32, the - ; identifier authority is represented as a decimal - ; number - ; If the identifier authority is >= 2^32, - ; the identifier authority is represented in - ; hexadecimal - IdentifierAuthorityDec = 1*10DIGIT - ; IdentifierAuthorityDec, top level authority of a - ; security identifier is represented as a decimal number - IdentifierAuthorityHex = "0x" 12HEXDIG - ; IdentifierAuthorityHex, the top-level authority of a - ; security identifier is represented as a hexadecimal number - SubAuthority= "-" 1*10DIGIT - ; Sub-Authority is always represented as a decimal number - ; No leading "0" characters are allowed when IdentifierAuthority - ; or SubAuthority is represented as a decimal number - ; All hexadecimal digits must be output in string format, - ; pre-pended by "0x" - - Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. - SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. - IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. - SubAuthority (variable): A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. - - If you have a SID like S-a-b-c-d-e-f-g-... 
-
-    Then the bytes are
-    a   (revision)
-    N   (number of dashes minus two)
-    bbbbbb  (six bytes of "b" treated as a 48-bit number in big-endian format)
-    cccc    (four bytes of "c" treated as a 32-bit number in little-endian format)
-    dddd    (four bytes of "d" treated as a 32-bit number in little-endian format)
-    eeee    (four bytes of "e" treated as a 32-bit number in little-endian format)
-    ffff    (four bytes of "f" treated as a 32-bit number in little-endian format)
-
-    """
-    if not isinstance(input_value, SEQUENCE_TYPES):
-        sequence = False
-        input_value = [input_value]
-    else:
-        sequence = True  # indicates if a sequence must be returned
-
-    valid_values = []
-    changed = False
-    for element in input_value:
-        if isinstance(element, STRING_TYPES):
-            if element.startswith('S-'):
-                parts = element.split('-')
-                sid_bytes = pack('<q', int(parts[1]))[0:1]  # revision
-                sid_bytes += pack('<q', len(parts[3:]))[0:1]  # sub-authorities count
-                if len(parts[2]) <= 10:
-                    sid_bytes += pack('>q', int(parts[2]))[2:]  # authority (in dec)
-                else:
-                    sid_bytes += pack('>q', int(parts[2], 16))[2:]  # authority (in hex)
-                for sub_auth in parts[3:]:
-                    sid_bytes += pack('<q', int(sub_auth))[0:4]  # little endian
-                valid_values.append(sid_bytes)
-                changed = True
-        elif isinstance(element, (bytes, bytearray)):  # assumes bytes are valid
-            valid_values.append(element)
-        else:
-            return False
-
-    if changed:
-        if sequence:
-            return valid_values
-        else:
-            return valid_values[0]
-    else:
-        return True
+"""
+"""
+
+# Created on 2016.08.09
+#
+# Author: Giovanni Cannata
+#
+# Copyright 2016 - 2019 Giovanni Cannata
+#
+# This file is part of ldap3.
+#
+# ldap3 is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ldap3 is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with ldap3 in the COPYING and COPYING.LESSER files.
+# If not, see <http://www.gnu.org/licenses/>.
+from binascii import a2b_hex
+from datetime import datetime
+from calendar import timegm
+from uuid import UUID
+from struct import pack
+
+
+from ... import SEQUENCE_TYPES, STRING_TYPES, NUMERIC_TYPES, INTEGER_TYPES
+from .formatters import format_time, format_ad_timestamp
+from ...utils.conv import to_raw, to_unicode, ldap_escape_to_bytes, escape_bytes
+
+# Validators return True if value is valid, False if value is not valid,
+# or a value different from True and False that is a valid value to substitute to the input value
+
+
+def check_type(input_value, value_type):
+    if isinstance(input_value, value_type):
+        return True
+
+    if isinstance(input_value, SEQUENCE_TYPES):
+        for value in input_value:
+            if not isinstance(value, value_type):
+                return False
+        return True
+
+    return False
+
+
+# noinspection PyUnusedLocal
+def always_valid(input_value):
+    return True
+
+
+def validate_generic_single_value(input_value):
+    if not isinstance(input_value, SEQUENCE_TYPES):
+        return True
+
+    try:  # object couldn't have a __len__ method
+        if len(input_value) == 1:
+            return True
+    except Exception:
+        pass
+
+    return False
+
+
+def validate_zero_and_minus_one_and_positive_int(input_value):
+    """Accept -1, 0 and any positive int (used by pwdLastSet in AD)
+    """
+    if not isinstance(input_value, SEQUENCE_TYPES):
+        if isinstance(input_value, NUMERIC_TYPES) or isinstance(input_value, STRING_TYPES):
+            return True if int(input_value) >= -1 else False
+        return False
+    else:
+        if len(input_value) == 1 and (isinstance(input_value[0], NUMERIC_TYPES) or isinstance(input_value[0], STRING_TYPES)):
+            return True if int(input_value[0]) >= -1 else False
+
+    return False
+
+
+def validate_integer(input_value):
+    if check_type(input_value, (float, bool)):
+        return False
+    if check_type(input_value, INTEGER_TYPES):
+        return True
+
+    if not isinstance(input_value, SEQUENCE_TYPES):
+        sequence = False
+        input_value = [input_value]
+    else:
+        sequence = True  # indicates if a sequence must be returned
+
+    valid_values = []  # builds a list of valid int values
+    from decimal import Decimal, InvalidOperation
+    for element in input_value:
+        try:  # try to convert any type to int; an invalid conversion raises TypeError or ValueError; double-check with the Decimal type, and if both are valid and equal then the int() value is used
+            value = to_unicode(element) if isinstance(element, bytes) else element
+            decimal_value = Decimal(value)
+            int_value = int(value)
+            if decimal_value == int_value:
+                valid_values.append(int_value)
+            else:
+                return False
+        except (ValueError,
TypeError, InvalidOperation): + return False + + if sequence: + return valid_values + else: + return valid_values[0] + + +def validate_bytes(input_value): + return check_type(input_value, bytes) + + +def validate_boolean(input_value): + # it could be a real bool or the string TRUE or FALSE, # only a single valued is allowed + if validate_generic_single_value(input_value): # valid only if a single value or a sequence with a single element + if isinstance(input_value, SEQUENCE_TYPES): + input_value = input_value[0] + if isinstance(input_value, bool): + if input_value: + return 'TRUE' + else: + return 'FALSE' + if str is not bytes and isinstance(input_value, bytes): # python3 try to converts bytes to string + input_value = to_unicode(input_value) + if isinstance(input_value, STRING_TYPES): + if input_value.lower() == 'true': + return 'TRUE' + elif input_value.lower() == 'false': + return 'FALSE' + return False + + +def validate_time_with_0_year(input_value): + # validates generalized time but accept a 0000 year too + # if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC + if not isinstance(input_value, SEQUENCE_TYPES): + sequence = False + input_value = [input_value] + else: + sequence = True # indicates if a sequence must be returned + + valid_values = [] + changed = False + for element in input_value: + if str is not bytes and isinstance(element, bytes): # python3 try to converts bytes to string + element = to_unicode(element) + if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time + if element.startswith('0000') or isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string + valid_values.append(element) + else: + return False + elif isinstance(element, datetime): + changed = True + if element.tzinfo: # a datetime with a timezone + valid_values.append(element.strftime('%Y%m%d%H%M%S%z')) + else: # datetime without timezone, assumed local and adjusted to UTC + offset = datetime.now() - datetime.utcnow() + valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ')) + else: + return False + + if changed: + if sequence: + return valid_values + else: + return valid_values[0] + else: + return True + + +def validate_time(input_value): + # if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC + if not isinstance(input_value, SEQUENCE_TYPES): + sequence = False + input_value = [input_value] + else: + sequence = True # indicates if a sequence must be returned + + valid_values = [] + changed = False + for element in input_value: + if str is not bytes and isinstance(element, bytes): # python3 try to converts bytes to string + element = to_unicode(element) + if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time + if isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string + valid_values.append(element) + else: + return False + elif isinstance(element, datetime): + changed = True + if element.tzinfo: # a datetime with a timezone + valid_values.append(element.strftime('%Y%m%d%H%M%S%z')) + else: # datetime without timezone, assumed local and adjusted to UTC + offset = datetime.now() - datetime.utcnow() + valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ')) + else: + return False + + if changed: + if sequence: + return valid_values + else: + return valid_values[0] + else: + return True + + +def validate_ad_timestamp(input_value): + """ + Active Directory stores date/time values as 
the number of 100-nanosecond intervals
+    that have elapsed since the 0 hour on January 1, 1601 until the date/time that is being stored.
+    The time is always stored in Greenwich Mean Time (GMT) in the Active Directory.
+    """
+    if not isinstance(input_value, SEQUENCE_TYPES):
+        sequence = False
+        input_value = [input_value]
+    else:
+        sequence = True  # indicates if a sequence must be returned
+
+    valid_values = []
+    changed = False
+    for element in input_value:
+        if str is not bytes and isinstance(element, bytes):  # Python 3: try to convert bytes to string
+            element = to_unicode(element)
+        if isinstance(element, NUMERIC_TYPES):
+            if 0 <= element <= 9223372036854775807:  # min and max for the AD timestamp starting from 12:00 AM January 1, 1601
+                valid_values.append(element)
+            else:
+                return False
+        elif isinstance(element, STRING_TYPES):  # checks whether it is already an AD timestamp
+            if isinstance(format_ad_timestamp(to_raw(element)), datetime):  # valid AD timestamp string
+                valid_values.append(element)
+            else:
+                return False
+        elif isinstance(element, datetime):
+            changed = True
+            if element.tzinfo:  # a datetime with a timezone
+                valid_values.append(to_raw((timegm(element.utctimetuple()) + 11644473600) * 10000000, encoding='ascii'))
+            else:  # datetime without timezone, assumed local and adjusted to UTC
+                offset = datetime.now() - datetime.utcnow()
+                valid_values.append(to_raw((timegm((element - offset).timetuple()) + 11644473600) * 10000000, encoding='ascii'))
+        else:
+            return False
+
+    if changed:
+        if sequence:
+            return valid_values
+        else:
+            return valid_values[0]
+    else:
+        return True
+
+
+def validate_ad_timedelta(input_value):
+    """
+    Should be validated like an AD timestamp except that since it is a time
+    delta, it is stored as a negative number.
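+
+    For instance (illustrative): validate_ad_timedelta(-864000000000), one
+    day expressed as a negative count of 100-nanosecond intervals, is
+    accepted, while positive integers and string values are rejected.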
+ """ + if not isinstance(input_value, INTEGER_TYPES) or input_value > 0: + return False + return validate_ad_timestamp(input_value * -1) + + +def validate_guid(input_value): + """ + object guid in uuid format (Novell eDirectory) + """ + if not isinstance(input_value, SEQUENCE_TYPES): + sequence = False + input_value = [input_value] + else: + sequence = True # indicates if a sequence must be returned + + valid_values = [] + changed = False + for element in input_value: + if isinstance(element, STRING_TYPES): + try: + valid_values.append(UUID(element).bytes) + changed = True + except ValueError: # try if the value is an escaped byte sequence + try: + valid_values.append(UUID(element.replace('\\', '')).bytes) + changed = True + continue + except ValueError: + if str is not bytes: # python 3 + pass + else: + valid_values.append(element) + continue + return False + elif isinstance(element, (bytes, bytearray)): # assumes bytes are valid + valid_values.append(element) + else: + return False + + if changed: + if sequence: + return valid_values + else: + return valid_values[0] + else: + return True + + +def validate_uuid(input_value): + """ + object entryUUID in uuid format + """ + if not isinstance(input_value, SEQUENCE_TYPES): + sequence = False + input_value = [input_value] + else: + sequence = True # indicates if a sequence must be returned + + valid_values = [] + changed = False + for element in input_value: + if isinstance(element, STRING_TYPES): + try: + valid_values.append(str(UUID(element))) + changed = True + except ValueError: # try if the value is an escaped byte sequence + try: + valid_values.append(str(UUID(element.replace('\\', '')))) + changed = True + continue + except ValueError: + if str is not bytes: # python 3 + pass + else: + valid_values.append(element) + continue + return False + elif isinstance(element, (bytes, bytearray)): # assumes bytes are valid + valid_values.append(element) + else: + return False + + if changed: + if sequence: + return valid_values + else: + return valid_values[0] + else: + return True + + +def validate_uuid_le(input_value): + """ + Active Directory stores objectGUID in uuid_le format, follows RFC4122 and MS-DTYP: + "{07039e68-4373-264d-a0a7-07039e684373}": string representation big endian, converted to little endian (with or without brace curles) + "689e030773434d26a7a007039e684373": packet representation, already in little endian + "\68\9e\03\07\73\43\4d\26\a7\a0\07\03\9e\68\43\73": bytes representation, already in little endian + byte sequence: already in little endian + + """ + if not isinstance(input_value, SEQUENCE_TYPES): + sequence = False + input_value = [input_value] + else: + sequence = True # indicates if a sequence must be returned + + valid_values = [] + changed = False + for element in input_value: + error = False + if isinstance(element, STRING_TYPES): + if element[0] == '{' and element[-1] == '}': + try: + valid_values.append(UUID(hex=element).bytes_le) # string representation, value in big endian, converts to little endian + changed = True + except ValueError: + error = True + elif '-' in element: + try: + valid_values.append(UUID(hex=element).bytes_le) # string representation, value in big endian, converts to little endian + changed = True + except ValueError: + error = True + elif '\\' in element: + try: + uuid = UUID(bytes_le=ldap_escape_to_bytes(element)).bytes_le + uuid = escape_bytes(uuid) + valid_values.append(uuid) # byte representation, value in little endian + changed = True + except ValueError: + error = True + elif '-' 
not in element: # value in little endian + try: + valid_values.append(UUID(bytes_le=a2b_hex(element)).bytes_le) # packet representation, value in little endian, converts to little endian + changed = True + except ValueError: + error = True + if error and str == bytes: # python2 only assume value is bytes and valid + valid_values.append(element) # value is untouched, must be in little endian + elif isinstance(element, (bytes, bytearray)): # assumes bytes are valid uuid + valid_values.append(element) # value is untouched, must be in little endian + else: + return False + + if changed: + if sequence: + return valid_values + else: + return valid_values[0] + else: + return True + + +def validate_sid(input_value): + """ + SID= "S-1-" IdentifierAuthority 1*SubAuthority + IdentifierAuthority= IdentifierAuthorityDec / IdentifierAuthorityHex + ; If the identifier authority is < 2^32, the + ; identifier authority is represented as a decimal + ; number + ; If the identifier authority is >= 2^32, + ; the identifier authority is represented in + ; hexadecimal + IdentifierAuthorityDec = 1*10DIGIT + ; IdentifierAuthorityDec, top level authority of a + ; security identifier is represented as a decimal number + IdentifierAuthorityHex = "0x" 12HEXDIG + ; IdentifierAuthorityHex, the top-level authority of a + ; security identifier is represented as a hexadecimal number + SubAuthority= "-" 1*10DIGIT + ; Sub-Authority is always represented as a decimal number + ; No leading "0" characters are allowed when IdentifierAuthority + ; or SubAuthority is represented as a decimal number + ; All hexadecimal digits must be output in string format, + ; pre-pended by "0x" + + Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. + SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. + IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. + SubAuthority (variable): A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. + + If you have a SID like S-a-b-c-d-e-f-g-... 
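A note for readers of this patch (not part of the vendored code): the string forms accepted by validate_uuid_le above map directly onto Python's standard uuid module, whose bytes_le property performs exactly the big-endian-to-little-endian field swap the docstring describes. A minimal sketch:

    from uuid import UUID

    u = UUID('{07039e68-4373-264d-a0a7-07039e684373}')  # string form, big endian
    print(u.bytes_le.hex())  # 689e030773434d26a7a007039e684373 -- the packet form
    # Round trip: rebuilding the UUID from the little-endian packet form.
    print(UUID(bytes_le=bytes.fromhex('689e030773434d26a7a007039e684373')))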
+def validate_sid(input_value):
+    """
+    SID= "S-1-" IdentifierAuthority 1*SubAuthority
+    IdentifierAuthority= IdentifierAuthorityDec / IdentifierAuthorityHex
+        ; If the identifier authority is < 2^32, the
+        ; identifier authority is represented as a decimal
+        ; number
+        ; If the identifier authority is >= 2^32,
+        ; the identifier authority is represented in
+        ; hexadecimal
+    IdentifierAuthorityDec = 1*10DIGIT
+        ; IdentifierAuthorityDec, top level authority of a
+        ; security identifier is represented as a decimal number
+    IdentifierAuthorityHex = "0x" 12HEXDIG
+        ; IdentifierAuthorityHex, the top-level authority of a
+        ; security identifier is represented as a hexadecimal number
+    SubAuthority= "-" 1*10DIGIT
+        ; Sub-Authority is always represented as a decimal number
+        ; No leading "0" characters are allowed when IdentifierAuthority
+        ; or SubAuthority is represented as a decimal number
+        ; All hexadecimal digits must be output in string format,
+        ; pre-pended by "0x"
+
+    Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01.
+    SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15.
+    IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority.
+    SubAuthority (variable): A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount.
+
+    If you have a SID like S-a-b-c-d-e-f-g-...
+
+    Then the bytes are
+    a       (revision)
+    N       (number of dashes minus two)
+    bbbbbb  (six bytes of "b" treated as a 48-bit number in big-endian format)
+    cccc    (four bytes of "c" treated as a 32-bit number in little-endian format)
+    dddd    (four bytes of "d" treated as a 32-bit number in little-endian format)
+    eeee    (four bytes of "e" treated as a 32-bit number in little-endian format)
+    ffff    (four bytes of "f" treated as a 32-bit number in little-endian format)
+
+    """
+    if not isinstance(input_value, SEQUENCE_TYPES):
+        sequence = False
+        input_value = [input_value]
+    else:
+        sequence = True  # indicates if a sequence must be returned
+
+    valid_values = []
+    changed = False
+    for element in input_value:
+        if isinstance(element, STRING_TYPES):
+            if element.startswith('S-'):
+                parts = element.split('-')
+                sid_bytes = pack('<q', int(parts[1]))[0:1]  # revision number
+                sid_bytes += pack('<q', len(parts[3:]))[0:1]  # number of sub-authorities
+                if len(parts[2]) <= 10:
+                    sid_bytes += pack('>q', int(parts[2]))[2:]  # authority (in dec)
+                else:
+                    sid_bytes += pack('>q', int(parts[2], 16))[2:]  # authority (in hex)
+                for sub_auth in parts[3:]:
+                    sid_bytes += pack('<q', int(sub_auth))[0:4]  # sub-authorities
+                valid_values.append(sid_bytes)
+                changed = True
+        elif isinstance(element, (bytes, bytearray)):  # assumes bytes are valid
+            valid_values.append(element)
+        else:
+            return False
+
+    if changed:
+        if sequence:
+            return valid_values
+        else:
+            return valid_values[0]
+    else:
+        return True
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/kerberos.py b/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/kerberos.py
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/kerberos.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/kerberos.py
         if len(connection.sasl_credentials) >= 1 and connection.sasl_credentials[0]:
             if connection.sasl_credentials[0] is True:
@@ -70,9 +75,15 @@ def sasl_gssapi(connection, controls):
             target_name = gssapi.Name('ldap@' + connection.sasl_credentials[0], gssapi.NameType.hostbased_service)
         if len(connection.sasl_credentials) >= 2 and connection.sasl_credentials[1]:
             authz_id = connection.sasl_credentials[1].encode("utf-8")
+        if len(connection.sasl_credentials) >= 3 and connection.sasl_credentials[2]:
+            raw_creds = connection.sasl_credentials[2]
     if target_name is None:
         target_name = gssapi.Name('ldap@' + connection.server.host, gssapi.NameType.hostbased_service)
-    creds = gssapi.Credentials(name=gssapi.Name(connection.user), usage='initiate') if connection.user else None
+
+    if raw_creds is not None:
+        creds = gssapi.Credentials(base=raw_creds, usage='initiate', store=connection.cred_store)
+    else:
+        creds = gssapi.Credentials(name=gssapi.Name(connection.user), usage='initiate', store=connection.cred_store) if connection.user else None
     ctx = gssapi.SecurityContext(name=target_name, mech=gssapi.MechType.kerberos, creds=creds)
     in_token = None
     try:
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/plain.py b/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/plain.py
index 1de2a36..5327275 100644
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/plain.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/sasl/plain.py
@@ -5,7 +5,7 @@
 #
 # Author: Giovanni Cannata
 #
-# Copyright 2014 - 2018 Giovanni Cannata
+# Copyright 2014 - 2019 Giovanni Cannata
 #
 # This file is part of ldap3.
 #
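The two binary encodings validated above can be reproduced with the standard library alone. A sketch for reference (to_filetime and sid_to_bytes are illustrative names, not ldap3 API, and the SID helper handles only the decimal-authority case):

    from calendar import timegm
    from datetime import datetime, timezone
    from struct import pack

    def to_filetime(dt):
        # 100-nanosecond intervals since 1601-01-01 00:00 UTC; 11644473600 is
        # the number of seconds between 1601 and the Unix epoch, as used by
        # validate_ad_timestamp above.
        return (timegm(dt.utctimetuple()) + 11644473600) * 10000000

    def sid_to_bytes(sid):
        # "S-1-5-21-x-y-z": revision byte, sub-authority count byte, 48-bit
        # big-endian authority, then each sub-authority as 32-bit little endian.
        parts = sid.split('-')
        out = pack('<q', int(parts[1]))[0:1]      # revision
        out += pack('<q', len(parts[3:]))[0:1]    # number of sub-authorities
        out += pack('>q', int(parts[2]))[2:]      # authority
        for sub_auth in parts[3:]:
            out += pack('<q', int(sub_auth))[0:4]
        return out

    print(to_filetime(datetime(2019, 1, 1, tzinfo=timezone.utc)))  # 131907744000000000
    print(sid_to_bytes('S-1-5-21-1-2-3').hex())   # 010400000000000515000000010000000200000003000000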
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ad2012R2.py b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ad2012R2.py
index f583973..d0f3363 100644
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ad2012R2.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ad2012R2.py
@@ -5,7 +5,7 @@
 #
 # Author: Giovanni Cannata
 #
-# Copyright 2014 - 2018 Giovanni Cannata
+# Copyright 2014 - 2019 Giovanni Cannata
 #
 # This file is part of ldap3.
 #
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ds389.py b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ds389.py
index 0ede92f..4d49331 100644
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ds389.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/ds389.py
@@ -5,7 +5,7 @@
 #
 # Author: Giovanni Cannata
 #
-# Copyright 2014 - 2018 Giovanni Cannata
+# Copyright 2014 - 2019 Giovanni Cannata
 #
 # This file is part of ldap3.
 #
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir888.py b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir888.py
index 630d7dc..80499a4 100644
--- a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir888.py
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir888.py
@@ -5,7 +5,7 @@
 #
 # Author: Giovanni Cannata
 #
-# Copyright 2014 - 2018 Giovanni Cannata
+# Copyright 2014 - 2019 Giovanni Cannata
 #
 # This file is part of ldap3.
 #
@@ -939,12 +939,7 @@ edir_8_8_8_dsa_info = """
         "addEntryOps": [
             "947"
         ],
-        "altServer": [
-            "ldap://192.168.137.102:389/",
-            "ldaps://192.168.137.102:636/",
-            "ldap://192.168.137.103:389/",
-            "ldaps://192.168.137.103:636/"
-        ],
+        "altServer": [],
         "bindSecurityErrors": [
             "3"
         ],
diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir914.py b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir914.py
new file mode 100644
index 0000000..f86c417
--- /dev/null
+++ b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/edir914.py
@@ -0,0 +1,1157 @@
+"""
+"""
+
+# Created on 2019.08.31
+#
+# Author: Giovanni Cannata
+#
+# Copyright 2014 - 2019 Giovanni Cannata
+#
+# This file is part of ldap3.
+#
+# ldap3 is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ldap3 is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with ldap3 in the COPYING and COPYING.LESSER files.
+# If not, see <http://www.gnu.org/licenses/>.
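These schema modules are static dumps that ldap3 uses for its "offline" server info, so a Server object can expose a schema without contacting a live directory. A usage sketch, assuming the documented OFFLINE_EDIR_8_8_8 constant (the edir914.py file added here would presumably be exposed through a matching OFFLINE_EDIR_9_1_4 constant):

    from ldap3 import Server, OFFLINE_EDIR_8_8_8

    # get_info with an offline constant loads schema from the bundled dump
    # instead of querying the directory.
    server = Server('edir.example.com', get_info=OFFLINE_EDIR_8_8_8)
    print(server.schema.attribute_types['cn'])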
+ +edir_9_1_4_schema = """ +{ + "raw": { + "attributeTypes": [ + "( 2.5.4.35 NAME 'userPassword' DESC 'Internal NDS policy forces this to be single-valued' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{128} USAGE directoryOperation )", + "( 2.5.18.1 NAME 'createTimestamp' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.5.18.2 NAME 'modifyTimestamp' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.5.18.10 NAME 'subschemaSubentry' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 USAGE directoryOperation )", + "( 2.5.21.9 NAME 'structuralObjectClass' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.16.840.1.113719.1.27.4.49 NAME 'subordinateCount' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.16.840.1.113719.1.27.4.48 NAME 'entryFlags' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.16.840.1.113719.1.27.4.51 NAME 'federationBoundary' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.5.21.5 NAME 'attributeTypes' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.3 USAGE directoryOperation )", + "( 2.5.21.6 NAME 'objectClasses' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.37 USAGE directoryOperation )", + "( 1.3.6.1.1.20 NAME 'entryDN' DESC 'Operational Attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.16.840.1.113719.1.1.4.1.2 NAME 'ACL' SYNTAX 2.16.840.1.113719.1.1.5.1.17 X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' )", + "( 2.5.4.1 NAME 'aliasedObjectName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Aliased Object Name' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' )", + "( 2.16.840.1.113719.1.1.4.1.6 NAME 'backLink' SYNTAX 2.16.840.1.113719.1.1.5.1.23 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Back Link' X-NDS_SERVER_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.8 NAME 'binderyProperty' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Bindery Property' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.7 NAME 'binderyObjectRestriction' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Bindery Object Restriction' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.9 NAME 'binderyType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Bindery Type' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.11 NAME 'cAPrivateKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'CA Private Key' X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.12 NAME 'cAPublicKey' 
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'CA Public Key' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.10 NAME 'Cartridge' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.3 NAME ( 'cn' 'commonName' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'CN' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.78 NAME 'printerConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'Printer Configuration' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.15 NAME 'Convergence' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{1} SINGLE-VALUE X-NDS_UPPER_BOUND '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.6 NAME ( 'c' 'countryName' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2} SINGLE-VALUE X-NDS_NAME 'C' X-NDS_LOWER_BOUND '2' X-NDS_UPPER_BOUND '2' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.18 NAME 'defaultQueue' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Default Queue' X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.13 NAME ( 'description' 'multiLineDescription' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{1024} X-NDS_NAME 'Description' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '1024' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.64 NAME 'partitionCreationTime' SYNTAX 2.16.840.1.113719.1.1.5.1.19 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Partition Creation Time' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.5.4.23 NAME 'facsimileTelephoneNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.22{64512} X-NDS_NAME 'Facsimile Telephone Number' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.117 NAME 'highConvergenceSyncInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27 SINGLE-VALUE X-NDS_NAME 'High Convergence Sync Interval' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.25 NAME 'groupMembership' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Group Membership' X-NDS_NAME_VALUE_ACCESS '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.26 NAME 'ndsHomeDirectory' SYNTAX 2.16.840.1.113719.1.1.5.1.15{255} SINGLE-VALUE X-NDS_NAME 'Home Directory' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '255' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.27 NAME 'hostDevice' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Host Device' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.28 NAME 'hostResourceName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'Host Resource Name' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.29 NAME 'hostServer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Host Server' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.30 NAME 'inheritedACL' SYNTAX 2.16.840.1.113719.1.1.5.1.17 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Inherited ACL' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.5.4.7 NAME ( 'l' 'localityname' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} X-NDS_NAME 'L' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.39 NAME 'loginAllowedTimeMap' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{42} SINGLE-VALUE X-NDS_NAME 'Login Allowed Time Map' X-NDS_LOWER_BOUND '42' X-NDS_UPPER_BOUND '42' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.40 NAME 'loginDisabled' SYNTAX 
1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Login Disabled' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.41 NAME 'loginExpirationTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_NAME 'Login Expiration Time' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.42 NAME 'loginGraceLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Login Grace Limit' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.43 NAME 'loginGraceRemaining' SYNTAX 2.16.840.1.113719.1.1.5.1.22 SINGLE-VALUE X-NDS_NAME 'Login Grace Remaining' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.44 NAME 'loginIntruderAddress' SYNTAX 2.16.840.1.113719.1.1.5.1.12 SINGLE-VALUE X-NDS_NAME 'Login Intruder Address' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.45 NAME 'loginIntruderAttempts' SYNTAX 2.16.840.1.113719.1.1.5.1.22 SINGLE-VALUE X-NDS_NAME 'Login Intruder Attempts' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.46 NAME 'loginIntruderLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Login Intruder Limit' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.31 NAME 'intruderAttemptResetInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27 SINGLE-VALUE X-NDS_NAME 'Intruder Attempt Reset Interval' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.47 NAME 'loginIntruderResetTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_NAME 'Login Intruder Reset Time' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.48 NAME 'loginMaximumSimultaneous' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Login Maximum Simultaneous' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.49 NAME 'loginScript' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'Login Script' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.50 NAME 'loginTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_NAME 'Login Time' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.31 NAME ( 'member' 'uniqueMember' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Member' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.52 NAME 'Memory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.22 NAME 'eMailAddress' SYNTAX 2.16.840.1.113719.1.1.5.1.14{64512} X-NDS_NAME 'EMail Address' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.55 NAME 'networkAddress' SYNTAX 2.16.840.1.113719.1.1.5.1.12 X-NDS_NAME 'Network Address' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.56 NAME 'networkAddressRestriction' SYNTAX 2.16.840.1.113719.1.1.5.1.12 X-NDS_NAME 'Network Address Restriction' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.57 NAME 'notify' SYNTAX 2.16.840.1.113719.1.1.5.1.25 X-NDS_NAME 'Notify' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.114 NAME 'Obituary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.5.4.0 NAME 'objectClass' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-NDS_NAME 'Object Class' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' )", + "( 2.16.840.1.113719.1.1.4.1.59 NAME 'operator' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Operator' X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} 
X-NDS_NAME 'OU' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.10 NAME ( 'o' 'organizationname' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'O' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.32 NAME 'owner' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Owner' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.63 NAME 'pageDescriptionLanguage' SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} X-NDS_NAME 'Page Description Language' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.65 NAME 'passwordsUsed' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NAME 'Passwords Used' X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.66 NAME 'passwordAllowChange' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Password Allow Change' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.67 NAME 'passwordExpirationInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27 SINGLE-VALUE X-NDS_NAME 'Password Expiration Interval' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.68 NAME 'passwordExpirationTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_NAME 'Password Expiration Time' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.69 NAME 'passwordMinimumLength' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Password Minimum Length' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.70 NAME 'passwordRequired' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Password Required' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.71 NAME 'passwordUniqueRequired' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Password Unique Required' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.72 NAME 'path' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'Path' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.19 NAME 'physicalDeliveryOfficeName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} X-NDS_NAME 'Physical Delivery Office Name' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.16 NAME 'postalAddress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.41{64512} X-NDS_NAME 'Postal Address' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.17 NAME 'postalCode' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{40} X-NDS_NAME 'Postal Code' X-NDS_UPPER_BOUND '40' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.18 NAME 'postOfficeBox' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{40} X-NDS_NAME 'Postal Office Box' X-NDS_UPPER_BOUND '40' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.80 NAME 'printJobConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'Print Job Configuration' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.79 NAME 'printerControl' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'Printer Control' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.82 NAME 'privateKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Private Key' X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.83 NAME 'Profile' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.84 NAME 'publicKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Public Key' X-NDS_PUBLIC_READ 
'1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_OPERATIONAL '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.85 NAME 'queue' SYNTAX 2.16.840.1.113719.1.1.5.1.25 X-NDS_NAME 'Queue' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.86 NAME 'queueDirectory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{255} SINGLE-VALUE X-NDS_NAME 'Queue Directory' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '255' X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.115 NAME 'Reference' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.88 NAME 'Replica' SYNTAX 2.16.840.1.113719.1.1.5.1.16{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.89 NAME 'Resource' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.33 NAME 'roleOccupant' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Role Occupant' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.116 NAME 'higherPrivileges' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Higher Privileges' X-NDS_SERVER_READ '1' X-NDS_NAME_VALUE_ACCESS '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.92 NAME 'securityEquals' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Security Equals' X-NDS_SERVER_READ '1' X-NDS_NAME_VALUE_ACCESS '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' )", + "( 2.5.4.34 NAME 'seeAlso' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'See Also' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.5 NAME 'serialNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} X-NDS_NAME 'Serial Number' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.95 NAME 'server' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Server' X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} X-NDS_NAME 'S' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.98 NAME 'status' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Status' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_OPERATIONAL '1' )", + "( 2.5.4.9 NAME 'street' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} X-NDS_NAME 'SA' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.102 NAME 'supportedTypefaces' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'Supported Typefaces' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.101 NAME 'supportedServices' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'Supported Services' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.4 NAME ( 'sn' 'surname' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'Surname' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.20 NAME 'telephoneNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} X-NDS_NAME 'Telephone Number' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.12 NAME 'title' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'Title' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.111 NAME 'User' SYNTAX 
1.3.6.1.4.1.1466.115.121.1.12 X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.112 NAME 'Version' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} SINGLE-VALUE X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.1 NAME 'accountBalance' SYNTAX 2.16.840.1.113719.1.1.5.1.22 SINGLE-VALUE X-NDS_NAME 'Account Balance' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.4 NAME 'allowUnlimitedCredit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Allow Unlimited Credit' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.118 NAME 'lowConvergenceResetTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE USAGE directoryOperation X-NDS_NAME 'Low Convergence Reset Time' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.54 NAME 'minimumAccountBalance' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Minimum Account Balance' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.104 NAME 'lowConvergenceSyncInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27 SINGLE-VALUE X-NDS_NAME 'Low Convergence Sync Interval' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.21 NAME 'Device' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.53 NAME 'messageServer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Message Server' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.34 NAME 'Language' SYNTAX 2.16.840.1.113719.1.1.5.1.6{64512} SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.100 NAME 'supportedConnections' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Supported Connections' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.107 NAME 'typeCreatorMap' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'Type Creator Map' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.108 NAME 'ndsUID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'UID' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.24 NAME 'groupID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'GID' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.110 NAME 'unknownBaseClass' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32} SINGLE-VALUE USAGE directoryOperation X-NDS_NAME 'Unknown Base Class' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.87 NAME 'receivedUpTo' SYNTAX 2.16.840.1.113719.1.1.5.1.19 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Received Up To' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.33 NAME 'synchronizedUpTo' SYNTAX 2.16.840.1.113719.1.1.5.1.19 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Synchronized Up To' X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.5 NAME 'authorityRevocation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Authority Revocation' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.13 NAME 'certificateRevocation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Certificate Revocation' X-NDS_NONREMOVABLE '1' 
X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.17 NAME 'ndsCrossCertificatePair' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'Cross Certificate Pair' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.37 NAME 'lockedByIntruder' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Locked By Intruder' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.77 NAME 'printer' SYNTAX 2.16.840.1.113719.1.1.5.1.25 X-NDS_NAME 'Printer' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.20 NAME 'detectIntruder' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Detect Intruder' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.38 NAME 'lockoutAfterDetection' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Lockout After Detection' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.32 NAME 'intruderLockoutResetInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27 SINGLE-VALUE X-NDS_NAME 'Intruder Lockout Reset Interval' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.96 NAME 'serverHolds' SYNTAX 2.16.840.1.113719.1.1.5.1.26 X-NDS_NAME 'Server Holds' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.91 NAME 'sAPName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{47} SINGLE-VALUE X-NDS_NAME 'SAP Name' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '47' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.113 NAME 'Volume' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.35 NAME 'lastLoginTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Last Login Time' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.81 NAME 'printServer' SYNTAX 2.16.840.1.113719.1.1.5.1.25 SINGLE-VALUE X-NDS_NAME 'Print Server' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.119 NAME 'nNSDomain' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} X-NDS_NAME 'NNS Domain' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.120 NAME 'fullName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{127} X-NDS_NAME 'Full Name' X-NDS_UPPER_BOUND '127' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.121 NAME 'partitionControl' SYNTAX 2.16.840.1.113719.1.1.5.1.25 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Partition Control' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.122 NAME 'revision' SYNTAX 2.16.840.1.113719.1.1.5.1.22 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Revision' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_SCHED_SYNC_NEVER '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.123 NAME 'certificateValidityInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27{4294967295} SINGLE-VALUE X-NDS_NAME 'Certificate Validity Interval' X-NDS_LOWER_BOUND '60' X-NDS_UPPER_BOUND '-1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.124 NAME 'externalSynchronizer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'External Synchronizer' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.125 NAME 'messagingDatabaseLocation' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} SINGLE-VALUE X-NDS_NAME 'Messaging Database Location' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.126 NAME 'messageRoutingGroup' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Message Routing Group' 
X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.127 NAME 'messagingServer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Messaging Server' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.128 NAME 'Postmaster' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.162 NAME 'mailboxLocation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Mailbox Location' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.163 NAME 'mailboxID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{8} SINGLE-VALUE X-NDS_NAME 'Mailbox ID' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '8' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.164 NAME 'externalName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'External Name' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.165 NAME 'securityFlags' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Security Flags' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.166 NAME 'messagingServerType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32} SINGLE-VALUE X-NDS_NAME 'Messaging Server Type' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.167 NAME 'lastReferencedTime' SYNTAX 2.16.840.1.113719.1.1.5.1.19 SINGLE-VALUE USAGE directoryOperation X-NDS_NAME 'Last Referenced Time' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.5.4.42 NAME 'givenName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32} X-NDS_NAME 'Given Name' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.43 NAME 'initials' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{8} X-NDS_NAME 'Initials' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '8' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.4.44 NAME 'generationQualifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{8} SINGLE-VALUE X-NDS_NAME 'Generational Qualifier' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '8' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.171 NAME 'profileMembership' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Profile Membership' X-NDS_NAME_VALUE_ACCESS '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.172 NAME 'dsRevision' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'DS Revision' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_OPERATIONAL '1' )", + "( 2.16.840.1.113719.1.1.4.1.173 NAME 'supportedGateway' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{4096} X-NDS_NAME 'Supported Gateway' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '4096' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.174 NAME 'equivalentToMe' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Equivalent To Me' X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' )", + "( 2.16.840.1.113719.1.1.4.1.175 NAME 'replicaUpTo' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Replica Up To' X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.176 NAME 'partitionStatus' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Partition Status' X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 
2.16.840.1.113719.1.1.4.1.177 NAME 'permanentConfigParms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'Permanent Config Parms' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.178 NAME 'Timezone' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.179 NAME 'binderyRestrictionLevel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE USAGE directoryOperation X-NDS_NAME 'Bindery Restriction Level' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.180 NAME 'transitiveVector' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Transitive Vector' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_SCHED_SYNC_NEVER '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.181 NAME 'T' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.183 NAME 'purgeVector' SYNTAX 2.16.840.1.113719.1.1.5.1.19 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Purge Vector' X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_SCHED_SYNC_NEVER '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.184 NAME 'synchronizationTolerance' SYNTAX 2.16.840.1.113719.1.1.5.1.19 USAGE directoryOperation X-NDS_NAME 'Synchronization Tolerance' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.185 NAME 'passwordManagement' SYNTAX 2.16.840.1.113719.1.1.5.1.0 SINGLE-VALUE USAGE directoryOperation X-NDS_NAME 'Password Management' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.186 NAME 'usedBy' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Used By' X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.187 NAME 'Uses' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_SERVER_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.500 NAME 'obituaryNotify' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Obituary Notify' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.501 NAME 'GUID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{16} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_LOWER_BOUND '16' X-NDS_UPPER_BOUND '16' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.502 NAME 'otherGUID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{16} USAGE directoryOperation X-NDS_NAME 'Other GUID' X-NDS_LOWER_BOUND '16' X-NDS_UPPER_BOUND '16' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.503 NAME 'auxiliaryClassFlag' SYNTAX 2.16.840.1.113719.1.1.5.1.0 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Auxiliary Class Flag' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.504 NAME 'unknownAuxiliaryClass' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32} 
USAGE directoryOperation X-NDS_NAME 'Unknown Auxiliary Class' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 0.9.2342.19200300.100.1.1 NAME ( 'uid' 'userId' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64} X-NDS_NAME 'uniqueID' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 0.9.2342.19200300.100.1.25 NAME 'dc' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64} X-NDS_NAME 'dc' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '64' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.507 NAME 'auxClassObjectClassBackup' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'AuxClass Object Class Backup' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.508 NAME 'localReceivedUpTo' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NAME 'Local Received Up To' X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.141.4.4 NAME 'federationControl' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.141.4.2 NAME 'federationSearchPath' SYNTAX 2.16.840.1.113719.1.1.5.1.6{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.141.4.3 NAME 'federationDNSName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.141.4.1 NAME 'federationBoundaryType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.14.4.1.4 NAME 'DirXML-Associations' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' )", + "( 2.5.18.3 NAME 'creatorsName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.5.18.4 NAME 'modifiersName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_FILTERED_REQUIRED '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.300 NAME 'languageId' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.4.35 NAME 'ndsPredicate' SYNTAX 2.16.840.1.113719.1.1.5.1.12 X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.4.36 NAME 'ndsPredicateState' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.4.37 NAME 'ndsPredicateFlush' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.4.38 NAME 'ndsPredicateTimeout' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{2147483647} SINGLE-VALUE X-NDS_UPPER_BOUND '2147483647' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.4.40 NAME 'ndsPredicateStatsDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.4.39 NAME 'ndsPredicateUseValues' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE 
X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.601 NAME 'syncPanePoint' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.600 NAME 'syncWindowVector' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.602 NAME 'objectVersion' SYNTAX 2.16.840.1.113719.1.1.5.1.19 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.27.4.52 NAME 'memberQueryURL' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'memberQuery' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.302 NAME 'excludedMember' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.1.525 NAME 'auxClassCompatibility' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.518 NAME 'ndsAgentPassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.519 NAME 'ndsOperationCheckpoint' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.520 NAME 'localReferral' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.521 NAME 'treeReferral' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.522 NAME 'schemaResetLock' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.523 NAME 'modifiedACLEntry' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.524 NAME 'monitoredConnection' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.526 NAME 'localFederationBoundary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.527 NAME 'replicationFilter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.721 NAME 'ServerEBAEnabled' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 
2.16.840.1.113719.1.1.4.716 NAME 'EBATreeConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.722 NAME 'EBAPartitionConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.723 NAME 'EBAServerConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.1.4.1.296 NAME 'loginActivationTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.687 NAME 'UpdateInProgress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.720 NAME 'dsContainerReadyAttrs' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.4.400.1 NAME 'edirSchemaFlagVersion' SYNTAX 2.16.840.1.113719.1.1.5.1.0 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NONREMOVABLE '1' X-NDS_HIDDEN '1' X-NDS_READ_FILTERED '1' )", + "( 2.16.840.1.113719.1.1.4.1.512 NAME 'indexDefinition' SYNTAX 2.16.840.1.113719.1.1.5.1.6{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.513 NAME 'ndsStatusRepair' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.514 NAME 'ndsStatusExternalReference' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.515 NAME 'ndsStatusObituary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.516 NAME 'ndsStatusSchema' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.517 NAME 'ndsStatusLimber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.511 NAME 'authoritative' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113730.3.1.34 NAME 'ref' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.546 NAME 'CachedAttrsOnExtRefs' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.1.4.1.547 NAME 'ExtRefLastUpdatedTime' SYNTAX 2.16.840.1.113719.1.1.5.1.19 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-NDS_PUBLIC_READ '1' X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.688 NAME 'NCPKeyMaterialName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.1.4.713 NAME 'UTF8LoginScript' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.714 NAME 'loginScriptCharset' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.721 NAME 'NDSRightsToMonitor' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NEVER_SYNC '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.1.192 NAME 'lDAPLogLevel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{32768} SINGLE-VALUE X-NDS_NAME 'LDAP Log Level' X-NDS_UPPER_BOUND '32768' )", + "( 
2.16.840.1.113719.1.27.4.12 NAME 'lDAPUDPPort' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{65535} SINGLE-VALUE X-NDS_NAME 'LDAP UDP Port' X-NDS_UPPER_BOUND '65535' )", + "( 2.16.840.1.113719.1.1.4.1.204 NAME 'lDAPLogFilename' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'LDAP Log Filename' )", + "( 2.16.840.1.113719.1.1.4.1.205 NAME 'lDAPBackupLogFilename' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'LDAP Backup Log Filename' )", + "( 2.16.840.1.113719.1.1.4.1.206 NAME 'lDAPLogSizeLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{4294967295} SINGLE-VALUE X-NDS_NAME 'LDAP Log Size Limit' X-NDS_LOWER_BOUND '2048' X-NDS_UPPER_BOUND '-1' )", + "( 2.16.840.1.113719.1.1.4.1.194 NAME 'lDAPSearchSizeLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{2147483647} SINGLE-VALUE X-NDS_NAME 'LDAP Search Size Limit' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '2147483647' )", + "( 2.16.840.1.113719.1.1.4.1.195 NAME 'lDAPSearchTimeLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{2147483647} SINGLE-VALUE X-NDS_NAME 'LDAP Search Time Limit' X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '2147483647' )", + "( 2.16.840.1.113719.1.1.4.1.207 NAME 'lDAPSuffix' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'LDAP Suffix' )", + "( 2.16.840.1.113719.1.27.4.70 NAME 'ldapConfigVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.14 NAME 'ldapReferral' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'LDAP Referral' )", + "( 2.16.840.1.113719.1.27.4.73 NAME 'ldapDefaultReferralBehavior' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.23 NAME 'ldapSearchReferralUsage' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'LDAP:searchReferralUsage' )", + "( 2.16.840.1.113719.1.27.4.24 NAME 'lDAPOtherReferralUsage' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'LDAP:otherReferralUsage' )", + "( 2.16.840.1.113719.1.27.4.1 NAME 'ldapHostServer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'LDAP Host Server' )", + "( 2.16.840.1.113719.1.27.4.2 NAME 'ldapGroupDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'LDAP Group' )", + "( 2.16.840.1.113719.1.27.4.3 NAME 'ldapTraceLevel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{32768} SINGLE-VALUE X-NDS_NAME 'LDAP Screen Level' X-NDS_UPPER_BOUND '32768' )", + "( 2.16.840.1.113719.1.27.4.4 NAME 'searchSizeLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{2147483647} SINGLE-VALUE X-NDS_UPPER_BOUND '2147483647' )", + "( 2.16.840.1.113719.1.27.4.5 NAME 'searchTimeLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{2147483647} SINGLE-VALUE X-NDS_UPPER_BOUND '2147483647' )", + "( 2.16.840.1.113719.1.27.4.6 NAME 'ldapServerBindLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{4294967295} SINGLE-VALUE X-NDS_NAME 'LDAP Server Bind Limit' X-NDS_UPPER_BOUND '-1' )", + "( 2.16.840.1.113719.1.27.4.7 NAME 'ldapServerIdleTimeout' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{4294967295} SINGLE-VALUE X-NDS_NAME 'LDAP Server Idle Timeout' X-NDS_UPPER_BOUND '-1' )", + "( 2.16.840.1.113719.1.27.4.8 NAME 'ldapEnableTCP' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'LDAP Enable TCP' )", + "( 2.16.840.1.113719.1.27.4.10 NAME 'ldapEnableSSL' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'LDAP Enable SSL' )", + "( 2.16.840.1.113719.1.27.4.11 NAME 'ldapTCPPort' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{65535} SINGLE-VALUE X-NDS_NAME 'LDAP TCP Port' X-NDS_UPPER_BOUND '65535' )", + "( 2.16.840.1.113719.1.27.4.13 NAME 'ldapSSLPort' SYNTAX 
1.3.6.1.4.1.1466.115.121.1.27{65535} SINGLE-VALUE X-NDS_NAME 'LDAP SSL Port' X-NDS_UPPER_BOUND '65535' )", + "( 2.16.840.1.113719.1.27.4.21 NAME 'filteredReplicaUsage' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.22 NAME 'ldapKeyMaterialName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'LDAP:keyMaterialName' )", + "( 2.16.840.1.113719.1.27.4.42 NAME 'extensionInfo' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.27.4.45 NAME 'nonStdClientSchemaCompatMode' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.46 NAME 'sslEnableMutualAuthentication' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.62 NAME 'ldapEnablePSearch' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.63 NAME 'ldapMaximumPSearchOperations' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.64 NAME 'ldapIgnorePSearchLimitsForEvents' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.65 NAME 'ldapTLSTrustedRootContainer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.27.4.66 NAME 'ldapEnableMonitorEvents' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.67 NAME 'ldapMaximumMonitorEventsLoad' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.68 NAME 'ldapTLSRequired' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.69 NAME 'ldapTLSVerifyClientCertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.71 NAME 'ldapDerefAlias' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.72 NAME 'ldapNonStdAllUserAttrsMode' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.75 NAME 'ldapBindRestrictions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.79 NAME 'ldapInterfaces' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.27.4.80 NAME 'ldapChainSecureRequired' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.82 NAME 'ldapStdCompliance' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.83 NAME 'ldapDerefAliasOnAuth' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.84 NAME 'ldapGeneralizedTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.85 NAME 'ldapPermissiveModify' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.86 NAME 'ldapSSLConfig' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.27.4.15 NAME 'ldapServerList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'LDAP Server List' )", + "( 2.16.840.1.113719.1.27.4.16 NAME 'ldapAttributeMap' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'LDAP Attribute Map v11' )", + "( 2.16.840.1.113719.1.27.4.17 NAME 'ldapClassMap' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'LDAP Class Map v11' )", + "( 2.16.840.1.113719.1.27.4.18 NAME 'ldapAllowClearTextPassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'LDAP Allow Clear Text Password' )", + "( 2.16.840.1.113719.1.27.4.19 NAME 'ldapAnonymousIdentity' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE 
X-NDS_NAME 'LDAP Anonymous Identity' )", + "( 2.16.840.1.113719.1.27.4.52 NAME 'ldapAttributeList' SYNTAX 2.16.840.1.113719.1.1.5.1.6{64512} )", + "( 2.16.840.1.113719.1.27.4.53 NAME 'ldapClassList' SYNTAX 2.16.840.1.113719.1.1.5.1.6{64512} )", + "( 2.16.840.1.113719.1.27.4.56 NAME 'transitionGroupDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.74 NAME 'ldapTransitionBackLink' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.78 NAME 'ldapLBURPNumWriterThreads' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.27.4.20 NAME 'ldapServerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'LDAP Server' )", + "( 0.9.2342.19200300.100.1.3 NAME 'mail' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NAME 'Internet EMail Address' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NAME 'NSCP:employeeNumber' )", + "( 2.16.840.1.113719.1.27.4.76 NAME 'referralExcludeFilter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.27.4.77 NAME 'referralIncludeFilter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.5.4.36 NAME 'userCertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'userCertificate' X-NDS_PUBLIC_READ '1' )", + "( 2.5.4.37 NAME 'cACertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'cACertificate' X-NDS_PUBLIC_READ '1' )", + "( 2.5.4.40 NAME 'crossCertificatePair' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'crossCertificatePair' X-NDS_PUBLIC_READ '1' )", + "( 2.5.4.58 NAME 'attributeCertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.5.4.2 NAME 'knowledgeInformation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32768} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32768' )", + "( 2.5.4.14 NAME 'searchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.25{64512} X-NDS_NAME 'searchGuide' )", + "( 2.5.4.15 NAME 'businessCategory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' )", + "( 2.5.4.21 NAME 'telexNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52{64512} X-NDS_NAME 'telexNumber' )", + "( 2.5.4.22 NAME 'teletexTerminalIdentifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51{64512} X-NDS_NAME 'teletexTerminalIdentifier' )", + "( 2.5.4.24 NAME 'x121Address' SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{15} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '15' )", + "( 2.5.4.25 NAME 'internationaliSDNNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '16' )", + "( 2.5.4.26 NAME 'registeredAddress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.41{64512} X-NDS_NAME 'registeredAddress' )", + "( 2.5.4.27 NAME 'destinationIndicator' SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '128' )", + "( 2.5.4.28 NAME 'preferredDeliveryMethod' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14{64512} SINGLE-VALUE X-NDS_NAME 'preferredDeliveryMethod' )", + "( 2.5.4.29 NAME 'presentationAddress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.43{64512} SINGLE-VALUE X-NDS_NAME 'presentationAddress' )", + "( 2.5.4.30 NAME 'supportedApplicationContext' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38{64512} X-NDS_NAME 'supportedApplicationContext' )", + "( 2.5.4.45 NAME 'x500UniqueIdentifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.6{64512} X-NDS_NAME 'x500UniqueIdentifier' )", + "( 2.5.4.46 NAME 'dnQualifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64512} )", + "( 2.5.4.47 NAME 'enhancedSearchGuide' SYNTAX 
1.3.6.1.4.1.1466.115.121.1.21{64512} X-NDS_NAME 'enhancedSearchGuide' )", + "( 2.5.4.48 NAME 'protocolInformation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.42{64512} X-NDS_NAME 'protocolInformation' )", + "( 2.5.4.51 NAME 'houseIdentifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32768} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32768' )", + "( 2.5.4.52 NAME 'supportedAlgorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49{64512} X-NDS_NAME 'supportedAlgorithms' )", + "( 2.5.4.54 NAME 'dmdName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32768} X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '32768' )", + "( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.38 NAME 'associatedName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.5.4.49 NAME 'dn' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.1 NAME 'httpServerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.3.4.2 NAME 'httpHostServerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.3 NAME 'httpThreadsPerCPU' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.4 NAME 'httpIOBufferSize' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.5 NAME 'httpRequestTimeout' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.6 NAME 'httpKeepAliveRequestTimeout' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.7 NAME 'httpSessionTimeout' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.8 NAME 'httpKeyMaterialObject' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.9 NAME 'httpTraceLevel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.10 NAME 'httpAuthRequiresTLS' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.11 NAME 'httpDefaultClearPort' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.12 NAME 'httpDefaultTLSPort' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.3.4.13 NAME 'httpBindRestrictions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.295 NAME 'emboxConfig' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.54.4.1.1 NAME 'trusteesOfNewObject' SYNTAX 2.16.840.1.113719.1.1.5.1.17 X-NDS_NAME 'Trustees Of New Object' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.55.4.1.1 NAME 'newObjectSDSRights' SYNTAX 2.16.840.1.113719.1.1.5.1.17 X-NDS_NAME 'New Object's DS Rights' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.56.4.1.1 NAME 'newObjectSFSRights' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'New Object's FS Rights' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.57.4.1.1 NAME 'setupScript' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'Setup Script' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.58.4.1.1 NAME 'runSetupScript' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Run Setup Script' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.59.4.1.1 NAME 'membersOfTemplate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Members Of Template' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.60.4.1.1 NAME 'volumeSpaceRestrictions' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'Volume 
Space Restrictions' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.61.4.1.1 NAME 'setPasswordAfterCreate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'Set Password After Create' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.62.4.1.1 NAME 'homeDirectoryRights' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-NDS_NAME 'Home Directory Rights' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.63.4.1.1 NAME 'newObjectSSelfRights' SYNTAX 2.16.840.1.113719.1.1.5.1.17 X-NDS_NAME 'New Object's Self Rights' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.8.4.1 NAME 'digitalMeID' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.8.4.2 NAME 'assistant' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.8.4.3 NAME 'assistantPhone' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.8.4.4 NAME 'city' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.5 NAME 'company' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.43 NAME 'co' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.6 NAME 'directReports' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 0.9.2342.19200300.100.1.10 NAME 'manager' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.8.4.7 NAME 'mailstop' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.41 NAME 'mobile' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.42 NAME 'pager' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.8.4.8 NAME 'workforceID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.9 NAME 'instantMessagingID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.10 NAME 'preferredName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.7 NAME 'photo' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113719.1.8.4.11 NAME 'jobCode' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.12 NAME 'siteLocation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.13 NAME 'employeeStatus' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113730.3.1.4 NAME 'employeeType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.14 NAME 'costCenter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.15 NAME 'costCenterDescription' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.16 NAME 'tollFreePhoneNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.8.4.17 NAME 'otherPhoneNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.8.4.18 NAME 'managerWorkforceID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.19 NAME 'jackNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.20 NAME 'vehicleInformation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.21 NAME 'accessCardNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.32 NAME 'isManager' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 
SINGLE-VALUE )", + "( 2.16.840.1.113719.1.8.4.22 NAME 'homeCity' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.23 NAME 'homeEmailAddress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 1.3.6.1.4.1.1466.101.120.31 NAME 'homeFax' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 0.9.2342.19200300.100.1.20 NAME 'homePhone' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.8.4.24 NAME 'homeState' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.41{64512} )", + "( 2.16.840.1.113719.1.8.4.25 NAME 'homeZipCode' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.26 NAME 'personalMobile' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.8.4.27 NAME 'children' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.28 NAME 'spouse' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.29 NAME 'vendorName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.30 NAME 'vendorAddress' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.8.4.31 NAME 'vendorPhoneNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{64512} )", + "( 2.16.840.1.113719.1.1.4.1.303 NAME 'dgIdentity' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME_VALUE_ACCESS '1' )", + "( 2.16.840.1.113719.1.1.4.1.304 NAME 'dgTimeOut' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.305 NAME 'dgAllowUnknown' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.306 NAME 'dgAllowDuplicates' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.546 NAME 'allowAliasToAncestor' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.39.4.1.1 NAME 'sASSecurityDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'SAS:Security DN' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.4.1.2 NAME 'sASServiceDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'SAS:Service DN' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.4.1.3 NAME 'sASSecretStore' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'SAS:SecretStore' )", + "( 2.16.840.1.113719.1.39.4.1.4 NAME 'sASSecretStoreKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_NAME 'SAS:SecretStore:Key' X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.4.1.5 NAME 'sASSecretStoreData' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NAME 'SAS:SecretStore:Data' X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.4.1.6 NAME 'sASPKIStoreKeys' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NAME 'SAS:PKIStore:Keys' X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.48.4.1.1 NAME 'nDSPKIPublicKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Public Key' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.2 NAME 'nDSPKIPrivateKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Private Key' )", + "( 2.16.840.1.113719.1.48.4.1.3 NAME 'nDSPKIPublicKeyCertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Public Key Certificate' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.4 NAME 'nDSPKICertificateChain' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'NDSPKI:Certificate 
Chain' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.16 NAME 'nDSPKIPublicKeyEC' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Public Key EC' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.17 NAME 'nDSPKIPrivateKeyEC' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Private Key EC' )", + "( 2.16.840.1.113719.1.48.4.1.18 NAME 'nDSPKIPublicKeyCertificateEC' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Public Key Certificate EC' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.19 NAME 'crossCertificatePairEC' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'Cross Certificate Pair EC' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.20 NAME 'nDSPKICertificateChainEC' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'NDSPKI:Certificate Chain EC' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.5 NAME 'nDSPKIParentCA' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Parent CA' )", + "( 2.16.840.1.113719.1.48.4.1.6 NAME 'nDSPKIParentCADN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'NDSPKI:Parent CA DN' )", + "( 2.16.840.1.113719.1.48.4.1.20 NAME 'nDSPKISuiteBMode' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'NDSPKI:SuiteBMode' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.7 NAME 'nDSPKIKeyFile' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Key File' )", + "( 2.16.840.1.113719.1.48.4.1.8 NAME 'nDSPKISubjectName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Subject Name' )", + "( 2.16.840.1.113719.1.48.4.1.11 NAME 'nDSPKIGivenName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Given Name' )", + "( 2.16.840.1.113719.1.48.4.1.9 NAME 'nDSPKIKeyMaterialDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'NDSPKI:Key Material DN' )", + "( 2.16.840.1.113719.1.48.4.1.10 NAME 'nDSPKITreeCADN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'NDSPKI:Tree CA DN' )", + "( 2.5.4.59 NAME 'cAECCertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.12 NAME 'nDSPKIUserCertificateInfo' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'NDSPKI:userCertificateInfo' )", + "( 2.16.840.1.113719.1.48.4.1.13 NAME 'nDSPKITrustedRootCertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Trusted Root Certificate' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.14 NAME 'nDSPKINotBefore' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Not Before' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.15 NAME 'nDSPKINotAfter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:Not After' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.101 NAME 'nDSPKISDKeyServerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'NDSPKI:SD Key Server DN' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.102 NAME 'nDSPKISDKeyStruct' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'NDSPKI:SD Key Struct' )", + "( 2.16.840.1.113719.1.48.4.1.103 NAME 'nDSPKISDKeyCert' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:SD Key Cert' )", + "( 2.16.840.1.113719.1.48.4.1.104 NAME 'nDSPKISDKeyID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'NDSPKI:SD Key ID' )", + "( 2.16.840.1.113719.1.39.4.1.105 NAME 
'nDSPKIKeystore' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_NAME 'NDSPKI:Keystore' X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.4.1.106 NAME 'ndspkiAdditionalRoots' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.2.3 NAME 'masvLabel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.2.4 NAME 'masvProposedLabel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.2.5 NAME 'masvDefaultRange' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.2.6 NAME 'masvAuthorizedRange' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.2.7 NAME 'masvDomainPolicy' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.8 NAME 'masvClearanceNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.9 NAME 'masvLabelNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.10 NAME 'masvLabelSecrecyLevelNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.11 NAME 'masvLabelSecrecyCategoryNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.12 NAME 'masvLabelIntegrityLevelNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.13 NAME 'masvLabelIntegrityCategoryNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.14 NAME 'masvPolicyUpdate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.31.4.1.16 NAME 'masvNDSAttributeLabels' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.31.4.1.15 NAME 'masvPolicyDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.2 NAME 'sASLoginSequence' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_NAME 'SAS:Login Sequence' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.8 NAME 'sASLoginPolicyUpdate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'SAS:Login Policy Update' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.38 NAME 'sasNMASProductOptions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.74 NAME 'sasAuditConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.14 NAME 'sASNDSPasswordWindow' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'SAS:NDS Password Window' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.15 NAME 'sASPolicyCredentials' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'SAS:Policy Credentials' X-NDS_SERVER_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.16 NAME 'sASPolicyMethods' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'SAS:Policy Methods' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 
2.16.840.1.113719.1.39.42.1.0.17 NAME 'sASPolicyObjectVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'SAS:Policy Object Version' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.18 NAME 'sASPolicyServiceSubtypes' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'SAS:Policy Service Subtypes' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.19 NAME 'sASPolicyServices' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'SAS:Policy Services' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.20 NAME 'sASPolicyUsers' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'SAS:Policy Users' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.21 NAME 'sASAllowNDSPasswordWindow' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'SAS:Allow NDS Password Window' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.9 NAME 'sASMethodIdentifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'SAS:Method Identifier' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.10 NAME 'sASMethodVendor' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'SAS:Method Vendor' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.11 NAME 'sASAdvisoryMethodGrade' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'SAS:Advisory Method Grade' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.12 NAME 'sASVendorSupport' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'SAS:Vendor Support' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.13 NAME 'sasCertificateSearchContainers' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.70 NAME 'sasNMASMethodConfigData' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.22 NAME 'sASLoginClientMethodNetWare' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'SAS:Login Client Method NetWare' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.23 NAME 'sASLoginServerMethodNetWare' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'SAS:Login Server Method NetWare' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.24 NAME 'sASLoginClientMethodWINNT' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'SAS:Login Client Method WINNT' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.25 NAME 'sASLoginServerMethodWINNT' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NAME 'SAS:Login Server Method WINNT' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.26 NAME 'sasLoginClientMethodSolaris' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.27 NAME 'sasLoginServerMethodSolaris' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.28 NAME 'sasLoginClientMethodLinux' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE 
X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.29 NAME 'sasLoginServerMethodLinux' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.30 NAME 'sasLoginClientMethodTru64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.31 NAME 'sasLoginServerMethodTru64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.32 NAME 'sasLoginClientMethodAIX' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.33 NAME 'sasLoginServerMethodAIX' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.34 NAME 'sasLoginClientMethodHPUX' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.35 NAME 'sasLoginServerMethodHPUX' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1000 NAME 'sasLoginClientMethods390' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1001 NAME 'sasLoginServerMethods390' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1002 NAME 'sasLoginClientMethodLinuxX64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1003 NAME 'sasLoginServerMethodLinuxX64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1004 NAME 'sasLoginClientMethodWinX64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1005 NAME 'sasLoginServerMethodWinX64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1006 NAME 'sasLoginClientMethodSolaris64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1007 NAME 'sasLoginServerMethodSolaris64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1008 NAME 'sasLoginClientMethodAIX64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1009 NAME 'sasLoginServerMethodAIX64' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1011 NAME 'sasLoginServerMethodSolarisi386' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1012 NAME 'sasLoginClientMethodSolarisi386' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 
2.16.840.1.113719.1.39.42.1.0.78 NAME 'sasUnsignedMethodModules' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.79 NAME 'sasServerModuleName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.80 NAME 'sasServerModuleEntryPointName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.81 NAME 'sasSASLMechanismName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.82 NAME 'sasSASLMechanismEntryPointName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.83 NAME 'sasClientModuleName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.84 NAME 'sasClientModuleEntryPointName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.36 NAME 'sASLoginMethodContainerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'SAS:Login Method Container DN' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.37 NAME 'sASLoginPolicyDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'SAS:Login Policy DN' X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.63 NAME 'sasPostLoginMethodContainerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.38 NAME 'rADIUSActiveConnections' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'RADIUS:Active Connections' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.39 NAME 'rADIUSAgedInterval' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Aged Interval' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.40 NAME 'rADIUSAttributeList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'RADIUS:Attribute List' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.41 NAME 'rADIUSAttributeLists' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'RADIUS:Attribute Lists' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.42 NAME 'rADIUSClient' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'RADIUS:Client' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.43 NAME 'rADIUSCommonNameResolution' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Common Name Resolution' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.44 NAME 'rADIUSConcurrentLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Concurrent Limit' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.45 NAME 'rADIUSConnectionHistory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'RADIUS:Connection History' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.46 NAME 'rADIUSDASVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE 
X-NDS_NAME 'RADIUS:DAS Version' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.47 NAME 'rADIUSDefaultProfile' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NAME 'RADIUS:Default Profile' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.48 NAME 'rADIUSDialAccessGroup' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'RADIUS:Dial Access Group' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.49 NAME 'rADIUSEnableCommonNameLogin' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'RADIUS:Enable Common Name Login' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.50 NAME 'rADIUSEnableDialAccess' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NAME 'RADIUS:Enable Dial Access' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.51 NAME 'rADIUSInterimAcctingTimeout' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Interim Accting Timeout' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.52 NAME 'rADIUSLookupContexts' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'RADIUS:Lookup Contexts' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.53 NAME 'rADIUSMaxDASHistoryRecord' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Max DAS History Record' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.54 NAME 'rADIUSMaximumHistoryRecord' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Maximum History Record' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.55 NAME 'rADIUSPassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'RADIUS:Password' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.56 NAME 'rADIUSPasswordPolicy' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'RADIUS:Password Policy' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.57 NAME 'rADIUSPrivateKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'RADIUS:Private Key' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.58 NAME 'rADIUSProxyContext' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'RADIUS:Proxy Context' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.59 NAME 'rADIUSProxyDomain' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'RADIUS:Proxy Domain' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.60 NAME 'rADIUSProxyTarget' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'RADIUS:Proxy Target' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.61 NAME 'rADIUSPublicKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'RADIUS:Public Key' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.62 NAME 'rADIUSServiceList' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_NAME 'RADIUS:Service List' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.3 NAME 'sASLoginSecret' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'SAS:Login Secret' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.4 NAME 'sASLoginSecretKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'SAS:Login Secret Key' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.5 NAME 'sASEncryptionType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 
SINGLE-VALUE X-NDS_NAME 'SAS:Encryption Type' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.6 NAME 'sASLoginConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'SAS:Login Configuration' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.7 NAME 'sASLoginConfigurationKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'SAS:Login Configuration Key' X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.73 NAME 'sasDefaultLoginSequence' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.64 NAME 'sasAuthorizedLoginSequences' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.69 NAME 'sasAllowableSubjectNames' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.71 NAME 'sasLoginFailureDelay' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.72 NAME 'sasMethodVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1010 NAME 'sasUpdateLoginInfo' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1011 NAME 'sasOTPEnabled' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1012 NAME 'sasOTPCounter' SYNTAX 2.16.840.1.113719.1.1.5.1.22 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1013 NAME 'sasOTPLookAheadWindow' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1014 NAME 'sasOTPDigits' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1015 NAME 'sasOTPReSync' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.39.42.1.0.1016 NAME 'sasUpdateLoginTimeInterval' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.6.4.1 NAME 'snmpGroupDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.6.4.2 NAME 'snmpServerList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.6.4.3 NAME 'snmpTrapConfig' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.6.4.4 NAME 'snmpTrapDescription' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.6.4.5 NAME 'snmpTrapInterval' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.6.4.6 NAME 'snmpTrapDisable' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.528 NAME 'ndapPartitionPasswordMgmt' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.529 NAME 'ndapClassPasswordMgmt' SYNTAX 2.16.840.1.113719.1.1.5.1.0 X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.530 NAME 'ndapPasswordMgmt' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.537 NAME 'ndapPartitionLoginMgmt' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.538 NAME 'ndapClassLoginMgmt' SYNTAX 2.16.840.1.113719.1.1.5.1.0 X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.539 NAME 
'ndapLoginMgmt' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.1 NAME 'nspmPasswordKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.43.4.2 NAME 'nspmPassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.43.4.3 NAME 'nspmDistributionPassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.43.4.4 NAME 'nspmPasswordHistory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.43.4.5 NAME 'nspmAdministratorChangeCount' SYNTAX 2.16.840.1.113719.1.1.5.1.22 SINGLE-VALUE USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.43.4.6 NAME 'nspmPasswordPolicyDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.7 NAME 'nspmPreviousDistributionPassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.39.43.4.8 NAME 'nspmDoNotExpirePassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 1.3.6.1.4.1.42.2.27.8.1.16 NAME 'pwdChangedTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 1.3.6.1.4.1.42.2.27.8.1.17 NAME 'pwdAccountLockedTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )", + "( 1.3.6.1.4.1.42.2.27.8.1.19 NAME 'pwdFailureTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 NO-USER-MODIFICATION USAGE directoryOperation )", + "( 2.16.840.1.113719.1.39.43.4.100 NAME 'nspmConfigurationOptions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.102 NAME 'nspmChangePasswordMessage' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.103 NAME 'nspmPasswordHistoryLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.104 NAME 'nspmPasswordHistoryExpiration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 1.3.6.1.4.1.42.2.27.8.1.4 NAME 'pwdInHistory' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.105 NAME 'nspmMinPasswordLifetime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.106 NAME 'nspmAdminsDoNotExpirePassword' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.107 NAME 'nspmPasswordACL' SYNTAX 2.16.840.1.113719.1.1.5.1.17 )", + "( 2.16.840.1.113719.1.39.43.4.200 NAME 'nspmMaximumLength' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.201 NAME 'nspmMinUpperCaseCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.202 NAME 'nspmMaxUpperCaseCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.203 NAME 
'nspmMinLowerCaseCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.204 NAME 'nspmMaxLowerCaseCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.205 NAME 'nspmNumericCharactersAllowed' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.206 NAME 'nspmNumericAsFirstCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.207 NAME 'nspmNumericAsLastCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.208 NAME 'nspmMinNumericCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.209 NAME 'nspmMaxNumericCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.210 NAME 'nspmSpecialCharactersAllowed' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.211 NAME 'nspmSpecialAsFirstCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.212 NAME 'nspmSpecialAsLastCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.213 NAME 'nspmMinSpecialCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.214 NAME 'nspmMaxSpecialCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.215 NAME 'nspmMaxRepeatedCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.216 NAME 'nspmMaxConsecutiveCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.217 NAME 'nspmMinUniqueCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.218 NAME 'nspmDisallowedAttributeValues' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.219 NAME 'nspmExcludeList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.220 NAME 'nspmCaseSensitive' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.221 NAME 'nspmPolicyPrecedence' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.222 NAME 'nspmExtendedCharactersAllowed' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.223 NAME 'nspmExtendedAsFirstCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.224 NAME 'nspmExtendedAsLastCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.225 NAME 'nspmMinExtendedCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 
SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.226 NAME 'nspmMaxExtendedCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.227 NAME 'nspmUpperAsFirstCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.228 NAME 'nspmUpperAsLastCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.229 NAME 'nspmLowerAsFirstCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.230 NAME 'nspmLowerAsLastCharacter' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.231 NAME 'nspmComplexityRules' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.233 NAME 'nspmAD2K8Syntax' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.234 NAME 'nspmAD2K8maxViolation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.235 NAME 'nspmXCharLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.236 NAME 'nspmXCharHistoryLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.237 NAME 'nspmUnicodeAllowed' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.238 NAME 'nspmNonAlphaCharactersAllowed' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.239 NAME 'nspmMinNonAlphaCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.240 NAME 'nspmMaxNonAlphaCharacters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.241 NAME 'nspmGraceLoginHistoryLimit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.300 NAME 'nspmPolicyAgentContainerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.301 NAME 'nspmPolicyAgentNetWare' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.302 NAME 'nspmPolicyAgentWINNT' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.303 NAME 'nspmPolicyAgentSolaris' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.304 NAME 'nspmPolicyAgentLinux' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.305 NAME 'nspmPolicyAgentAIX' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.43.4.306 NAME 'nspmPolicyAgentHPUX' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 
SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 0.9.2342.19200300.100.1.55 NAME 'audio' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113730.3.1.1 NAME 'carLicense' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113730.3.1.241 NAME 'displayName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 1.3.6.1.4.1.250.1.57 NAME 'labeledUri' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 0.9.2342.19200300.100.1.7 NAME 'ldapPhoto' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE )", + "( 0.9.2342.19200300.100.1.21 NAME 'secretary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113719.1.12.4.1.0 NAME 'auditAEncryptionKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'Audit:A Encryption Key' )", + "( 2.16.840.1.113719.1.12.4.2.0 NAME 'auditBEncryptionKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'Audit:B Encryption Key' )", + "( 2.16.840.1.113719.1.12.4.3.0 NAME 'auditContents' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Audit:Contents' )", + "( 2.16.840.1.113719.1.12.4.4.0 NAME 'auditType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'Audit:Type' )", + "( 2.16.840.1.113719.1.12.4.5.0 NAME 'auditCurrentEncryptionKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'Audit:Current Encryption Key' )", + "( 2.16.840.1.113719.1.12.4.6.0 NAME 'auditFileLink' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'Audit:File Link' )", + "( 2.16.840.1.113719.1.12.4.7.0 NAME 'auditLinkList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NAME 'Audit:Link List' )", + "( 2.16.840.1.113719.1.12.4.8.0 NAME 'auditPath' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} SINGLE-VALUE X-NDS_NAME 'Audit:Path' )", + "( 2.16.840.1.113719.1.12.4.9.0 NAME 'auditPolicy' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_NAME 'Audit:Policy' )", + "( 2.16.840.1.113719.1.38.4.1.1 NAME 'wANMANWANPolicy' SYNTAX 2.16.840.1.113719.1.1.5.1.13{64512} X-NDS_NAME 'WANMAN:WAN Policy' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.38.4.1.2 NAME 'wANMANLANAreaMembership' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NAME 'WANMAN:LAN Area Membership' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.38.4.1.3 NAME 'wANMANCost' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_NAME 'WANMAN:Cost' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.38.4.1.4 NAME 'wANMANDefaultCost' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NAME 'WANMAN:Default Cost' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.135.4.30 NAME 'rbsAssignedRoles' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 2.16.840.1.113719.1.135.4.31 NAME 'rbsContent' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 2.16.840.1.113719.1.135.4.32 NAME 'rbsContentMembership' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 2.16.840.1.113719.1.135.4.33 NAME 'rbsEntryPoint' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.135.4.34 NAME 'rbsMember' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 
2.16.840.1.113719.1.135.4.35 NAME 'rbsOwnedCollections' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.135.4.36 NAME 'rbsPath' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 2.16.840.1.113719.1.135.4.37 NAME 'rbsParameters' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} )", + "( 2.16.840.1.113719.1.135.4.38 NAME 'rbsTaskRights' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113719.1.135.4.39 NAME 'rbsTrusteeOf' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 2.16.840.1.113719.1.135.4.40 NAME 'rbsType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} SINGLE-VALUE X-NDS_LOWER_BOUND '1' X-NDS_UPPER_BOUND '256' )", + "( 2.16.840.1.113719.1.135.4.41 NAME 'rbsURL' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.135.4.42 NAME 'rbsTaskTemplates' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113719.1.135.4.43 NAME 'rbsTaskTemplatesURL' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.135.4.44 NAME 'rbsGALabel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.135.4.45 NAME 'rbsPageMembership' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} )", + "( 2.16.840.1.113719.1.135.4.46 NAME 'rbsTargetObjectType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.135.4.47 NAME 'rbsContext' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.135.4.48 NAME 'rbsXMLInfo' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.135.4.51 NAME 'rbsAssignedRoles2' SYNTAX 2.16.840.1.113719.1.1.5.1.25 )", + "( 2.16.840.1.113719.1.135.4.52 NAME 'rbsOwnedCollections2' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.1.4.1.540 NAME 'prSyncPolicyDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.1.4.1.541 NAME 'prSyncAttributes' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_SERVER_READ '1' )", + "( 2.16.840.1.113719.1.1.4.1.542 NAME 'dsEncryptedReplicationConfig' SYNTAX 2.16.840.1.113719.1.1.5.1.19 )", + "( 2.16.840.1.113719.1.1.4.1.543 NAME 'encryptionPolicyDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.544 NAME 'attrEncryptionRequiresSecure' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.545 NAME 'attrEncryptionDefinition' SYNTAX 2.16.840.1.113719.1.1.5.1.6{64512} X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.48.4.1.16 NAME 'ndspkiCRLFileName' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.17 NAME 'ndspkiStatus' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.18 NAME 'ndspkiIssueTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.19 NAME 'ndspkiNextIssueTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.20 NAME 'ndspkiAttemptTime' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.21 NAME 'ndspkiTimeInterval' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.22 NAME 'ndspkiCRLMaxProcessingInterval' SYNTAX 2.16.840.1.113719.1.1.5.1.27 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.23 NAME 'ndspkiCRLNumber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 
SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.24 NAME 'ndspkiDistributionPoints' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.25 NAME 'ndspkiCRLProcessData' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.26 NAME 'ndspkiCRLConfigurationDNList' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.27 NAME 'ndspkiCADN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.28 NAME 'ndspkiCRLContainerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.29 NAME 'ndspkiIssuedCertContainerDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.30 NAME 'ndspkiDistributionPointDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.31 NAME 'ndspkiCRLConfigurationDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.32 NAME 'ndspkiDirectory' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} )", + "( 2.5.4.38 NAME 'authorityRevocationList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE X-NDS_NAME 'ndspkiAuthorityRevocationList' X-NDS_PUBLIC_READ '1' )", + "( 2.5.4.39 NAME 'certificateRevocationList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE X-NDS_NAME 'ndspkiCertificateRevocationList' X-NDS_PUBLIC_READ '1' )", + "( 2.5.4.53 NAME 'deltaRevocationList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE X-NDS_NAME 'ndspkiDeltaRevocationList' X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.36 NAME 'ndspkiTrustedRootList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.37 NAME 'ndspkiSecurityRightsLevel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.48.4.1.38 NAME 'ndspkiKMOExport' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.48.4.1.39 NAME 'ndspkiCRLECConfigurationDNList' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.40 NAME 'ndspkiCRLType' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.41 NAME 'ndspkiCRLExtendValidity' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.42 NAME 'ndspkiDefaultRSAKeySize' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.43 NAME 'ndspkiDefaultECCurve' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.48.4.1.44 NAME 'ndspkiDefaultCertificateLife' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.7.4.1 NAME 'notfSMTPEmailHost' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.7.4.2 NAME 'notfSMTPEmailFrom' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.7.4.3 NAME 'notfSMTPEmailUserName' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.7.4.5 NAME 'notfMergeTemplateData' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE 
X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.7.4.6 NAME 'notfMergeTemplateSubject' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.1 NAME 'nsimRequiredQuestions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.2 NAME 'nsimRandomQuestions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.3 NAME 'nsimNumberRandomQuestions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.4 NAME 'nsimMinResponseLength' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.5 NAME 'nsimMaxResponseLength' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.6 NAME 'nsimForgottenLoginConfig' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.7 NAME 'nsimForgottenAction' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.8 NAME 'nsimAssignments' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.9 NAME 'nsimChallengeSetDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.10 NAME 'nsimChallengeSetGUID' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.11 NAME 'nsimPwdRuleEnforcement' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.39.44.4.12 NAME 'nsimHint' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.39.44.4.13 NAME 'nsimPasswordReminder' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.266.4.4 NAME 'sssProxyStoreKey' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.266.4.5 NAME 'sssProxyStoreSecrets' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} USAGE directoryOperation X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.266.4.6 NAME 'sssActiveServerList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113719.1.266.4.7 NAME 'sssCacheRefreshInterval' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.266.4.8 NAME 'sssAdminList' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.266.4.9 NAME 'sssAdminGALabel' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} SINGLE-VALUE )", + "( 2.16.840.1.113719.1.266.4.10 NAME 'sssEnableReadTimestamps' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.266.4.11 NAME 'sssDisableMasterPasswords' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.266.4.12 NAME 'sssEnableAdminAccess' SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.266.4.13 NAME 'sssReadSecretPolicies' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} )", + "( 2.16.840.1.113719.1.266.4.14 NAME 'sssServerPolicyOverrideDN' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.1.531 NAME 
'eDirCloneSource' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.1.532 NAME 'eDirCloneKeys' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{64512} NO-USER-MODIFICATION USAGE directoryOperation X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' X-NDS_HIDDEN '1' )", + "( 2.16.840.1.113719.1.1.4.1.533 NAME 'eDirCloneLock' SYNTAX 2.16.840.1.113719.1.1.5.1.15{64512} SINGLE-VALUE X-NDS_NOT_SCHED_SYNC_IMMEDIATE '1' )", + "( 2.16.840.1.113719.1.1.4.711 NAME 'groupMember' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )", + "( 2.16.840.1.113719.1.1.4.712 NAME 'nestedConfig' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )", + "( 2.16.840.1.113719.1.1.4.717 NAME 'xdasDSConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.1.4.718 NAME 'xdasConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.1.4.719 NAME 'xdasVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{32768} SINGLE-VALUE X-NDS_UPPER_BOUND '32768' )", + "( 2.16.840.1.113719.1.347.4.79 NAME 'NAuditInstrumentation' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.347.4.2 NAME 'NAuditLoggingServer' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_PUBLIC_READ '1' )", + "( 2.16.840.1.113719.1.1.4.724 NAME 'cefConfiguration' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{64512} )", + "( 2.16.840.1.113719.1.1.4.725 NAME 'cefVersion' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27{32768} SINGLE-VALUE X-NDS_UPPER_BOUND '32768' )" + ], + "createTimestamp": [], + "dITContentRules": [], + "dITStructureRules": [], + "ldapSyntaxes": [ + "( 1.3.6.1.4.1.1466.115.121.1.1 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.2 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.3 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.4 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.5 X-NDS_SYNTAX '21' )", + "( 1.3.6.1.4.1.1466.115.121.1.6 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.7 X-NDS_SYNTAX '7' )", + "( 2.16.840.1.113719.1.1.5.1.6 X-NDS_SYNTAX '6' )", + "( 1.3.6.1.4.1.1466.115.121.1.8 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.9 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.10 X-NDS_SYNTAX '9' )", + "( 2.16.840.1.113719.1.1.5.1.22 X-NDS_SYNTAX '22' )", + "( 1.3.6.1.4.1.1466.115.121.1.11 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.12 X-NDS_SYNTAX '1' )", + "( 1.3.6.1.4.1.1466.115.121.1.13 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.14 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.15 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.16 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.17 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.18 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.19 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.20 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.21 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.22 X-NDS_SYNTAX '11' )", + "( 1.3.6.1.4.1.1466.115.121.1.23 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.24 X-NDS_SYNTAX '24' )", + "( 1.3.6.1.4.1.1466.115.121.1.25 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.26 X-NDS_SYNTAX '2' )", + "( 1.3.6.1.4.1.1466.115.121.1.27 X-NDS_SYNTAX '8' )", + "( 1.3.6.1.4.1.1466.115.121.1.28 X-NDS_SYNTAX '9' )", + "( 1.2.840.113556.1.4.906 X-NDS_SYNTAX '29' )", + "( 1.3.6.1.4.1.1466.115.121.1.54 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.56 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.57 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.29 X-NDS_SYNTAX '9' )", + "( 
1.3.6.1.4.1.1466.115.121.1.30 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.31 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.32 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.33 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.55 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.34 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.35 X-NDS_SYNTAX '3' )", + "( 2.16.840.1.113719.1.1.5.1.19 X-NDS_SYNTAX '19' )", + "( 1.3.6.1.4.1.1466.115.121.1.36 X-NDS_SYNTAX '5' )", + "( 2.16.840.1.113719.1.1.5.1.17 X-NDS_SYNTAX '17' )", + "( 1.3.6.1.4.1.1466.115.121.1.37 X-NDS_SYNTAX '3' )", + "( 2.16.840.1.113719.1.1.5.1.13 X-NDS_SYNTAX '13' )", + "( 1.3.6.1.4.1.1466.115.121.1.40 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.38 X-NDS_SYNTAX '20' )", + "( 1.3.6.1.4.1.1466.115.121.1.39 X-NDS_SYNTAX '3' )", + "( 1.3.6.1.4.1.1466.115.121.1.41 X-NDS_SYNTAX '18' )", + "( 1.3.6.1.4.1.1466.115.121.1.43 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.44 X-NDS_SYNTAX '4' )", + "( 1.3.6.1.4.1.1466.115.121.1.42 X-NDS_SYNTAX '9' )", + "( 2.16.840.1.113719.1.1.5.1.16 X-NDS_SYNTAX '16' )", + "( 1.3.6.1.4.1.1466.115.121.1.58 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.45 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.46 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.47 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.48 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.49 X-NDS_SYNTAX '9' )", + "( 2.16.840.1.113719.1.1.5.1.12 X-NDS_SYNTAX '12' )", + "( 2.16.840.1.113719.1.1.5.1.23 X-NDS_SYNTAX '23' )", + "( 2.16.840.1.113719.1.1.5.1.15 X-NDS_SYNTAX '15' )", + "( 2.16.840.1.113719.1.1.5.1.14 X-NDS_SYNTAX '14' )", + "( 1.3.6.1.4.1.1466.115.121.1.50 X-NDS_SYNTAX '10' )", + "( 1.3.6.1.4.1.1466.115.121.1.51 X-NDS_SYNTAX '9' )", + "( 1.3.6.1.4.1.1466.115.121.1.52 X-NDS_SYNTAX '9' )", + "( 2.16.840.1.113719.1.1.5.1.25 X-NDS_SYNTAX '25' )", + "( 1.3.6.1.4.1.1466.115.121.1.53 X-NDS_SYNTAX '9' )", + "( 2.16.840.1.113719.1.1.5.1.26 X-NDS_SYNTAX '26' )", + "( 2.16.840.1.113719.1.1.5.1.27 X-NDS_SYNTAX '27' )" + ], + "matchingRuleUse": [], + "matchingRules": [], + "modifyTimestamp": [ + "20190831135835Z" + ], + "nameForms": [], + "objectClass": [ + "top", + "subschema" + ], + "objectClasses": [ + "( 2.5.6.0 NAME 'Top' STRUCTURAL MUST objectClass MAY ( cAPublicKey $ cAPrivateKey $ certificateValidityInterval $ authorityRevocation $ lastReferencedTime $ equivalentToMe $ ACL $ backLink $ binderyProperty $ Obituary $ Reference $ revision $ ndsCrossCertificatePair $ certificateRevocation $ usedBy $ GUID $ otherGUID $ DirXML-Associations $ creatorsName $ modifiersName $ objectVersion $ auxClassCompatibility $ unknownBaseClass $ unknownAuxiliaryClass $ masvProposedLabel $ masvDefaultRange $ masvAuthorizedRange $ auditFileLink $ rbsAssignedRoles $ rbsOwnedCollections $ rbsAssignedRoles2 $ rbsOwnedCollections2 ) X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES '16#subtree#[Creator]#[Entry Rights]' )", + "( 1.3.6.1.4.1.42.2.27.1.2.1 NAME 'aliasObject' SUP Top STRUCTURAL MUST aliasedObjectName X-NDS_NAME 'Alias' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.6.2 NAME 'Country' SUP Top STRUCTURAL MUST c MAY ( description $ searchGuide $ sssActiveServerList $ sssServerPolicyOverrideDN ) X-NDS_NAMING 'c' X-NDS_CONTAINMENT ( 'Top' 'treeRoot' 'domain' ) X-NDS_NONREMOVABLE '1' )", + "( 2.5.6.3 NAME 'Locality' SUP Top STRUCTURAL MAY ( description $ l $ seeAlso $ st $ street $ searchGuide $ sssActiveServerList $ sssServerPolicyOverrideDN ) X-NDS_NAMING ( 'l' 'st' ) 
X-NDS_CONTAINMENT ( 'Country' 'organizationalUnit' 'Locality' 'Organization' 'domain' ) X-NDS_NONREMOVABLE '1' )", + "( 2.5.6.4 NAME 'Organization' SUP ( ndsLoginProperties $ ndsContainerLoginProperties ) STRUCTURAL MUST o MAY ( description $ facsimileTelephoneNumber $ l $ loginScript $ eMailAddress $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ printJobConfiguration $ printerControl $ seeAlso $ st $ street $ telephoneNumber $ loginIntruderLimit $ intruderAttemptResetInterval $ detectIntruder $ lockoutAfterDetection $ intruderLockoutResetInterval $ nNSDomain $ mailboxLocation $ mailboxID $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationaliSDNNumber $ businessCategory $ searchGuide $ rADIUSAttributeLists $ rADIUSDefaultProfile $ rADIUSDialAccessGroup $ rADIUSEnableDialAccess $ rADIUSServiceList $ sssActiveServerList $ sssServerPolicyOverrideDN $ userPassword ) X-NDS_NAMING 'o' X-NDS_CONTAINMENT ( 'Top' 'treeRoot' 'Country' 'Locality' 'domain' ) X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '2#entry#[Self]#loginScript' '2#entry#[Self]#printJobConfiguration') )", + "( 2.5.6.5 NAME 'organizationalUnit' SUP ( ndsLoginProperties $ ndsContainerLoginProperties ) STRUCTURAL MUST ou MAY ( description $ facsimileTelephoneNumber $ l $ loginScript $ eMailAddress $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ printJobConfiguration $ printerControl $ seeAlso $ st $ street $ telephoneNumber $ loginIntruderLimit $ intruderAttemptResetInterval $ detectIntruder $ lockoutAfterDetection $ intruderLockoutResetInterval $ nNSDomain $ mailboxLocation $ mailboxID $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationaliSDNNumber $ businessCategory $ searchGuide $ rADIUSAttributeLists $ rADIUSDefaultProfile $ rADIUSDialAccessGroup $ rADIUSEnableDialAccess $ rADIUSServiceList $ sssActiveServerList $ sssServerPolicyOverrideDN $ userPassword ) X-NDS_NAMING 'ou' X-NDS_CONTAINMENT ( 'Locality' 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'Organizational Unit' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '2#entry#[Self]#loginScript' '2#entry#[Self]#printJobConfiguration') )", + "( 2.5.6.8 NAME 'organizationalRole' SUP Top STRUCTURAL MUST cn MAY ( description $ facsimileTelephoneNumber $ l $ eMailAddress $ ou $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ roleOccupant $ seeAlso $ st $ street $ telephoneNumber $ mailboxLocation $ mailboxID $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationaliSDNNumber ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'Organizational Role' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.6.9 NAME ( 'groupOfNames' 'group' 'groupOfUniqueNames' ) SUP Top STRUCTURAL MUST cn MAY ( description $ l $ member $ ou $ o $ owner $ seeAlso $ groupID $ fullName $ eMailAddress $ mailboxLocation $ mailboxID $ Profile $ profileMembership $ loginScript $ businessCategory $ nspmPasswordPolicyDN ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'Group' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.6.6 NAME 'Person' SUP ndsLoginProperties STRUCTURAL MUST ( cn $ sn ) MAY ( description $ seeAlso $ telephoneNumber $ fullName $ givenName $ 
initials $ generationQualifier $ uid $ assistant $ assistantPhone $ city $ st $ company $ co $ directReports $ manager $ mailstop $ mobile $ personalTitle $ pager $ workforceID $ instantMessagingID $ preferredName $ photo $ jobCode $ siteLocation $ employeeStatus $ employeeType $ costCenter $ costCenterDescription $ tollFreePhoneNumber $ otherPhoneNumber $ managerWorkforceID $ roomNumber $ jackNumber $ departmentNumber $ vehicleInformation $ accessCardNumber $ isManager $ userPassword ) X-NDS_NAMING ( 'cn' 'uid' ) X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.5.6.7 NAME 'organizationalPerson' SUP Person STRUCTURAL MAY ( facsimileTelephoneNumber $ l $ eMailAddress $ ou $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ st $ street $ title $ mailboxLocation $ mailboxID $ uid $ mail $ employeeNumber $ destinationIndicator $ internationaliSDNNumber $ preferredDeliveryMethod $ registeredAddress $ teletexTerminalIdentifier $ telexNumber $ x121Address $ businessCategory $ roomNumber $ x500UniqueIdentifier ) X-NDS_NAMING ( 'cn' 'ou' 'uid' ) X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'Organizational Person' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' SUP organizationalPerson STRUCTURAL MAY ( groupMembership $ ndsHomeDirectory $ loginAllowedTimeMap $ loginDisabled $ loginExpirationTime $ loginGraceLimit $ loginGraceRemaining $ loginIntruderAddress $ loginIntruderAttempts $ loginIntruderResetTime $ loginMaximumSimultaneous $ loginScript $ loginTime $ networkAddressRestriction $ networkAddress $ passwordsUsed $ passwordAllowChange $ passwordExpirationInterval $ passwordExpirationTime $ passwordMinimumLength $ passwordRequired $ passwordUniqueRequired $ printJobConfiguration $ privateKey $ Profile $ publicKey $ securityEquals $ accountBalance $ allowUnlimitedCredit $ minimumAccountBalance $ messageServer $ Language $ ndsUID $ lockedByIntruder $ serverHolds $ lastLoginTime $ typeCreatorMap $ higherPrivileges $ printerControl $ securityFlags $ profileMembership $ Timezone $ sASServiceDN $ sASSecretStore $ sASSecretStoreKey $ sASSecretStoreData $ sASPKIStoreKeys $ userCertificate $ nDSPKIUserCertificateInfo $ nDSPKIKeystore $ rADIUSActiveConnections $ rADIUSAttributeLists $ rADIUSConcurrentLimit $ rADIUSConnectionHistory $ rADIUSDefaultProfile $ rADIUSDialAccessGroup $ rADIUSEnableDialAccess $ rADIUSPassword $ rADIUSServiceList $ audio $ businessCategory $ carLicense $ departmentNumber $ employeeNumber $ employeeType $ displayName $ givenName $ homePhone $ homePostalAddress $ initials $ jpegPhoto $ labeledUri $ mail $ manager $ mobile $ o $ pager $ ldapPhoto $ preferredLanguage $ roomNumber $ secretary $ uid $ userSMIMECertificate $ x500UniqueIdentifier $ userPKCS12 $ sssProxyStoreKey $ sssProxyStoreSecrets $ sssServerPolicyOverrideDN ) X-NDS_NAME 'User' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '2#subtree#[Self]#[All Attributes Rights]' '6#entry#[Self]#loginScript' '1#subtree#[Root Template]#[Entry Rights]' '2#entry#[Public]#messageServer' '2#entry#[Root Template]#groupMembership' '6#entry#[Self]#printJobConfiguration' '2#entry#[Root Template]#networkAddress') )", + "( 2.5.6.14 NAME 'Device' SUP Top STRUCTURAL MUST cn MAY ( description $ l $ networkAddress $ ou $ o $ owner $ seeAlso $ serialNumber ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) 
X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.4 NAME 'Computer' SUP Device STRUCTURAL MAY ( operator $ server $ status ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.17 NAME 'Printer' SUP Device STRUCTURAL MAY ( Cartridge $ printerConfiguration $ defaultQueue $ hostDevice $ printServer $ Memory $ networkAddressRestriction $ notify $ operator $ pageDescriptionLanguage $ queue $ status $ supportedTypefaces ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.21 NAME 'Resource' SUP Top ABSTRACT MUST cn MAY ( description $ hostResourceName $ l $ ou $ o $ seeAlso $ Uses ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.20 NAME 'Queue' SUP Resource STRUCTURAL MUST queueDirectory MAY ( Device $ operator $ server $ User $ networkAddress $ Volume $ hostServer ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES '2#subtree#[Root Template]#[All Attributes Rights]' )", + "( 2.16.840.1.113719.1.1.6.1.3 NAME 'binderyQueue' SUP Queue STRUCTURAL MUST binderyType X-NDS_NAMING ( 'cn' 'binderyType' ) X-NDS_NAME 'Bindery Queue' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES '2#subtree#[Root Template]#[All Attributes Rights]' )", + "( 2.16.840.1.113719.1.1.6.1.26 NAME 'Volume' SUP Resource STRUCTURAL MUST hostServer MAY status X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '2#entry#[Root Template]#hostResourceName' '2#entry#[Root Template]#hostServer') )", + "( 2.16.840.1.113719.1.1.6.1.7 NAME 'directoryMap' SUP Resource STRUCTURAL MUST hostServer MAY path X-NDS_NAME 'Directory Map' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.19 NAME 'Profile' SUP Top STRUCTURAL MUST ( cn $ loginScript ) MAY ( description $ l $ ou $ o $ seeAlso $ fullName ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.22 NAME 'Server' SUP Top ABSTRACT MUST cn MAY ( description $ hostDevice $ l $ ou $ o $ privateKey $ publicKey $ Resource $ seeAlso $ status $ User $ Version $ networkAddress $ accountBalance $ allowUnlimitedCredit $ minimumAccountBalance $ fullName $ securityEquals $ securityFlags $ Timezone $ ndapClassPasswordMgmt $ ndapClassLoginMgmt ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '2#entry#[Public]#networkAddress' '16#subtree#[Self]#[Entry Rights]') )", + "( 2.16.840.1.113719.1.1.6.1.10 NAME 'ncpServer' SUP Server STRUCTURAL MAY ( operator $ supportedServices $ messagingServer $ dsRevision $ permanentConfigParms $ ndsPredicateStatsDN $ languageId $ indexDefinition $ CachedAttrsOnExtRefs $ NCPKeyMaterialName $ NDSRightsToMonitor $ ldapServerDN $ httpServerDN $ emboxConfig $ sASServiceDN $ cACertificate $ cAECCertificate $ nDSPKIPublicKey $ nDSPKIPrivateKey $ nDSPKICertificateChain $ nDSPKIParentCADN $ nDSPKISDKeyID $ nDSPKISDKeyStruct $ snmpGroupDN $ wANMANWANPolicy $ wANMANLANAreaMembership $ wANMANCost $ wANMANDefaultCost $ encryptionPolicyDN $ eDirCloneSource $ eDirCloneLock $ xdasDSConfiguration $ xdasConfiguration $ xdasVersion $ NAuditLoggingServer $ NAuditInstrumentation $ cefConfiguration $ cefVersion ) X-NDS_NAME 'NCP Server' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' 
X-NDS_ACL_TEMPLATES '2#entry#[Public]#messagingServer' )", + "( 2.16.840.1.113719.1.1.6.1.18 NAME 'printServer' SUP Server STRUCTURAL MAY ( operator $ printer $ sAPName ) X-NDS_NAME 'Print Server' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES '2#subtree#[Root Template]#[All Attributes Rights]' )", + "( 2.16.840.1.113719.1.1.6.1.31 NAME 'CommExec' SUP Server STRUCTURAL MAY networkAddressRestriction X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.2 NAME 'binderyObject' SUP Top STRUCTURAL MUST ( binderyObjectRestriction $ binderyType $ cn ) X-NDS_NAMING ( 'cn' 'binderyType' ) X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'Bindery Object' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.15 NAME 'Partition' AUXILIARY MAY ( Convergence $ partitionCreationTime $ Replica $ inheritedACL $ lowConvergenceSyncInterval $ receivedUpTo $ synchronizedUpTo $ authorityRevocation $ certificateRevocation $ cAPrivateKey $ cAPublicKey $ ndsCrossCertificatePair $ lowConvergenceResetTime $ highConvergenceSyncInterval $ partitionControl $ replicaUpTo $ partitionStatus $ transitiveVector $ purgeVector $ synchronizationTolerance $ obituaryNotify $ localReceivedUpTo $ federationControl $ syncPanePoint $ syncWindowVector $ EBAPartitionConfiguration $ authoritative $ allowAliasToAncestor $ sASSecurityDN $ masvLabel $ ndapPartitionPasswordMgmt $ ndapPartitionLoginMgmt $ prSyncPolicyDN $ dsEncryptedReplicationConfig ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.0 NAME 'aFPServer' SUP Server STRUCTURAL MAY ( serialNumber $ supportedConnections ) X-NDS_NAME 'AFP Server' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.27 NAME 'messagingServer' SUP Server STRUCTURAL MAY ( messagingDatabaseLocation $ messageRoutingGroup $ Postmaster $ supportedServices $ messagingServerType $ supportedGateway ) X-NDS_NAME 'Messaging Server' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '1#subtree#[Self]#[Entry Rights]' '2#subtree#[Self]#[All Attributes Rights]' '6#entry#[Self]#status' '2#entry#[Public]#messagingServerType' '2#entry#[Public]#messagingDatabaseLocation') )", + "( 2.16.840.1.113719.1.1.6.1.28 NAME 'messageRoutingGroup' SUP groupOfNames STRUCTURAL X-NDS_NAME 'Message Routing Group' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES ( '1#subtree#[Self]#[Entry Rights]' '2#subtree#[Self]#[All Attributes Rights]') )", + "( 2.16.840.1.113719.1.1.6.1.29 NAME 'externalEntity' SUP Top STRUCTURAL MUST cn MAY ( description $ seeAlso $ facsimileTelephoneNumber $ l $ eMailAddress $ ou $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ st $ street $ title $ externalName $ mailboxLocation $ mailboxID ) X-NDS_NAMING ( 'cn' 'ou' ) X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'External Entity' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES '2#entry#[Public]#externalName' )", + "( 2.16.840.1.113719.1.1.6.1.30 NAME 'List' SUP Top STRUCTURAL MUST cn MAY ( description $ l $ member $ ou $ o $ eMailAddress $ mailboxLocation $ mailboxID $ owner $ seeAlso $ fullName ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' X-NDS_ACL_TEMPLATES '2#entry#[Root Template]#member' )", + "( 2.16.840.1.113719.1.1.6.1.32 NAME 'treeRoot' SUP Top STRUCTURAL MUST T MAY ( 
EBATreeConfiguration $ sssActiveServerList ) X-NDS_NAMING 'T' X-NDS_NAME 'Tree Root' X-NDS_NONREMOVABLE '1' )", + "( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP ( Top $ ndsLoginProperties $ ndsContainerLoginProperties ) STRUCTURAL MUST dc MAY ( searchGuide $ o $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ l $ associatedName $ description $ sssActiveServerList $ sssServerPolicyOverrideDN $ userPassword ) X-NDS_NAMING 'dc' X-NDS_CONTAINMENT ( 'Top' 'treeRoot' 'Country' 'Locality' 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NONREMOVABLE '1' )", + "( 1.3.6.1.4.1.1466.344 NAME 'dcObject' AUXILIARY MUST dc X-NDS_NAMING 'dc' X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.33 NAME 'ndsLoginProperties' SUP Top ABSTRACT MAY ( groupMembership $ loginAllowedTimeMap $ loginDisabled $ loginExpirationTime $ loginGraceLimit $ loginGraceRemaining $ loginIntruderAddress $ loginIntruderAttempts $ loginIntruderResetTime $ loginMaximumSimultaneous $ loginScript $ loginTime $ networkAddressRestriction $ networkAddress $ passwordsUsed $ passwordAllowChange $ passwordExpirationInterval $ passwordExpirationTime $ passwordMinimumLength $ passwordRequired $ passwordUniqueRequired $ privateKey $ Profile $ publicKey $ securityEquals $ accountBalance $ allowUnlimitedCredit $ minimumAccountBalance $ Language $ lockedByIntruder $ serverHolds $ lastLoginTime $ higherPrivileges $ securityFlags $ profileMembership $ Timezone $ loginActivationTime $ UTF8LoginScript $ loginScriptCharset $ sASNDSPasswordWindow $ sASLoginSecret $ sASLoginSecretKey $ sASEncryptionType $ sASLoginConfiguration $ sASLoginConfigurationKey $ sasLoginFailureDelay $ sasDefaultLoginSequence $ sasAuthorizedLoginSequences $ sasAllowableSubjectNames $ sasUpdateLoginInfo $ sasOTPEnabled $ sasOTPCounter $ sasOTPDigits $ sasOTPReSync $ sasUpdateLoginTimeInterval $ ndapPasswordMgmt $ ndapLoginMgmt $ nspmPasswordKey $ nspmPassword $ pwdChangedTime $ pwdAccountLockedTime $ pwdFailureTime $ nspmDoNotExpirePassword $ nspmDistributionPassword $ nspmPreviousDistributionPassword $ nspmPasswordHistory $ nspmAdministratorChangeCount $ nspmPasswordPolicyDN $ nsimHint $ nsimPasswordReminder $ userPassword ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.141.6.1 NAME 'federationBoundary' AUXILIARY MUST federationBoundaryType MAY ( federationControl $ federationDNSName $ federationSearchPath ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.34 NAME 'ndsContainerLoginProperties' SUP Top ABSTRACT MAY ( loginIntruderLimit $ intruderAttemptResetInterval $ detectIntruder $ lockoutAfterDetection $ intruderLockoutResetInterval $ sasLoginFailureDelay $ sasDefaultLoginSequence $ sasAuthorizedLoginSequences $ sasUpdateLoginInfo $ sasOTPEnabled $ sasOTPDigits $ sasUpdateLoginTimeInterval $ ndapPasswordMgmt $ ndapLoginMgmt $ nspmPasswordPolicyDN ) X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.27.6.3 NAME 'ndsPredicateStats' SUP Top STRUCTURAL MUST ( cn $ ndsPredicateState $ ndsPredicateFlush ) MAY ( ndsPredicate $ ndsPredicateTimeout $ ndsPredicateUseValues ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NOT_CONTAINER '1' 
X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.400.1 NAME 'edirSchemaVersion' SUP Top ABSTRACT MAY edirSchemaFlagVersion X-NDS_NOT_CONTAINER '1' X-NDS_NONREMOVABLE '1' )", + "( 2.16.840.1.113719.1.1.6.1.47 NAME 'immediateSuperiorReference' AUXILIARY MAY ref X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.27.6.1 NAME 'ldapServer' SUP Top STRUCTURAL MUST cn MAY ( ldapHostServer $ ldapGroupDN $ ldapTraceLevel $ ldapServerBindLimit $ ldapServerIdleTimeout $ lDAPUDPPort $ lDAPSearchSizeLimit $ lDAPSearchTimeLimit $ lDAPLogLevel $ lDAPLogFilename $ lDAPBackupLogFilename $ lDAPLogSizeLimit $ Version $ searchSizeLimit $ searchTimeLimit $ ldapEnableTCP $ ldapTCPPort $ ldapEnableSSL $ ldapSSLPort $ ldapKeyMaterialName $ filteredReplicaUsage $ extensionInfo $ nonStdClientSchemaCompatMode $ sslEnableMutualAuthentication $ ldapEnablePSearch $ ldapMaximumPSearchOperations $ ldapIgnorePSearchLimitsForEvents $ ldapTLSTrustedRootContainer $ ldapEnableMonitorEvents $ ldapMaximumMonitorEventsLoad $ ldapTLSRequired $ ldapTLSVerifyClientCertificate $ ldapConfigVersion $ ldapDerefAlias $ ldapNonStdAllUserAttrsMode $ ldapBindRestrictions $ ldapDefaultReferralBehavior $ ldapReferral $ ldapSearchReferralUsage $ lDAPOtherReferralUsage $ ldapLBURPNumWriterThreads $ ldapInterfaces $ ldapChainSecureRequired $ ldapStdCompliance $ ldapDerefAliasOnAuth $ ldapGeneralizedTime $ ldapPermissiveModify $ ldapSSLConfig ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) X-NDS_NAME 'LDAP Server' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.27.6.2 NAME 'ldapGroup' SUP Top STRUCTURAL MUST cn MAY ( ldapReferral $ ldapServerList $ ldapAllowClearTextPassword $ ldapAnonymousIdentity $ lDAPSuffix $ ldapAttributeMap $ ldapClassMap $ ldapSearchReferralUsage $ lDAPOtherReferralUsage $ transitionGroupDN $ ldapAttributeList $ ldapClassList $ ldapConfigVersion $ Version $ ldapDefaultReferralBehavior $ ldapTransitionBackLink $ ldapSSLConfig $ referralIncludeFilter $ referralExcludeFilter ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) X-NDS_NAME 'LDAP Group' X-NDS_NOT_CONTAINER '1' )", + "( 2.5.6.22 NAME 'pkiCA' AUXILIARY MAY ( cACertificate $ certificateRevocationList $ authorityRevocationList $ crossCertificatePair $ attributeCertificate $ publicKey $ privateKey $ networkAddress $ loginTime $ lastLoginTime $ cAECCertificate $ crossCertificatePairEC ) X-NDS_NOT_CONTAINER '1' )", + "( 2.5.6.21 NAME 'pkiUser' AUXILIARY MAY userCertificate X-NDS_NOT_CONTAINER '1' )", + "( 2.5.6.15 NAME 'strongAuthenticationUser' AUXILIARY MAY userCertificate X-NDS_NOT_CONTAINER '1' )", + "( 2.5.6.11 NAME 'applicationProcess' SUP Top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) )", + "( 2.5.6.12 NAME 'applicationEntity' SUP Top STRUCTURAL MUST ( presentationAddress $ cn ) MAY ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) )", + "( 2.5.6.13 NAME 'dSA' SUP applicationEntity STRUCTURAL MAY knowledgeInformation X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) )", + "( 2.5.6.16 NAME 'certificationAuthority' AUXILIARY MUST ( authorityRevocationList $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair X-NDS_NOT_CONTAINER '1' )", + "( 
2.5.6.18 NAME 'userSecurityInformation' AUXILIARY MAY supportedAlgorithms X-NDS_NOT_CONTAINER '1' )", + "( 2.5.6.20 NAME 'dmd' SUP ndsLoginProperties AUXILIARY MUST dmdName MAY ( searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ l $ description $ userPassword ) X-NDS_NOT_CONTAINER '1' )", + "( 2.5.6.16.2 NAME 'certificationAuthority-V2' AUXILIARY MUST ( authorityRevocationList $ certificateRevocationList $ cACertificate ) MAY ( crossCertificatePair $ deltaRevocationList ) X-NDS_NAME 'certificationAuthorityVer2' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.3.6.1 NAME 'httpServer' SUP Top STRUCTURAL MUST cn MAY ( httpHostServerDN $ httpThreadsPerCPU $ httpIOBufferSize $ httpRequestTimeout $ httpKeepAliveRequestTimeout $ httpSessionTimeout $ httpKeyMaterialObject $ httpTraceLevel $ httpAuthRequiresTLS $ httpDefaultClearPort $ httpDefaultTLSPort $ httpBindRestrictions ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'domain' 'Country' 'Locality' 'organizationalUnit' 'Organization' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.64.6.1.1 NAME 'Template' SUP Top STRUCTURAL MUST cn MAY ( trusteesOfNewObject $ newObjectSDSRights $ newObjectSFSRights $ setupScript $ runSetupScript $ membersOfTemplate $ volumeSpaceRestrictions $ setPasswordAfterCreate $ homeDirectoryRights $ accountBalance $ allowUnlimitedCredit $ description $ eMailAddress $ facsimileTelephoneNumber $ groupMembership $ higherPrivileges $ ndsHomeDirectory $ l $ Language $ loginAllowedTimeMap $ loginDisabled $ loginExpirationTime $ loginGraceLimit $ loginMaximumSimultaneous $ loginScript $ mailboxID $ mailboxLocation $ member $ messageServer $ minimumAccountBalance $ networkAddressRestriction $ newObjectSSelfRights $ ou $ passwordAllowChange $ passwordExpirationInterval $ passwordExpirationTime $ passwordMinimumLength $ passwordRequired $ passwordUniqueRequired $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ Profile $ st $ street $ securityEquals $ securityFlags $ seeAlso $ telephoneNumber $ title $ assistant $ assistantPhone $ city $ company $ co $ manager $ managerWorkforceID $ mailstop $ siteLocation $ employeeType $ costCenter $ costCenterDescription $ tollFreePhoneNumber $ departmentNumber ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'organizationalUnit' 'Organization' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.8.6.1 NAME 'homeInfo' AUXILIARY MAY ( homeCity $ homeEmailAddress $ homeFax $ homePhone $ homeState $ homePostalAddress $ homeZipCode $ personalMobile $ spouse $ children ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.8.6.2 NAME 'contingentWorker' AUXILIARY MAY ( vendorName $ vendorAddress $ vendorPhoneNumber ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.1.6.1.45 NAME 'dynamicGroup' SUP ( groupOfNames $ ndsLoginProperties ) STRUCTURAL MAY ( memberQueryURL $ excludedMember $ dgIdentity $ dgAllowUnknown $ dgTimeOut $ dgAllowDuplicates $ userPassword ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.1.6.1.46 NAME 'dynamicGroupAux' SUP ( groupOfNames $ ndsLoginProperties ) AUXILIARY MAY ( memberQueryURL $ excludedMember $ dgIdentity $ dgAllowUnknown $ dgTimeOut $ dgAllowDuplicates $ userPassword ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.6.1.1 NAME 'sASSecurity' SUP Top STRUCTURAL 
MUST cn MAY ( nDSPKITreeCADN $ masvPolicyDN $ sASLoginPolicyDN $ sASLoginMethodContainerDN $ sasPostLoginMethodContainerDN $ nspmPolicyAgentContainerDN ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Top' 'treeRoot' 'Country' 'Organization' 'domain' ) X-NDS_NAME 'SAS:Security' )", + "( 2.16.840.1.113719.1.39.6.1.2 NAME 'sASService' SUP Resource STRUCTURAL MAY ( hostServer $ privateKey $ publicKey $ allowUnlimitedCredit $ fullName $ lastLoginTime $ lockedByIntruder $ loginAllowedTimeMap $ loginDisabled $ loginExpirationTime $ loginIntruderAddress $ loginIntruderAttempts $ loginIntruderResetTime $ loginMaximumSimultaneous $ loginTime $ networkAddress $ networkAddressRestriction $ notify $ operator $ owner $ path $ securityEquals $ securityFlags $ status $ Version $ nDSPKIKeyMaterialDN $ ndspkiKMOExport ) X-NDS_NAMING 'cn' X-NDS_NAME 'SAS:Service' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.48.6.1.1 NAME 'nDSPKICertificateAuthority' SUP Top STRUCTURAL MUST cn MAY ( hostServer $ nDSPKIPublicKey $ nDSPKIPrivateKey $ nDSPKIPublicKeyCertificate $ nDSPKICertificateChain $ nDSPKICertificateChainEC $ nDSPKIParentCA $ nDSPKIParentCADN $ nDSPKISubjectName $ nDSPKIPublicKeyEC $ nDSPKIPrivateKeyEC $ nDSPKIPublicKeyCertificateEC $ crossCertificatePairEC $ nDSPKISuiteBMode $ cACertificate $ cAECCertificate $ ndspkiCRLContainerDN $ ndspkiIssuedCertContainerDN $ ndspkiCRLConfigurationDNList $ ndspkiCRLECConfigurationDNList $ ndspkiSecurityRightsLevel $ ndspkiDefaultRSAKeySize $ ndspkiDefaultECCurve $ ndspkiDefaultCertificateLife ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASSecurity' X-NDS_NAME 'NDSPKI:Certificate Authority' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.48.6.1.2 NAME 'nDSPKIKeyMaterial' SUP Top STRUCTURAL MUST cn MAY ( hostServer $ nDSPKIKeyFile $ nDSPKIPrivateKey $ nDSPKIPublicKey $ nDSPKIPublicKeyCertificate $ nDSPKICertificateChain $ nDSPKISubjectName $ nDSPKIGivenName $ ndspkiAdditionalRoots $ nDSPKINotBefore $ nDSPKINotAfter ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'sASSecurity' 'Organization' 'organizationalUnit' 'domain' ) X-NDS_NAME 'NDSPKI:Key Material' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.48.6.1.3 NAME 'nDSPKITrustedRoot' SUP Top STRUCTURAL MUST cn MAY ndspkiTrustedRootList X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'sASSecurity' 'Organization' 'organizationalUnit' 'Country' 'Locality' 'domain' ) X-NDS_NAME 'NDSPKI:Trusted Root' )", + "( 2.16.840.1.113719.1.48.6.1.4 NAME 'nDSPKITrustedRootObject' SUP Top STRUCTURAL MUST ( cn $ nDSPKITrustedRootCertificate ) MAY ( nDSPKISubjectName $ nDSPKINotBefore $ nDSPKINotAfter $ externalName $ givenName $ sn ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'nDSPKITrustedRoot' X-NDS_NAME 'NDSPKI:Trusted Root Object' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.48.6.1.101 NAME 'nDSPKISDKeyAccessPartition' SUP Top STRUCTURAL MUST cn X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASSecurity' X-NDS_NAME 'NDSPKI:SD Key Access Partition' )", + "( 2.16.840.1.113719.1.48.6.1.102 NAME 'nDSPKISDKeyList' SUP Top STRUCTURAL MUST cn MAY ( nDSPKISDKeyServerDN $ nDSPKISDKeyStruct $ nDSPKISDKeyCert ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'nDSPKISDKeyAccessPartition' X-NDS_NAME 'NDSPKI:SD Key List' )", + "( 2.16.840.1.113719.1.31.6.2.1 NAME 'mASVSecurityPolicy' SUP Top STRUCTURAL MUST cn MAY ( description $ masvDomainPolicy $ masvPolicyUpdate $ masvClearanceNames $ masvLabelNames $ masvLabelSecrecyLevelNames $ masvLabelSecrecyCategoryNames $ masvLabelIntegrityLevelNames $ masvLabelIntegrityCategoryNames $ masvNDSAttributeLabels ) X-NDS_NAMING 'cn' 
X-NDS_CONTAINMENT 'sASSecurity' X-NDS_NAME 'MASV:Security Policy' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.42.2.0.1 NAME 'sASLoginMethodContainer' SUP Top STRUCTURAL MUST cn MAY description X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'sASSecurity' 'Country' 'Locality' 'organizationalUnit' 'Organization' ) X-NDS_NAME 'SAS:Login Method Container' )", + "( 2.16.840.1.113719.1.39.42.2.0.4 NAME 'sASLoginPolicy' SUP Top STRUCTURAL MUST cn MAY ( description $ privateKey $ publicKey $ sASAllowNDSPasswordWindow $ sASPolicyCredentials $ sASPolicyMethods $ sASPolicyObjectVersion $ sASPolicyServiceSubtypes $ sASPolicyServices $ sASPolicyUsers $ sASLoginSequence $ sASLoginPolicyUpdate $ sasNMASProductOptions $ sasPolicyMethods $ sasPolicyServices $ sasPolicyUsers $ sasAllowNDSPasswordWindow $ sasLoginFailureDelay $ sasDefaultLoginSequence $ sasAuthorizedLoginSequences $ sasAuditConfiguration $ sasUpdateLoginInfo $ sasOTPEnabled $ sasOTPLookAheadWindow $ sasOTPDigits $ sasUpdateLoginTimeInterval $ nspmPasswordPolicyDN ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASSecurity' X-NDS_NAME 'SAS:Login Policy' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.42.2.0.7 NAME 'sASNMASBaseLoginMethod' SUP Top ABSTRACT MUST cn MAY ( description $ sASLoginSecret $ sASLoginSecretKey $ sASEncryptionType $ sASLoginConfiguration $ sASLoginConfigurationKey $ sASMethodIdentifier $ sASMethodVendor $ sASVendorSupport $ sASAdvisoryMethodGrade $ sASLoginClientMethodNetWare $ sASLoginServerMethodNetWare $ sASLoginClientMethodWINNT $ sASLoginServerMethodWINNT $ sasCertificateSearchContainers $ sasNMASMethodConfigData $ sasMethodVersion $ sASLoginPolicyUpdate $ sasUnsignedMethodModules $ sasServerModuleName $ sasServerModuleEntryPointName $ sasSASLMechanismName $ sasSASLMechanismEntryPointName $ sasClientModuleName $ sasClientModuleEntryPointName $ sasLoginClientMethodSolaris $ sasLoginServerMethodSolaris $ sasLoginClientMethodLinux $ sasLoginServerMethodLinux $ sasLoginClientMethodTru64 $ sasLoginServerMethodTru64 $ sasLoginClientMethodAIX $ sasLoginServerMethodAIX $ sasLoginClientMethodHPUX $ sasLoginServerMethodHPUX $ sasLoginClientMethods390 $ sasLoginServerMethods390 $ sasLoginClientMethodLinuxX64 $ sasLoginServerMethodLinuxX64 $ sasLoginClientMethodWinX64 $ sasLoginServerMethodWinX64 $ sasLoginClientMethodSolaris64 $ sasLoginServerMethodSolaris64 $ sasLoginClientMethodSolarisi386 $ sasLoginServerMethodSolarisi386 $ sasLoginClientMethodAIX64 $ sasLoginServerMethodAIX64 ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASLoginMethodContainer' X-NDS_NAME 'SAS:NMAS Base Login Method' )", + "( 2.16.840.1.113719.1.39.42.2.0.8 NAME 'sASNMASLoginMethod' SUP sASNMASBaseLoginMethod STRUCTURAL X-NDS_NAME 'SAS:NMAS Login Method' )", + "( 2.16.840.1.113719.1.39.42.2.0.9 NAME 'rADIUSDialAccessSystem' SUP Top STRUCTURAL MUST cn MAY ( publicKey $ privateKey $ rADIUSAgedInterval $ rADIUSClient $ rADIUSCommonNameResolution $ rADIUSConcurrentLimit $ rADIUSDASVersion $ rADIUSEnableCommonNameLogin $ rADIUSEnableDialAccess $ rADIUSInterimAcctingTimeout $ rADIUSLookupContexts $ rADIUSMaxDASHistoryRecord $ rADIUSMaximumHistoryRecord $ rADIUSPasswordPolicy $ rADIUSPrivateKey $ rADIUSProxyContext $ rADIUSProxyDomain $ rADIUSProxyTarget $ rADIUSPublicKey $ sASLoginConfiguration $ sASLoginConfigurationKey ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' ) X-NDS_NAME 'RADIUS:Dial Access System' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.42.2.0.10 NAME 'rADIUSProfile' SUP Top STRUCTURAL MUST 
cn MAY rADIUSAttributeList X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' ) X-NDS_NAME 'RADIUS:Profile' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.42.2.0.11 NAME 'sasPostLoginMethodContainer' SUP Top STRUCTURAL MUST cn MAY description X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASSecurity' )", + "( 2.16.840.1.113719.1.39.42.2.0.12 NAME 'sasPostLoginMethod' SUP Top STRUCTURAL MUST cn MAY ( description $ sASLoginSecret $ sASLoginSecretKey $ sASEncryptionType $ sASLoginConfiguration $ sASLoginConfigurationKey $ sASMethodIdentifier $ sASMethodVendor $ sASVendorSupport $ sASAdvisoryMethodGrade $ sASLoginClientMethodNetWare $ sASLoginServerMethodNetWare $ sASLoginClientMethodWINNT $ sASLoginServerMethodWINNT $ sasMethodVersion $ sASLoginPolicyUpdate $ sasUnsignedMethodModules $ sasServerModuleName $ sasServerModuleEntryPointName $ sasSASLMechanismName $ sasSASLMechanismEntryPointName $ sasClientModuleName $ sasClientModuleEntryPointName $ sasLoginClientMethodSolaris $ sasLoginServerMethodSolaris $ sasLoginClientMethodLinux $ sasLoginServerMethodLinux $ sasLoginClientMethodTru64 $ sasLoginServerMethodTru64 $ sasLoginClientMethodAIX $ sasLoginServerMethodAIX $ sasLoginClientMethodHPUX $ sasLoginServerMethodHPUX $ sasLoginClientMethods390 $ sasLoginServerMethods390 $ sasLoginClientMethodLinuxX64 $ sasLoginServerMethodLinuxX64 $ sasLoginClientMethodWinX64 $ sasLoginServerMethodWinX64 $ sasLoginClientMethodSolaris64 $ sasLoginServerMethodSolaris64 $ sasLoginClientMethodSolarisi386 $ sasLoginServerMethodSolarisi386 $ sasLoginClientMethodAIX64 $ sasLoginServerMethodAIX64 ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sasPostLoginMethodContainer' )", + "( 2.16.840.1.113719.1.6.6.1 NAME 'snmpGroup' SUP Top STRUCTURAL MUST cn MAY ( Version $ snmpServerList $ snmpTrapDisable $ snmpTrapInterval $ snmpTrapDescription $ snmpTrapConfig ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'domain' 'organizationalUnit' 'Organization' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.43.6.2 NAME 'nspmPasswordPolicyContainer' SUP Top STRUCTURAL MUST cn MAY description X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'sASSecurity' 'Country' 'domain' 'Locality' 'Organization' 'organizationalUnit' ) )", + "( 2.16.840.1.113719.1.39.43.6.3 NAME 'nspmPolicyAgent' SUP Top STRUCTURAL MUST cn MAY ( description $ nspmPolicyAgentNetWare $ nspmPolicyAgentWINNT $ nspmPolicyAgentSolaris $ nspmPolicyAgentLinux $ nspmPolicyAgentAIX $ nspmPolicyAgentHPUX ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'nspmPasswordPolicyContainer' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.43.6.1 NAME 'nspmPasswordPolicy' SUP Top STRUCTURAL MUST cn MAY ( description $ nspmPolicyPrecedence $ nspmConfigurationOptions $ nspmChangePasswordMessage $ passwordExpirationInterval $ loginGraceLimit $ nspmMinPasswordLifetime $ passwordUniqueRequired $ nspmPasswordHistoryLimit $ nspmPasswordHistoryExpiration $ passwordAllowChange $ passwordRequired $ passwordMinimumLength $ nspmMaximumLength $ nspmCaseSensitive $ nspmMinUpperCaseCharacters $ nspmMaxUpperCaseCharacters $ nspmMinLowerCaseCharacters $ nspmMaxLowerCaseCharacters $ nspmNumericCharactersAllowed $ nspmNumericAsFirstCharacter $ nspmNumericAsLastCharacter $ nspmMinNumericCharacters $ nspmMaxNumericCharacters $ nspmSpecialCharactersAllowed $ nspmSpecialAsFirstCharacter $ nspmSpecialAsLastCharacter $ nspmMinSpecialCharacters $ nspmMaxSpecialCharacters $ nspmMaxRepeatedCharacters $ nspmMaxConsecutiveCharacters $ nspmMinUniqueCharacters $ 
nspmDisallowedAttributeValues $ nspmExcludeList $ nspmExtendedCharactersAllowed $ nspmExtendedAsFirstCharacter $ nspmExtendedAsLastCharacter $ nspmMinExtendedCharacters $ nspmMaxExtendedCharacters $ nspmUpperAsFirstCharacter $ nspmUpperAsLastCharacter $ nspmLowerAsFirstCharacter $ nspmLowerAsLastCharacter $ nspmComplexityRules $ nspmAD2K8Syntax $ nspmAD2K8maxViolation $ nspmXCharLimit $ nspmXCharHistoryLimit $ nspmUnicodeAllowed $ nspmNonAlphaCharactersAllowed $ nspmMinNonAlphaCharacters $ nspmMaxNonAlphaCharacters $ pwdInHistory $ nspmAdminsDoNotExpirePassword $ nspmPasswordACL $ nsimChallengeSetDN $ nsimForgottenAction $ nsimForgottenLoginConfig $ nsimAssignments $ nsimChallengeSetGUID $ nsimPwdRuleEnforcement ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'nspmPasswordPolicyContainer' 'domain' 'Locality' 'Organization' 'organizationalUnit' 'Country' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.43.6.4 NAME 'nspmPasswordAux' AUXILIARY MAY ( publicKey $ privateKey $ loginGraceLimit $ loginGraceRemaining $ passwordExpirationTime $ passwordRequired $ nspmPasswordKey $ nspmPassword $ nspmDistributionPassword $ nspmPreviousDistributionPassword $ nspmPasswordHistory $ nspmAdministratorChangeCount $ nspmPasswordPolicyDN $ pwdChangedTime $ pwdAccountLockedTime $ pwdFailureTime $ nspmDoNotExpirePassword ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.12.6.1.0 NAME 'auditFileObject' SUP Top STRUCTURAL MUST ( cn $ auditPolicy $ auditContents ) MAY ( description $ auditPath $ auditLinkList $ auditType $ auditCurrentEncryptionKey $ auditAEncryptionKey $ auditBEncryptionKey ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Top' 'Country' 'Locality' 'Organization' 'organizationalUnit' 'treeRoot' 'domain' ) X-NDS_NAME 'Audit:File Object' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.38.6.1.4 NAME 'wANMANLANArea' SUP Top STRUCTURAL MUST cn MAY ( description $ l $ member $ o $ ou $ owner $ seeAlso $ wANMANWANPolicy $ wANMANCost $ wANMANDefaultCost ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'Organization' 'organizationalUnit' ) X-NDS_NAME 'WANMAN:LAN Area' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.37.1 NAME 'rbsCollection' SUP Top STRUCTURAL MUST cn MAY ( owner $ description $ rbsXMLInfo ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) )", + "( 2.16.840.1.113719.1.135.6.30.1 NAME 'rbsExternalScope' SUP Top ABSTRACT MUST cn MAY ( rbsURL $ description $ rbsXMLInfo ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsCollection' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.31.1 NAME 'rbsModule' SUP Top STRUCTURAL MUST cn MAY ( rbsURL $ rbsPath $ rbsType $ description $ rbsXMLInfo ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsCollection' )", + "( 2.16.840.1.113719.1.135.6.32.1 NAME 'rbsRole' SUP Top STRUCTURAL MUST cn MAY ( rbsContent $ rbsMember $ rbsTrusteeOf $ rbsGALabel $ rbsParameters $ description $ rbsXMLInfo ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsCollection' )", + "( 2.16.840.1.113719.1.135.6.33.1 NAME 'rbsTask' SUP Top STRUCTURAL MUST cn MAY ( rbsContentMembership $ rbsType $ rbsTaskRights $ rbsEntryPoint $ rbsParameters $ rbsTaskTemplates $ rbsTaskTemplatesURL $ description $ rbsXMLInfo ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsModule' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.34.1 NAME 'rbsBook' SUP rbsTask STRUCTURAL MAY ( rbsTargetObjectType $ rbsPageMembership ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.35.1 NAME 'rbsScope' SUP groupOfNames STRUCTURAL MAY ( 
rbsContext $ rbsXMLInfo ) X-NDS_CONTAINMENT 'rbsRole' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.45.1 NAME 'rbsCollection2' SUP Top STRUCTURAL MUST cn MAY ( rbsXMLInfo $ rbsParameters $ owner $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'domain' ) )", + "( 2.16.840.1.113719.1.135.6.38.1 NAME 'rbsExternalScope2' SUP Top ABSTRACT MUST cn MAY ( rbsXMLInfo $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsCollection2' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.39.1 NAME 'rbsModule2' SUP Top STRUCTURAL MUST cn MAY ( rbsXMLInfo $ rbsPath $ rbsType $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsCollection2' )", + "( 2.16.840.1.113719.1.135.6.40.1 NAME 'rbsRole2' SUP Top STRUCTURAL MUST cn MAY ( rbsXMLInfo $ rbsContent $ rbsMember $ rbsTrusteeOf $ rbsParameters $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsCollection2' )", + "( 2.16.840.1.113719.1.135.6.41.1 NAME 'rbsTask2' SUP Top STRUCTURAL MUST cn MAY ( rbsXMLInfo $ rbsContentMembership $ rbsType $ rbsTaskRights $ rbsEntryPoint $ rbsParameters $ description ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'rbsModule2' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.42.1 NAME 'rbsBook2' SUP rbsTask2 STRUCTURAL MAY ( rbsTargetObjectType $ rbsPageMembership ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.135.6.43.1 NAME 'rbsScope2' SUP groupOfNames STRUCTURAL MAY ( rbsContext $ rbsXMLInfo ) X-NDS_CONTAINMENT 'rbsRole2' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.1.6.1.49 NAME 'prSyncPolicy' SUP Top STRUCTURAL MUST cn MAY prSyncAttributes X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'domain' 'Country' 'Locality' 'organizationalUnit' 'Organization' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.1.6.1.50 NAME 'encryptionPolicy' SUP Top STRUCTURAL MUST cn MAY ( attrEncryptionDefinition $ attrEncryptionRequiresSecure ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'domain' 'organizationalUnit' 'Organization' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.48.6.1.5 NAME 'ndspkiContainer' SUP Top STRUCTURAL MUST cn X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'ndspkiContainer' 'sASSecurity' 'Organization' 'organizationalUnit' 'Country' 'Locality' 'nDSPKITrustedRoot' ) )", + "( 2.16.840.1.113719.1.48.6.1.6 NAME 'ndspkiCertificate' SUP Top STRUCTURAL MUST ( cn $ userCertificate ) MAY ( nDSPKISubjectName $ nDSPKINotBefore $ nDSPKINotAfter $ externalName $ givenName $ sn ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'sASSecurity' 'Organization' 'organizationalUnit' 'Country' 'Locality' 'ndspkiContainer' 'nDSPKITrustedRoot' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.48.6.1.7 NAME 'ndspkiCRLConfiguration' SUP Top STRUCTURAL MUST cn MAY ( ndspkiCRLFileName $ ndspkiDirectory $ ndspkiStatus $ ndspkiIssueTime $ ndspkiNextIssueTime $ ndspkiAttemptTime $ ndspkiTimeInterval $ ndspkiCRLMaxProcessingInterval $ ndspkiCRLNumber $ ndspkiDistributionPoints $ ndspkiDistributionPointDN $ ndspkiCADN $ ndspkiCRLProcessData $ nDSPKIPublicKey $ nDSPKIPrivateKey $ nDSPKIPublicKeyCertificate $ nDSPKICertificateChain $ nDSPKIParentCA $ nDSPKIParentCADN $ nDSPKISubjectName $ cACertificate $ hostServer $ ndspkiCRLType $ ndspkiCRLExtendValidity ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'ndspkiContainer' )", + "( 2.5.6.19 NAME 'cRLDistributionPoint' SUP Top STRUCTURAL MUST cn MAY ( authorityRevocationList $ authorityRevocationList $ cACertificate $ certificateRevocationList $ certificateRevocationList $ crossCertificatePair $ 
deltaRevocationList $ deltaRevocationList $ ndspkiCRLConfigurationDN ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'Country' 'Locality' 'organizationalUnit' 'Organization' 'sASSecurity' 'domain' 'ndspkiCRLConfiguration' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.7.6.1 NAME 'notfTemplateCollection' SUP Top STRUCTURAL MUST cn MAY ( notfSMTPEmailHost $ notfSMTPEmailFrom $ notfSMTPEmailUserName $ sASSecretStore ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASSecurity' )", + "( 2.16.840.1.113719.1.7.6.2 NAME 'notfMergeTemplate' SUP Top STRUCTURAL MUST cn MAY ( notfMergeTemplateData $ notfMergeTemplateSubject ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'notfTemplateCollection' X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.39.44.6.1 NAME 'nsimChallengeSet' SUP Top STRUCTURAL MUST cn MAY ( description $ nsimRequiredQuestions $ nsimRandomQuestions $ nsimNumberRandomQuestions $ nsimMinResponseLength $ nsimMaxResponseLength ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'nspmPasswordPolicyContainer' 'Country' 'domain' 'Locality' 'Organization' 'organizationalUnit' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.266.6.1 NAME 'sssServerPolicies' SUP Top STRUCTURAL MUST cn MAY ( sssCacheRefreshInterval $ sssEnableReadTimestamps $ sssDisableMasterPasswords $ sssEnableAdminAccess $ sssAdminList $ sssAdminGALabel $ sssReadSecretPolicies ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT 'sASSecurity' )", + "( 2.16.840.1.113719.1.266.6.2 NAME 'sssServerPolicyOverride' SUP Top STRUCTURAL MUST cn MAY ( sssCacheRefreshInterval $ sssEnableReadTimestamps $ sssDisableMasterPasswords $ sssEnableAdminAccess $ sssAdminList $ sssAdminGALabel $ sssReadSecretPolicies ) X-NDS_NAMING 'cn' X-NDS_CONTAINMENT ( 'sssServerPolicies' 'Organization' 'organizationalUnit' 'Country' 'Locality' 'domain' ) X-NDS_NOT_CONTAINER '1' )", + "( 2.16.840.1.113719.1.1.6.1.91 NAME 'nestedGroupAux' AUXILIARY MAY ( groupMember $ excludedMember $ nestedConfig $ groupMembership ) X-NDS_NOT_CONTAINER '1' )" + ] + }, + "schema_entry": "cn=schema", + "type": "SchemaInfo" +} +""" + +edir_9_1_4_dsa_info = """ +{ + "raw": { + "abandonOps": [ + "0" + ], + "addEntryOps": [ + "0" + ], + "altServer": [], + "bindSecurityErrors": [ + "0" + ], + "chainings": [ + "0" + ], + "compareOps": [ + "0" + ], + "directoryTreeName": [ + "TEST_TREE" + ], + "dsaName": [ + "cn=MYSERVER,o=resources" + ], + "errors": [ + "0" + ], + "extendedOps": [ + "0" + ], + "inBytes": [ + "293" + ], + "inOps": [ + "3" + ], + "listOps": [ + "0" + ], + "modifyEntryOps": [ + "0" + ], + "modifyRDNOps": [ + "0" + ], + "namingContexts": [ + "" + ], + "oneLevelSearchOps": [ + "0" + ], + "outBytes": [ + "14" + ], + "readOps": [ + "1" + ], + "referralsReturned": [ + "0" + ], + "removeEntryOps": [ + "0" + ], + "repUpdatesIn": [ + "0" + ], + "repUpdatesOut": [ + "0" + ], + "searchOps": [ + "1" + ], + "securityErrors": [ + "0" + ], + "simpleAuthBinds": [ + "1" + ], + "strongAuthBinds": [ + "0" + ], + "subschemaSubentry": [ + "cn=schema" + ], + "supportedCapabilities": [], + "supportedControl": [ + "2.16.840.1.113719.1.27.101.6", + "2.16.840.1.113719.1.27.101.5", + "1.2.840.113556.1.4.319", + "2.16.840.1.113730.3.4.3", + "2.16.840.1.113730.3.4.2", + "2.16.840.1.113719.1.27.101.57", + "2.16.840.1.113719.1.27.103.7", + "2.16.840.1.113719.1.27.101.40", + "2.16.840.1.113719.1.27.101.41", + "1.2.840.113556.1.4.1413", + "1.2.840.113556.1.4.805", + "2.16.840.1.113730.3.4.18", + "1.2.840.113556.1.4.529" + ], + "supportedExtension": [ + "2.16.840.1.113719.1.148.100.1", + "2.16.840.1.113719.1.148.100.3", + 
"2.16.840.1.113719.1.148.100.5", + "2.16.840.1.113719.1.148.100.7", + "2.16.840.1.113719.1.148.100.9", + "2.16.840.1.113719.1.148.100.11", + "2.16.840.1.113719.1.148.100.13", + "2.16.840.1.113719.1.148.100.15", + "2.16.840.1.113719.1.148.100.17", + "2.16.840.1.113719.1.39.42.100.1", + "2.16.840.1.113719.1.39.42.100.3", + "2.16.840.1.113719.1.39.42.100.5", + "2.16.840.1.113719.1.39.42.100.7", + "2.16.840.1.113719.1.39.42.100.9", + "2.16.840.1.113719.1.39.42.100.11", + "2.16.840.1.113719.1.39.42.100.13", + "2.16.840.1.113719.1.39.42.100.15", + "2.16.840.1.113719.1.39.42.100.17", + "2.16.840.1.113719.1.39.42.100.19", + "2.16.840.1.113719.1.39.42.100.21", + "2.16.840.1.113719.1.39.42.100.23", + "2.16.840.1.113719.1.39.42.100.25", + "2.16.840.1.113719.1.39.42.100.27", + "2.16.840.1.113719.1.39.42.100.29", + "1.3.6.1.4.1.4203.1.11.1", + "2.16.840.1.113719.1.27.100.1", + "2.16.840.1.113719.1.27.100.3", + "2.16.840.1.113719.1.27.100.5", + "2.16.840.1.113719.1.27.100.7", + "2.16.840.1.113719.1.27.100.11", + "2.16.840.1.113719.1.27.100.13", + "2.16.840.1.113719.1.27.100.15", + "2.16.840.1.113719.1.27.100.17", + "2.16.840.1.113719.1.27.100.19", + "2.16.840.1.113719.1.27.100.21", + "2.16.840.1.113719.1.27.100.23", + "2.16.840.1.113719.1.27.100.25", + "2.16.840.1.113719.1.27.100.27", + "2.16.840.1.113719.1.27.100.29", + "2.16.840.1.113719.1.27.100.31", + "2.16.840.1.113719.1.27.100.33", + "2.16.840.1.113719.1.27.100.35", + "2.16.840.1.113719.1.27.100.37", + "2.16.840.1.113719.1.27.100.39", + "2.16.840.1.113719.1.27.100.41", + "2.16.840.1.113719.1.27.100.96", + "2.16.840.1.113719.1.27.100.98", + "2.16.840.1.113719.1.27.100.101", + "2.16.840.1.113719.1.27.100.103", + "2.16.840.1.113719.1.142.100.1", + "2.16.840.1.113719.1.142.100.4", + "2.16.840.1.113719.1.142.100.6", + "2.16.840.1.113719.1.27.100.9", + "2.16.840.1.113719.1.27.100.43", + "2.16.840.1.113719.1.27.100.45", + "2.16.840.1.113719.1.27.100.47", + "2.16.840.1.113719.1.27.100.49", + "2.16.840.1.113719.1.27.100.51", + "2.16.840.1.113719.1.27.100.53", + "2.16.840.1.113719.1.27.100.55", + "1.3.6.1.4.1.1466.20037", + "2.16.840.1.113719.1.27.100.79", + "2.16.840.1.113719.1.27.100.84", + "2.16.840.1.113719.1.27.103.1", + "2.16.840.1.113719.1.27.103.2" + ], + "supportedFeatures": [ + "1.3.6.1.4.1.4203.1.5.1", + "2.16.840.1.113719.1.27.99.1" + ], + "supportedGroupingTypes": [ + "2.16.840.1.113719.1.27.103.8" + ], + "supportedLDAPVersion": [ + "2", + "3" + ], + "supportedSASLMechanisms": [ + "NMAS_LOGIN" + ], + "unAuthBinds": [ + "0" + ], + "vendorName": [ + "NetIQ Corporation" + ], + "vendorVersion": [ + "LDAP Agent for NetIQ eDirectory 9.1.4 (40105.09)" + ], + "wholeSubtreeSearchOps": [ + "0" + ] + }, + "type": "DsaInfo" +} +""" \ No newline at end of file diff --git a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/slapd24.py b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/slapd24.py index 30e1795..23b86e8 100644 --- a/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/slapd24.py +++ b/server/www/packages/packages-linux/x64/ldap3/protocol/schemas/slapd24.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. 
# diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/asyncStream.py b/server/www/packages/packages-linux/x64/ldap3/strategy/asyncStream.py index 7977d7e..cfd61c8 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/asyncStream.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/asyncStream.py @@ -55,7 +55,11 @@ class AsyncStreamStrategy(AsyncStrategy): self.persistent_search_message_id = None self.streaming = False self.callback = None - self.events = Queue() + if ldap_connection.pool_size: + self.events = Queue(ldap_connection.pool_size) + else: + self.events = Queue() + del self._requests # remove _requests dict from Async Strategy def _start_listen(self): @@ -77,7 +81,6 @@ class AsyncStreamStrategy(AsyncStrategy): if not self._header_added and self.stream.tell() == 0: header = add_ldif_header(['-'])[0] self.stream.write(prepare_for_stream(header + self.line_separator + self.line_separator)) - ldif_lines = persistent_search_response_to_ldif(change) if self.stream and ldif_lines and not self.connection.closed: fragment = self.line_separator.join(ldif_lines) diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/asynchronous.py b/server/www/packages/packages-linux/x64/ldap3/strategy/asynchronous.py index 8ac79ee..f4da2b4 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/asynchronous.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/asynchronous.py @@ -59,7 +59,7 @@ class AsyncStrategy(BaseStrategy): def run(self): """ - Wait for data on socket, compute the length of the message and wait for enough bytes to decode the message + Waits for data on socket, computes the length of the message and waits for enough bytes to decode the message Message are appended to strategy._responses """ unprocessed = b'' diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/base.py b/server/www/packages/packages-linux/x64/ldap3/strategy/base.py index 0506703..03ce56e 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/base.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/base.py @@ -28,7 +28,6 @@ from struct import pack from platform import system from time import sleep from random import choice -from datetime import datetime from .. 
import SYNC, ANONYMOUS, get_config_parameter, BASE, ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, NO_ATTRIBUTES from ..core.results import DO_NOT_RAISE_EXCEPTIONS, RESULT_REFERRAL @@ -457,7 +456,7 @@ class BaseStrategy(object): """ message_type = ldap_message.getComponentByName('protocolOp').getName() component = ldap_message['protocolOp'].getComponent() - controls = ldap_message['controls'] + controls = ldap_message['controls'] if ldap_message['controls'].hasValue() else None if message_type == 'bindResponse': if not bytes(component['matchedDN']).startswith(b'NTLM'): # patch for microsoft ntlm authentication result = bind_response_to_dict(component) @@ -577,12 +576,12 @@ class BaseStrategy(object): return control_type, {'description': Oids.get(control_type, ''), 'criticality': criticality, 'value': control_value} @staticmethod - def decode_control_fast(control): + def decode_control_fast(control, from_server=True): """ decode control, return a 2-element tuple where the first element is the control oid and the second element is a dictionary with description (from Oids), criticality and decoded control value """ - control_type = str(to_unicode(control[0][3], from_server=True)) + control_type = str(to_unicode(control[0][3], from_server=from_server)) criticality = False control_value = None for r in control[1:]: @@ -701,13 +700,17 @@ class BaseStrategy(object): resp['attributes'][attr_type] = list() self.do_next_range_search(request, resp, attr_name) return True - def do_operation_on_referral(self, request, referrals): - if log_enabled(PROTOCOL): - log(PROTOCOL, 'following referral for <%s>', self.connection) + + def create_referral_connection(self, referrals): + referral_connection = None + selected_referral = None + cachekey = None valid_referral_list = self.valid_referral_list(referrals) if valid_referral_list: - preferred_referral_list = [referral for referral in valid_referral_list if referral['ssl'] == self.connection.server.ssl] - selected_referral = choice(preferred_referral_list) if preferred_referral_list else choice(valid_referral_list) + preferred_referral_list = [referral for referral in valid_referral_list if + referral['ssl'] == self.connection.server.ssl] + selected_referral = choice(preferred_referral_list) if preferred_referral_list else choice( + valid_referral_list) cachekey = (selected_referral['host'], selected_referral['port'] or self.connection.server.port, selected_referral['ssl']) if self.connection.use_referral_cache and cachekey in self.referral_cache: @@ -725,7 +728,8 @@ class BaseStrategy(object): local_certificate_file=self.connection.server.tls.certificate_file, validate=self.connection.server.tls.validate, version=self.connection.server.tls.version, - ca_certs_file=self.connection.server.tls.ca_certs_file) if selected_referral['ssl'] else None) + ca_certs_file=self.connection.server.tls.ca_certs_file) if + selected_referral['ssl'] else None) from ..core.connection import Connection @@ -758,6 +762,13 @@ class BaseStrategy(object): if self.connection.usage: self.connection._usage.referrals_followed += 1 + return selected_referral, referral_connection, cachekey + + def do_operation_on_referral(self, request, referrals): + if log_enabled(PROTOCOL): + log(PROTOCOL, 'following referral for <%s>', self.connection) + selected_referral, referral_connection, cachekey = self.create_referral_connection(referrals) + if selected_referral: if request['type'] == 'searchRequest': referral_connection.search(selected_referral['base'] or request['base'], selected_referral['filter'] 
or request['filter'], diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/ldifProducer.py b/server/www/packages/packages-linux/x64/ldap3/strategy/ldifProducer.py index 119e172..dfdde1b 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/ldifProducer.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/ldifProducer.py @@ -101,6 +101,8 @@ class LdifProducerStrategy(BaseStrategy): self.connection.request = BaseStrategy.decode_request(message_type, request, controls) self.connection.request['controls'] = controls + if self._outstanding is None: + self._outstanding = dict() self._outstanding[message_id] = self.connection.request return message_id diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/mockBase.py b/server/www/packages/packages-linux/x64/ldap3/strategy/mockBase.py index f07c7c2..c933a59 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/mockBase.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/mockBase.py @@ -724,12 +724,12 @@ class MockBaseStrategy(object): if extension[0] == '2.16.840.1.113719.1.27.100.31': # getBindDNRequest [NOVELL] result_code = 0 message = '' - response_name = '2.16.840.1.113719.1.27.100.32' # getBindDNResponse [NOVELL] + response_name = OctetString('2.16.840.1.113719.1.27.100.32') # getBindDNResponse [NOVELL] response_value = OctetString(self.bound) elif extension[0] == '1.3.6.1.4.1.4203.1.11.3': # WhoAmI [RFC4532] result_code = 0 message = '' - response_name = '1.3.6.1.4.1.4203.1.11.3' # WhoAmI [RFC4532] + response_name = OctetString('1.3.6.1.4.1.4203.1.11.3') # WhoAmI [RFC4532] response_value = OctetString(self.bound) break @@ -845,7 +845,6 @@ class MockBaseStrategy(object): attr_name = node.assertion['attr'] attr_value = node.assertion['value'] for candidate in candidates: - # if attr_name in self.connection.server.dit[candidate] and attr_value in self.connection.server.dit[candidate][attr_name]: if attr_name in self.connection.server.dit[candidate] and self.equal(candidate, attr_name, attr_value): node.matched.add(candidate) else: diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/mockSync.py b/server/www/packages/packages-linux/x64/ldap3/strategy/mockSync.py index b155781..4de6381 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/mockSync.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/mockSync.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/restartable.py b/server/www/packages/packages-linux/x64/ldap3/strategy/restartable.py index 77ef4cd..3ffaac6 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/restartable.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/restartable.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. 
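
The next hunk rewrites the REUSABLE strategy module wholesale (copyright bump, refresh_server_info, exception forwarding to the caller's thread, and restoring the lazy flag in validate_bind). For orientation, a minimal usage sketch of the pooled strategy with a hypothetical host and credentials: operations return a counter instead of results, and results are collected with get_response, as the docstrings below describe.

    from ldap3 import Server, Connection, REUSABLE

    server = Server('ldap.example.com')  # hypothetical host
    conn = Connection(server, user='cn=admin,o=resources', password='secret',
                      client_strategy=REUSABLE,
                      pool_name='demo_pool',    # mandatory for the REUSABLE strategy
                      pool_size=4,              # number of worker connections
                      pool_lifetime=3600)       # seconds before a worker is recreated
    conn.open()                                 # starts the worker pool
    conn.bind()
    msg_id = conn.search('o=resources', '(objectClass=*)')  # returns a counter, not entries
    response, result = conn.get_response(msg_id)            # collects the worker's response
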
# diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/reusable.py b/server/www/packages/packages-linux/x64/ldap3/strategy/reusable.py index d70c4d9..8d213c4 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/reusable.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/reusable.py @@ -1,493 +1,495 @@ -""" -""" - -# Created on 2014.03.23 -# -# Author: Giovanni Cannata -# -# Copyright 2014 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . - -from datetime import datetime -from os import linesep -from threading import Thread, Lock -from time import sleep - -from .. import RESTARTABLE, get_config_parameter, AUTO_BIND_DEFAULT, AUTO_BIND_NONE, AUTO_BIND_NO_TLS, AUTO_BIND_TLS_AFTER_BIND, AUTO_BIND_TLS_BEFORE_BIND -from .base import BaseStrategy -from ..core.usage import ConnectionUsage -from ..core.exceptions import LDAPConnectionPoolNameIsMandatoryError, LDAPConnectionPoolNotStartedError, LDAPOperationResult, LDAPExceptionError, LDAPResponseTimeoutError -from ..utils.log import log, log_enabled, ERROR, BASIC -from ..protocol.rfc4511 import LDAP_MAX_INT - -TERMINATE_REUSABLE = 'TERMINATE_REUSABLE_CONNECTION' - -BOGUS_BIND = -1 -BOGUS_UNBIND = -2 -BOGUS_EXTENDED = -3 -BOGUS_ABANDON = -4 - -try: - from queue import Queue, Empty -except ImportError: # Python 2 - # noinspection PyUnresolvedReferences - from Queue import Queue, Empty - - -# noinspection PyProtectedMember -class ReusableStrategy(BaseStrategy): - """ - A pool of reusable SyncWaitRestartable connections with lazy behaviour and limited lifetime. - The connection using this strategy presents itself as a normal connection, but internally the strategy has a pool of - connections that can be used as needed. Each connection lives in its own thread and has a busy/available status. - The strategy performs the requested operation on the first available connection. - The pool of connections is instantiated at strategy initialization. - Strategy has two customizable properties, the total number of connections in the pool and the lifetime of each connection. - When lifetime is expired the connection is closed and will be open again when needed. 
- """ - pools = dict() - - def receiving(self): - raise NotImplementedError - - def _start_listen(self): - raise NotImplementedError - - def _get_response(self, message_id): - raise NotImplementedError - - def get_stream(self): - raise NotImplementedError - - def set_stream(self, value): - raise NotImplementedError - - # noinspection PyProtectedMember - class ConnectionPool(object): - """ - Container for the Connection Threads - """ - def __new__(cls, connection): - if connection.pool_name in ReusableStrategy.pools: # returns existing connection pool - pool = ReusableStrategy.pools[connection.pool_name] - if not pool.started: # if pool is not started remove it from the pools singleton and create a new onw - del ReusableStrategy.pools[connection.pool_name] - return object.__new__(cls) - if connection.pool_keepalive and pool.keepalive != connection.pool_keepalive: # change lifetime - pool.keepalive = connection.pool_keepalive - if connection.pool_lifetime and pool.lifetime != connection.pool_lifetime: # change keepalive - pool.lifetime = connection.pool_lifetime - if connection.pool_size and pool.pool_size != connection.pool_size: # if pool size has changed terminate and recreate the connections - pool.terminate_pool() - pool.pool_size = connection.pool_size - return pool - else: - return object.__new__(cls) - - def __init__(self, connection): - if not hasattr(self, 'workers'): - self.name = connection.pool_name - self.master_connection = connection - self.workers = [] - self.pool_size = connection.pool_size or get_config_parameter('REUSABLE_THREADED_POOL_SIZE') - self.lifetime = connection.pool_lifetime or get_config_parameter('REUSABLE_THREADED_LIFETIME') - self.keepalive = connection.pool_keepalive - self.request_queue = Queue() - self.open_pool = False - self.bind_pool = False - self.tls_pool = False - self._incoming = dict() - self.counter = 0 - self.terminated_usage = ConnectionUsage() if connection._usage else None - self.terminated = False - self.pool_lock = Lock() - ReusableStrategy.pools[self.name] = self - self.started = False - if log_enabled(BASIC): - log(BASIC, 'instantiated ConnectionPool: <%r>', self) - - def __str__(self): - s = 'POOL: ' + str(self.name) + ' - status: ' + ('started' if self.started else 'terminated') - s += ' - responses in queue: ' + str(len(self._incoming)) - s += ' - pool size: ' + str(self.pool_size) - s += ' - lifetime: ' + str(self.lifetime) - s += ' - keepalive: ' + str(self.keepalive) - s += ' - open: ' + str(self.open_pool) - s += ' - bind: ' + str(self.bind_pool) - s += ' - tls: ' + str(self.tls_pool) + linesep - s += 'MASTER CONN: ' + str(self.master_connection) + linesep - s += 'WORKERS:' - if self.workers: - for i, worker in enumerate(self.workers): - s += linesep + str(i).rjust(5) + ': ' + str(worker) - else: - s += linesep + ' no active workers in pool' - - return s - - def __repr__(self): - return self.__str__() - - def get_info_from_server(self): - for worker in self.workers: - with worker.worker_lock: - if not worker.connection.server.schema or not worker.connection.server.info: - worker.get_info_from_server = True - else: - worker.get_info_from_server = False - - def rebind_pool(self): - for worker in self.workers: - with worker.worker_lock: - worker.connection.rebind(self.master_connection.user, - self.master_connection.password, - self.master_connection.authentication, - self.master_connection.sasl_mechanism, - self.master_connection.sasl_credentials) - - def start_pool(self): - if not self.started: - self.create_pool() - for worker in 
self.workers: - with worker.worker_lock: - worker.thread.start() - self.started = True - self.terminated = False - if log_enabled(BASIC): - log(BASIC, 'worker started for pool <%s>', self) - return True - return False - - def create_pool(self): - if log_enabled(BASIC): - log(BASIC, 'created pool <%s>', self) - self.workers = [ReusableStrategy.PooledConnectionWorker(self.master_connection, self.request_queue) for _ in range(self.pool_size)] - - def terminate_pool(self): - if not self.terminated: - if log_enabled(BASIC): - log(BASIC, 'terminating pool <%s>', self) - self.started = False - self.request_queue.join() # waits for all queue pending operations - for _ in range(len([worker for worker in self.workers if worker.thread.is_alive()])): # put a TERMINATE signal on the queue for each active thread - self.request_queue.put((TERMINATE_REUSABLE, None, None, None)) - self.request_queue.join() # waits for all queue terminate operations - self.terminated = True - if log_enabled(BASIC): - log(BASIC, 'pool terminated for <%s>', self) - - class PooledConnectionThread(Thread): - """ - The thread that holds the Reusable connection and receive operation request via the queue - Result are sent back in the pool._incoming list when ready - """ - def __init__(self, worker, master_connection): - Thread.__init__(self) - self.daemon = True - self.worker = worker - self.master_connection = master_connection - if log_enabled(BASIC): - log(BASIC, 'instantiated PooledConnectionThread: <%r>', self) - - # noinspection PyProtectedMember - def run(self): - self.worker.running = True - terminate = False - pool = self.master_connection.strategy.pool - while not terminate: - try: - counter, message_type, request, controls = pool.request_queue.get(block=True, timeout=self.master_connection.strategy.pool.keepalive) - except Empty: # issue an Abandon(0) operation to keep the connection live - Abandon(0) is a harmless operation - if not self.worker.connection.closed: - self.worker.connection.abandon(0) - continue - - with self.worker.worker_lock: - self.worker.busy = True - if counter == TERMINATE_REUSABLE: - terminate = True - if self.worker.connection.bound: - try: - self.worker.connection.unbind() - if log_enabled(BASIC): - log(BASIC, 'thread terminated') - except LDAPExceptionError: - pass - else: - if (datetime.now() - self.worker.creation_time).seconds >= self.master_connection.strategy.pool.lifetime: # destroy and create a new connection - try: - self.worker.connection.unbind() - except LDAPExceptionError: - pass - self.worker.new_connection() - if log_enabled(BASIC): - log(BASIC, 'thread respawn') - if message_type not in ['bindRequest', 'unbindRequest']: - try: - if pool.open_pool and self.worker.connection.closed: - self.worker.connection.open(read_server_info=False) - if pool.tls_pool and not self.worker.connection.tls_started: - self.worker.connection.start_tls(read_server_info=False) - if pool.bind_pool and not self.worker.connection.bound: - self.worker.connection.bind(read_server_info=False) - elif pool.open_pool and not self.worker.connection.closed: # connection already open, issues a start_tls - if pool.tls_pool and not self.worker.connection.tls_started: - self.worker.connection.start_tls(read_server_info=False) - if self.worker.get_info_from_server and counter: - self.worker.connection._fire_deferred() - self.worker.get_info_from_server = False - response = None - result = None - if message_type == 'searchRequest': - response = 
self.worker.connection.post_send_search(self.worker.connection.send(message_type, request, controls)) - else: - response = self.worker.connection.post_send_single_response(self.worker.connection.send(message_type, request, controls)) - result = self.worker.connection.result - with pool.pool_lock: - pool._incoming[counter] = (response, result, BaseStrategy.decode_request(message_type, request, controls)) - except LDAPOperationResult as e: # raise_exceptions has raised an exception. It must be redirected to the original connection thread - with pool.pool_lock: - pool._incoming[counter] = (type(e)(str(e)), None, None) - # except LDAPOperationResult as e: # raise_exceptions has raised an exception. It must be redirected to the original connection thread - # exc = e - # with pool.pool_lock: - # if exc: - # pool._incoming[counter] = (exc, None, None) - # else: - # pool._incoming[counter] = (response, result, BaseStrategy.decode_request(message_type, request, controls)) - - self.worker.busy = False - pool.request_queue.task_done() - self.worker.task_counter += 1 - if log_enabled(BASIC): - log(BASIC, 'thread terminated') - if self.master_connection.usage: - pool.terminated_usage += self.worker.connection.usage - self.worker.running = False - - class PooledConnectionWorker(object): - """ - Container for the restartable connection. it includes a thread and a lock to execute the connection in the pool - """ - def __init__(self, connection, request_queue): - self.master_connection = connection - self.request_queue = request_queue - self.running = False - self.busy = False - self.get_info_from_server = False - self.connection = None - self.creation_time = None - self.task_counter = 0 - self.new_connection() - self.thread = ReusableStrategy.PooledConnectionThread(self, self.master_connection) - self.worker_lock = Lock() - if log_enabled(BASIC): - log(BASIC, 'instantiated PooledConnectionWorker: <%s>', self) - - def __str__(self): - s = 'CONN: ' + str(self.connection) + linesep + ' THREAD: ' - s += 'running' if self.running else 'halted' - s += ' - ' + ('busy' if self.busy else 'available') - s += ' - ' + ('created at: ' + self.creation_time.isoformat()) - s += ' - time to live: ' + str(self.master_connection.strategy.pool.lifetime - (datetime.now() - self.creation_time).seconds) - s += ' - requests served: ' + str(self.task_counter) - - return s - - def new_connection(self): - from ..core.connection import Connection - # noinspection PyProtectedMember - self.creation_time = datetime.now() - self.connection = Connection(server=self.master_connection.server_pool if self.master_connection.server_pool else self.master_connection.server, - user=self.master_connection.user, - password=self.master_connection.password, - auto_bind=AUTO_BIND_NONE, # do not perform auto_bind because it reads again the schema - version=self.master_connection.version, - authentication=self.master_connection.authentication, - client_strategy=RESTARTABLE, - auto_referrals=self.master_connection.auto_referrals, - auto_range=self.master_connection.auto_range, - sasl_mechanism=self.master_connection.sasl_mechanism, - sasl_credentials=self.master_connection.sasl_credentials, - check_names=self.master_connection.check_names, - collect_usage=self.master_connection._usage, - read_only=self.master_connection.read_only, - raise_exceptions=self.master_connection.raise_exceptions, - lazy=False, - fast_decoder=self.master_connection.fast_decoder, - receive_timeout=self.master_connection.receive_timeout, - 
return_empty_attributes=self.master_connection.empty_attributes) - - # simulates auto_bind, always with read_server_info=False - if self.master_connection.auto_bind and self.master_connection.auto_bind not in [AUTO_BIND_NONE, AUTO_BIND_DEFAULT]: - if log_enabled(BASIC): - log(BASIC, 'performing automatic bind for <%s>', self.connection) - self.connection.open(read_server_info=False) - if self.master_connection.auto_bind == AUTO_BIND_NO_TLS: - self.connection.bind(read_server_info=False) - elif self.master_connection.auto_bind == AUTO_BIND_TLS_BEFORE_BIND: - self.connection.start_tls(read_server_info=False) - self.connection.bind(read_server_info=False) - elif self.master_connection.auto_bind == AUTO_BIND_TLS_AFTER_BIND: - self.connection.bind(read_server_info=False) - self.connection.start_tls(read_server_info=False) - - if self.master_connection.server_pool: - self.connection.server_pool = self.master_connection.server_pool - self.connection.server_pool.initialize(self.connection) - - # ReusableStrategy methods - def __init__(self, ldap_connection): - BaseStrategy.__init__(self, ldap_connection) - self.sync = False - self.no_real_dsa = False - self.pooled = True - self.can_stream = False - if hasattr(ldap_connection, 'pool_name') and ldap_connection.pool_name: - self.pool = ReusableStrategy.ConnectionPool(ldap_connection) - else: - if log_enabled(ERROR): - log(ERROR, 'reusable connection must have a pool_name') - raise LDAPConnectionPoolNameIsMandatoryError('reusable connection must have a pool_name') - - def open(self, reset_usage=True, read_server_info=True): - # read_server_info not used - self.pool.open_pool = True - self.pool.start_pool() - self.connection.closed = False - if self.connection.usage: - if reset_usage or not self.connection._usage.initial_connection_start_time: - self.connection._usage.start() - - def terminate(self): - self.pool.terminate_pool() - self.pool.open_pool = False - self.connection.bound = False - self.connection.closed = True - self.pool.bind_pool = False - self.pool.tls_pool = False - - def _close_socket(self): - """ - Doesn't really close the socket - """ - self.connection.closed = True - - if self.connection.usage: - self.connection._usage.closed_sockets += 1 - - def send(self, message_type, request, controls=None): - if self.pool.started: - if message_type == 'bindRequest': - self.pool.bind_pool = True - counter = BOGUS_BIND - elif message_type == 'unbindRequest': - self.pool.bind_pool = False - counter = BOGUS_UNBIND - elif message_type == 'abandonRequest': - counter = BOGUS_ABANDON - elif message_type == 'extendedReq' and self.connection.starting_tls: - self.pool.tls_pool = True - counter = BOGUS_EXTENDED - else: - with self.pool.pool_lock: - self.pool.counter += 1 - if self.pool.counter > LDAP_MAX_INT: - self.pool.counter = 1 - counter = self.pool.counter - self.pool.request_queue.put((counter, message_type, request, controls)) - return counter - if log_enabled(ERROR): - log(ERROR, 'reusable connection pool not started') - raise LDAPConnectionPoolNotStartedError('reusable connection pool not started') - - def validate_bind(self, controls): - # in case of a new connection or different credentials - if (self.connection.user != self.pool.master_connection.user or - self.connection.password != self.pool.master_connection.password or - self.connection.authentication != self.pool.master_connection.authentication or - self.connection.sasl_mechanism != self.pool.master_connection.sasl_mechanism or - self.connection.sasl_credentials != 
self.pool.master_connection.sasl_credentials): - self.pool.master_connection.user = self.connection.user - self.pool.master_connection.password = self.connection.password - self.pool.master_connection.authentication = self.connection.authentication - self.pool.master_connection.sasl_mechanism = self.connection.sasl_mechanism - self.pool.master_connection.sasl_credentials = self.connection.sasl_credentials - self.pool.rebind_pool() - temp_connection = self.pool.workers[0].connection - temp_connection.lazy = False - if not self.connection.server.schema or not self.connection.server.info: - result = self.pool.workers[0].connection.bind(controls=controls) - else: - result = self.pool.workers[0].connection.bind(controls=controls, read_server_info=False) - - temp_connection.unbind() - temp_connection.lazy = True - if result: - self.pool.bind_pool = True # bind pool if bind is validated - return result - - def get_response(self, counter, timeout=None, get_request=False): - sleeptime = get_config_parameter('RESPONSE_SLEEPTIME') - request=None - if timeout is None: - timeout = get_config_parameter('RESPONSE_WAITING_TIMEOUT') - if counter == BOGUS_BIND: # send a bogus bindResponse - response = list() - result = {'description': 'success', 'referrals': None, 'type': 'bindResponse', 'result': 0, 'dn': '', 'message': '', 'saslCreds': None} - elif counter == BOGUS_UNBIND: # bogus unbind response - response = None - result = None - elif counter == BOGUS_ABANDON: # abandon cannot be executed because of multiple connections - response = list() - result = {'result': 0, 'referrals': None, 'responseName': '1.3.6.1.4.1.1466.20037', 'type': 'extendedResp', 'description': 'success', 'responseValue': 'None', 'dn': '', 'message': ''} - elif counter == BOGUS_EXTENDED: # bogus startTls extended response - response = list() - result = {'result': 0, 'referrals': None, 'responseName': '1.3.6.1.4.1.1466.20037', 'type': 'extendedResp', 'description': 'success', 'responseValue': 'None', 'dn': '', 'message': ''} - self.connection.starting_tls = False - else: - response = None - result = None - while timeout >= 0: # waiting for completed message to appear in _incoming - try: - with self.connection.strategy.pool.pool_lock: - response, result, request = self.connection.strategy.pool._incoming.pop(counter) - except KeyError: - sleep(sleeptime) - timeout -= sleeptime - continue - break - - if timeout <= 0: - if log_enabled(ERROR): - log(ERROR, 'no response from worker threads in Reusable connection') - raise LDAPResponseTimeoutError('no response from worker threads in Reusable connection') - - if isinstance(response, LDAPOperationResult): - raise response # an exception has been raised with raise_exceptions - - if get_request: - return response, result, request - - return response, result - - def post_send_single_response(self, counter): - return counter - - def post_send_search(self, counter): - return counter +""" +""" + +# Created on 2014.03.23 +# +# Author: Giovanni Cannata +# +# Copyright 2014 - 2019 Giovanni Cannata +# +# This file is part of ldap3. +# +# ldap3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ldap3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with ldap3 in the COPYING and COPYING.LESSER files.
+# If not, see <http://www.gnu.org/licenses/>.
+
+from datetime import datetime
+from os import linesep
+from threading import Thread, Lock
+from time import sleep
+
+from .. import RESTARTABLE, get_config_parameter, AUTO_BIND_DEFAULT, AUTO_BIND_NONE, AUTO_BIND_NO_TLS, AUTO_BIND_TLS_AFTER_BIND, AUTO_BIND_TLS_BEFORE_BIND
+from .base import BaseStrategy
+from ..core.usage import ConnectionUsage
+from ..core.exceptions import LDAPConnectionPoolNameIsMandatoryError, LDAPConnectionPoolNotStartedError, LDAPOperationResult, LDAPExceptionError, LDAPResponseTimeoutError
+from ..utils.log import log, log_enabled, ERROR, BASIC
+from ..protocol.rfc4511 import LDAP_MAX_INT
+
+TERMINATE_REUSABLE = 'TERMINATE_REUSABLE_CONNECTION'
+
+BOGUS_BIND = -1
+BOGUS_UNBIND = -2
+BOGUS_EXTENDED = -3
+BOGUS_ABANDON = -4
+
+try:
+    from queue import Queue, Empty
+except ImportError:  # Python 2
+    # noinspection PyUnresolvedReferences
+    from Queue import Queue, Empty
+
+
+# noinspection PyProtectedMember
+class ReusableStrategy(BaseStrategy):
+    """
+    A pool of reusable SyncWaitRestartable connections with lazy behaviour and limited lifetime.
+    The connection using this strategy presents itself as a normal connection, but internally the strategy has a pool of
+    connections that can be used as needed. Each connection lives in its own thread and has a busy/available status.
+    The strategy performs the requested operation on the first available connection.
+    The pool of connections is instantiated at strategy initialization.
+    The strategy has two customizable properties: the total number of connections in the pool and the lifetime of each connection.
+    When the lifetime expires, the connection is closed and will be opened again when needed.
+    """
+    pools = dict()
+
+    def receiving(self):
+        raise NotImplementedError
+
+    def _start_listen(self):
+        raise NotImplementedError
+
+    def _get_response(self, message_id):
+        raise NotImplementedError
+
+    def get_stream(self):
+        raise NotImplementedError
+
+    def set_stream(self, value):
+        raise NotImplementedError
+
+    # noinspection PyProtectedMember
+    class ConnectionPool(object):
+        """
+        Container for the Connection Threads
+        """
+        def __new__(cls, connection):
+            if connection.pool_name in ReusableStrategy.pools:  # returns existing connection pool
+                pool = ReusableStrategy.pools[connection.pool_name]
+                if not pool.started:  # if the pool is not started, remove it from the pools singleton and create a new one
+                    del ReusableStrategy.pools[connection.pool_name]
+                    return object.__new__(cls)
+                if connection.pool_keepalive and pool.keepalive != connection.pool_keepalive:  # change keepalive
+                    pool.keepalive = connection.pool_keepalive
+                if connection.pool_lifetime and pool.lifetime != connection.pool_lifetime:  # change lifetime
+                    pool.lifetime = connection.pool_lifetime
+                if connection.pool_size and pool.pool_size != connection.pool_size:  # if pool size has changed terminate and recreate the connections
+                    pool.terminate_pool()
+                    pool.pool_size = connection.pool_size
+                return pool
+            else:
+                return object.__new__(cls)
+
+        def __init__(self, connection):
+            if not hasattr(self, 'workers'):
+                self.name = connection.pool_name
+                self.master_connection = connection
+                self.workers = []
+                self.pool_size = connection.pool_size or get_config_parameter('REUSABLE_THREADED_POOL_SIZE')
+                self.lifetime = connection.pool_lifetime or get_config_parameter('REUSABLE_THREADED_LIFETIME')
+                self.keepalive = connection.pool_keepalive
+                self.request_queue = Queue()
+                self.open_pool = False
+                self.bind_pool = False
+                self.tls_pool = False
+                self._incoming = dict()
+                self.counter = 0
+                self.terminated_usage = ConnectionUsage() if connection._usage else None
+                self.terminated = False
+                self.pool_lock = Lock()
+                ReusableStrategy.pools[self.name] = self
+                self.started = False
+                if log_enabled(BASIC):
+                    log(BASIC, 'instantiated ConnectionPool: <%r>', self)
+
+        def __str__(self):
+            s = 'POOL: ' + str(self.name) + ' - status: ' + ('started' if self.started else 'terminated')
+            s += ' - responses in queue: ' + str(len(self._incoming))
+            s += ' - pool size: ' + str(self.pool_size)
+            s += ' - lifetime: ' + str(self.lifetime)
+            s += ' - keepalive: ' + str(self.keepalive)
+            s += ' - open: ' + str(self.open_pool)
+            s += ' - bind: ' + str(self.bind_pool)
+            s += ' - tls: ' + str(self.tls_pool) + linesep
+            s += 'MASTER CONN: ' + str(self.master_connection) + linesep
+            s += 'WORKERS:'
+            if self.workers:
+                for i, worker in enumerate(self.workers):
+                    s += linesep + str(i).rjust(5) + ': ' + str(worker)
+            else:
+                s += linesep + ' no active workers in pool'
+
+            return s
+
+        def __repr__(self):
+            return self.__str__()
+
+        def get_info_from_server(self):
+            for worker in self.workers:
+                with worker.worker_lock:
+                    if not worker.connection.server.schema or not worker.connection.server.info:
+                        worker.get_info_from_server = True
+                    else:
+                        worker.get_info_from_server = False
+
+        def rebind_pool(self):
+            for worker in self.workers:
+                with worker.worker_lock:
+                    worker.connection.rebind(self.master_connection.user,
+                                             self.master_connection.password,
+                                             self.master_connection.authentication,
+                                             self.master_connection.sasl_mechanism,
+                                             self.master_connection.sasl_credentials)
+
+        def start_pool(self):
+            if not self.started:
+                self.create_pool()
+                for worker in self.workers:
+                    with worker.worker_lock:
+                        worker.thread.start()
+                self.started = True
+                self.terminated = False
+                if log_enabled(BASIC):
+                    log(BASIC, 'worker started for pool <%s>', self)
+                return True
+            return False
+
+        def create_pool(self):
+            if log_enabled(BASIC):
+                log(BASIC, 'created pool <%s>', self)
+            self.workers = [ReusableStrategy.PooledConnectionWorker(self.master_connection, self.request_queue) for _ in range(self.pool_size)]
+
+        def terminate_pool(self):
+            if not self.terminated:
+                if log_enabled(BASIC):
+                    log(BASIC, 'terminating pool <%s>', self)
+                self.started = False
+                self.request_queue.join()  # waits for all queue pending operations
+                for _ in range(len([worker for worker in self.workers if worker.thread.is_alive()])):  # put a TERMINATE signal on the queue for each active thread
+                    self.request_queue.put((TERMINATE_REUSABLE, None, None, None))
+                self.request_queue.join()  # waits for all queue terminate operations
+                self.terminated = True
+                if log_enabled(BASIC):
+                    log(BASIC, 'pool terminated for <%s>', self)
+
+    class PooledConnectionThread(Thread):
+        """
+        The thread that holds the Reusable connection and receives operation requests via the queue
+        Results are sent back in the pool._incoming list when ready
+        """
+        def __init__(self, worker, master_connection):
+            Thread.__init__(self)
+            self.daemon = True
+            self.worker = worker
+            self.master_connection = master_connection
+            if log_enabled(BASIC):
+                log(BASIC, 'instantiated PooledConnectionThread: <%r>', self)
+
+        # noinspection PyProtectedMember
+        def run(self):
+            self.worker.running = True
+            terminate = False
+            pool = self.master_connection.strategy.pool
+            while not terminate:
+                try:
+                    counter, message_type, request, controls = pool.request_queue.get(block=True, timeout=self.master_connection.strategy.pool.keepalive)
+                except Empty:  # issue an Abandon(0) operation to keep the connection alive - Abandon(0) is a harmless operation
+                    if not self.worker.connection.closed:
+                        self.worker.connection.abandon(0)
+                    continue
+
+                with self.worker.worker_lock:
+                    self.worker.busy = True
+                    if counter == TERMINATE_REUSABLE:
+                        terminate = True
+                        if self.worker.connection.bound:
+                            try:
+                                self.worker.connection.unbind()
+                                if log_enabled(BASIC):
+                                    log(BASIC, 'thread terminated')
+                            except LDAPExceptionError:
+                                pass
+                    else:
+                        if (datetime.now() - self.worker.creation_time).seconds >= self.master_connection.strategy.pool.lifetime:  # destroy and create a new connection
+                            try:
+                                self.worker.connection.unbind()
+                            except LDAPExceptionError:
+                                pass
+                            self.worker.new_connection()
+                            if log_enabled(BASIC):
+                                log(BASIC, 'thread respawn')
+                        if message_type not in ['bindRequest', 'unbindRequest']:
+                            try:
+                                if pool.open_pool and self.worker.connection.closed:
+                                    self.worker.connection.open(read_server_info=False)
+                                    if pool.tls_pool and not self.worker.connection.tls_started:
+                                        self.worker.connection.start_tls(read_server_info=False)
+                                    if pool.bind_pool and not self.worker.connection.bound:
+                                        self.worker.connection.bind(read_server_info=False)
+                                elif pool.open_pool and not self.worker.connection.closed:  # connection already open, issues a start_tls
+                                    if pool.tls_pool and not self.worker.connection.tls_started:
+                                        self.worker.connection.start_tls(read_server_info=False)
+                                if self.worker.get_info_from_server and counter:
+                                    self.worker.connection.refresh_server_info()
+                                    self.worker.get_info_from_server = False
+                                response = None
+                                result = None
+                                if message_type == 'searchRequest':
+                                    response = self.worker.connection.post_send_search(self.worker.connection.send(message_type, request, controls))
+                                else:
+                                    response = self.worker.connection.post_send_single_response(self.worker.connection.send(message_type, request, controls))
+                                result = self.worker.connection.result
+                                with pool.pool_lock:
+                                    pool._incoming[counter] = (response, result, BaseStrategy.decode_request(message_type, request, controls))
+                            except LDAPOperationResult as e:  # raise_exceptions has raised an exception. It must be redirected to the original connection thread
+                                with pool.pool_lock:
+                                    pool._incoming[counter] = (e, None, None)
+                                    # pool._incoming[counter] = (type(e)(str(e)), None, None)
+                            # except LDAPOperationResult as e:  # raise_exceptions has raised an exception. It must be redirected to the original connection thread
+                            #     exc = e
+                            #     with pool.pool_lock:
+                            #         if exc:
+                            #             pool._incoming[counter] = (exc, None, None)
+                            #         else:
+                            #             pool._incoming[counter] = (response, result, BaseStrategy.decode_request(message_type, request, controls))
+
+                    self.worker.busy = False
+                    pool.request_queue.task_done()
+                    self.worker.task_counter += 1
+            if log_enabled(BASIC):
+                log(BASIC, 'thread terminated')
+            if self.master_connection.usage:
+                pool.terminated_usage += self.worker.connection.usage
+            self.worker.running = False
+
+    class PooledConnectionWorker(object):
+        """
+        Container for the restartable connection. It includes a thread and a lock to execute the connection in the pool
+        """
+        def __init__(self, connection, request_queue):
+            self.master_connection = connection
+            self.request_queue = request_queue
+            self.running = False
+            self.busy = False
+            self.get_info_from_server = False
+            self.connection = None
+            self.creation_time = None
+            self.task_counter = 0
+            self.new_connection()
+            self.thread = ReusableStrategy.PooledConnectionThread(self, self.master_connection)
+            self.worker_lock = Lock()
+            if log_enabled(BASIC):
+                log(BASIC, 'instantiated PooledConnectionWorker: <%s>', self)
+
+        def __str__(self):
+            s = 'CONN: ' + str(self.connection) + linesep + ' THREAD: '
+            s += 'running' if self.running else 'halted'
+            s += ' - ' + ('busy' if self.busy else 'available')
+            s += ' - ' + ('created at: ' + self.creation_time.isoformat())
+            s += ' - time to live: ' + str(self.master_connection.strategy.pool.lifetime - (datetime.now() - self.creation_time).seconds)
+            s += ' - requests served: ' + str(self.task_counter)
+
+            return s
+
+        def new_connection(self):
+            from ..core.connection import Connection
+            # noinspection PyProtectedMember
+            self.creation_time = datetime.now()
+            self.connection = Connection(server=self.master_connection.server_pool if self.master_connection.server_pool else self.master_connection.server,
+                                         user=self.master_connection.user,
+                                         password=self.master_connection.password,
+                                         auto_bind=AUTO_BIND_NONE,  # do not perform auto_bind because it reads again the schema
+                                         version=self.master_connection.version,
+                                         authentication=self.master_connection.authentication,
+                                         client_strategy=RESTARTABLE,
+                                         auto_referrals=self.master_connection.auto_referrals,
+                                         auto_range=self.master_connection.auto_range,
+                                         sasl_mechanism=self.master_connection.sasl_mechanism,
+                                         sasl_credentials=self.master_connection.sasl_credentials,
+                                         check_names=self.master_connection.check_names,
+                                         collect_usage=self.master_connection._usage,
+                                         read_only=self.master_connection.read_only,
+                                         raise_exceptions=self.master_connection.raise_exceptions,
+                                         lazy=False,
+                                         fast_decoder=self.master_connection.fast_decoder,
+
receive_timeout=self.master_connection.receive_timeout, + return_empty_attributes=self.master_connection.empty_attributes) + + # simulates auto_bind, always with read_server_info=False + if self.master_connection.auto_bind and self.master_connection.auto_bind not in [AUTO_BIND_NONE, AUTO_BIND_DEFAULT]: + if log_enabled(BASIC): + log(BASIC, 'performing automatic bind for <%s>', self.connection) + self.connection.open(read_server_info=False) + if self.master_connection.auto_bind == AUTO_BIND_NO_TLS: + self.connection.bind(read_server_info=False) + elif self.master_connection.auto_bind == AUTO_BIND_TLS_BEFORE_BIND: + self.connection.start_tls(read_server_info=False) + self.connection.bind(read_server_info=False) + elif self.master_connection.auto_bind == AUTO_BIND_TLS_AFTER_BIND: + self.connection.bind(read_server_info=False) + self.connection.start_tls(read_server_info=False) + + if self.master_connection.server_pool: + self.connection.server_pool = self.master_connection.server_pool + self.connection.server_pool.initialize(self.connection) + + # ReusableStrategy methods + def __init__(self, ldap_connection): + BaseStrategy.__init__(self, ldap_connection) + self.sync = False + self.no_real_dsa = False + self.pooled = True + self.can_stream = False + if hasattr(ldap_connection, 'pool_name') and ldap_connection.pool_name: + self.pool = ReusableStrategy.ConnectionPool(ldap_connection) + else: + if log_enabled(ERROR): + log(ERROR, 'reusable connection must have a pool_name') + raise LDAPConnectionPoolNameIsMandatoryError('reusable connection must have a pool_name') + + def open(self, reset_usage=True, read_server_info=True): + # read_server_info not used + self.pool.open_pool = True + self.pool.start_pool() + self.connection.closed = False + if self.connection.usage: + if reset_usage or not self.connection._usage.initial_connection_start_time: + self.connection._usage.start() + + def terminate(self): + self.pool.terminate_pool() + self.pool.open_pool = False + self.connection.bound = False + self.connection.closed = True + self.pool.bind_pool = False + self.pool.tls_pool = False + + def _close_socket(self): + """ + Doesn't really close the socket + """ + self.connection.closed = True + + if self.connection.usage: + self.connection._usage.closed_sockets += 1 + + def send(self, message_type, request, controls=None): + if self.pool.started: + if message_type == 'bindRequest': + self.pool.bind_pool = True + counter = BOGUS_BIND + elif message_type == 'unbindRequest': + self.pool.bind_pool = False + counter = BOGUS_UNBIND + elif message_type == 'abandonRequest': + counter = BOGUS_ABANDON + elif message_type == 'extendedReq' and self.connection.starting_tls: + self.pool.tls_pool = True + counter = BOGUS_EXTENDED + else: + with self.pool.pool_lock: + self.pool.counter += 1 + if self.pool.counter > LDAP_MAX_INT: + self.pool.counter = 1 + counter = self.pool.counter + self.pool.request_queue.put((counter, message_type, request, controls)) + return counter + if log_enabled(ERROR): + log(ERROR, 'reusable connection pool not started') + raise LDAPConnectionPoolNotStartedError('reusable connection pool not started') + + def validate_bind(self, controls): + # in case of a new connection or different credentials + if (self.connection.user != self.pool.master_connection.user or + self.connection.password != self.pool.master_connection.password or + self.connection.authentication != self.pool.master_connection.authentication or + self.connection.sasl_mechanism != self.pool.master_connection.sasl_mechanism or + 
self.connection.sasl_credentials != self.pool.master_connection.sasl_credentials): + self.pool.master_connection.user = self.connection.user + self.pool.master_connection.password = self.connection.password + self.pool.master_connection.authentication = self.connection.authentication + self.pool.master_connection.sasl_mechanism = self.connection.sasl_mechanism + self.pool.master_connection.sasl_credentials = self.connection.sasl_credentials + self.pool.rebind_pool() + temp_connection = self.pool.workers[0].connection + old_lazy = temp_connection.lazy + temp_connection.lazy = False + if not self.connection.server.schema or not self.connection.server.info: + result = self.pool.workers[0].connection.bind(controls=controls) + else: + result = self.pool.workers[0].connection.bind(controls=controls, read_server_info=False) + + temp_connection.unbind() + temp_connection.lazy = old_lazy + if result: + self.pool.bind_pool = True # bind pool if bind is validated + return result + + def get_response(self, counter, timeout=None, get_request=False): + sleeptime = get_config_parameter('RESPONSE_SLEEPTIME') + request=None + if timeout is None: + timeout = get_config_parameter('RESPONSE_WAITING_TIMEOUT') + if counter == BOGUS_BIND: # send a bogus bindResponse + response = list() + result = {'description': 'success', 'referrals': None, 'type': 'bindResponse', 'result': 0, 'dn': '', 'message': '', 'saslCreds': None} + elif counter == BOGUS_UNBIND: # bogus unbind response + response = None + result = None + elif counter == BOGUS_ABANDON: # abandon cannot be executed because of multiple connections + response = list() + result = {'result': 0, 'referrals': None, 'responseName': '1.3.6.1.4.1.1466.20037', 'type': 'extendedResp', 'description': 'success', 'responseValue': 'None', 'dn': '', 'message': ''} + elif counter == BOGUS_EXTENDED: # bogus startTls extended response + response = list() + result = {'result': 0, 'referrals': None, 'responseName': '1.3.6.1.4.1.1466.20037', 'type': 'extendedResp', 'description': 'success', 'responseValue': 'None', 'dn': '', 'message': ''} + self.connection.starting_tls = False + else: + response = None + result = None + while timeout >= 0: # waiting for completed message to appear in _incoming + try: + with self.connection.strategy.pool.pool_lock: + response, result, request = self.connection.strategy.pool._incoming.pop(counter) + except KeyError: + sleep(sleeptime) + timeout -= sleeptime + continue + break + + if timeout <= 0: + if log_enabled(ERROR): + log(ERROR, 'no response from worker threads in Reusable connection') + raise LDAPResponseTimeoutError('no response from worker threads in Reusable connection') + + if isinstance(response, LDAPOperationResult): + raise response # an exception has been raised with raise_exceptions + + if get_request: + return response, result, request + + return response, result + + def post_send_single_response(self, counter): + return counter + + def post_send_search(self, counter): + return counter diff --git a/server/www/packages/packages-linux/x64/ldap3/strategy/sync.py b/server/www/packages/packages-linux/x64/ldap3/strategy/sync.py index b2c0257..fc7ccfb 100644 --- a/server/www/packages/packages-linux/x64/ldap3/strategy/sync.py +++ b/server/www/packages/packages-linux/x64/ldap3/strategy/sync.py @@ -67,7 +67,7 @@ class SyncStrategy(BaseStrategy): def receiving(self): """ - Receive data over the socket + Receives data over the socket Checks if the socket is closed """ messages = [] diff --git 
a/server/www/packages/packages-linux/x64/ldap3/utils/ciDict.py b/server/www/packages/packages-linux/x64/ldap3/utils/ciDict.py index f81ba1b..baf5d38 100644 --- a/server/www/packages/packages-linux/x64/ldap3/utils/ciDict.py +++ b/server/www/packages/packages-linux/x64/ldap3/utils/ciDict.py @@ -5,7 +5,7 @@ # # Author: Giovanni Cannata # -# Copyright 2014 - 2018 Giovanni Cannata +# Copyright 2014 - 2019 Giovanni Cannata # # This file is part of ldap3. # @@ -143,7 +143,7 @@ class CaseInsensitiveWithAliasDict(CaseInsensitiveDict): if ci_key in self._aliases: self.remove_alias(ci_key) - def set_alias(self, key, alias): + def set_alias(self, key, alias, ignore_duplicates=False): if not isinstance(alias, SEQUENCE_TYPES): alias = [alias] for alias_to_add in alias: @@ -153,20 +153,20 @@ class CaseInsensitiveWithAliasDict(CaseInsensitiveDict): if ci_alias not in self._case_insensitive_keymap: # checks if alias is used a key if ci_alias not in self._aliases: # checks if alias is used as another alias self._aliases[ci_alias] = ci_key - if ci_key in self._alias_keymap: # extend alias keymap + if ci_key in self._alias_keymap: # extends alias keymap self._alias_keymap[ci_key].append(self._ci_key(ci_alias)) else: self._alias_keymap[ci_key] = list() self._alias_keymap[ci_key].append(self._ci_key(ci_alias)) else: - if ci_key == self._ci_key(self._alias_keymap[ci_alias]): # passes if alias is already defined to the same key + if ci_key in self._alias_keymap and ci_alias in self._alias_keymap[ci_key]: # passes if alias is already defined to the same key pass - else: + elif not ignore_duplicates: raise KeyError('\'' + str(alias_to_add) + '\' already used as alias') else: if ci_key == self._ci_key(self._case_insensitive_keymap[ci_alias]): # passes if alias is already defined to the same key pass - else: + elif not ignore_duplicates: raise KeyError('\'' + str(alias_to_add) + '\' already used as key') else: raise KeyError('\'' + str(ci_key) + '\' is not an existing key') diff --git a/server/www/packages/packages-linux/x64/ldap3/utils/conv.py b/server/www/packages/packages-linux/x64/ldap3/utils/conv.py index ee90c66..9970926 100644 --- a/server/www/packages/packages-linux/x64/ldap3/utils/conv.py +++ b/server/www/packages/packages-linux/x64/ldap3/utils/conv.py @@ -1,278 +1,270 @@ -""" -""" - -# Created on 2014.04.26 -# -# Author: Giovanni Cannata -# -# Copyright 2014 - 2018 Giovanni Cannata -# -# This file is part of ldap3. -# -# ldap3 is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ldap3 is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with ldap3 in the COPYING and COPYING.LESSER files. -# If not, see . - -from base64 import b64encode, b64decode -import datetime -import re - -from .. import SEQUENCE_TYPES, STRING_TYPES, NUMERIC_TYPES, get_config_parameter -from ..utils.ciDict import CaseInsensitiveDict -from ..core.exceptions import LDAPDefinitionError - - -def to_unicode(obj, encoding=None, from_server=False): - """Try to convert bytes (and str in python2) to unicode. 
- Return object unmodified if python3 string, else raise an exception - """ - conf_default_client_encoding = get_config_parameter('DEFAULT_CLIENT_ENCODING') - conf_default_server_encoding = get_config_parameter('DEFAULT_SERVER_ENCODING') - conf_additional_server_encodings = get_config_parameter('ADDITIONAL_SERVER_ENCODINGS') - conf_additional_client_encodings = get_config_parameter('ADDITIONAL_CLIENT_ENCODINGS') - if isinstance(obj, NUMERIC_TYPES): - obj = str(obj) - - if isinstance(obj, (bytes, bytearray)): - if from_server: # data from server - if encoding is None: - encoding = conf_default_server_encoding - try: - return obj.decode(encoding) - except UnicodeDecodeError: - for encoding in conf_additional_server_encodings: # AD could have DN not encoded in utf-8 (even if this is not allowed by RFC4510) - try: - return obj.decode(encoding) - except UnicodeDecodeError: - pass - raise UnicodeError("Unable to convert server data to unicode: %r" % obj) - else: # data from client - if encoding is None: - encoding = conf_default_client_encoding - try: - return obj.decode(encoding) - except UnicodeDecodeError: - for encoding in conf_additional_client_encodings: # tries additional encodings - try: - return obj.decode(encoding) - except UnicodeDecodeError: - pass - raise UnicodeError("Unable to convert client data to unicode: %r" % obj) - - if isinstance(obj, STRING_TYPES): # python3 strings, python 2 unicode - return obj - - raise UnicodeError("Unable to convert type %s to unicode: %r" % (type(obj).__class__.__name__, obj)) - - -def to_raw(obj, encoding='utf-8'): - """Tries to convert to raw bytes from unicode""" - if isinstance(obj, NUMERIC_TYPES): - obj = str(obj) - - if not (isinstance(obj, bytes)): - if isinstance(obj, SEQUENCE_TYPES): - return [to_raw(element) for element in obj] - elif isinstance(obj, STRING_TYPES): - return obj.encode(encoding) - return obj - - -def escape_filter_chars(text, encoding=None): - """ Escape chars mentioned in RFC4515. """ - if encoding is None: - encoding = get_config_parameter('DEFAULT_ENCODING') - - try: - text = to_unicode(text, encoding) - escaped = text.replace('\\', '\\5c') - escaped = escaped.replace('*', '\\2a') - escaped = escaped.replace('(', '\\28') - escaped = escaped.replace(')', '\\29') - escaped = escaped.replace('\x00', '\\00') - except Exception: # probably raw bytes values, return escaped bytes value - escaped = to_unicode(escape_bytes(text)) - # escape all octets greater than 0x7F that are not part of a valid UTF-8 - # escaped = ''.join(c if c <= ord(b'\x7f') else escape_bytes(to_raw(to_unicode(c, encoding))) for c in escaped) - return escaped - - -def unescape_filter_chars(text, encoding=None): - """ unescape chars mentioned in RFC4515. 
""" - if encoding is None: - encoding = get_config_parameter('DEFAULT_ENCODING') - - unescaped = to_raw(text, encoding) - unescaped = unescaped.replace(b'\\5c', b'\\') - unescaped = unescaped.replace(b'\\5C', b'\\') - unescaped = unescaped.replace(b'\\2a', b'*') - unescaped = unescaped.replace(b'\\2A', b'*') - unescaped = unescaped.replace(b'\\28', b'(') - unescaped = unescaped.replace(b'\\29', b')') - unescaped = unescaped.replace(b'\\00', b'\x00') - return unescaped - - -def escape_bytes(bytes_value): - """ Convert a byte sequence to a properly escaped for LDAP (format BACKSLASH HEX HEX) string""" - if bytes_value: - if str is not bytes: # Python 3 - if isinstance(bytes_value, str): - bytes_value = bytearray(bytes_value, encoding='utf-8') - escaped = '\\'.join([('%02x' % int(b)) for b in bytes_value]) - else: # Python 2 - if isinstance(bytes_value, unicode): - bytes_value = bytes_value.encode('utf-8') - escaped = '\\'.join([('%02x' % ord(b)) for b in bytes_value]) - else: - escaped = '' - - return ('\\' + escaped) if escaped else '' - - -def prepare_for_stream(value): - if str is not bytes: # Python 3 - return value - else: # Python 2 - return value.decode() - -def json_encode_b64(obj): - try: - return dict(encoding='base64', encoded=b64encode(obj)) - except Exception as e: - raise LDAPDefinitionError('unable to encode ' + str(obj) + ' - ' + str(e)) - - -# noinspection PyProtectedMember -def check_json_dict(json_dict): - # needed for python 2 - - for k, v in json_dict.items(): - if isinstance(v, dict): - check_json_dict(v) - elif isinstance(v, CaseInsensitiveDict): - check_json_dict(v._store) - elif isinstance(v, SEQUENCE_TYPES): - for i, e in enumerate(v): - if isinstance(e, dict): - check_json_dict(e) - elif isinstance(e, CaseInsensitiveDict): - check_json_dict(e._store) - else: - v[i] = format_json(e) - else: - json_dict[k] = format_json(v) - - -def json_hook(obj): - if hasattr(obj, 'keys') and len(list(obj.keys())) == 2 and 'encoding' in obj.keys() and 'encoded' in obj.keys(): - return b64decode(obj['encoded']) - - return obj - - -# noinspection PyProtectedMember -def format_json(obj): - if isinstance(obj, CaseInsensitiveDict): - return obj._store - - if isinstance(obj, datetime.datetime): - return str(obj) - - if isinstance(obj, int): - return obj - - if str is bytes: # Python 2 - if isinstance(obj, long): # long exists only in python2 - return obj - - try: - if str is not bytes: # Python 3 - if isinstance(obj, bytes): - # return check_escape(str(obj, 'utf-8', errors='strict')) - return str(obj, 'utf-8', errors='strict') - raise LDAPDefinitionError('unable to serialize ' + str(obj)) - else: # Python 2 - if isinstance(obj, unicode): - return obj - else: - # return unicode(check_escape(obj)) - return unicode(obj) - except (TypeError, UnicodeDecodeError): - pass - - try: - return json_encode_b64(bytes(obj)) - except Exception: - pass - - raise LDAPDefinitionError('unable to serialize ' + str(obj)) - - -def is_filter_escaped(text): - if not type(text) == ((str is not bytes) and str or unicode): # requires str for Python 3 and unicode for Python 2 - raise ValueError('unicode input expected') - - return all(c not in text for c in '()*\0') and not re.search('\\\\([^0-9a-fA-F]|(.[^0-9a-fA-F]))', text) - - -# def ldap_escape_to_bytes(text): -# bytesequence = bytearray() -# if text.startswith('\\'): -# byte_values = text.split('\\') -# for value in byte_values[1:]: -# if len(value) != 2 and not value.isdigit(): -# raise LDAPDefinitionError('badly formatted LDAP byte escaped sequence') -# 
bytesequence.append(int(value, 16))
-#         return bytes(bytesequence)
-#     raise LDAPDefinitionError('badly formatted LDAP byte escaped sequence')
-
-
-def ldap_escape_to_bytes(text):
-    bytesequence = bytearray()
-    i = 0
-    try:
-        if isinstance(text, STRING_TYPES):
-            while i < len(text):
-                if text[i] == '\\':
-                    if len(text) > i + 2:
-                        try:
-                            bytesequence.append(int(text[i+1:i+3], 16))
-                            i += 3
-                            continue
-                        except ValueError:
-                            pass
-                    bytesequence.append(92)  # "\" ASCII code
-                else:
-                    raw = to_raw(text[i])
-                    for c in raw:
-                        bytesequence.append(c)
-                i += 1
-        elif isinstance(text, (bytes, bytearray)):
-            while i < len(text):
-                if text[i] == 92:  # "\" ASCII code
-                    if len(text) > i + 2:
-                        try:
-                            bytesequence.append(int(text[i + 1:i + 3], 16))
-                            i += 3
-                            continue
-                        except ValueError:
-                            pass
-                    bytesequence.append(92)  # "\" ASCII code
-                else:
-                    bytesequence.append(text[i])
-                i += 1
-    except Exception:
-        raise LDAPDefinitionError('badly formatted LDAP byte escaped sequence')
-
-    return bytes(bytesequence)
+"""
+"""
+
+# Created on 2014.04.26
+#
+# Author: Giovanni Cannata
+#
+# Copyright 2014 - 2019 Giovanni Cannata
+#
+# This file is part of ldap3.
+#
+# ldap3 is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ldap3 is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with ldap3 in the COPYING and COPYING.LESSER files.
+# If not, see <http://www.gnu.org/licenses/>.
+
+from base64 import b64encode, b64decode
+import datetime
+import re
+
+from .. import SEQUENCE_TYPES, STRING_TYPES, NUMERIC_TYPES, get_config_parameter
+from ..utils.ciDict import CaseInsensitiveDict
+from ..core.exceptions import LDAPDefinitionError
+
+
+def to_unicode(obj, encoding=None, from_server=False):
+    """Try to convert bytes (and str in python2) to unicode.
+    Return object unmodified if python3 string, else raise an exception
+    """
+    conf_default_client_encoding = get_config_parameter('DEFAULT_CLIENT_ENCODING')
+    conf_default_server_encoding = get_config_parameter('DEFAULT_SERVER_ENCODING')
+    conf_additional_server_encodings = get_config_parameter('ADDITIONAL_SERVER_ENCODINGS')
+    conf_additional_client_encodings = get_config_parameter('ADDITIONAL_CLIENT_ENCODINGS')
+    if isinstance(obj, NUMERIC_TYPES):
+        obj = str(obj)
+
+    if isinstance(obj, (bytes, bytearray)):
+        if from_server:  # data from server
+            if encoding is None:
+                encoding = conf_default_server_encoding
+            try:
+                return obj.decode(encoding)
+            except UnicodeDecodeError:
+                for encoding in conf_additional_server_encodings:  # AD could have DN not encoded in utf-8 (even if this is not allowed by RFC4510)
+                    try:
+                        return obj.decode(encoding)
+                    except UnicodeDecodeError:
+                        pass
+                raise UnicodeError("Unable to convert server data to unicode: %r" % obj)
+        else:  # data from client
+            if encoding is None:
+                encoding = conf_default_client_encoding
+            try:
+                return obj.decode(encoding)
+            except UnicodeDecodeError:
+                for encoding in conf_additional_client_encodings:  # tries additional encodings
+                    try:
+                        return obj.decode(encoding)
+                    except UnicodeDecodeError:
+                        pass
+                raise UnicodeError("Unable to convert client data to unicode: %r" % obj)
+
+    if isinstance(obj, STRING_TYPES):  # python3 strings, python 2 unicode
+        return obj
+
+    raise UnicodeError("Unable to convert type %s to unicode: %r" % (type(obj).__name__, obj))
+
+
+def to_raw(obj, encoding='utf-8'):
+    """Tries to convert to raw bytes from unicode"""
+    if isinstance(obj, NUMERIC_TYPES):
+        obj = str(obj)
+
+    if not (isinstance(obj, bytes)):
+        if isinstance(obj, SEQUENCE_TYPES):
+            return [to_raw(element) for element in obj]
+        elif isinstance(obj, STRING_TYPES):
+            return obj.encode(encoding)
+    return obj
+
+
+def escape_filter_chars(text, encoding=None):
+    """ Escape chars mentioned in RFC4515. """
+    if encoding is None:
+        encoding = get_config_parameter('DEFAULT_ENCODING')
+
+    try:
+        text = to_unicode(text, encoding)
+        escaped = text.replace('\\', '\\5c')
+        escaped = escaped.replace('*', '\\2a')
+        escaped = escaped.replace('(', '\\28')
+        escaped = escaped.replace(')', '\\29')
+        escaped = escaped.replace('\x00', '\\00')
+    except Exception:  # probably raw bytes values, return escaped bytes value
+        escaped = to_unicode(escape_bytes(text))
+        # escape all octets greater than 0x7F that are not part of a valid UTF-8
+        # escaped = ''.join(c if c <= ord(b'\x7f') else escape_bytes(to_raw(to_unicode(c, encoding))) for c in escaped)
+    return escaped
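A doctest-style sketch of the RFC 4515 escaping above (illustrative only, not part of the vendored file; assumes the package is importable as ldap3):

    >>> from ldap3.utils.conv import escape_filter_chars
    >>> escape_filter_chars('(admin)*')   # '(' -> \28, ')' -> \29, '*' -> \2a
    '\\28admin\\29\\2a'

+
+
+def unescape_filter_chars(text, encoding=None):
+    """ unescape chars mentioned in RFC4515.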
""" + if encoding is None: + encoding = get_config_parameter('DEFAULT_ENCODING') + + unescaped = to_raw(text, encoding) + unescaped = unescaped.replace(b'\\5c', b'\\') + unescaped = unescaped.replace(b'\\5C', b'\\') + unescaped = unescaped.replace(b'\\2a', b'*') + unescaped = unescaped.replace(b'\\2A', b'*') + unescaped = unescaped.replace(b'\\28', b'(') + unescaped = unescaped.replace(b'\\29', b')') + unescaped = unescaped.replace(b'\\00', b'\x00') + return unescaped + + +def escape_bytes(bytes_value): + """ Convert a byte sequence to a properly escaped for LDAP (format BACKSLASH HEX HEX) string""" + if bytes_value: + if str is not bytes: # Python 3 + if isinstance(bytes_value, str): + bytes_value = bytearray(bytes_value, encoding='utf-8') + escaped = '\\'.join([('%02x' % int(b)) for b in bytes_value]) + else: # Python 2 + if isinstance(bytes_value, unicode): + bytes_value = bytes_value.encode('utf-8') + escaped = '\\'.join([('%02x' % ord(b)) for b in bytes_value]) + else: + escaped = '' + + return ('\\' + escaped) if escaped else '' + + +def prepare_for_stream(value): + if str is not bytes: # Python 3 + return value + else: # Python 2 + return value.decode() + + +def json_encode_b64(obj): + try: + return dict(encoding='base64', encoded=b64encode(obj)) + except Exception as e: + raise LDAPDefinitionError('unable to encode ' + str(obj) + ' - ' + str(e)) + + +# noinspection PyProtectedMember +def check_json_dict(json_dict): + # needed for python 2 + + for k, v in json_dict.items(): + if isinstance(v, dict): + check_json_dict(v) + elif isinstance(v, CaseInsensitiveDict): + check_json_dict(v._store) + elif isinstance(v, SEQUENCE_TYPES): + for i, e in enumerate(v): + if isinstance(e, dict): + check_json_dict(e) + elif isinstance(e, CaseInsensitiveDict): + check_json_dict(e._store) + else: + v[i] = format_json(e) + else: + json_dict[k] = format_json(v) + + +def json_hook(obj): + if hasattr(obj, 'keys') and len(list(obj.keys())) == 2 and 'encoding' in obj.keys() and 'encoded' in obj.keys(): + return b64decode(obj['encoded']) + + return obj + + +# noinspection PyProtectedMember +def format_json(obj): + if isinstance(obj, CaseInsensitiveDict): + return obj._store + + if isinstance(obj, datetime.datetime): + return str(obj) + + if isinstance(obj, int): + return obj + + if isinstance(obj, datetime.timedelta): + return str(obj) + + if str is bytes: # Python 2 + if isinstance(obj, long): # long exists only in python2 + return obj + + try: + if str is not bytes: # Python 3 + if isinstance(obj, bytes): + # return check_escape(str(obj, 'utf-8', errors='strict')) + return str(obj, 'utf-8', errors='strict') + raise LDAPDefinitionError('unable to serialize ' + str(obj)) + else: # Python 2 + if isinstance(obj, unicode): + return obj + else: + # return unicode(check_escape(obj)) + return unicode(obj) + except (TypeError, UnicodeDecodeError): + pass + + try: + return json_encode_b64(bytes(obj)) + except Exception: + pass + + raise LDAPDefinitionError('unable to serialize ' + str(obj)) + + +def is_filter_escaped(text): + if not type(text) == ((str is not bytes) and str or unicode): # requires str for Python 3 and unicode for Python 2 + raise ValueError('unicode input expected') + + return all(c not in text for c in '()*\0') and not re.search('\\\\([^0-9a-fA-F]|(.[^0-9a-fA-F]))', text) + + +def ldap_escape_to_bytes(text): + bytesequence = bytearray() + i = 0 + try: + if isinstance(text, STRING_TYPES): + while i < len(text): + if text[i] == '\\': + if len(text) > i + 2: + try: + 
bytesequence.append(int(text[i+1:i+3], 16))
+                            i += 3
+                            continue
+                        except ValueError:
+                            pass
+                    bytesequence.append(92)  # "\" ASCII code
+                else:
+                    raw = to_raw(text[i])
+                    for c in raw:
+                        bytesequence.append(c)
+                i += 1
+        elif isinstance(text, (bytes, bytearray)):
+            while i < len(text):
+                if text[i] == 92:  # "\" ASCII code
+                    if len(text) > i + 2:
+                        try:
+                            bytesequence.append(int(text[i + 1:i + 3], 16))
+                            i += 3
+                            continue
+                        except ValueError:
+                            pass
+                    bytesequence.append(92)  # "\" ASCII code
+                else:
+                    bytesequence.append(text[i])
+                i += 1
+    except Exception:
+        raise LDAPDefinitionError('badly formatted LDAP byte escaped sequence')
+
+    return bytes(bytesequence)
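A short sketch of the ldap_escape_to_bytes() helper that closes this file (illustrative only, not part of the patch):

    >>> from ldap3.utils.conv import ldap_escape_to_bytes
    >>> ldap_escape_to_bytes('\\30\\31abc')   # each \XX escape becomes a single byte
    b'01abc'

diff --git a/server/www/packages/packages-linux/x64/ldap3/utils/dn.py b/server/www/packages/packages-linux/x64/ldap3/utils/dn.py
index d1a50a9..99705ce 100644
--- a/server/www/packages/packages-linux/x64/ldap3/utils/dn.py
+++ b/server/www/packages/packages-linux/x64/ldap3/utils/dn.py
@@ -1,375 +1,403 @@
-"""
-"""
-
-# Created on 2014.09.08
-#
-# Author: Giovanni Cannata
-#
-# Copyright 2014 - 2018 Giovanni Cannata
-#
-# This file is part of ldap3.
-#
-# ldap3 is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published
-# by the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ldap3 is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with ldap3 in the COPYING and COPYING.LESSER files.
-# If not, see <http://www.gnu.org/licenses/>.
-
-from string import hexdigits, ascii_letters, digits
-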
-from .. import SEQUENCE_TYPES
-from ..core.exceptions import LDAPInvalidDnError
-
-
-STATE_ANY = 0
-STATE_ESCAPE = 1
-STATE_ESCAPE_HEX = 2
-
-
-def _add_ava(ava, decompose, remove_space, space_around_equal):
-    if not ava:
-        return ''
-
-    space = ' ' if space_around_equal else ''
-    attr_name, _, value = ava.partition('=')
-    if decompose:
-        if remove_space:
-            component = (attr_name.strip(), value.strip())
-        else:
-            component = (attr_name, value)
-    else:
-        if remove_space:
-            component = attr_name.strip() + space + '=' + space + value.strip()
-        else:
-            component = attr_name + space + '=' + space + value
-
-    return component
-
-
-def to_dn(iterator, decompose=False, remove_space=False, space_around_equal=False, separate_rdn=False):
-    """
-    Convert an iterator to a list of dn parts
-    if decompose=True return a list of tuple (one for each dn component) else return a list of strings
-    if remove_space=True removes unneeded spaces
-    if space_around_equal=True add spaces around equal in returned strings
-    if separate_rdn=True consider multiple RDNs as different component of DN
-    """
-    dn = []
-    component = ''
-    escape_sequence = False
-    for c in iterator:
-        if c == '\\':  # escape sequence
-            escape_sequence = True
-        elif escape_sequence and c != ' ':
-            escape_sequence = False
-        elif c == '+' and separate_rdn:
-            dn.append(_add_ava(component, decompose, remove_space, space_around_equal))
-            component = ''
-            continue
-        elif c == ',':
-            if '=' in component:
-                dn.append(_add_ava(component, decompose, remove_space, space_around_equal))
-                component = ''
-                continue
-
-        component += c
-
-    dn.append(_add_ava(component, decompose, remove_space, space_around_equal))
-    return dn
-
-
-def _find_first_unescaped(dn, char, pos):
-    while True:
-        pos = dn.find(char, pos)
-        if pos == -1:
-            break  # no char found
-        if pos > 0 and dn[pos - 1] != '\\':  # unescaped char
-            break
-
-        pos += 1
-
-    return pos
-
-
-def _find_last_unescaped(dn, char, start, stop=0):
-    while True:
-        stop = dn.rfind(char, start, stop)
-        if stop == -1:
-            break
-        if stop >= 0 and dn[stop - 1] != '\\':
-            break
-
-        if stop < start:
-            stop = -1
-            break
-
-    return stop
-
-
-def _get_next_ava(dn):
-    comma = _find_first_unescaped(dn, ',', 0)
-    plus = _find_first_unescaped(dn, '+', 0)
-
-    if plus > 0 and (plus < comma or comma == -1):
-        equal = _find_first_unescaped(dn, '=', plus + 1)
-        if equal > plus + 1:
-            plus = _find_last_unescaped(dn, '+', plus, equal)
-            return dn[:plus], '+'
-
-    if comma > 0:
-        equal = _find_first_unescaped(dn, '=', comma + 1)
-        if equal > comma + 1:
-            comma = _find_last_unescaped(dn, ',', comma, equal)
-            return dn[:comma], ','
-
-    return dn, ''
-
-
-def _split_ava(ava, escape=False, strip=True):
-    equal = ava.find('=')
-    while equal > 0:  # not first character
-        if ava[equal - 1] != '\\':  # not an escaped equal so it must be an ava separator
-            # attribute_type1 = ava[0:equal].strip() if strip else ava[0:equal]
-            if strip:
-                attribute_type = ava[0:equal].strip()
-                attribute_value = _escape_attribute_value(ava[equal + 1:].strip()) if escape else ava[equal + 1:].strip()
-            else:
-                attribute_type = ava[0:equal]
-                attribute_value = _escape_attribute_value(ava[equal + 1:]) if escape else ava[equal + 1:]
-
-            return attribute_type, attribute_value
-        equal = ava.find('=', equal + 1)
-
-    return '', (ava.strip if strip else ava)  # if no equal found return only value
-
-
-def _validate_attribute_type(attribute_type):
-    if not attribute_type:
-        raise LDAPInvalidDnError('attribute type not present')
-
-    if attribute_type == '<GUID':  # patch for AD DirSync
-        return True
-
-    for c in attribute_type:
-        if not (c in ascii_letters or c in digits or c == '-'):  # allowed characters for attribute type
-            raise LDAPInvalidDnError('character ' + c + ' not allowed in attribute type')
-
-    if attribute_type[0] == '-' or attribute_type[-1] == '-':  # dash cannot be used as first or last character
-        raise LDAPInvalidDnError('dash not allowed as first or last character of attribute type')
-
-    return True
-
-
-def _validate_attribute_value(attribute_value):
-    if not attribute_value:
-        return True
-
-    if attribute_value[0] == '#':  # only hex characters are valid
-        for c in attribute_value[1:]:
-            if c not in hexdigits:  # allowed only hex digits as per RFC 4514
-                raise LDAPInvalidDnError('character ' + c + ' not allowed in hex representation of attribute value')
-        if len(attribute_value) % 2 == 0:  # string must be # + HEX HEX (an odd number of chars)
-            raise LDAPInvalidDnError('hex representation must be in the form of <HEX><HEX> pairs')
-    if attribute_value[0]
== ' ': # space cannot be used as first or last character - raise LDAPInvalidDnError('SPACE not allowed as first character of attribute value') - if attribute_value[-1] == ' ': - raise LDAPInvalidDnError('SPACE not allowed as last character of attribute value') - - state = STATE_ANY - for c in attribute_value: - if state == STATE_ANY: - if c == '\\': - state = STATE_ESCAPE - elif c in '"#+,;<=>\00': - raise LDAPInvalidDnError('special characters ' + c + ' must be escaped') - elif state == STATE_ESCAPE: - if c in hexdigits: - state = STATE_ESCAPE_HEX - elif c in ' "#+,;<=>\\\00': - state = STATE_ANY - else: - raise LDAPInvalidDnError('invalid escaped character ' + c) - elif state == STATE_ESCAPE_HEX: - if c in hexdigits: - state = STATE_ANY - else: - raise LDAPInvalidDnError('invalid escaped character ' + c) - - # final state - if state != STATE_ANY: - raise LDAPInvalidDnError('invalid final character') - - return True - - -def _escape_attribute_value(attribute_value): - if not attribute_value: - return '' - - if attribute_value[0] == '#': # with leading SHARP only pairs of hex characters are valid - valid_hex = True - if len(attribute_value) % 2 == 0: # string must be # + HEX HEX (an odd number of chars) - valid_hex = False - - if valid_hex: - for c in attribute_value: - if c not in hexdigits: # allowed only hex digits as per RFC 4514 - valid_hex = False - break - - if valid_hex: - return attribute_value - - state = STATE_ANY - escaped = '' - tmp_buffer = '' - for c in attribute_value: - if state == STATE_ANY: - if c == '\\': - state = STATE_ESCAPE - elif c in '"#+,;<=>\00': - escaped += '\\' + c - else: - escaped += c - elif state == STATE_ESCAPE: - if c in hexdigits: - tmp_buffer = c - state = STATE_ESCAPE_HEX - elif c in ' "#+,;<=>\\\00': - escaped += '\\' + c - state = STATE_ANY - else: - escaped += '\\\\' + c - elif state == STATE_ESCAPE_HEX: - if c in hexdigits: - escaped += '\\' + tmp_buffer + c - else: - escaped += '\\\\' + tmp_buffer + c - tmp_buffer = '' - state = STATE_ANY - - # final state - if state == STATE_ESCAPE: - escaped += '\\\\' - elif state == STATE_ESCAPE_HEX: - escaped += '\\\\' + tmp_buffer - - if escaped[0] == ' ': # leading SPACE must be escaped - escaped = '\\' + escaped - - if escaped[-1] == ' ' and len(escaped) > 1 and escaped[-2] != '\\': # trailing SPACE must be escaped - escaped = escaped[:-1] + '\\ ' - - return escaped - - -def parse_dn(dn, escape=False, strip=True): - rdns = [] - avas = [] - while dn: - ava, separator = _get_next_ava(dn) # if returned ava doesn't containg any unescaped equal it'a appended to last ava in avas - - dn = dn[len(ava) + 1:] - if _find_first_unescaped(ava, '=', 0) > 0 or len(avas) == 0: - avas.append((ava, separator)) - else: - avas[len(avas) - 1] = (avas[len(avas) - 1][0] + avas[len(avas) - 1][1] + ava, separator) - - for ava, separator in avas: - attribute_type, attribute_value = _split_ava(ava, escape, strip) - - if not _validate_attribute_type(attribute_type): - raise LDAPInvalidDnError('unable to validate attribute type in ' + ava) - - if not _validate_attribute_value(attribute_value): - raise LDAPInvalidDnError('unable to validate attribute value in ' + ava) - - rdns.append((attribute_type, attribute_value, separator)) - dn = dn[len(ava) + 1:] - - if not rdns: - raise LDAPInvalidDnError('empty dn') - - return rdns - - -def safe_dn(dn, decompose=False, reverse=False): - """ - normalize and escape a dn, if dn is a sequence it is joined. 
-    the reverse parameter changes the join direction of the sequence
-    """
-    if isinstance(dn, SEQUENCE_TYPES):
-        components = [rdn for rdn in dn]
-        if reverse:
-            dn = ','.join(reversed(components))
-        else:
-            dn = ','.join(components)
-    if decompose:
-        escaped_dn = []
-    else:
-        escaped_dn = ''
-
-    if dn.startswith('<GUID='):  # Active Directory allows looking up objects by putting its GUID in a specially-formatted DN (e.g. '<GUID=...>')
-        escaped_dn = dn
-    elif '@' not in dn and '\\' not in dn:  # active directory UPN (User Principal Name) consists of an account, the at sign (@) and a domain, or the domain level logon name domain\username
-        for component in parse_dn(dn, escape=True):
-            if decompose:
-                escaped_dn.append((component[0], component[1], component[2]))
-            else:
-                escaped_dn += component[0] + '=' + component[1] + component[2]
-    elif '@' in dn and '=' not in dn and len(dn.split('@')) != 2:
-        raise LDAPInvalidDnError('Active Directory User Principal Name must consist of name@domain')
-    elif '\\' in dn and '=' not in dn and len(dn.split('\\')) != 2:
-        raise LDAPInvalidDnError('Active Directory Domain Level Logon Name must consist of name\\domain')
-    else:
-        escaped_dn = dn
-
-    return escaped_dn
-
-
-def safe_rdn(dn, decompose=False):
-    """Returns a list of rdn for the dn, usually there is only one rdn, but it can be more than one when the + sign is used"""
-    escaped_rdn = []
-    one_more = True
-    for component in parse_dn(dn, escape=True):
-        if component[2] == '+' or one_more:
-            if decompose:
-                escaped_rdn.append((component[0], component[1]))
-            else:
-                escaped_rdn.append(component[0] + '=' + component[1])
-            if component[2] == '+':
-                one_more = True
-            else:
-                one_more = False
-                break
-
-    if one_more:
-        raise LDAPInvalidDnError('bad dn ' + str(dn))
-
-    return escaped_rdn
-
-
-def escape_rdn(rdn):
-    """
-    Escape rdn characters to prevent injection according to RFC 4514.
-    """
-
-    # '\\' must be handled first or the escape slashes will be escaped!
-    for char in ['\\', ',', '+', '"', '<', '>', ';', '=', '\x00']:
-        rdn = rdn.replace(char, '\\' + char)
-
-    if rdn[0] == '#' or rdn[0] == ' ':
-        rdn = ''.join(('\\', rdn))
-
-    if rdn[-1] == ' ':
-        rdn = ''.join((rdn[:-1], '\\ '))
-
-    return rdn
+"""
+"""
+
+# Created on 2014.09.08
+#
+# Author: Giovanni Cannata
+#
+# Copyright 2014 - 2019 Giovanni Cannata
+#
+# This file is part of ldap3.
+#
+# ldap3 is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ldap3 is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with ldap3 in the COPYING and COPYING.LESSER files.
+# If not, see <http://www.gnu.org/licenses/>.
+
+from string import hexdigits, ascii_letters, digits
+
+from ..
import SEQUENCE_TYPES +from ..core.exceptions import LDAPInvalidDnError + + +STATE_ANY = 0 +STATE_ESCAPE = 1 +STATE_ESCAPE_HEX = 2 + + +def _add_ava(ava, decompose, remove_space, space_around_equal): + if not ava: + return '' + + space = ' ' if space_around_equal else '' + attr_name, _, value = ava.partition('=') + if decompose: + if remove_space: + component = (attr_name.strip(), value.strip()) + else: + component = (attr_name, value) + else: + if remove_space: + component = attr_name.strip() + space + '=' + space + value.strip() + else: + component = attr_name + space + '=' + space + value + + return component + + +def to_dn(iterator, decompose=False, remove_space=False, space_around_equal=False, separate_rdn=False): + """ + Convert an iterator to a list of dn parts + if decompose=True return a list of tuple (one for each dn component) else return a list of strings + if remove_space=True removes unneeded spaces + if space_around_equal=True add spaces around equal in returned strings + if separate_rdn=True consider multiple RDNs as different component of DN + """ + dn = [] + component = '' + escape_sequence = False + for c in iterator: + if c == '\\': # escape sequence + escape_sequence = True + elif escape_sequence and c != ' ': + escape_sequence = False + elif c == '+' and separate_rdn: + dn.append(_add_ava(component, decompose, remove_space, space_around_equal)) + component = '' + continue + elif c == ',': + if '=' in component: + dn.append(_add_ava(component, decompose, remove_space, space_around_equal)) + component = '' + continue + + component += c + + dn.append(_add_ava(component, decompose, remove_space, space_around_equal)) + return dn + + +def _find_first_unescaped(dn, char, pos): + while True: + pos = dn.find(char, pos) + if pos == -1: + break # no char found + if pos > 0 and dn[pos - 1] != '\\': # unescaped char + break + elif pos > 1 and dn[pos - 1] == '\\': # may be unescaped + escaped = True + for c in dn[pos - 2:0:-1]: + if c == '\\': + escaped = not escaped + else: + break + if not escaped: + break + pos += 1 + + return pos + + +def _find_last_unescaped(dn, char, start, stop=0): + while True: + stop = dn.rfind(char, start, stop) + if stop == -1: + break + if stop >= 0 and dn[stop - 1] != '\\': + break + elif stop > 1 and dn[stop - 1] == '\\': # may be unescaped + escaped = True + for c in dn[stop - 2:0:-1]: + if c == '\\': + escaped = not escaped + else: + break + if not escaped: + break + if stop < start: + stop = -1 + break + + return stop + + +def _get_next_ava(dn): + comma = _find_first_unescaped(dn, ',', 0) + plus = _find_first_unescaped(dn, '+', 0) + + if plus > 0 and (plus < comma or comma == -1): + equal = _find_first_unescaped(dn, '=', plus + 1) + if equal > plus + 1: + plus = _find_last_unescaped(dn, '+', plus, equal) + return dn[:plus], '+' + + if comma > 0: + equal = _find_first_unescaped(dn, '=', comma + 1) + if equal > comma + 1: + comma = _find_last_unescaped(dn, ',', comma, equal) + return dn[:comma], ',' + + return dn, '' + + +def _split_ava(ava, escape=False, strip=True): + equal = ava.find('=') + while equal > 0: # not first character + if ava[equal - 1] != '\\': # not an escaped equal so it must be an ava separator + # attribute_type1 = ava[0:equal].strip() if strip else ava[0:equal] + if strip: + attribute_type = ava[0:equal].strip() + attribute_value = _escape_attribute_value(ava[equal + 1:].strip()) if escape else ava[equal + 1:].strip() + else: + attribute_type = ava[0:equal] + attribute_value = _escape_attribute_value(ava[equal + 1:]) if escape 
else ava[equal + 1:]
+
+            return attribute_type, attribute_value
+        equal = ava.find('=', equal + 1)
+
+    return '', (ava.strip() if strip else ava)  # if no equal found return only value
+
+
+def _validate_attribute_type(attribute_type):
+    if not attribute_type:
+        raise LDAPInvalidDnError('attribute type not present')
+
+    if attribute_type == '<GUID':  # patch for AD DirSync
+        return True
+
+    for c in attribute_type:
+        if not (c in ascii_letters or c in digits or c == '-'):  # allowed characters for attribute type
+            raise LDAPInvalidDnError('character ' + c + ' not allowed in attribute type')
+
+    if attribute_type[0] == '-' or attribute_type[-1] == '-':  # dash cannot be used as first or last character
+        raise LDAPInvalidDnError('dash not allowed as first or last character of attribute type')
+
+    return True
+
+
+def _validate_attribute_value(attribute_value):
+    if not attribute_value:
+        return True
+
+    if attribute_value[0] == '#':  # only hex characters are valid
+        for c in attribute_value[1:]:
+            if c not in hexdigits:  # allowed only hex digits as per RFC 4514
+                raise LDAPInvalidDnError('character ' + c + ' not allowed in hex representation of attribute value')
+        if len(attribute_value) % 2 == 0:  # string must be # + HEX HEX (an odd number of chars)
+            raise LDAPInvalidDnError('hex representation must be in the form of <HEX><HEX> pairs')
+    if attribute_value[0] == ' ':  # unescaped space cannot be used as leading or last character
+        raise LDAPInvalidDnError('SPACE must be escaped as leading character of attribute value')
+    if attribute_value.endswith(' ') and not attribute_value.endswith('\\ '):
+        raise LDAPInvalidDnError('SPACE must be escaped as trailing character of attribute value')
+
+    state = STATE_ANY
+    for c in attribute_value:
+        if state == STATE_ANY:
+            if c == '\\':
+                state = STATE_ESCAPE
+            elif c in '"#+,;<=>\00':
+                raise LDAPInvalidDnError('special character ' + c + ' must be escaped')
+        elif state == STATE_ESCAPE:
+            if c in hexdigits:
+                state = STATE_ESCAPE_HEX
+            elif c in ' "#+,;<=>\\\00':
+                state = STATE_ANY
+            else:
+                raise LDAPInvalidDnError('invalid escaped character ' + c)
+        elif state == STATE_ESCAPE_HEX:
+            if c in hexdigits:
+                state = STATE_ANY
+            else:
+                raise LDAPInvalidDnError('invalid escaped character ' + c)
+
+    # final state
+    if state != STATE_ANY:
+        raise LDAPInvalidDnError('invalid final character')
+
+    return True
+
+
+def _escape_attribute_value(attribute_value):
+    if not attribute_value:
+        return ''
+
+    if attribute_value[0] == '#':  # with leading SHARP only pairs of hex characters are valid
+        valid_hex = True
+        if len(attribute_value) % 2 == 0:  # string must be # + HEX HEX (an odd number of chars)
+            valid_hex = False
+
+        if valid_hex:
+            for c in attribute_value:
+                if c not in hexdigits:  # allowed only hex digits as per RFC 4514
+                    valid_hex = False
+                    break
+
+        if valid_hex:
+            return attribute_value
+
+    state = STATE_ANY
+    escaped = ''
+    tmp_buffer = ''
+    for c in attribute_value:
+        if state == STATE_ANY:
+            if c == '\\':
+                state = STATE_ESCAPE
+            elif c in '"#+,;<=>\00':
+                escaped += '\\' + c
+            else:
+                escaped += c
+        elif state == STATE_ESCAPE:
+            if c in hexdigits:
+                tmp_buffer = c
+                state = STATE_ESCAPE_HEX
+            elif c in ' "#+,;<=>\\\00':
+                escaped += '\\' + c
+                state = STATE_ANY
+            else:
+                escaped += '\\\\' + c
+        elif state == STATE_ESCAPE_HEX:
+            if c in hexdigits:
+                escaped += '\\' + tmp_buffer + c
+            else:
+                escaped += '\\\\' + tmp_buffer + c
+            tmp_buffer = ''
+            state = STATE_ANY
+
+    # final state
+    if state == STATE_ESCAPE:
+        escaped += '\\\\'
+    elif state == STATE_ESCAPE_HEX:
+        escaped += '\\\\' + tmp_buffer
+
+    if escaped[0] == ' ':  # leading SPACE must be escaped
+        escaped = '\\' + escaped
+
+    if escaped[-1] == ' ' and len(escaped) > 1 and escaped[-2] != '\\':  # trailing SPACE must be escaped
+        escaped = escaped[:-1] + '\\ '
+
+    return escaped
+
+
+def parse_dn(dn, escape=False, strip=False):
+    """
+    Parses a DN into its syntactic components
+    :param dn:
+    :param escape:
+    :param strip:
+    :return:
+    a list of triples representing the `attributeTypeAndValue` elements,
+    each containing the `attributeType`, the `attributeValue` and the separator
+    that follows it (`COMMA` or `PLUS`), or an empty `str` if there is none;
+    values are kept in their original representation, still containing escapes or encoded as hex.
+ """ + rdns = [] + avas = [] + while dn: + ava, separator = _get_next_ava(dn) # if returned ava doesn't containg any unescaped equal it'a appended to last ava in avas + + dn = dn[len(ava) + 1:] + if _find_first_unescaped(ava, '=', 0) > 0 or len(avas) == 0: + avas.append((ava, separator)) + else: + avas[len(avas) - 1] = (avas[len(avas) - 1][0] + avas[len(avas) - 1][1] + ava, separator) + + for ava, separator in avas: + attribute_type, attribute_value = _split_ava(ava, escape, strip) + + if not _validate_attribute_type(attribute_type): + raise LDAPInvalidDnError('unable to validate attribute type in ' + ava) + + if not _validate_attribute_value(attribute_value): + raise LDAPInvalidDnError('unable to validate attribute value in ' + ava) + + rdns.append((attribute_type, attribute_value, separator)) + dn = dn[len(ava) + 1:] + + if not rdns: + raise LDAPInvalidDnError('empty dn') + + return rdns + + +def safe_dn(dn, decompose=False, reverse=False): + """ + normalize and escape a dn, if dn is a sequence it is joined. + the reverse parameter changes the join direction of the sequence + """ + if isinstance(dn, SEQUENCE_TYPES): + components = [rdn for rdn in dn] + if reverse: + dn = ','.join(reversed(components)) + else: + dn = ','.join(components) + if decompose: + escaped_dn = [] + else: + escaped_dn = '' + + if dn.startswith(''): # Active Directory allows looking up objects by putting its GUID in a specially-formatted DN (e.g. '') + escaped_dn = dn + elif dn.startswith(''): # Active Directory allows Binding to Well-Known Objects Using WKGUID in a specially-formatted DN (e.g. ) + escaped_dn = dn + elif '@' not in dn: # active directory UPN (User Principal Name) consist of an account, the at sign (@) and a domain, or the domain level logn name domain\username + for component in parse_dn(dn, escape=True): + if decompose: + escaped_dn.append((component[0], component[1], component[2])) + else: + escaped_dn += component[0] + '=' + component[1] + component[2] + elif '@' in dn and '=' not in dn and len(dn.split('@')) != 2: + raise LDAPInvalidDnError('Active Directory User Principal Name must consist of name@domain') + elif '\\' in dn and '=' not in dn and len(dn.split('\\')) != 2: + raise LDAPInvalidDnError('Active Directory Domain Level Logon Name must consist of name\\domain') + else: + escaped_dn = dn + + return escaped_dn + + +def safe_rdn(dn, decompose=False): + """Returns a list of rdn for the dn, usually there is only one rdn, but it can be more than one when the + sign is used""" + escaped_rdn = [] + one_more = True + for component in parse_dn(dn, escape=True): + if component[2] == '+' or one_more: + if decompose: + escaped_rdn.append((component[0], component[1])) + else: + escaped_rdn.append(component[0] + '=' + component[1]) + if component[2] == '+': + one_more = True + else: + one_more = False + break + + if one_more: + raise LDAPInvalidDnError('bad dn ' + str(dn)) + + return escaped_rdn + + +def escape_rdn(rdn): + """ + Escape rdn characters to prevent injection according to RFC 4514. + """ + + # '/' must be handled first or the escape slashes will be escaped! 
+    for char in ['\\', ',', '+', '"', '<', '>', ';', '=', '\x00']:
+        rdn = rdn.replace(char, '\\' + char)
+
+    if rdn[0] == '#' or rdn[0] == ' ':
+        rdn = ''.join(('\\', rdn))
+
+    if rdn[-1] == ' ':
+        rdn = ''.join((rdn[:-1], '\\ '))
+
+    return rdn
diff --git a/server/www/packages/packages-linux/x64/ldap3/utils/ntlm.py b/server/www/packages/packages-linux/x64/ldap3/utils/ntlm.py
index 54efaae..af32a87 100644
--- a/server/www/packages/packages-linux/x64/ldap3/utils/ntlm.py
+++ b/server/www/packages/packages-linux/x64/ldap3/utils/ntlm.py
@@ -483,7 +483,7 @@ class NtlmClient(object):
         temp += self.server_target_info_raw
         temp += pack('
diff --git a/server/www/packages/packages-linux/x64/mako/__init__.py b/server/www/packages/packages-linux/x64/mako/__init__.py
--- a/server/www/packages/packages-linux/x64/mako/__init__.py
+++ b/server/www/packages/packages-linux/x64/mako/__init__.py
 # mako/__init__.py
-# Copyright (C) 2006-2016 the Mako authors and contributors
+# Copyright 2006-2019 the Mako authors and contributors
 #
 # This module is part of Mako and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
-__version__ = '1.0.7'
+__version__ = "1.1.0"
diff --git a/server/www/packages/packages-linux/x64/mako/_ast_util.py b/server/www/packages/packages-linux/x64/mako/_ast_util.py
index c410287..74c0851 100644
--- a/server/www/packages/packages-linux/x64/mako/_ast_util.py
+++ b/server/www/packages/packages-linux/x64/mako/_ast_util.py
@@ -1,5 +1,5 @@
 # mako/_ast_util.py
-# Copyright (C) 2006-2016 the Mako authors and contributors
+# Copyright 2006-2019 the Mako authors and contributors
 #
 # This module is part of Mako and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -8,69 +8,77 @@
     ast
     ~~~
 
-    The `ast` module helps Python applications to process trees of the Python
-    abstract syntax grammar.  The abstract syntax itself might change with
-    each Python release; this module helps to find out programmatically what
-    the current grammar looks like and allows modifications of it.
-
-    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
-    a flag to the `compile()` builtin function or by using the `parse()`
-    function from this module.  The result will be a tree of objects whose
-    classes all inherit from `ast.AST`.
-
-    A modified abstract syntax tree can be compiled into a Python code object
-    using the built-in `compile()` function.
-
-    Additionally various helper functions are provided that make working with
-    the trees simpler.  The main intention of the helper functions and this
-    module in general is to provide an easy to use interface for libraries
-    that work tightly with the python syntax (template engines for example).
-
+    This is a stripped down version of Armin Ronacher's ast module.
 
     :copyright: Copyright 2008 by Armin Ronacher.
     :license: Python License.
""" -from _ast import * # noqa + + +from _ast import Add +from _ast import And +from _ast import AST +from _ast import BitAnd +from _ast import BitOr +from _ast import BitXor +from _ast import Div +from _ast import Eq +from _ast import FloorDiv +from _ast import Gt +from _ast import GtE +from _ast import If +from _ast import In +from _ast import Invert +from _ast import Is +from _ast import IsNot +from _ast import LShift +from _ast import Lt +from _ast import LtE +from _ast import Mod +from _ast import Mult +from _ast import Name +from _ast import Not +from _ast import NotEq +from _ast import NotIn +from _ast import Or +from _ast import PyCF_ONLY_AST +from _ast import RShift +from _ast import Sub +from _ast import UAdd +from _ast import USub + from mako.compat import arg_stringname -BOOLOP_SYMBOLS = { - And: 'and', - Or: 'or' -} +BOOLOP_SYMBOLS = {And: "and", Or: "or"} BINOP_SYMBOLS = { - Add: '+', - Sub: '-', - Mult: '*', - Div: '/', - FloorDiv: '//', - Mod: '%', - LShift: '<<', - RShift: '>>', - BitOr: '|', - BitAnd: '&', - BitXor: '^' + Add: "+", + Sub: "-", + Mult: "*", + Div: "/", + FloorDiv: "//", + Mod: "%", + LShift: "<<", + RShift: ">>", + BitOr: "|", + BitAnd: "&", + BitXor: "^", } CMPOP_SYMBOLS = { - Eq: '==', - Gt: '>', - GtE: '>=', - In: 'in', - Is: 'is', - IsNot: 'is not', - Lt: '<', - LtE: '<=', - NotEq: '!=', - NotIn: 'not in' + Eq: "==", + Gt: ">", + GtE: ">=", + In: "in", + Is: "is", + IsNot: "is not", + Lt: "<", + LtE: "<=", + NotEq: "!=", + NotIn: "not in", } -UNARYOP_SYMBOLS = { - Invert: '~', - Not: 'not', - UAdd: '+', - USub: '-' -} +UNARYOP_SYMBOLS = {Invert: "~", Not: "not", UAdd: "+", USub: "-"} ALL_SYMBOLS = {} ALL_SYMBOLS.update(BOOLOP_SYMBOLS) @@ -79,105 +87,15 @@ ALL_SYMBOLS.update(CMPOP_SYMBOLS) ALL_SYMBOLS.update(UNARYOP_SYMBOLS) -def parse(expr, filename='', mode='exec'): +def parse(expr, filename="", mode="exec"): """Parse an expression into an AST node.""" return compile(expr, filename, mode, PyCF_ONLY_AST) -def to_source(node, indent_with=' ' * 4): - """ - This function can convert a node tree back into python sourcecode. This - is useful for debugging purposes, especially if you're dealing with custom - asts not generated by python itself. - - It could be that the sourcecode is evaluable when the AST itself is not - compilable / evaluable. The reason for this is that the AST contains some - more data than regular sourcecode does, which is dropped during - conversion. - - Each level of indentation is replaced with `indent_with`. Per default this - parameter is equal to four spaces as suggested by PEP 8, but it might be - adjusted to match the application's styleguide. - """ - generator = SourceGenerator(indent_with) - generator.visit(node) - return ''.join(generator.result) - - -def dump(node): - """ - A very verbose representation of the node passed. This is useful for - debugging purposes. - """ - def _format(node): - if isinstance(node, AST): - return '%s(%s)' % (node.__class__.__name__, - ', '.join('%s=%s' % (a, _format(b)) - for a, b in iter_fields(node))) - elif isinstance(node, list): - return '[%s]' % ', '.join(_format(x) for x in node) - return repr(node) - if not isinstance(node, AST): - raise TypeError('expected AST, got %r' % node.__class__.__name__) - return _format(node) - - -def copy_location(new_node, old_node): - """ - Copy the source location hint (`lineno` and `col_offset`) from the - old to the new node if possible and return the new one. 
- """ - for attr in 'lineno', 'col_offset': - if attr in old_node._attributes and attr in new_node._attributes \ - and hasattr(old_node, attr): - setattr(new_node, attr, getattr(old_node, attr)) - return new_node - - -def fix_missing_locations(node): - """ - Some nodes require a line number and the column offset. Without that - information the compiler will abort the compilation. Because it can be - a dull task to add appropriate line numbers and column offsets when - adding new nodes this function can help. It copies the line number and - column offset of the parent node to the child nodes without this - information. - - Unlike `copy_location` this works recursive and won't touch nodes that - already have a location information. - """ - def _fix(node, lineno, col_offset): - if 'lineno' in node._attributes: - if not hasattr(node, 'lineno'): - node.lineno = lineno - else: - lineno = node.lineno - if 'col_offset' in node._attributes: - if not hasattr(node, 'col_offset'): - node.col_offset = col_offset - else: - col_offset = node.col_offset - for child in iter_child_nodes(node): - _fix(child, lineno, col_offset) - _fix(node, 1, 0) - return node - - -def increment_lineno(node, n=1): - """ - Increment the line numbers of all nodes by `n` if they have line number - attributes. This is useful to "move code" to a different location in a - file. - """ - for node in zip((node,), walk(node)): - if 'lineno' in node._attributes: - node.lineno = getattr(node, 'lineno', 0) + n - - def iter_fields(node): """Iterate over all fields of a node, only yielding existing fields.""" # CPython 2.5 compat - if not hasattr(node, '_fields') or not node._fields: + if not hasattr(node, "_fields") or not node._fields: return for field in node._fields: try: @@ -186,65 +104,6 @@ def iter_fields(node): pass -def get_fields(node): - """Like `iter_fields` but returns a dict.""" - return dict(iter_fields(node)) - - -def iter_child_nodes(node): - """Iterate over all child nodes or a node.""" - for name, field in iter_fields(node): - if isinstance(field, AST): - yield field - elif isinstance(field, list): - for item in field: - if isinstance(item, AST): - yield item - - -def get_child_nodes(node): - """Like `iter_child_nodes` but returns a list.""" - return list(iter_child_nodes(node)) - - -def get_compile_mode(node): - """ - Get the mode for `compile` of a given node. If the node is not a `mod` - node (`Expression`, `Module` etc.) a `TypeError` is thrown. - """ - if not isinstance(node, mod): - raise TypeError('expected mod node, got %r' % node.__class__.__name__) - return { - Expression: 'eval', - Interactive: 'single' - }.get(node.__class__, 'expr') - - -def get_docstring(node): - """ - Return the docstring for the given node or `None` if no docstring can be - found. If the node provided does not accept docstrings a `TypeError` - will be raised. - """ - if not isinstance(node, (FunctionDef, ClassDef, Module)): - raise TypeError("%r can't have docstrings" % node.__class__.__name__) - if node.body and isinstance(node.body[0], Str): - return node.body[0].s - - -def walk(node): - """ - Iterate over all nodes. This is useful if you only want to modify nodes in - place and don't care about the context or the order the nodes are returned. - """ - from collections import deque - todo = deque([node]) - while todo: - node = todo.popleft() - todo.extend(iter_child_nodes(node)) - yield node - - class NodeVisitor(object): """ @@ -269,7 +128,7 @@ class NodeVisitor(object): exists for this node. 
In that case the generic visit function is used instead. """ - method = 'visit_' + node.__class__.__name__ + method = "visit_" + node.__class__.__name__ return getattr(self, method, None) def visit(self, node): @@ -367,7 +226,7 @@ class SourceGenerator(NodeVisitor): def write(self, x): if self.new_lines: if self.result: - self.result.append('\n' * self.new_lines) + self.result.append("\n" * self.new_lines) self.result.append(self.indent_with * self.indentation) self.new_lines = 0 self.result.append(x) @@ -386,7 +245,7 @@ class SourceGenerator(NodeVisitor): self.body(node.body) if node.orelse: self.newline() - self.write('else:') + self.write("else:") self.body(node.orelse) def signature(self, node): @@ -394,7 +253,7 @@ class SourceGenerator(NodeVisitor): def write_comma(): if want_comma: - self.write(', ') + self.write(", ") else: want_comma.append(True) @@ -403,19 +262,19 @@ class SourceGenerator(NodeVisitor): write_comma() self.visit(arg) if default is not None: - self.write('=') + self.write("=") self.visit(default) if node.vararg is not None: write_comma() - self.write('*' + arg_stringname(node.vararg)) + self.write("*" + arg_stringname(node.vararg)) if node.kwarg is not None: write_comma() - self.write('**' + arg_stringname(node.kwarg)) + self.write("**" + arg_stringname(node.kwarg)) def decorators(self, node): for decorator in node.decorator_list: self.newline() - self.write('@') + self.write("@") self.visit(decorator) # Statements @@ -424,29 +283,29 @@ class SourceGenerator(NodeVisitor): self.newline() for idx, target in enumerate(node.targets): if idx: - self.write(', ') + self.write(", ") self.visit(target) - self.write(' = ') + self.write(" = ") self.visit(node.value) def visit_AugAssign(self, node): self.newline() self.visit(node.target) - self.write(BINOP_SYMBOLS[type(node.op)] + '=') + self.write(BINOP_SYMBOLS[type(node.op)] + "=") self.visit(node.value) def visit_ImportFrom(self, node): self.newline() - self.write('from %s%s import ' % ('.' * node.level, node.module)) + self.write("from %s%s import " % ("." * node.level, node.module)) for idx, item in enumerate(node.names): if idx: - self.write(', ') + self.write(", ") self.write(item) def visit_Import(self, node): self.newline() for item in node.names: - self.write('import ') + self.write("import ") self.visit(item) def visit_Expr(self, node): @@ -457,9 +316,9 @@ class SourceGenerator(NodeVisitor): self.newline(n=2) self.decorators(node) self.newline() - self.write('def %s(' % node.name) + self.write("def %s(" % node.name) self.signature(node.args) - self.write('):') + self.write("):") self.body(node.body) def visit_ClassDef(self, node): @@ -467,200 +326,200 @@ class SourceGenerator(NodeVisitor): def paren_or_comma(): if have_args: - self.write(', ') + self.write(", ") else: have_args.append(True) - self.write('(') + self.write("(") self.newline(n=3) self.decorators(node) self.newline() - self.write('class %s' % node.name) + self.write("class %s" % node.name) for base in node.bases: paren_or_comma() self.visit(base) # XXX: the if here is used to keep this module compatible # with python 2.6. 
- if hasattr(node, 'keywords'): + if hasattr(node, "keywords"): for keyword in node.keywords: paren_or_comma() - self.write(keyword.arg + '=') + self.write(keyword.arg + "=") self.visit(keyword.value) if getattr(node, "starargs", None): paren_or_comma() - self.write('*') + self.write("*") self.visit(node.starargs) if getattr(node, "kwargs", None): paren_or_comma() - self.write('**') + self.write("**") self.visit(node.kwargs) - self.write(have_args and '):' or ':') + self.write(have_args and "):" or ":") self.body(node.body) def visit_If(self, node): self.newline() - self.write('if ') + self.write("if ") self.visit(node.test) - self.write(':') + self.write(":") self.body(node.body) while True: else_ = node.orelse if len(else_) == 1 and isinstance(else_[0], If): node = else_[0] self.newline() - self.write('elif ') + self.write("elif ") self.visit(node.test) - self.write(':') + self.write(":") self.body(node.body) else: self.newline() - self.write('else:') + self.write("else:") self.body(else_) break def visit_For(self, node): self.newline() - self.write('for ') + self.write("for ") self.visit(node.target) - self.write(' in ') + self.write(" in ") self.visit(node.iter) - self.write(':') + self.write(":") self.body_or_else(node) def visit_While(self, node): self.newline() - self.write('while ') + self.write("while ") self.visit(node.test) - self.write(':') + self.write(":") self.body_or_else(node) def visit_With(self, node): self.newline() - self.write('with ') + self.write("with ") self.visit(node.context_expr) if node.optional_vars is not None: - self.write(' as ') + self.write(" as ") self.visit(node.optional_vars) - self.write(':') + self.write(":") self.body(node.body) def visit_Pass(self, node): self.newline() - self.write('pass') + self.write("pass") def visit_Print(self, node): # XXX: python 2.6 only self.newline() - self.write('print ') + self.write("print ") want_comma = False if node.dest is not None: - self.write(' >> ') + self.write(" >> ") self.visit(node.dest) want_comma = True for value in node.values: if want_comma: - self.write(', ') + self.write(", ") self.visit(value) want_comma = True if not node.nl: - self.write(',') + self.write(",") def visit_Delete(self, node): self.newline() - self.write('del ') + self.write("del ") for idx, target in enumerate(node): if idx: - self.write(', ') + self.write(", ") self.visit(target) def visit_TryExcept(self, node): self.newline() - self.write('try:') + self.write("try:") self.body(node.body) for handler in node.handlers: self.visit(handler) def visit_TryFinally(self, node): self.newline() - self.write('try:') + self.write("try:") self.body(node.body) self.newline() - self.write('finally:') + self.write("finally:") self.body(node.finalbody) def visit_Global(self, node): self.newline() - self.write('global ' + ', '.join(node.names)) + self.write("global " + ", ".join(node.names)) def visit_Nonlocal(self, node): self.newline() - self.write('nonlocal ' + ', '.join(node.names)) + self.write("nonlocal " + ", ".join(node.names)) def visit_Return(self, node): self.newline() - self.write('return ') + self.write("return ") self.visit(node.value) def visit_Break(self, node): self.newline() - self.write('break') + self.write("break") def visit_Continue(self, node): self.newline() - self.write('continue') + self.write("continue") def visit_Raise(self, node): # XXX: Python 2.6 / 3.0 compatibility self.newline() - self.write('raise') - if hasattr(node, 'exc') and node.exc is not None: - self.write(' ') + self.write("raise") + if hasattr(node, "exc") 
and node.exc is not None: + self.write(" ") self.visit(node.exc) if node.cause is not None: - self.write(' from ') + self.write(" from ") self.visit(node.cause) - elif hasattr(node, 'type') and node.type is not None: + elif hasattr(node, "type") and node.type is not None: self.visit(node.type) if node.inst is not None: - self.write(', ') + self.write(", ") self.visit(node.inst) if node.tback is not None: - self.write(', ') + self.write(", ") self.visit(node.tback) # Expressions def visit_Attribute(self, node): self.visit(node.value) - self.write('.' + node.attr) + self.write("." + node.attr) def visit_Call(self, node): want_comma = [] def write_comma(): if want_comma: - self.write(', ') + self.write(", ") else: want_comma.append(True) self.visit(node.func) - self.write('(') + self.write("(") for arg in node.args: write_comma() self.visit(arg) for keyword in node.keywords: write_comma() - self.write(keyword.arg + '=') + self.write(keyword.arg + "=") self.visit(keyword.value) if getattr(node, "starargs", None): write_comma() - self.write('*') + self.write("*") self.visit(node.starargs) if getattr(node, "kwargs", None): write_comma() - self.write('**') + self.write("**") self.visit(node.kwargs) - self.write(')') + self.write(")") def visit_Name(self, node): self.write(node.id) @@ -680,106 +539,111 @@ class SourceGenerator(NodeVisitor): def visit_Num(self, node): self.write(repr(node.n)) + # newly needed in Python 3.8 + def visit_Constant(self, node): + self.write(repr(node.value)) + def visit_Tuple(self, node): - self.write('(') + self.write("(") idx = -1 for idx, item in enumerate(node.elts): if idx: - self.write(', ') + self.write(", ") self.visit(item) - self.write(idx and ')' or ',)') + self.write(idx and ")" or ",)") def sequence_visit(left, right): def visit(self, node): self.write(left) for idx, item in enumerate(node.elts): if idx: - self.write(', ') + self.write(", ") self.visit(item) self.write(right) + return visit - visit_List = sequence_visit('[', ']') - visit_Set = sequence_visit('{', '}') + visit_List = sequence_visit("[", "]") + visit_Set = sequence_visit("{", "}") del sequence_visit def visit_Dict(self, node): - self.write('{') + self.write("{") for idx, (key, value) in enumerate(zip(node.keys, node.values)): if idx: - self.write(', ') + self.write(", ") self.visit(key) - self.write(': ') + self.write(": ") self.visit(value) - self.write('}') + self.write("}") def visit_BinOp(self, node): - self.write('(') + self.write("(") self.visit(node.left) - self.write(' %s ' % BINOP_SYMBOLS[type(node.op)]) + self.write(" %s " % BINOP_SYMBOLS[type(node.op)]) self.visit(node.right) - self.write(')') + self.write(")") def visit_BoolOp(self, node): - self.write('(') + self.write("(") for idx, value in enumerate(node.values): if idx: - self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)]) + self.write(" %s " % BOOLOP_SYMBOLS[type(node.op)]) self.visit(value) - self.write(')') + self.write(")") def visit_Compare(self, node): - self.write('(') + self.write("(") self.visit(node.left) for op, right in zip(node.ops, node.comparators): - self.write(' %s ' % CMPOP_SYMBOLS[type(op)]) + self.write(" %s " % CMPOP_SYMBOLS[type(op)]) self.visit(right) - self.write(')') + self.write(")") def visit_UnaryOp(self, node): - self.write('(') + self.write("(") op = UNARYOP_SYMBOLS[type(node.op)] self.write(op) - if op == 'not': - self.write(' ') + if op == "not": + self.write(" ") self.visit(node.operand) - self.write(')') + self.write(")") def visit_Subscript(self, node): self.visit(node.value) - self.write('[') 
+ self.write("[") self.visit(node.slice) - self.write(']') + self.write("]") def visit_Slice(self, node): if node.lower is not None: self.visit(node.lower) - self.write(':') + self.write(":") if node.upper is not None: self.visit(node.upper) if node.step is not None: - self.write(':') - if not (isinstance(node.step, Name) and node.step.id == 'None'): + self.write(":") + if not (isinstance(node.step, Name) and node.step.id == "None"): self.visit(node.step) def visit_ExtSlice(self, node): for idx, item in node.dims: if idx: - self.write(', ') + self.write(", ") self.visit(item) def visit_Yield(self, node): - self.write('yield ') + self.write("yield ") self.visit(node.value) def visit_Lambda(self, node): - self.write('lambda ') + self.write("lambda ") self.signature(node.args) - self.write(': ') + self.write(": ") self.visit(node.body) def visit_Ellipsis(self, node): - self.write('Ellipsis') + self.write("Ellipsis") def generator_visit(left, right): def visit(self, node): @@ -788,64 +652,65 @@ class SourceGenerator(NodeVisitor): for comprehension in node.generators: self.visit(comprehension) self.write(right) + return visit - visit_ListComp = generator_visit('[', ']') - visit_GeneratorExp = generator_visit('(', ')') - visit_SetComp = generator_visit('{', '}') + visit_ListComp = generator_visit("[", "]") + visit_GeneratorExp = generator_visit("(", ")") + visit_SetComp = generator_visit("{", "}") del generator_visit def visit_DictComp(self, node): - self.write('{') + self.write("{") self.visit(node.key) - self.write(': ') + self.write(": ") self.visit(node.value) for comprehension in node.generators: self.visit(comprehension) - self.write('}') + self.write("}") def visit_IfExp(self, node): self.visit(node.body) - self.write(' if ') + self.write(" if ") self.visit(node.test) - self.write(' else ') + self.write(" else ") self.visit(node.orelse) def visit_Starred(self, node): - self.write('*') + self.write("*") self.visit(node.value) def visit_Repr(self, node): # XXX: python 2.6 only - self.write('`') + self.write("`") self.visit(node.value) - self.write('`') + self.write("`") # Helper Nodes def visit_alias(self, node): self.write(node.name) if node.asname is not None: - self.write(' as ' + node.asname) + self.write(" as " + node.asname) def visit_comprehension(self, node): - self.write(' for ') + self.write(" for ") self.visit(node.target) - self.write(' in ') + self.write(" in ") self.visit(node.iter) if node.ifs: for if_ in node.ifs: - self.write(' if ') + self.write(" if ") self.visit(if_) def visit_excepthandler(self, node): self.newline() - self.write('except') + self.write("except") if node.type is not None: - self.write(' ') + self.write(" ") self.visit(node.type) if node.name is not None: - self.write(' as ') + self.write(" as ") self.visit(node.name) - self.write(':') + self.write(":") self.body(node.body) diff --git a/server/www/packages/packages-linux/x64/mako/ast.py b/server/www/packages/packages-linux/x64/mako/ast.py index 8d2d150..8f2cf2e 100644 --- a/server/www/packages/packages-linux/x64/mako/ast.py +++ b/server/www/packages/packages-linux/x64/mako/ast.py @@ -1,5 +1,5 @@ # mako/ast.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -7,9 +7,12 @@ """utilities for analyzing expressions and blocks of Python code, as well as generating Python from AST nodes""" -from mako import exceptions, 
pyparser, compat import re +from mako import compat +from mako import exceptions +from mako import pyparser + class PythonCode(object): @@ -72,36 +75,39 @@ class PythonFragment(PythonCode): """extends PythonCode to provide identifier lookups in partial control statements - e.g. + e.g.:: + for x in 5: elif y==9: except (MyException, e): - etc. + """ def __init__(self, code, **exception_kwargs): - m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S) + m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S) if not m: raise exceptions.CompileException( - "Fragment '%s' is not a partial control statement" % - code, **exception_kwargs) + "Fragment '%s' is not a partial control statement" % code, + **exception_kwargs + ) if m.group(3): - code = code[:m.start(3)] + code = code[: m.start(3)] (keyword, expr) = m.group(1, 2) - if keyword in ['for', 'if', 'while']: + if keyword in ["for", "if", "while"]: code = code + "pass" - elif keyword == 'try': + elif keyword == "try": code = code + "pass\nexcept:pass" - elif keyword == 'elif' or keyword == 'else': + elif keyword == "elif" or keyword == "else": code = "if False:pass\n" + code + "pass" - elif keyword == 'except': + elif keyword == "except": code = "try:pass\n" + code + "pass" - elif keyword == 'with': + elif keyword == "with": code = code + "pass" else: raise exceptions.CompileException( - "Unsupported control keyword: '%s'" % - keyword, **exception_kwargs) + "Unsupported control keyword: '%s'" % keyword, + **exception_kwargs + ) super(PythonFragment, self).__init__(code, **exception_kwargs) @@ -115,14 +121,17 @@ class FunctionDecl(object): f = pyparser.ParseFunc(self, **exception_kwargs) f.visit(expr) - if not hasattr(self, 'funcname'): + if not hasattr(self, "funcname"): raise exceptions.CompileException( "Code '%s' is not a function declaration" % code, - **exception_kwargs) + **exception_kwargs + ) if not allow_kwargs and self.kwargs: raise exceptions.CompileException( - "'**%s' keyword argument not allowed here" % - self.kwargnames[-1], **exception_kwargs) + "'**%s' keyword argument not allowed here" + % self.kwargnames[-1], + **exception_kwargs + ) def get_argument_expressions(self, as_call=False): """Return the argument declarations of this FunctionDecl as a printable @@ -157,8 +166,10 @@ class FunctionDecl(object): # `def foo(*, a=1, b, c=3)` namedecls.append(name) else: - namedecls.append("%s=%s" % ( - name, pyparser.ExpressionGenerator(default).value())) + namedecls.append( + "%s=%s" + % (name, pyparser.ExpressionGenerator(default).value()) + ) else: namedecls.append(name) @@ -171,8 +182,10 @@ class FunctionDecl(object): namedecls.append(name) else: default = defaults.pop(0) - namedecls.append("%s=%s" % ( - name, pyparser.ExpressionGenerator(default).value())) + namedecls.append( + "%s=%s" + % (name, pyparser.ExpressionGenerator(default).value()) + ) namedecls.reverse() return namedecls @@ -187,5 +200,6 @@ class FunctionArgs(FunctionDecl): """the argument portion of a function declaration""" def __init__(self, code, **kwargs): - super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, - **kwargs) + super(FunctionArgs, self).__init__( + "def ANON(%s):pass" % code, **kwargs + ) diff --git a/server/www/packages/packages-linux/x64/mako/cache.py b/server/www/packages/packages-linux/x64/mako/cache.py index 1af17dd..b68b74f 100644 --- a/server/www/packages/packages-linux/x64/mako/cache.py +++ b/server/www/packages/packages-linux/x64/mako/cache.py @@ -1,10 +1,11 @@ # mako/cache.py -# Copyright (C) 2006-2016 the Mako 
authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from mako import compat, util +from mako import compat +from mako import util _cache_plugins = util.PluginLoader("mako.cache") @@ -90,9 +91,8 @@ class Cache(object): return creation_function() return self.impl.get_or_create( - key, - creation_function, - **self._get_cache_kw(kw, context)) + key, creation_function, **self._get_cache_kw(kw, context) + ) def set(self, key, value, **kw): r"""Place a value in the cache. @@ -141,7 +141,7 @@ class Cache(object): template. """ - self.invalidate('render_body', __M_defname='render_body') + self.invalidate("render_body", __M_defname="render_body") def invalidate_def(self, name): """Invalidate the cached content of a particular ``<%def>`` within this @@ -149,7 +149,7 @@ class Cache(object): """ - self.invalidate('render_%s' % name, __M_defname='render_%s' % name) + self.invalidate("render_%s" % name, __M_defname="render_%s" % name) def invalidate_closure(self, name): """Invalidate a nested ``<%def>`` within this template. @@ -165,7 +165,7 @@ class Cache(object): self.invalidate(name, __M_defname=name) def _get_cache_kw(self, kw, context): - defname = kw.pop('__M_defname', None) + defname = kw.pop("__M_defname", None) if not defname: tmpl_kw = self.template.cache_args.copy() tmpl_kw.update(kw) @@ -177,7 +177,7 @@ class Cache(object): self._def_regions[defname] = tmpl_kw if context and self.impl.pass_context: tmpl_kw = tmpl_kw.copy() - tmpl_kw.setdefault('context', context) + tmpl_kw.setdefault("context", context) return tmpl_kw diff --git a/server/www/packages/packages-linux/x64/mako/cmd.py b/server/www/packages/packages-linux/x64/mako/cmd.py index 8db1346..5d52dfb 100644 --- a/server/www/packages/packages-linux/x64/mako/cmd.py +++ b/server/www/packages/packages-linux/x64/mako/cmd.py @@ -1,14 +1,16 @@ # mako/cmd.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from argparse import ArgumentParser -from os.path import isfile, dirname +from os.path import dirname +from os.path import isfile import sys -from mako.template import Template -from mako.lookup import TemplateLookup + from mako import exceptions +from mako.lookup import TemplateLookup +from mako.template import Template def varsplit(var): @@ -24,25 +26,41 @@ def _exit(): def cmdline(argv=None): - parser = ArgumentParser("usage: %prog [FILENAME]") + parser = ArgumentParser() parser.add_argument( - "--var", default=[], action="append", - help="variable (can be used multiple times, use name=value)") + "--var", + default=[], + action="append", + help="variable (can be used multiple times, use name=value)", + ) parser.add_argument( - "--template-dir", default=[], action="append", + "--template-dir", + default=[], + action="append", help="Directory to use for template lookup (multiple " "directories may be provided). 
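Note on the mako/cache.py hunks above: ``invalidate_body()`` and ``invalidate_def()`` both funnel into ``invalidate()`` with a ``__M_defname`` key. A hedged usage sketch from the public API (the template text is made up, and rendering requires a cache backend such as Beaker to be installed)::

    from mako.template import Template

    t = Template(
        '<%def name="header()" cached="True" cache_timeout="60">'
        "expensive header"
        "</%def>"
        "${header()}",
        cache_impl="beaker",  # default backend; needs Beaker at render time
    )
    t.render()                         # populates the 'render_header' entry
    t.cache.invalidate_def("header")   # keyed via __M_defname='render_header'
    t.cache.invalidate_body()          # drops the cached render_body, if any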
If not given then if the " "template is read from stdin, the value defaults to be " "the current directory, otherwise it defaults to be the " - "parent directory of the file provided.") - parser.add_argument('input', nargs='?', default='-') + "parent directory of the file provided.", + ) + parser.add_argument( + "--output-encoding", default=None, help="force output encoding" + ) + parser.add_argument("input", nargs="?", default="-") options = parser.parse_args(argv) - if options.input == '-': + + output_encoding = options.output_encoding + + if options.input == "-": lookup_dirs = options.template_dir or ["."] lookup = TemplateLookup(lookup_dirs) try: - template = Template(sys.stdin.read(), lookup=lookup) + template = Template( + sys.stdin.read(), + lookup=lookup, + output_encoding=output_encoding, + ) except: _exit() else: @@ -52,7 +70,11 @@ def cmdline(argv=None): lookup_dirs = options.template_dir or [dirname(filename)] lookup = TemplateLookup(lookup_dirs) try: - template = Template(filename=filename, lookup=lookup) + template = Template( + filename=filename, + lookup=lookup, + output_encoding=output_encoding, + ) except: _exit() diff --git a/server/www/packages/packages-linux/x64/mako/codegen.py b/server/www/packages/packages-linux/x64/mako/codegen.py index d4ecbe8..8f9eef4 100644 --- a/server/www/packages/packages-linux/x64/mako/codegen.py +++ b/server/www/packages/packages-linux/x64/mako/codegen.py @@ -1,5 +1,5 @@ # mako/codegen.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -7,11 +7,17 @@ """provides functionality for rendering a parsetree constructing into module source code.""" -import time +import json import re -from mako.pygen import PythonPrinter -from mako import util, ast, parsetree, filters, exceptions +import time + +from mako import ast from mako import compat +from mako import exceptions +from mako import filters +from mako import parsetree +from mako import util +from mako.pygen import PythonPrinter MAGIC_NUMBER = 10 @@ -20,22 +26,24 @@ MAGIC_NUMBER = 10 # template and are not accessed via the # context itself TOPLEVEL_DECLARED = set(["UNDEFINED", "STOP_RENDERING"]) -RESERVED_NAMES = set(['context', 'loop']).union(TOPLEVEL_DECLARED) +RESERVED_NAMES = set(["context", "loop"]).union(TOPLEVEL_DECLARED) -def compile(node, - uri, - filename=None, - default_filters=None, - buffer_filters=None, - imports=None, - future_imports=None, - source_encoding=None, - generate_magic_comment=True, - disable_unicode=False, - strict_undefined=False, - enable_loop=True, - reserved_names=frozenset()): +def compile( # noqa + node, + uri, + filename=None, + default_filters=None, + buffer_filters=None, + imports=None, + future_imports=None, + source_encoding=None, + generate_magic_comment=True, + disable_unicode=False, + strict_undefined=False, + enable_loop=True, + reserved_names=frozenset(), +): """Generate module source code given a parsetree node, uri, and optional source filename""" @@ -49,38 +57,43 @@ def compile(node, buf = util.FastEncodingBuffer() printer = PythonPrinter(buf) - _GenerateRenderMethod(printer, - _CompileContext(uri, - filename, - default_filters, - buffer_filters, - imports, - future_imports, - source_encoding, - generate_magic_comment, - disable_unicode, - strict_undefined, - enable_loop, - reserved_names), - node) + _GenerateRenderMethod( + printer, + _CompileContext( + 
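Note on the mako/cmd.py hunks above: the new ``--output-encoding`` flag just threads through to ``Template()``. The programmatic equivalent of ``mako-render --output-encoding utf-8 page.mako`` would be roughly (the filename here is hypothetical)::

    from os.path import dirname
    from mako.lookup import TemplateLookup
    from mako.template import Template

    filename = "page.mako"  # hypothetical input file
    lookup = TemplateLookup([dirname(filename) or "."])
    template = Template(
        filename=filename, lookup=lookup, output_encoding="utf-8"
    )
    print(template.render())  # returns bytes, since output_encoding is set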
uri, + filename, + default_filters, + buffer_filters, + imports, + future_imports, + source_encoding, + generate_magic_comment, + disable_unicode, + strict_undefined, + enable_loop, + reserved_names, + ), + node, + ) return buf.getvalue() class _CompileContext(object): - - def __init__(self, - uri, - filename, - default_filters, - buffer_filters, - imports, - future_imports, - source_encoding, - generate_magic_comment, - disable_unicode, - strict_undefined, - enable_loop, - reserved_names): + def __init__( + self, + uri, + filename, + default_filters, + buffer_filters, + imports, + future_imports, + source_encoding, + generate_magic_comment, + disable_unicode, + strict_undefined, + enable_loop, + reserved_names, + ): self.uri = uri self.filename = filename self.default_filters = default_filters @@ -113,12 +126,12 @@ class _GenerateRenderMethod(object): name = "render_%s" % node.funcname args = node.get_argument_expressions() filtered = len(node.filter_args.args) > 0 - buffered = eval(node.attributes.get('buffered', 'False')) - cached = eval(node.attributes.get('cached', 'False')) + buffered = eval(node.attributes.get("buffered", "False")) + cached = eval(node.attributes.get("cached", "False")) defs = None pagetag = None if node.is_block and not node.is_anonymous: - args += ['**pageargs'] + args += ["**pageargs"] else: defs = self.write_toplevel() pagetag = self.compiler.pagetag @@ -126,25 +139,23 @@ class _GenerateRenderMethod(object): if pagetag is not None: args = pagetag.body_decl.get_argument_expressions() if not pagetag.body_decl.kwargs: - args += ['**pageargs'] - cached = eval(pagetag.attributes.get('cached', 'False')) + args += ["**pageargs"] + cached = eval(pagetag.attributes.get("cached", "False")) self.compiler.enable_loop = self.compiler.enable_loop or eval( - pagetag.attributes.get( - 'enable_loop', 'False') + pagetag.attributes.get("enable_loop", "False") ) else: - args = ['**pageargs'] + args = ["**pageargs"] cached = False buffered = filtered = False if args is None: - args = ['context'] + args = ["context"] else: - args = [a for a in ['context'] + args] + args = [a for a in ["context"] + args] self.write_render_callable( - pagetag or node, - name, args, - buffered, filtered, cached) + pagetag or node, name, args, buffered, filtered, cached + ) if defs is not None: for node in defs: @@ -154,8 +165,9 @@ class _GenerateRenderMethod(object): self.write_metadata_struct() def write_metadata_struct(self): - self.printer.source_map[self.printer.lineno] = \ - max(self.printer.source_map) + self.printer.source_map[self.printer.lineno] = max( + self.printer.source_map + ) struct = { "filename": self.compiler.filename, "uri": self.compiler.uri, @@ -164,10 +176,9 @@ class _GenerateRenderMethod(object): } self.printer.writelines( '"""', - '__M_BEGIN_METADATA', - compat.json.dumps(struct), - '__M_END_METADATA\n' - '"""' + "__M_BEGIN_METADATA", + json.dumps(struct), + "__M_END_METADATA\n" '"""', ) @property @@ -186,7 +197,6 @@ class _GenerateRenderMethod(object): self.compiler.pagetag = None class FindTopLevel(object): - def visitInheritTag(s, node): inherit.append(node) @@ -214,14 +224,19 @@ class _GenerateRenderMethod(object): module_identifiers.declared = module_ident # module-level names, python code - if self.compiler.generate_magic_comment and \ - self.compiler.source_encoding: - self.printer.writeline("# -*- coding:%s -*-" % - self.compiler.source_encoding) + if ( + self.compiler.generate_magic_comment + and self.compiler.source_encoding + ): + self.printer.writeline( + "# -*- 
coding:%s -*-" % self.compiler.source_encoding + ) if self.compiler.future_imports: - self.printer.writeline("from __future__ import %s" % - (", ".join(self.compiler.future_imports),)) + self.printer.writeline( + "from __future__ import %s" + % (", ".join(self.compiler.future_imports),) + ) self.printer.writeline("from mako import runtime, filters, cache") self.printer.writeline("UNDEFINED = runtime.UNDEFINED") self.printer.writeline("STOP_RENDERING = runtime.STOP_RENDERING") @@ -231,36 +246,41 @@ class _GenerateRenderMethod(object): self.printer.writeline("_modified_time = %r" % time.time()) self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop) self.printer.writeline( - "_template_filename = %r" % self.compiler.filename) + "_template_filename = %r" % self.compiler.filename + ) self.printer.writeline("_template_uri = %r" % self.compiler.uri) self.printer.writeline( - "_source_encoding = %r" % self.compiler.source_encoding) + "_source_encoding = %r" % self.compiler.source_encoding + ) if self.compiler.imports: - buf = '' + buf = "" for imp in self.compiler.imports: buf += imp + "\n" self.printer.writeline(imp) impcode = ast.PythonCode( buf, - source='', lineno=0, + source="", + lineno=0, pos=0, - filename='template defined imports') + filename="template defined imports", + ) else: impcode = None main_identifiers = module_identifiers.branch(self.node) - module_identifiers.topleveldefs = \ - module_identifiers.topleveldefs.\ - union(main_identifiers.topleveldefs) + mit = module_identifiers.topleveldefs + module_identifiers.topleveldefs = mit.union( + main_identifiers.topleveldefs + ) module_identifiers.declared.update(TOPLEVEL_DECLARED) if impcode: module_identifiers.declared.update(impcode.declared_identifiers) self.compiler.identifiers = module_identifiers - self.printer.writeline("_exports = %r" % - [n.name for n in - main_identifiers.topleveldefs.values()] - ) + self.printer.writeline( + "_exports = %r" + % [n.name for n in main_identifiers.topleveldefs.values()] + ) self.printer.write_blanks(2) if len(module_code): @@ -274,8 +294,9 @@ class _GenerateRenderMethod(object): return list(main_identifiers.topleveldefs.values()) - def write_render_callable(self, node, name, args, buffered, filtered, - cached): + def write_render_callable( + self, node, name, args, buffered, filtered, cached + ): """write a top-level render callable. 
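Note on the module-prologue writing above (magic coding comment, ``_exports``, the ``__M_BEGIN_METADATA`` JSON block): the generated module source is exposed publicly, so the effect of these hunks is easy to inspect (quick sketch; the uri is made up)::

    from mako.template import Template

    t = Template("hello ${name}", uri="hello.mako")
    print(t.code)  # generated Python module; ends with the
                   # __M_BEGIN_METADATA ... __M_END_METADATA JSON block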
this could be the main render() method or that of a top-level def.""" @@ -284,32 +305,38 @@ class _GenerateRenderMethod(object): decorator = node.decorator if decorator: self.printer.writeline( - "@runtime._decorate_toplevel(%s)" % decorator) + "@runtime._decorate_toplevel(%s)" % decorator + ) self.printer.start_source(node.lineno) self.printer.writelines( - "def %s(%s):" % (name, ','.join(args)), + "def %s(%s):" % (name, ",".join(args)), # push new frame, assign current frame to __M_caller "__M_caller = context.caller_stack._push_frame()", - "try:" + "try:", ) if buffered or filtered or cached: self.printer.writeline("context._push_buffer()") self.identifier_stack.append( - self.compiler.identifiers.branch(self.node)) - if (not self.in_def or self.node.is_block) and '**pageargs' in args: - self.identifier_stack[-1].argument_declared.add('pageargs') + self.compiler.identifiers.branch(self.node) + ) + if (not self.in_def or self.node.is_block) and "**pageargs" in args: + self.identifier_stack[-1].argument_declared.add("pageargs") if not self.in_def and ( - len(self.identifiers.locally_assigned) > 0 or - len(self.identifiers.argument_declared) > 0 + len(self.identifiers.locally_assigned) > 0 + or len(self.identifiers.argument_declared) > 0 ): - self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % - ','.join([ - "%s=%s" % (x, x) for x in - self.identifiers.argument_declared - ])) + self.printer.writeline( + "__M_locals = __M_dict_builtin(%s)" + % ",".join( + [ + "%s=%s" % (x, x) + for x in self.identifiers.argument_declared + ] + ) + ) self.write_variable_declares(self.identifiers, toplevel=True) @@ -321,16 +348,14 @@ class _GenerateRenderMethod(object): self.printer.write_blanks(2) if cached: self.write_cache_decorator( - node, name, - args, buffered, - self.identifiers, toplevel=True) + node, name, args, buffered, self.identifiers, toplevel=True + ) def write_module_code(self, module_code): """write module-level template code, i.e. that which is enclosed in <%! 
%> tags in the template.""" for n in module_code: - self.printer.start_source(n.lineno) - self.printer.write_indented_block(n.text) + self.printer.write_indented_block(n.text, starting_lineno=n.lineno) def write_inherit(self, node): """write the module-level inheritance-determination callable.""" @@ -338,9 +363,9 @@ class _GenerateRenderMethod(object): self.printer.writelines( "def _mako_inherit(template, context):", "_mako_generate_namespaces(context)", - "return runtime._inherit_from(context, %s, _template_uri)" % - (node.parsed_attributes['file']), - None + "return runtime._inherit_from(context, %s, _template_uri)" + % (node.parsed_attributes["file"]), + None, ) def write_namespaces(self, namespaces): @@ -352,12 +377,13 @@ class _GenerateRenderMethod(object): "except KeyError:", "_mako_generate_namespaces(context)", "return context.namespaces[(__name__, name)]", - None, None + None, + None, ) self.printer.writeline("def _mako_generate_namespaces(context):") for node in namespaces.values(): - if 'import' in node.attributes: + if "import" in node.attributes: self.compiler.has_ns_imports = True self.printer.start_source(node.lineno) if len(node.nodes): @@ -367,7 +393,6 @@ class _GenerateRenderMethod(object): self.in_def = True class NSDefVisitor(object): - def visitDefTag(s, node): s.visitDefOrBase(node) @@ -383,56 +408,54 @@ class _GenerateRenderMethod(object): ) self.write_inline_def(node, identifiers, nested=False) export.append(node.funcname) + vis = NSDefVisitor() for n in node.nodes: n.accept_visitor(vis) - self.printer.writeline("return [%s]" % (','.join(export))) + self.printer.writeline("return [%s]" % (",".join(export))) self.printer.writeline(None) self.in_def = False callable_name = "make_namespace()" else: callable_name = "None" - if 'file' in node.parsed_attributes: + if "file" in node.parsed_attributes: self.printer.writeline( "ns = runtime.TemplateNamespace(%r," " context._clean_inheritance_tokens()," " templateuri=%s, callables=%s, " - " calling_uri=_template_uri)" % - ( + " calling_uri=_template_uri)" + % ( node.name, - node.parsed_attributes.get('file', 'None'), + node.parsed_attributes.get("file", "None"), callable_name, ) ) - elif 'module' in node.parsed_attributes: + elif "module" in node.parsed_attributes: self.printer.writeline( "ns = runtime.ModuleNamespace(%r," " context._clean_inheritance_tokens()," " callables=%s, calling_uri=_template_uri," - " module=%s)" % - ( + " module=%s)" + % ( node.name, callable_name, - node.parsed_attributes.get( - 'module', 'None') + node.parsed_attributes.get("module", "None"), ) ) else: self.printer.writeline( "ns = runtime.Namespace(%r," " context._clean_inheritance_tokens()," - " callables=%s, calling_uri=_template_uri)" % - ( - node.name, - callable_name, - ) + " callables=%s, calling_uri=_template_uri)" + % (node.name, callable_name) ) - if eval(node.attributes.get('inheritable', "False")): + if eval(node.attributes.get("inheritable", "False")): self.printer.writeline("context['self'].%s = ns" % (node.name)) self.printer.writeline( - "context.namespaces[(__name__, %s)] = ns" % repr(node.name)) + "context.namespaces[(__name__, %s)] = ns" % repr(node.name) + ) self.printer.write_blanks(1) if not len(namespaces): self.printer.writeline("pass") @@ -468,7 +491,8 @@ class _GenerateRenderMethod(object): # write closure functions for closures that we define # right here to_write = to_write.union( - [c.funcname for c in identifiers.closuredefs.values()]) + [c.funcname for c in identifiers.closuredefs.values()] + ) # remove identifiers 
that are declared in the argument # signature of the callable @@ -492,23 +516,22 @@ class _GenerateRenderMethod(object): if limit is not None: to_write = to_write.intersection(limit) - if toplevel and getattr(self.compiler, 'has_ns_imports', False): + if toplevel and getattr(self.compiler, "has_ns_imports", False): self.printer.writeline("_import_ns = {}") self.compiler.has_imports = True for ident, ns in self.compiler.namespaces.items(): - if 'import' in ns.attributes: + if "import" in ns.attributes: self.printer.writeline( "_mako_get_namespace(context, %r)." - "_populate(_import_ns, %r)" % - ( + "_populate(_import_ns, %r)" + % ( ident, - re.split(r'\s*,\s*', ns.attributes['import']) - )) + re.split(r"\s*,\s*", ns.attributes["import"]), + ) + ) if has_loop: - self.printer.writeline( - 'loop = __M_loop = runtime.LoopStack()' - ) + self.printer.writeline("loop = __M_loop = runtime.LoopStack()") for ident in to_write: if ident in comp_idents: @@ -526,37 +549,36 @@ class _GenerateRenderMethod(object): elif ident in self.compiler.namespaces: self.printer.writeline( - "%s = _mako_get_namespace(context, %r)" % - (ident, ident) + "%s = _mako_get_namespace(context, %r)" % (ident, ident) ) else: - if getattr(self.compiler, 'has_ns_imports', False): + if getattr(self.compiler, "has_ns_imports", False): if self.compiler.strict_undefined: self.printer.writelines( - "%s = _import_ns.get(%r, UNDEFINED)" % - (ident, ident), + "%s = _import_ns.get(%r, UNDEFINED)" + % (ident, ident), "if %s is UNDEFINED:" % ident, "try:", "%s = context[%r]" % (ident, ident), "except KeyError:", - "raise NameError(\"'%s' is not defined\")" % - ident, - None, None + "raise NameError(\"'%s' is not defined\")" % ident, + None, + None, ) else: self.printer.writeline( "%s = _import_ns.get" - "(%r, context.get(%r, UNDEFINED))" % - (ident, ident, ident)) + "(%r, context.get(%r, UNDEFINED))" + % (ident, ident, ident) + ) else: if self.compiler.strict_undefined: self.printer.writelines( "try:", "%s = context[%r]" % (ident, ident), "except KeyError:", - "raise NameError(\"'%s' is not defined\")" % - ident, - None + "raise NameError(\"'%s' is not defined\")" % ident, + None, ) else: self.printer.writeline( @@ -572,14 +594,16 @@ class _GenerateRenderMethod(object): nameargs = node.get_argument_expressions(as_call=True) if not self.in_def and ( - len(self.identifiers.locally_assigned) > 0 or - len(self.identifiers.argument_declared) > 0): - nameargs.insert(0, 'context._locals(__M_locals)') + len(self.identifiers.locally_assigned) > 0 + or len(self.identifiers.argument_declared) > 0 + ): + nameargs.insert(0, "context._locals(__M_locals)") else: - nameargs.insert(0, 'context') + nameargs.insert(0, "context") self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls))) self.printer.writeline( - "return render_%s(%s)" % (funcname, ",".join(nameargs))) + "return render_%s(%s)" % (funcname, ",".join(nameargs)) + ) self.printer.writeline(None) def write_inline_def(self, node, identifiers, nested): @@ -590,21 +614,21 @@ class _GenerateRenderMethod(object): decorator = node.decorator if decorator: self.printer.writeline( - "@runtime._decorate_inline(context, %s)" % decorator) + "@runtime._decorate_inline(context, %s)" % decorator + ) self.printer.writeline( - "def %s(%s):" % (node.funcname, ",".join(namedecls))) + "def %s(%s):" % (node.funcname, ",".join(namedecls)) + ) filtered = len(node.filter_args.args) > 0 - buffered = eval(node.attributes.get('buffered', 'False')) - cached = eval(node.attributes.get('cached', 'False')) + 
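Note on the ``strict_undefined`` branches in ``write_variable_declares`` above: they are what generate the ``NameError`` re-raise seen at render time. Observable from the public API::

    from mako.template import Template

    t = Template("value: ${x}", strict_undefined=True)
    try:
        t.render()        # no 'x' supplied
    except NameError as err:
        print(err)        # "'x' is not defined"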
buffered = eval(node.attributes.get("buffered", "False")) + cached = eval(node.attributes.get("cached", "False")) self.printer.writelines( # push new frame, assign current frame to __M_caller "__M_caller = context.caller_stack._push_frame()", - "try:" + "try:", ) if buffered or filtered or cached: - self.printer.writelines( - "context._push_buffer()", - ) + self.printer.writelines("context._push_buffer()") identifiers = identifiers.branch(node, nested=nested) @@ -618,12 +642,19 @@ class _GenerateRenderMethod(object): self.write_def_finish(node, buffered, filtered, cached) self.printer.writeline(None) if cached: - self.write_cache_decorator(node, node.funcname, - namedecls, False, identifiers, - inline=True, toplevel=False) + self.write_cache_decorator( + node, + node.funcname, + namedecls, + False, + identifiers, + inline=True, + toplevel=False, + ) - def write_def_finish(self, node, buffered, filtered, cached, - callstack=True): + def write_def_finish( + self, node, buffered, filtered, cached, callstack=True + ): """write the end section of a rendering function, either outermost or inline. @@ -636,9 +667,7 @@ class _GenerateRenderMethod(object): self.printer.writeline("return ''") if callstack: self.printer.writelines( - "finally:", - "context.caller_stack._pop_frame()", - None + "finally:", "context.caller_stack._pop_frame()", None ) if buffered or filtered or cached: @@ -648,13 +677,12 @@ class _GenerateRenderMethod(object): # implemenation might be using a context with no # extra buffers self.printer.writelines( - "finally:", - "__M_buf = context._pop_buffer()" + "finally:", "__M_buf = context._pop_buffer()" ) else: self.printer.writelines( "finally:", - "__M_buf, __M_writer = context._pop_buffer_and_writer()" + "__M_buf, __M_writer = context._pop_buffer_and_writer()", ) if callstack: @@ -662,89 +690,100 @@ class _GenerateRenderMethod(object): s = "__M_buf.getvalue()" if filtered: - s = self.create_filter_callable(node.filter_args.args, s, - False) + s = self.create_filter_callable( + node.filter_args.args, s, False + ) self.printer.writeline(None) if buffered and not cached: - s = self.create_filter_callable(self.compiler.buffer_filters, - s, False) + s = self.create_filter_callable( + self.compiler.buffer_filters, s, False + ) if buffered or cached: self.printer.writeline("return %s" % s) else: - self.printer.writelines( - "__M_writer(%s)" % s, - "return ''" - ) + self.printer.writelines("__M_writer(%s)" % s, "return ''") - def write_cache_decorator(self, node_or_pagetag, name, - args, buffered, identifiers, - inline=False, toplevel=False): + def write_cache_decorator( + self, + node_or_pagetag, + name, + args, + buffered, + identifiers, + inline=False, + toplevel=False, + ): """write a post-function decorator to replace a rendering callable with a cached version of itself.""" self.printer.writeline("__M_%s = %s" % (name, name)) - cachekey = node_or_pagetag.parsed_attributes.get('cache_key', - repr(name)) + cachekey = node_or_pagetag.parsed_attributes.get( + "cache_key", repr(name) + ) cache_args = {} if self.compiler.pagetag is not None: cache_args.update( - ( - pa[6:], - self.compiler.pagetag.parsed_attributes[pa] - ) + (pa[6:], self.compiler.pagetag.parsed_attributes[pa]) for pa in self.compiler.pagetag.parsed_attributes - if pa.startswith('cache_') and pa != 'cache_key' + if pa.startswith("cache_") and pa != "cache_key" ) cache_args.update( - ( - pa[6:], - node_or_pagetag.parsed_attributes[pa] - ) for pa in node_or_pagetag.parsed_attributes - if pa.startswith('cache_') and pa != 
'cache_key' + (pa[6:], node_or_pagetag.parsed_attributes[pa]) + for pa in node_or_pagetag.parsed_attributes + if pa.startswith("cache_") and pa != "cache_key" ) - if 'timeout' in cache_args: - cache_args['timeout'] = int(eval(cache_args['timeout'])) + if "timeout" in cache_args: + cache_args["timeout"] = int(eval(cache_args["timeout"])) - self.printer.writeline("def %s(%s):" % (name, ','.join(args))) + self.printer.writeline("def %s(%s):" % (name, ",".join(args))) # form "arg1, arg2, arg3=arg3, arg4=arg4", etc. pass_args = [ - "%s=%s" % ((a.split('=')[0],) * 2) if '=' in a else a - for a in args + "%s=%s" % ((a.split("=")[0],) * 2) if "=" in a else a for a in args ] self.write_variable_declares( identifiers, toplevel=toplevel, - limit=node_or_pagetag.undeclared_identifiers() + limit=node_or_pagetag.undeclared_identifiers(), ) if buffered: - s = "context.get('local')."\ - "cache._ctx_get_or_create("\ - "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % ( - cachekey, name, ','.join(pass_args), - ''.join(["%s=%s, " % (k, v) - for k, v in cache_args.items()]), - name + s = ( + "context.get('local')." + "cache._ctx_get_or_create(" + "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" + % ( + cachekey, + name, + ",".join(pass_args), + "".join( + ["%s=%s, " % (k, v) for k, v in cache_args.items()] + ), + name, ) + ) # apply buffer_filters - s = self.create_filter_callable(self.compiler.buffer_filters, s, - False) + s = self.create_filter_callable( + self.compiler.buffer_filters, s, False + ) self.printer.writelines("return " + s, None) else: self.printer.writelines( "__M_writer(context.get('local')." "cache._ctx_get_or_create(" - "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" % - ( - cachekey, name, ','.join(pass_args), - ''.join(["%s=%s, " % (k, v) - for k, v in cache_args.items()]), + "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" + % ( + cachekey, + name, + ",".join(pass_args), + "".join( + ["%s=%s, " % (k, v) for k, v in cache_args.items()] + ), name, ), "return ''", - None + None, ) def create_filter_callable(self, args, target, is_expression): @@ -753,24 +792,24 @@ class _GenerateRenderMethod(object): 'default' filter aliases as needed.""" def locate_encode(name): - if re.match(r'decode\..+', name): + if re.match(r"decode\..+", name): return "filters." 
+ name elif self.compiler.disable_unicode: return filters.NON_UNICODE_ESCAPES.get(name, name) else: return filters.DEFAULT_ESCAPES.get(name, name) - if 'n' not in args: + if "n" not in args: if is_expression: if self.compiler.pagetag: args = self.compiler.pagetag.filter_args.args + args - if self.compiler.default_filters: + if self.compiler.default_filters and "n" not in args: args = self.compiler.default_filters + args for e in args: # if filter given as a function, get just the identifier portion - if e == 'n': + if e == "n": continue - m = re.match(r'(.+?)(\(.*\))', e) + m = re.match(r"(.+?)(\(.*\))", e) if m: ident, fargs = m.group(1, 2) f = locate_encode(ident) @@ -783,15 +822,18 @@ class _GenerateRenderMethod(object): def visitExpression(self, node): self.printer.start_source(node.lineno) - if len(node.escapes) or \ - ( - self.compiler.pagetag is not None and - len(self.compiler.pagetag.filter_args.args) - ) or \ - len(self.compiler.default_filters): + if ( + len(node.escapes) + or ( + self.compiler.pagetag is not None + and len(self.compiler.pagetag.filter_args.args) + ) + or len(self.compiler.default_filters) + ): - s = self.create_filter_callable(node.escapes_code.args, - "%s" % node.text, True) + s = self.create_filter_callable( + node.escapes_code.args, "%s" % node.text, True + ) self.printer.writeline("__M_writer(%s)" % s) else: self.printer.writeline("__M_writer(%s)" % node.text) @@ -800,12 +842,12 @@ class _GenerateRenderMethod(object): if node.isend: self.printer.writeline(None) if node.has_loop_context: - self.printer.writeline('finally:') + self.printer.writeline("finally:") self.printer.writeline("loop = __M_loop._exit()") self.printer.writeline(None) else: self.printer.start_source(node.lineno) - if self.compiler.enable_loop and node.keyword == 'for': + if self.compiler.enable_loop and node.keyword == "for": text = mangle_mako_loop(node, self.printer) else: text = node.text @@ -817,12 +859,16 @@ class _GenerateRenderMethod(object): # and end control lines, and # 3) any control line with no content other than comments if not children or ( - compat.all(isinstance(c, (parsetree.Comment, - parsetree.ControlLine)) - for c in children) and - compat.all((node.is_ternary(c.keyword) or c.isend) - for c in children - if isinstance(c, parsetree.ControlLine))): + compat.all( + isinstance(c, (parsetree.Comment, parsetree.ControlLine)) + for c in children + ) + and compat.all( + (node.is_ternary(c.keyword) or c.isend) + for c in children + if isinstance(c, parsetree.ControlLine) + ) + ): self.printer.writeline("pass") def visitText(self, node): @@ -833,8 +879,7 @@ class _GenerateRenderMethod(object): filtered = len(node.filter_args.args) > 0 if filtered: self.printer.writelines( - "__M_writer = context._push_writer()", - "try:", + "__M_writer = context._push_writer()", "try:" ) for n in node.nodes: n.accept_visitor(self) @@ -842,18 +887,18 @@ class _GenerateRenderMethod(object): self.printer.writelines( "finally:", "__M_buf, __M_writer = context._pop_buffer_and_writer()", - "__M_writer(%s)" % - self.create_filter_callable( - node.filter_args.args, - "__M_buf.getvalue()", - False), - None + "__M_writer(%s)" + % self.create_filter_callable( + node.filter_args.args, "__M_buf.getvalue()", False + ), + None, ) def visitCode(self, node): if not node.ismodule: - self.printer.start_source(node.lineno) - self.printer.write_indented_block(node.text) + self.printer.write_indented_block( + node.text, starting_lineno=node.lineno + ) if not self.in_def and len(self.identifiers.locally_assigned) > 
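Note on ``create_filter_callable`` above: it is the machinery behind expression filters, including the ``n`` token that bypasses ``default_filters``. From the outside the interaction looks like this (small sketch)::

    from mako.template import Template

    # 'h' HTML-escapes by default; '| n' disables the default pipeline
    t = Template("${x} / ${x | n}", default_filters=["h"])
    print(t.render(x="<b>"))  # -> "&lt;b&gt; / <b>"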
0: # if we are the "template" def, fudge locally @@ -861,24 +906,28 @@ class _GenerateRenderMethod(object): # which is used for def calls within the same template, # to simulate "enclosing scope" self.printer.writeline( - '__M_locals_builtin_stored = __M_locals_builtin()') + "__M_locals_builtin_stored = __M_locals_builtin()" + ) self.printer.writeline( - '__M_locals.update(__M_dict_builtin([(__M_key,' - ' __M_locals_builtin_stored[__M_key]) for __M_key in' - ' [%s] if __M_key in __M_locals_builtin_stored]))' % - ','.join([repr(x) for x in node.declared_identifiers()])) + "__M_locals.update(__M_dict_builtin([(__M_key," + " __M_locals_builtin_stored[__M_key]) for __M_key in" + " [%s] if __M_key in __M_locals_builtin_stored]))" + % ",".join([repr(x) for x in node.declared_identifiers()]) + ) def visitIncludeTag(self, node): self.printer.start_source(node.lineno) - args = node.attributes.get('args') + args = node.attributes.get("args") if args: self.printer.writeline( - "runtime._include_file(context, %s, _template_uri, %s)" % - (node.parsed_attributes['file'], args)) + "runtime._include_file(context, %s, _template_uri, %s)" + % (node.parsed_attributes["file"], args) + ) else: self.printer.writeline( - "runtime._include_file(context, %s, _template_uri)" % - (node.parsed_attributes['file'])) + "runtime._include_file(context, %s, _template_uri)" + % (node.parsed_attributes["file"]) + ) def visitNamespaceTag(self, node): pass @@ -891,13 +940,14 @@ class _GenerateRenderMethod(object): self.printer.writeline("%s()" % node.funcname) else: nameargs = node.get_argument_expressions(as_call=True) - nameargs += ['**pageargs'] + nameargs += ["**pageargs"] self.printer.writeline( "if 'parent' not in context._data or " - "not hasattr(context._data['parent'], '%s'):" - % node.funcname) + "not hasattr(context._data['parent'], '%s'):" % node.funcname + ) self.printer.writeline( - "context['self'].%s(%s)" % (node.funcname, ",".join(nameargs))) + "context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)) + ) self.printer.writeline("\n") def visitCallNamespaceTag(self, node): @@ -908,19 +958,18 @@ class _GenerateRenderMethod(object): def visitCallTag(self, node): self.printer.writeline("def ccall(caller):") - export = ['body'] + export = ["body"] callable_identifiers = self.identifiers.branch(node, nested=True) body_identifiers = callable_identifiers.branch(node, nested=False) # we want the 'caller' passed to ccall to be used # for the body() function, but for other non-body() # <%def>s within <%call> we want the current caller # off the call stack (if any) - body_identifiers.add_declared('caller') + body_identifiers.add_declared("caller") self.identifier_stack.append(body_identifiers) class DefVisitor(object): - def visitDefTag(s, node): s.visitDefOrBase(node) @@ -942,16 +991,13 @@ class _GenerateRenderMethod(object): self.identifier_stack.pop() bodyargs = node.body_decl.get_argument_expressions() - self.printer.writeline("def body(%s):" % ','.join(bodyargs)) + self.printer.writeline("def body(%s):" % ",".join(bodyargs)) # TODO: figure out best way to specify # buffering/nonbuffering (at call time would be better) buffered = False if buffered: - self.printer.writelines( - "context._push_buffer()", - "try:" - ) + self.printer.writelines("context._push_buffer()", "try:") self.write_variable_declares(body_identifiers) self.identifier_stack.append(body_identifiers) @@ -960,25 +1006,22 @@ class _GenerateRenderMethod(object): self.identifier_stack.pop() self.write_def_finish(node, buffered, False, False, 
callstack=False) - self.printer.writelines( - None, - "return [%s]" % (','.join(export)), - None - ) + self.printer.writelines(None, "return [%s]" % (",".join(export)), None) self.printer.writelines( # push on caller for nested call "context.caller_stack.nextcaller = " "runtime.Namespace('caller', context, " "callables=ccall(__M_caller))", - "try:") + "try:", + ) self.printer.start_source(node.lineno) self.printer.writelines( - "__M_writer(%s)" % self.create_filter_callable( - [], node.expression, True), + "__M_writer(%s)" + % self.create_filter_callable([], node.expression, True), "finally:", "context.caller_stack.nextcaller = None", - None + None, ) @@ -996,10 +1039,12 @@ class _Identifiers(object): else: # things that have already been declared # in an enclosing namespace (i.e. names we can just use) - self.declared = set(parent.declared).\ - union([c.name for c in parent.closuredefs.values()]).\ - union(parent.locally_declared).\ - union(parent.argument_declared) + self.declared = ( + set(parent.declared) + .union([c.name for c in parent.closuredefs.values()]) + .union(parent.locally_declared) + .union(parent.argument_declared) + ) # if these identifiers correspond to a "nested" # scope, it means whatever the parent identifiers @@ -1043,11 +1088,13 @@ class _Identifiers(object): node.accept_visitor(self) illegal_names = self.compiler.reserved_names.intersection( - self.locally_declared) + self.locally_declared + ) if illegal_names: raise exceptions.NameConflictError( - "Reserved words declared in template: %s" % - ", ".join(illegal_names)) + "Reserved words declared in template: %s" + % ", ".join(illegal_names) + ) def branch(self, node, **kwargs): """create a new Identifiers for a new Node, with @@ -1060,24 +1107,28 @@ class _Identifiers(object): return set(self.topleveldefs.union(self.closuredefs).values()) def __repr__(self): - return "Identifiers(declared=%r, locally_declared=%r, "\ - "undeclared=%r, topleveldefs=%r, closuredefs=%r, "\ - "argumentdeclared=%r)" %\ - ( + return ( + "Identifiers(declared=%r, locally_declared=%r, " + "undeclared=%r, topleveldefs=%r, closuredefs=%r, " + "argumentdeclared=%r)" + % ( list(self.declared), list(self.locally_declared), list(self.undeclared), [c.name for c in self.topleveldefs.values()], [c.name for c in self.closuredefs.values()], - self.argument_declared) + self.argument_declared, + ) + ) def check_declared(self, node): """update the state of this Identifiers with the undeclared and declared identifiers of the given node.""" for ident in node.undeclared_identifiers(): - if ident != 'context' and\ - ident not in self.declared.union(self.locally_declared): + if ident != "context" and ident not in self.declared.union( + self.locally_declared + ): self.undeclared.add(ident) for ident in node.declared_identifiers(): self.locally_declared.add(ident) @@ -1097,7 +1148,8 @@ class _Identifiers(object): if not node.ismodule: self.check_declared(node) self.locally_assigned = self.locally_assigned.union( - node.declared_identifiers()) + node.declared_identifiers() + ) def visitNamespaceTag(self, node): # only traverse into the sub-elements of a @@ -1110,13 +1162,16 @@ class _Identifiers(object): def _check_name_exists(self, collection, node): existing = collection.get(node.funcname) collection[node.funcname] = node - if existing is not None and \ - existing is not node and \ - (node.is_block or existing.is_block): + if ( + existing is not None + and existing is not node + and (node.is_block or existing.is_block) + ): raise exceptions.CompileException( 
"%%def or %%block named '%s' already " - "exists in this template." % - node.funcname, **node.exception_kwargs) + "exists in this template." % node.funcname, + **node.exception_kwargs + ) def visitDefTag(self, node): if node.is_root() and not node.is_anonymous: @@ -1125,8 +1180,9 @@ class _Identifiers(object): self._check_name_exists(self.closuredefs, node) for ident in node.undeclared_identifiers(): - if ident != 'context' and \ - ident not in self.declared.union(self.locally_declared): + if ident != "context" and ident not in self.declared.union( + self.locally_declared + ): self.undeclared.add(ident) # visit defs only one level deep @@ -1143,16 +1199,22 @@ class _Identifiers(object): if isinstance(self.node, parsetree.DefTag): raise exceptions.CompileException( "Named block '%s' not allowed inside of def '%s'" - % (node.name, self.node.name), **node.exception_kwargs) - elif isinstance(self.node, - (parsetree.CallTag, parsetree.CallNamespaceTag)): + % (node.name, self.node.name), + **node.exception_kwargs + ) + elif isinstance( + self.node, (parsetree.CallTag, parsetree.CallNamespaceTag) + ): raise exceptions.CompileException( "Named block '%s' not allowed inside of <%%call> tag" - % (node.name, ), **node.exception_kwargs) + % (node.name,), + **node.exception_kwargs + ) for ident in node.undeclared_identifiers(): - if ident != 'context' and \ - ident not in self.declared.union(self.locally_declared): + if ident != "context" and ident not in self.declared.union( + self.locally_declared + ): self.undeclared.add(ident) if not node.is_anonymous: @@ -1167,8 +1229,9 @@ class _Identifiers(object): def visitTextTag(self, node): for ident in node.undeclared_identifiers(): - if ident != 'context' and \ - ident not in self.declared.union(self.locally_declared): + if ident != "context" and ident not in self.declared.union( + self.locally_declared + ): self.undeclared.add(ident) def visitIncludeTag(self, node): @@ -1185,9 +1248,9 @@ class _Identifiers(object): def visitCallTag(self, node): if node is self.node: for ident in node.undeclared_identifiers(): - if ident != 'context' and \ - ident not in self.declared.union( - self.locally_declared): + if ident != "context" and ident not in self.declared.union( + self.locally_declared + ): self.undeclared.add(ident) for ident in node.declared_identifiers(): self.argument_declared.add(ident) @@ -1195,15 +1258,15 @@ class _Identifiers(object): n.accept_visitor(self) else: for ident in node.undeclared_identifiers(): - if ident != 'context' and \ - ident not in self.declared.union( - self.locally_declared): + if ident != "context" and ident not in self.declared.union( + self.locally_declared + ): self.undeclared.add(ident) _FOR_LOOP = re.compile( - r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*' - r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):' + r"^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*" + r"(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):" ) @@ -1218,11 +1281,11 @@ def mangle_mako_loop(node, printer): match = _FOR_LOOP.match(node.text) if match: printer.writelines( - 'loop = __M_loop._enter(%s)' % match.group(2), - 'try:' + "loop = __M_loop._enter(%s)" % match.group(2), + "try:" # 'with __M_loop(%s) as loop:' % match.group(2) ) - text = 'for %s in loop:' % match.group(1) + text = "for %s in loop:" % match.group(1) else: raise SyntaxError("Couldn't apply loop context: %s" % node.text) else: @@ -1239,7 +1302,7 @@ class LoopVariable(object): self.detected = False def _loop_reference_detected(self, node): - if 'loop' in 
node.undeclared_identifiers(): + if "loop" in node.undeclared_identifiers(): self.detected = True else: for n in node.get_children(): diff --git a/server/www/packages/packages-linux/x64/mako/compat.py b/server/www/packages/packages-linux/x64/mako/compat.py index a2ab243..4460fde 100644 --- a/server/www/packages/packages-linux/x64/mako/compat.py +++ b/server/www/packages/packages-linux/x64/mako/compat.py @@ -1,34 +1,52 @@ +# mako/compat.py +# Copyright 2006-2019 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import collections +import inspect import sys -import time py3k = sys.version_info >= (3, 0) -py33 = sys.version_info >= (3, 3) py2k = sys.version_info < (3,) -py26 = sys.version_info >= (2, 6) py27 = sys.version_info >= (2, 7) -jython = sys.platform.startswith('java') -win32 = sys.platform.startswith('win') -pypy = hasattr(sys, 'pypy_version_info') +jython = sys.platform.startswith("java") +win32 = sys.platform.startswith("win") +pypy = hasattr(sys, "pypy_version_info") -if py3k: - # create a "getargspec" from getfullargspec(), which is not deprecated - # in Py3K; getargspec() has started to emit warnings as of Py3.5. - # As of Py3.4, now they are trying to move from getfullargspec() - # to "signature()", but getfullargspec() is not deprecated, so stick - # with that for now. +ArgSpec = collections.namedtuple( + "ArgSpec", ["args", "varargs", "keywords", "defaults"] +) - import collections - ArgSpec = collections.namedtuple( - "ArgSpec", - ["args", "varargs", "keywords", "defaults"]) - from inspect import getfullargspec as inspect_getfullargspec - def inspect_getargspec(func): - return ArgSpec( - *inspect_getfullargspec(func)[0:4] - ) -else: - from inspect import getargspec as inspect_getargspec # noqa +def inspect_getargspec(func): + """getargspec based on fully vendored getfullargspec from Python 3.3.""" + + if inspect.ismethod(func): + func = func.__func__ + if not inspect.isfunction(func): + raise TypeError("{!r} is not a Python function".format(func)) + + co = func.__code__ + if not inspect.iscode(co): + raise TypeError("{!r} is not a code object".format(co)) + + nargs = co.co_argcount + names = co.co_varnames + nkwargs = co.co_kwonlyargcount if py3k else 0 + args = list(names[:nargs]) + + nargs += nkwargs + varargs = None + if co.co_flags & inspect.CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & inspect.CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + + return ArgSpec(args, varargs, varkw, func.__defaults__) if py3k: @@ -36,7 +54,8 @@ if py3k: import builtins as compat_builtins from urllib.parse import quote_plus, unquote_plus from html.entities import codepoint2name, name2codepoint - string_types = str, + + string_types = (str,) binary_type = bytes text_type = str @@ -51,8 +70,10 @@ if py3k: def octal(lit): return eval("0o" + lit) + else: import __builtin__ as compat_builtins # noqa + try: from cStringIO import StringIO except: @@ -62,7 +83,8 @@ else: from urllib import quote_plus, unquote_plus # noqa from htmlentitydefs import codepoint2name, name2codepoint # noqa - string_types = basestring, # noqa + + string_types = (basestring,) # noqa binary_type = str text_type = unicode # noqa @@ -76,16 +98,18 @@ else: return eval("0" + lit) -if py33: +if py3k: from importlib import machinery def load_module(module_id, path): return machinery.SourceFileLoader(module_id, path).load_module() + + else: import imp def 
load_module(module_id, path): - fp = open(path, 'rb') + fp = open(path, "rb") try: return imp.load_source(module_id, path, fp) finally: @@ -93,93 +117,32 @@ else: if py3k: + def reraise(tp, value, tb=None, cause=None): if cause is not None: value.__cause__ = cause if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value + + else: - exec("def reraise(tp, value, tb=None, cause=None):\n" - " raise tp, value, tb\n") + exec( + "def reraise(tp, value, tb=None, cause=None):\n" + " raise tp, value, tb\n" + ) def exception_as(): return sys.exc_info()[1] -try: - import threading - if py3k: - import _thread as thread - else: - import thread -except ImportError: - import dummy_threading as threading # noqa - if py3k: - import _dummy_thread as thread - else: - import dummy_thread as thread # noqa -if win32 or jython: - time_func = time.clock -else: - time_func = time.time - -try: - from functools import partial -except: - def partial(func, *args, **keywords): - def newfunc(*fargs, **fkeywords): - newkeywords = keywords.copy() - newkeywords.update(fkeywords) - return func(*(args + fargs), **newkeywords) - return newfunc - - -all = all -import json # noqa +all = all # noqa def exception_name(exc): return exc.__class__.__name__ -try: - from inspect import CO_VARKEYWORDS, CO_VARARGS - - def inspect_func_args(fn): - if py3k: - co = fn.__code__ - else: - co = fn.func_code - - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - - if py3k: - return args, varargs, varkw, fn.__defaults__ - else: - return args, varargs, varkw, fn.func_defaults -except ImportError: - import inspect - - def inspect_func_args(fn): - return inspect.getargspec(fn) - -if py3k: - def callable(fn): - return hasattr(fn, '__call__') -else: - callable = callable - ################################################ # cross-compatible metaclass implementation @@ -187,6 +150,8 @@ else: def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("%sBase" % meta.__name__, (base,), {}) + + ################################################ @@ -195,7 +160,7 @@ def arg_stringname(func_arg): In Python3.4 a function's args are of _ast.arg type not _ast.name """ - if hasattr(func_arg, 'arg'): + if hasattr(func_arg, "arg"): return func_arg.arg else: return str(func_arg) diff --git a/server/www/packages/packages-linux/x64/mako/exceptions.py b/server/www/packages/packages-linux/x64/mako/exceptions.py index cb6fb3f..b6388b1 100644 --- a/server/www/packages/packages-linux/x64/mako/exceptions.py +++ b/server/www/packages/packages-linux/x64/mako/exceptions.py @@ -1,14 +1,16 @@ # mako/exceptions.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """exception classes""" -import traceback import sys -from mako import util, compat +import traceback + +from mako import compat +from mako import util class MakoException(Exception): @@ -27,11 +29,10 @@ def _format_filepos(lineno, pos, filename): class CompileException(MakoException): - def __init__(self, message, source, lineno, pos, filename): MakoException.__init__( - self, - message + _format_filepos(lineno, pos, filename)) + self, message + 
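Note on the mako/compat.py hunks above: the vendored ``inspect_getargspec`` avoids the deprecated ``inspect.getargspec()`` by reading the code object directly. A standalone re-statement plus a sanity check (for illustration only; the helper name here is made up)::

    import collections
    import inspect

    ArgSpec = collections.namedtuple(
        "ArgSpec", ["args", "varargs", "keywords", "defaults"]
    )

    def getargspec_compat(func):
        # Mirrors the approach of mako.compat.inspect_getargspec.
        if inspect.ismethod(func):
            func = func.__func__
        co = func.__code__
        nargs = co.co_argcount
        args = list(co.co_varnames[:nargs])
        nargs += getattr(co, "co_kwonlyargcount", 0)  # 0 on Python 2
        varargs = None
        if co.co_flags & inspect.CO_VARARGS:
            varargs = co.co_varnames[nargs]
            nargs += 1
        varkw = None
        if co.co_flags & inspect.CO_VARKEYWORDS:
            varkw = co.co_varnames[nargs]
        return ArgSpec(args, varargs, varkw, func.__defaults__)

    def f(a, b=2, *rest, **kw):
        pass

    assert getargspec_compat(f) == ArgSpec(["a", "b"], "rest", "kw", (2,))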
_format_filepos(lineno, pos, filename) + ) self.lineno = lineno self.pos = pos self.filename = filename @@ -39,11 +40,10 @@ class CompileException(MakoException): class SyntaxException(MakoException): - def __init__(self, message, source, lineno, pos, filename): MakoException.__init__( - self, - message + _format_filepos(lineno, pos, filename)) + self, message + _format_filepos(lineno, pos, filename) + ) self.lineno = lineno self.pos = pos self.filename = filename @@ -115,7 +115,7 @@ class RichTraceback(object): # str(Exception(u'\xe6')) work in Python < 2.6 self.message = self.error.args[0] if not isinstance(self.message, compat.text_type): - self.message = compat.text_type(self.message, 'ascii', 'replace') + self.message = compat.text_type(self.message, "ascii", "replace") def _get_reformatted_records(self, records): for rec in records: @@ -151,25 +151,30 @@ class RichTraceback(object): source, and code line from that line number of the template.""" import mako.template + mods = {} rawrecords = traceback.extract_tb(trcback) new_trcback = [] for filename, lineno, function, line in rawrecords: if not line: - line = '' + line = "" try: - (line_map, template_lines) = mods[filename] + (line_map, template_lines, template_filename) = mods[filename] except KeyError: try: info = mako.template._get_module_info(filename) module_source = info.code template_source = info.source - template_filename = info.template_filename or filename + template_filename = ( + info.template_filename + or info.template_uri + or filename + ) except KeyError: # A normal .py file (not a Template) if not compat.py3k: try: - fp = open(filename, 'rb') + fp = open(filename, "rb") encoding = util.parse_encoding(fp) fp.close() except IOError: @@ -177,21 +182,33 @@ class RichTraceback(object): if encoding: line = line.decode(encoding) else: - line = line.decode('ascii', 'replace') - new_trcback.append((filename, lineno, function, line, - None, None, None, None)) + line = line.decode("ascii", "replace") + new_trcback.append( + ( + filename, + lineno, + function, + line, + None, + None, + None, + None, + ) + ) continue template_ln = 1 - source_map = mako.template.ModuleInfo.\ - get_module_source_metadata( - module_source, full_line_map=True) - line_map = source_map['full_line_map'] + mtm = mako.template.ModuleInfo + source_map = mtm.get_module_source_metadata( + module_source, full_line_map=True + ) + line_map = source_map["full_line_map"] - template_lines = [line_ for line_ in - template_source.split("\n")] - mods[filename] = (line_map, template_lines) + template_lines = [ + line_ for line_ in template_source.split("\n") + ] + mods[filename] = (line_map, template_lines, template_filename) template_ln = line_map[lineno - 1] @@ -199,9 +216,18 @@ class RichTraceback(object): template_line = template_lines[template_ln - 1] else: template_line = None - new_trcback.append((filename, lineno, function, - line, template_filename, template_ln, - template_line, template_source)) + new_trcback.append( + ( + filename, + lineno, + function, + line, + template_filename, + template_ln, + template_line, + template_source, + ) + ) if not self.source: for l in range(len(new_trcback) - 1, 0, -1): if new_trcback[l][5]: @@ -212,15 +238,17 @@ class RichTraceback(object): if new_trcback: try: # A normal .py file (not a Template) - fp = open(new_trcback[-1][0], 'rb') + fp = open(new_trcback[-1][0], "rb") encoding = util.parse_encoding(fp) + if compat.py3k and not encoding: + encoding = "utf-8" fp.seek(0) self.source = fp.read() fp.close() if encoding: 
self.source = self.source.decode(encoding) except IOError: - self.source = '' + self.source = "" self.lineno = new_trcback[-1][1] return new_trcback @@ -233,7 +261,9 @@ def text_error_template(lookup=None): """ import mako.template - return mako.template.Template(r""" + + return mako.template.Template( + r""" <%page args="error=None, traceback=None"/> <%! from mako.exceptions import RichTraceback @@ -247,7 +277,8 @@ Traceback (most recent call last): ${line | trim} % endfor ${tback.errorname}: ${tback.message} -""") +""" + ) def _install_pygments(): @@ -259,9 +290,10 @@ def _install_pygments(): def _install_fallback(): global syntax_highlight, pygments_html_formatter from mako.filters import html_escape + pygments_html_formatter = None - def syntax_highlight(filename='', language=None): + def syntax_highlight(filename="", language=None): return html_escape @@ -270,6 +302,8 @@ def _install_highlighting(): _install_pygments() except ImportError: _install_fallback() + + _install_highlighting() @@ -287,7 +321,9 @@ def html_error_template(): """ import mako.template - return mako.template.Template(r""" + + return mako.template.Template( + r""" <%! from mako.exceptions import RichTraceback, syntax_highlight,\ pygments_html_formatter @@ -390,5 +426,7 @@ def html_error_template(): % endif -""", output_encoding=sys.getdefaultencoding(), - encoding_errors='htmlentityreplace') +""", + output_encoding=sys.getdefaultencoding(), + encoding_errors="htmlentityreplace", + ) diff --git a/server/www/packages/packages-linux/x64/mako/ext/autohandler.py b/server/www/packages/packages-linux/x64/mako/ext/autohandler.py index 9d1c911..55afb95 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/autohandler.py +++ b/server/www/packages/packages-linux/x64/mako/ext/autohandler.py @@ -1,5 +1,5 @@ # ext/autohandler.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -8,29 +8,29 @@ requires that the TemplateLookup class is used with templates. -usage: +usage:: -<%! - from mako.ext.autohandler import autohandler -%> -<%inherit file="${autohandler(template, context)}"/> + <%! + from mako.ext.autohandler import autohandler + %> + <%inherit file="${autohandler(template, context)}"/> -or with custom autohandler filename: +or with custom autohandler filename:: -<%! - from mako.ext.autohandler import autohandler -%> -<%inherit file="${autohandler(template, context, name='somefilename')}"/> + <%! 
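Note on the mako/exceptions.py hunks above: the reworked ``RichTraceback`` records feed both error templates. Typical consumption looks like this (the template text is deliberately broken to trigger the error path)::

    from mako.template import Template
    from mako import exceptions

    try:
        Template("hello ${undefined_name + 1}").render()
    except Exception:
        print(exceptions.text_error_template().render())
        tb = exceptions.RichTraceback()
        for filename, lineno, function, line in tb.traceback:
            print(filename, lineno, function, line)
        print("%s: %s" % (tb.errorname, tb.message))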
+ from mako.ext.autohandler import autohandler + %> + <%inherit file="${autohandler(template, context, name='somefilename')}"/> """ -import posixpath import os +import posixpath import re -def autohandler(template, context, name='autohandler'): +def autohandler(template, context, name="autohandler"): lookup = context.lookup _template_uri = template.module._template_uri if not lookup.filesystem_checks: @@ -39,13 +39,14 @@ def autohandler(template, context, name='autohandler'): except KeyError: pass - tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name] + tokens = re.findall(r"([^/]+)", posixpath.dirname(_template_uri)) + [name] while len(tokens): - path = '/' + '/'.join(tokens) + path = "/" + "/".join(tokens) if path != _template_uri and _file_exists(lookup, path): if not lookup.filesystem_checks: return lookup._uri_cache.setdefault( - (autohandler, _template_uri, name), path) + (autohandler, _template_uri, name), path + ) else: return path if len(tokens) == 1: @@ -54,15 +55,16 @@ def autohandler(template, context, name='autohandler'): if not lookup.filesystem_checks: return lookup._uri_cache.setdefault( - (autohandler, _template_uri, name), None) + (autohandler, _template_uri, name), None + ) else: return None def _file_exists(lookup, path): - psub = re.sub(r'^/', '', path) + psub = re.sub(r"^/", "", path) for d in lookup.directories: - if os.path.exists(d + '/' + psub): + if os.path.exists(d + "/" + psub): return True else: return False diff --git a/server/www/packages/packages-linux/x64/mako/ext/babelplugin.py b/server/www/packages/packages-linux/x64/mako/ext/babelplugin.py index 0b5e84f..dbe2cd0 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/babelplugin.py +++ b/server/www/packages/packages-linux/x64/mako/ext/babelplugin.py @@ -1,23 +1,24 @@ # ext/babelplugin.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """gettext message extraction via Babel: http://babel.edgewall.org/""" from babel.messages.extract import extract_python + from mako.ext.extract import MessageExtractor class BabelMakoExtractor(MessageExtractor): - def __init__(self, keywords, comment_tags, options): self.keywords = keywords self.options = options self.config = { - 'comment-tags': u' '.join(comment_tags), - 'encoding': options.get('input_encoding', - options.get('encoding', None)), + "comment-tags": u" ".join(comment_tags), + "encoding": options.get( + "input_encoding", options.get("encoding", None) + ), } super(BabelMakoExtractor, self).__init__() @@ -25,12 +26,19 @@ class BabelMakoExtractor(MessageExtractor): return self.process_file(fileobj) def process_python(self, code, code_lineno, translator_strings): - comment_tags = self.config['comment-tags'] - for lineno, funcname, messages, python_translator_comments \ - in extract_python(code, - self.keywords, comment_tags, self.options): - yield (code_lineno + (lineno - 1), funcname, messages, - translator_strings + python_translator_comments) + comment_tags = self.config["comment-tags"] + for ( + lineno, + funcname, + messages, + python_translator_comments, + ) in extract_python(code, self.keywords, comment_tags, self.options): + yield ( + code_lineno + (lineno - 1), + funcname, + messages, + translator_strings + python_translator_comments, + ) def extract(fileobj, keywords, comment_tags, options): diff --git 
a/server/www/packages/packages-linux/x64/mako/ext/beaker_cache.py b/server/www/packages/packages-linux/x64/mako/ext/beaker_cache.py index c7c260d..b415c9c 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/beaker_cache.py +++ b/server/www/packages/packages-linux/x64/mako/ext/beaker_cache.py @@ -1,7 +1,12 @@ +# ext/beaker_cache.py +# Copyright 2006-2019 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + """Provide a :class:`.CacheImpl` for the Beaker caching system.""" from mako import exceptions - from mako.cache import CacheImpl try: @@ -27,36 +32,37 @@ class BeakerCacheImpl(CacheImpl): def __init__(self, cache): if not has_beaker: raise exceptions.RuntimeException( - "Can't initialize Beaker plugin; Beaker is not installed.") + "Can't initialize Beaker plugin; Beaker is not installed." + ) global _beaker_cache if _beaker_cache is None: - if 'manager' in cache.template.cache_args: - _beaker_cache = cache.template.cache_args['manager'] + if "manager" in cache.template.cache_args: + _beaker_cache = cache.template.cache_args["manager"] else: _beaker_cache = beaker_cache.CacheManager() super(BeakerCacheImpl, self).__init__(cache) def _get_cache(self, **kw): - expiretime = kw.pop('timeout', None) - if 'dir' in kw: - kw['data_dir'] = kw.pop('dir') + expiretime = kw.pop("timeout", None) + if "dir" in kw: + kw["data_dir"] = kw.pop("dir") elif self.cache.template.module_directory: - kw['data_dir'] = self.cache.template.module_directory + kw["data_dir"] = self.cache.template.module_directory - if 'manager' in kw: - kw.pop('manager') + if "manager" in kw: + kw.pop("manager") - if kw.get('type') == 'memcached': - kw['type'] = 'ext:memcached' + if kw.get("type") == "memcached": + kw["type"] = "ext:memcached" - if 'region' in kw: - region = kw.pop('region') + if "region" in kw: + region = kw.pop("region") cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw) else: cache = _beaker_cache.get_cache(self.cache.id, **kw) - cache_args = {'starttime': self.cache.starttime} + cache_args = {"starttime": self.cache.starttime} if expiretime: - cache_args['expiretime'] = expiretime + cache_args["expiretime"] = expiretime return cache, cache_args def get_or_create(self, key, creation_function, **kw): diff --git a/server/www/packages/packages-linux/x64/mako/ext/extract.py b/server/www/packages/packages-linux/x64/mako/ext/extract.py index d777ea8..8a1bd54 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/extract.py +++ b/server/www/packages/packages-linux/x64/mako/ext/extract.py @@ -1,30 +1,39 @@ +# ext/extract.py +# Copyright 2006-2019 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + import re + from mako import compat from mako import lexer from mako import parsetree class MessageExtractor(object): - def process_file(self, fileobj): template_node = lexer.Lexer( - fileobj.read(), - input_encoding=self.config['encoding']).parse() + fileobj.read(), input_encoding=self.config["encoding"] + ).parse() for extracted in self.extract_nodes(template_node.get_children()): yield extracted def extract_nodes(self, nodes): translator_comments = [] in_translator_comments = False - input_encoding = self.config['encoding'] or 'ascii' + input_encoding = self.config["encoding"] or "ascii" comment_tags = list( - filter(None, re.split(r'\s+', self.config['comment-tags']))) + 
filter(None, re.split(r"\s+", self.config["comment-tags"])) + ) for node in nodes: child_nodes = None - if in_translator_comments and \ - isinstance(node, parsetree.Text) and \ - not node.content.strip(): + if ( + in_translator_comments + and isinstance(node, parsetree.Text) + and not node.content.strip() + ): # Ignore whitespace within translator comments continue @@ -32,13 +41,15 @@ class MessageExtractor(object): value = node.text.strip() if in_translator_comments: translator_comments.extend( - self._split_comment(node.lineno, value)) + self._split_comment(node.lineno, value) + ) continue for comment_tag in comment_tags: if value.startswith(comment_tag): in_translator_comments = True translator_comments.extend( - self._split_comment(node.lineno, value)) + self._split_comment(node.lineno, value) + ) continue if isinstance(node, parsetree.DefTag): @@ -69,15 +80,18 @@ class MessageExtractor(object): continue # Comments don't apply unless they immediately precede the message - if translator_comments and \ - translator_comments[-1][0] < node.lineno - 1: + if ( + translator_comments + and translator_comments[-1][0] < node.lineno - 1 + ): translator_comments = [] translator_strings = [ - comment[1] for comment in translator_comments] + comment[1] for comment in translator_comments + ] if isinstance(code, compat.text_type): - code = code.encode(input_encoding, 'backslashreplace') + code = code.encode(input_encoding, "backslashreplace") used_translator_comments = False # We add extra newline to work around a pybabel bug @@ -85,10 +99,11 @@ class MessageExtractor(object): # input string of the input is non-ascii) # Also, because we added it, we have to subtract one from # node.lineno - code = compat.byte_buffer(compat.b('\n') + code) + code = compat.byte_buffer(compat.b("\n") + code) for message in self.process_python( - code, node.lineno - 1, translator_strings): + code, node.lineno - 1, translator_strings + ): yield message used_translator_comments = True @@ -104,5 +119,7 @@ class MessageExtractor(object): def _split_comment(lineno, comment): """Return the multiline comment at lineno split into a list of comment line numbers and the accompanying comment line""" - return [(lineno + index, line) for index, line in - enumerate(comment.splitlines())] + return [ + (lineno + index, line) + for index, line in enumerate(comment.splitlines()) + ] diff --git a/server/www/packages/packages-linux/x64/mako/ext/linguaplugin.py b/server/www/packages/packages-linux/x64/mako/ext/linguaplugin.py index 46b0d6a..955a5cb 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/linguaplugin.py +++ b/server/www/packages/packages-linux/x64/mako/ext/linguaplugin.py @@ -1,43 +1,57 @@ +# ext/linguaplugin.py +# Copyright 2006-2019 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + import io + from lingua.extractors import Extractor -from lingua.extractors import Message from lingua.extractors import get_extractor -from mako.ext.extract import MessageExtractor +from lingua.extractors import Message + from mako import compat +from mako.ext.extract import MessageExtractor class LinguaMakoExtractor(Extractor, MessageExtractor): - '''Mako templates''' - extensions = ['.mako'] - default_config = { - 'encoding': 'utf-8', - 'comment-tags': '', - } + """Mako templates""" + + extensions = [".mako"] + default_config = {"encoding": "utf-8", "comment-tags": ""} def __call__(self, filename, options, fileobj=None): 
self.options = options self.filename = filename - self.python_extractor = get_extractor('x.py') + self.python_extractor = get_extractor("x.py") if fileobj is None: - fileobj = open(filename, 'rb') + fileobj = open(filename, "rb") return self.process_file(fileobj) def process_python(self, code, code_lineno, translator_strings): source = code.getvalue().strip() - if source.endswith(compat.b(':')): - if source in (compat.b('try:'), compat.b('else:')) or source.startswith(compat.b('except')): - source = compat.b('') # Ignore try/except and else - elif source.startswith(compat.b('elif')): - source = source[2:] # Replace "elif" with "if" - source += compat.b('pass') + if source.endswith(compat.b(":")): + if source in ( + compat.b("try:"), + compat.b("else:"), + ) or source.startswith(compat.b("except")): + source = compat.b("") # Ignore try/except and else + elif source.startswith(compat.b("elif")): + source = source[2:] # Replace "elif" with "if" + source += compat.b("pass") code = io.BytesIO(source) for msg in self.python_extractor( - self.filename, self.options, code, code_lineno -1): + self.filename, self.options, code, code_lineno - 1 + ): if translator_strings: - msg = Message(msg.msgctxt, msg.msgid, msg.msgid_plural, - msg.flags, - compat.u(' ').join( - translator_strings + [msg.comment]), - msg.tcomment, msg.location) + msg = Message( + msg.msgctxt, + msg.msgid, + msg.msgid_plural, + msg.flags, + compat.u(" ").join(translator_strings + [msg.comment]), + msg.tcomment, + msg.location, + ) yield msg diff --git a/server/www/packages/packages-linux/x64/mako/ext/preprocessors.py b/server/www/packages/packages-linux/x64/mako/ext/preprocessors.py index 9b700d1..1eeb7c5 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/preprocessors.py +++ b/server/www/packages/packages-linux/x64/mako/ext/preprocessors.py @@ -1,5 +1,5 @@ # ext/preprocessors.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -17,4 +17,4 @@ def convert_comments(text): from mako.ext.preprocessors import convert_comments t = Template(..., preprocessor=convert_comments)""" - return re.sub(r'(?<=\n)\s*#[^#]', "##", text) + return re.sub(r"(?<=\n)\s*#[^#]", "##", text) diff --git a/server/www/packages/packages-linux/x64/mako/ext/pygmentplugin.py b/server/www/packages/packages-linux/x64/mako/ext/pygmentplugin.py index 4057caa..1734ccd 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/pygmentplugin.py +++ b/server/www/packages/packages-linux/x64/mako/ext/pygmentplugin.py @@ -1,45 +1,73 @@ # ext/pygmentplugin.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from pygments.lexers.web import \ - HtmlLexer, XmlLexer, JavascriptLexer, CssLexer -from pygments.lexers.agile import PythonLexer, Python3Lexer -from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \ - include, using -from pygments.token import \ - Text, Comment, Operator, Keyword, Name, String, Other -from pygments.formatters.html import HtmlFormatter from pygments import highlight +from pygments.formatters.html import HtmlFormatter +from pygments.lexer import bygroups +from pygments.lexer import DelegatingLexer +from pygments.lexer import include +from 
pygments.lexer import RegexLexer +from pygments.lexer import using +from pygments.lexers.agile import Python3Lexer +from pygments.lexers.agile import PythonLexer +from pygments.lexers.web import CssLexer +from pygments.lexers.web import HtmlLexer +from pygments.lexers.web import JavascriptLexer +from pygments.lexers.web import XmlLexer +from pygments.token import Comment +from pygments.token import Keyword +from pygments.token import Name +from pygments.token import Operator +from pygments.token import Other +from pygments.token import String +from pygments.token import Text + from mako import compat class MakoLexer(RegexLexer): - name = 'Mako' - aliases = ['mako'] - filenames = ['*.mao'] + name = "Mako" + aliases = ["mako"] + filenames = ["*.mao"] tokens = { - 'root': [ - (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)', - bygroups(Text, Comment.Preproc, Keyword, Other)), - (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)', - bygroups(Text, Comment.Preproc, using(PythonLexer), Other)), - (r'(\s*)(##[^\n]*)(\n|\Z)', - bygroups(Text, Comment.Preproc, Other)), - (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc), - (r'(<%)([\w\.\:]+)', - bygroups(Comment.Preproc, Name.Builtin), 'tag'), - (r'(</%)([\w\.\:]+)(>)', - bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), - (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), - (r'(<%(?:!?))(.*?)(%>)(?s)', - bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), - (r'(\$\{)(.*?)(\})', - bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), - (r'''(?sx) + "root": [ + ( + r"(\s*)(\%)(\s*end(?:\w+))(\n|\Z)", + bygroups(Text, Comment.Preproc, Keyword, Other), + ), + ( + r"(\s*)(\%(?!%))([^\n]*)(\n|\Z)", + bygroups(Text, Comment.Preproc, using(PythonLexer), Other), + ), + ( + r"(\s*)(##[^\n]*)(\n|\Z)", + bygroups(Text, Comment.Preproc, Other), + ), + (r"""(?s)<%doc>.*?</%doc>""", Comment.Preproc), + ( + r"(<%)([\w\.\:]+)", + bygroups(Comment.Preproc, Name.Builtin), + "tag", + ), + ( + r"(</%)([\w\.\:]+)(>)", + bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc), + ), + (r"<%(?=([\w\.\:]+))", Comment.Preproc, "ondeftags"), + ( + r"(?s)(<%(?:!?))(.*?)(%>)", + bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc), + ), + ( + r"(\$\{)(.*?)(\})", + bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc), + ), + ( + r"""(?sx) (.+?) 
# anything, followed by: (?: (?<=\n)(?=%(?!%)|\#\#) | # an eval or comment line @@ -52,76 +80,78 @@ class MakoLexer(RegexLexer): (\\\n) | # an escaped newline \Z # end of string ) - ''', bygroups(Other, Operator)), - (r'\s+', Text), + """, + bygroups(Other, Operator), + ), + (r"\s+", Text), ], - 'ondeftags': [ - (r'<%', Comment.Preproc), - (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin), - include('tag'), + "ondeftags": [ + (r"<%", Comment.Preproc), + (r"(?<=<%)(include|inherit|namespace|page)", Name.Builtin), + include("tag"), ], - 'tag': [ - (r'((?:\w+)\s*=)\s*(".*?")', - bygroups(Name.Attribute, String)), - (r'/?\s*>', Comment.Preproc, '#pop'), - (r'\s+', Text), + "tag": [ + (r'((?:\w+)\s*=)\s*(".*?")', bygroups(Name.Attribute, String)), + (r"/?\s*>", Comment.Preproc, "#pop"), + (r"\s+", Text), ], - 'attr': [ - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), + "attr": [ + ('".*?"', String, "#pop"), + ("'.*?'", String, "#pop"), + (r"[^\s>]+", String, "#pop"), ], } class MakoHtmlLexer(DelegatingLexer): - name = 'HTML+Mako' - aliases = ['html+mako'] + name = "HTML+Mako" + aliases = ["html+mako"] def __init__(self, **options): - super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, - **options) + super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, **options) class MakoXmlLexer(DelegatingLexer): - name = 'XML+Mako' - aliases = ['xml+mako'] + name = "XML+Mako" + aliases = ["xml+mako"] def __init__(self, **options): - super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, - **options) + super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, **options) class MakoJavascriptLexer(DelegatingLexer): - name = 'JavaScript+Mako' - aliases = ['js+mako', 'javascript+mako'] + name = "JavaScript+Mako" + aliases = ["js+mako", "javascript+mako"] def __init__(self, **options): - super(MakoJavascriptLexer, self).__init__(JavascriptLexer, - MakoLexer, **options) + super(MakoJavascriptLexer, self).__init__( + JavascriptLexer, MakoLexer, **options + ) class MakoCssLexer(DelegatingLexer): - name = 'CSS+Mako' - aliases = ['css+mako'] + name = "CSS+Mako" + aliases = ["css+mako"] def __init__(self, **options): - super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, - **options) + super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, **options) -pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted', - linenos=True) +pygments_html_formatter = HtmlFormatter( + cssclass="syntax-highlighted", linenos=True +) -def syntax_highlight(filename='', language=None): +def syntax_highlight(filename="", language=None): mako_lexer = MakoLexer() if compat.py3k: python_lexer = Python3Lexer() else: python_lexer = PythonLexer() - if filename.startswith('memory:') or language == 'mako': - return lambda string: highlight(string, mako_lexer, - pygments_html_formatter) - return lambda string: highlight(string, python_lexer, - pygments_html_formatter) + if filename.startswith("memory:") or language == "mako": + return lambda string: highlight( + string, mako_lexer, pygments_html_formatter + ) + return lambda string: highlight( + string, python_lexer, pygments_html_formatter + ) diff --git a/server/www/packages/packages-linux/x64/mako/ext/turbogears.py b/server/www/packages/packages-linux/x64/mako/ext/turbogears.py index eaa2d78..fdb7741 100644 --- a/server/www/packages/packages-linux/x64/mako/ext/turbogears.py +++ b/server/www/packages/packages-linux/x64/mako/ext/turbogears.py @@ -1,5 +1,5 @@ # ext/turbogears.py -# Copyright (C) 2006-2016 the Mako authors and 
contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -13,7 +13,7 @@ class TGPlugin(object): """TurboGears compatible Template Plugin.""" - def __init__(self, extra_vars_func=None, options=None, extension='mak'): + def __init__(self, extra_vars_func=None, options=None, extension="mak"): self.extra_vars_func = extra_vars_func self.extension = extension if not options: @@ -22,9 +22,9 @@ class TGPlugin(object): # Pull the options out and initialize the lookup lookup_options = {} for k, v in options.items(): - if k.startswith('mako.'): + if k.startswith("mako."): lookup_options[k[5:]] = v - elif k in ['directories', 'filesystem_checks', 'module_directory']: + elif k in ["directories", "filesystem_checks", "module_directory"]: lookup_options[k] = v self.lookup = TemplateLookup(**lookup_options) @@ -40,14 +40,17 @@ class TGPlugin(object): if template_string is not None: return Template(template_string, **self.tmpl_options) # Translate TG dot notation to normal / template path - if '/' not in templatename: - templatename = '/' + templatename.replace('.', '/') + '.' +\ - self.extension + if "/" not in templatename: + templatename = ( + "/" + templatename.replace(".", "/") + "." + self.extension + ) # Lookup template return self.lookup.get_template(templatename) - def render(self, info, format="html", fragment=False, template=None): + def render( + self, info, format="html", fragment=False, template=None # noqa + ): if isinstance(template, compat.string_types): template = self.load_template(template) diff --git a/server/www/packages/packages-linux/x64/mako/filters.py b/server/www/packages/packages-linux/x64/mako/filters.py index c082690..ba69fdd 100644 --- a/server/www/packages/packages-linux/x64/mako/filters.py +++ b/server/www/packages/packages-linux/x64/mako/filters.py @@ -1,24 +1,25 @@ # mako/filters.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -import re import codecs - -from mako.compat import quote_plus, unquote_plus, codepoint2name, \ - name2codepoint +import re from mako import compat +from mako.compat import codepoint2name +from mako.compat import name2codepoint +from mako.compat import quote_plus +from mako.compat import unquote_plus xml_escapes = { - '&': '&amp;', - '>': '&gt;', - '<': '&lt;', - '"': '&#34;', # also &quot; in html-only - "'": '&#39;' # also &apos; in html-only + "&": "&amp;", + ">": "&gt;", + "<": "&lt;", + '"': "&#34;", # also &quot; in html-only + "'": "&#39;", # also &apos; in html-only } # XXX: &quot; is valid in HTML and XML @@ -37,6 +38,7 @@ def legacy_html_escape(s): try: import markupsafe + html_escape = markupsafe.escape except ImportError: html_escape = legacy_html_escape @@ -69,7 +71,6 @@ def trim(string): class Decode(object): - def __getattr__(self, key): def decode(x): if isinstance(x, compat.text_type): @@ -78,24 +79,31 @@ class Decode(object): return decode(str(x)) else: return compat.text_type(x, encoding=key) + return decode + + decode = Decode() -_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z') +_ASCII_re = re.compile(r"\A[\x00-\x7f]*\Z") def is_ascii_str(text): return isinstance(text, str) and _ASCII_re.match(text) + ################################################################ class XMLEntityEscaper(object): - def __init__(self, codepoint2name, name2codepoint): - 
self.codepoint2entity = dict([(c, compat.text_type('&%s;' % n)) - for c, n in codepoint2name.items()]) + self.codepoint2entity = dict( + [ + (c, compat.text_type("&%s;" % n)) + for c, n in codepoint2name.items() + ] + ) self.name2codepoint = name2codepoint def escape_entities(self, text): @@ -110,7 +118,7 @@ class XMLEntityEscaper(object): try: return self.codepoint2entity[codepoint] except (KeyError, IndexError): - return '&#x%X;' % codepoint + return "&#x%X;" % codepoint __escapable = re.compile(r'["&<>]|[^\x00-\x7f]') @@ -123,19 +131,22 @@ class XMLEntityEscaper(object): The return value is guaranteed to be ASCII. """ - return self.__escapable.sub(self.__escape, compat.text_type(text) - ).encode('ascii') + return self.__escapable.sub( + self.__escape, compat.text_type(text) + ).encode("ascii") # XXX: This regexp will not match all valid XML entity names__. # (It punts on details involving involving CombiningChars and Extenders.) # # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef - __characterrefs = re.compile(r'''& (?: + __characterrefs = re.compile( + r"""& (?: \#(\d+) | \#x([\da-f]+) | ( (?!\d) [:\w] [-.:\w]+ ) - ) ;''', - re.X | re.UNICODE) + ) ;""", + re.X | re.UNICODE, + ) def __unescape(self, m): dval, hval, name = m.groups() @@ -144,7 +155,7 @@ class XMLEntityEscaper(object): elif hval: codepoint = int(hval, 16) else: - codepoint = self.name2codepoint.get(name, 0xfffd) + codepoint = self.name2codepoint.get(name, 0xFFFD) # U+FFFD = "REPLACEMENT CHARACTER" if codepoint < 128: return chr(codepoint) @@ -168,42 +179,41 @@ html_entities_unescape = _html_entities_escaper.unescape def htmlentityreplace_errors(ex): """An encoding error handler. - This python `codecs`_ error handler replaces unencodable + This python codecs error handler replaces unencodable characters with HTML entities, or, if no HTML entity exists for - the character, XML character references. + the character, XML character references:: - >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') - 'The cost was &euro;12.' + >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') + 'The cost was &euro;12.' 
""" if isinstance(ex, UnicodeEncodeError): # Handle encoding errors - bad_text = ex.object[ex.start:ex.end] + bad_text = ex.object[ex.start : ex.end] text = _html_entities_escaper.escape(bad_text) return (compat.text_type(text), ex.end) raise ex -codecs.register_error('htmlentityreplace', htmlentityreplace_errors) + +codecs.register_error("htmlentityreplace", htmlentityreplace_errors) # TODO: options to make this dynamic per-compilation will be added in a later # release DEFAULT_ESCAPES = { - 'x': 'filters.xml_escape', - 'h': 'filters.html_escape', - 'u': 'filters.url_escape', - 'trim': 'filters.trim', - 'entity': 'filters.html_entities_escape', - 'unicode': 'unicode', - 'decode': 'decode', - 'str': 'str', - 'n': 'n' + "x": "filters.xml_escape", + "h": "filters.html_escape", + "u": "filters.url_escape", + "trim": "filters.trim", + "entity": "filters.html_entities_escape", + "unicode": "unicode", + "decode": "decode", + "str": "str", + "n": "n", } if compat.py3k: - DEFAULT_ESCAPES.update({ - 'unicode': 'str' - }) + DEFAULT_ESCAPES.update({"unicode": "str"}) NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy() -NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape' -NON_UNICODE_ESCAPES['u'] = 'filters.legacy_url_escape' +NON_UNICODE_ESCAPES["h"] = "filters.legacy_html_escape" +NON_UNICODE_ESCAPES["u"] = "filters.legacy_url_escape" diff --git a/server/www/packages/packages-linux/x64/mako/lexer.py b/server/www/packages/packages-linux/x64/mako/lexer.py index cf4187f..dadd663 100644 --- a/server/www/packages/packages-linux/x64/mako/lexer.py +++ b/server/www/packages/packages-linux/x64/mako/lexer.py @@ -1,24 +1,31 @@ # mako/lexer.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides the Lexer class for parsing template strings into parse trees.""" -import re import codecs -from mako import parsetree, exceptions, compat +import re + +from mako import compat +from mako import exceptions +from mako import parsetree from mako.pygen import adjust_whitespace _regexp_cache = {} class Lexer(object): - - def __init__(self, text, filename=None, - disable_unicode=False, - input_encoding=None, preprocessor=None): + def __init__( + self, + text, + filename=None, + disable_unicode=False, + input_encoding=None, + preprocessor=None, + ): self.text = text self.filename = filename self.template = parsetree.TemplateNode(self.filename) @@ -34,22 +41,24 @@ class Lexer(object): if compat.py3k and disable_unicode: raise exceptions.UnsupportedError( - "Mako for Python 3 does not " - "support disabling Unicode") + "Mako for Python 3 does not " "support disabling Unicode" + ) if preprocessor is None: self.preprocessor = [] - elif not hasattr(preprocessor, '__iter__'): + elif not hasattr(preprocessor, "__iter__"): self.preprocessor = [preprocessor] else: self.preprocessor = preprocessor @property def exception_kwargs(self): - return {'source': self.text, - 'lineno': self.matched_lineno, - 'pos': self.matched_charpos, - 'filename': self.filename} + return { + "source": self.text, + "lineno": self.matched_lineno, + "pos": self.matched_charpos, + "filename": self.filename, + } def match(self, regexp, flags=None): """compile the given regexp, cache the reg, and call match_reg().""" @@ -83,9 +92,9 @@ class Lexer(object): else: self.match_position = end self.matched_lineno = self.lineno - lines = re.findall(r"\n", 
self.text[mp:self.match_position]) + lines = re.findall(r"\n", self.text[mp : self.match_position]) cp = mp - 1 - while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'): + while cp >= 0 and cp < self.textlength and self.text[cp] != "\n": cp -= 1 self.matched_charpos = mp - cp self.lineno += len(lines) @@ -97,46 +106,49 @@ class Lexer(object): def parse_until_text(self, watch_nesting, *text): startpos = self.match_position - text_re = r'|'.join(text) + text_re = r"|".join(text) brace_level = 0 paren_level = 0 bracket_level = 0 while True: - match = self.match(r'#.*\n') + match = self.match(r"#.*\n") if match: continue - match = self.match(r'(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1', - re.S) + match = self.match( + r"(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1", re.S + ) if match: continue - match = self.match(r'(%s)' % text_re) - if match and not (watch_nesting - and (brace_level > 0 or paren_level > 0 - or bracket_level > 0)): - return \ - self.text[startpos: - self.match_position - len(match.group(1))],\ - match.group(1) + match = self.match(r"(%s)" % text_re) + if match and not ( + watch_nesting + and (brace_level > 0 or paren_level > 0 or bracket_level > 0) + ): + return ( + self.text[ + startpos : self.match_position - len(match.group(1)) + ], + match.group(1), + ) elif not match: match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S) if match: - brace_level += match.group(1).count('{') - brace_level -= match.group(1).count('}') - paren_level += match.group(1).count('(') - paren_level -= match.group(1).count(')') - bracket_level += match.group(1).count('[') - bracket_level -= match.group(1).count(']') + brace_level += match.group(1).count("{") + brace_level -= match.group(1).count("}") + paren_level += match.group(1).count("(") + paren_level -= match.group(1).count(")") + bracket_level += match.group(1).count("[") + bracket_level -= match.group(1).count("]") continue raise exceptions.SyntaxException( - "Expected: %s" % - ','.join(text), - **self.exception_kwargs) + "Expected: %s" % ",".join(text), **self.exception_kwargs + ) def append_node(self, nodecls, *args, **kwargs): - kwargs.setdefault('source', self.text) - kwargs.setdefault('lineno', self.matched_lineno) - kwargs.setdefault('pos', self.matched_charpos) - kwargs['filename'] = self.filename + kwargs.setdefault("source", self.text) + kwargs.setdefault("lineno", self.matched_lineno) + kwargs.setdefault("pos", self.matched_charpos) + kwargs["filename"] = self.filename node = nodecls(*args, **kwargs) if len(self.tag): self.tag[-1].nodes.append(node) @@ -149,8 +161,10 @@ class Lexer(object): if self.control_line: control_frame = self.control_line[-1] control_frame.nodes.append(node) - if not (isinstance(node, parsetree.ControlLine) and - control_frame.is_ternary(node.keyword)): + if not ( + isinstance(node, parsetree.ControlLine) + and control_frame.is_ternary(node.keyword) + ): if self.ternary_stack and self.ternary_stack[-1]: self.ternary_stack[-1][-1].nodes.append(node) if isinstance(node, parsetree.Tag): @@ -164,17 +178,20 @@ class Lexer(object): elif node.is_primary: self.control_line.append(node) self.ternary_stack.append([]) - elif self.control_line and \ - self.control_line[-1].is_ternary(node.keyword): + elif self.control_line and self.control_line[-1].is_ternary( + node.keyword + ): self.ternary_stack[-1].append(node) - elif self.control_line and \ - not self.control_line[-1].is_ternary(node.keyword): + elif self.control_line and not self.control_line[-1].is_ternary( + node.keyword + ): raise 
exceptions.SyntaxException( - "Keyword '%s' not a legal ternary for keyword '%s'" % - (node.keyword, self.control_line[-1].keyword), - **self.exception_kwargs) + "Keyword '%s' not a legal ternary for keyword '%s'" + % (node.keyword, self.control_line[-1].keyword), + **self.exception_kwargs + ) - _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n') + _coding_re = re.compile(r"#.*coding[:=]\s*([-\w.]+).*\r?\n") def decode_raw_stream(self, text, decode_raw, known_encoding, filename): """given string/unicode or bytes/string, determine encoding @@ -184,44 +201,48 @@ class Lexer(object): """ if isinstance(text, compat.text_type): m = self._coding_re.match(text) - encoding = m and m.group(1) or known_encoding or 'ascii' + encoding = m and m.group(1) or known_encoding or "ascii" return encoding, text if text.startswith(codecs.BOM_UTF8): - text = text[len(codecs.BOM_UTF8):] - parsed_encoding = 'utf-8' - m = self._coding_re.match(text.decode('utf-8', 'ignore')) - if m is not None and m.group(1) != 'utf-8': + text = text[len(codecs.BOM_UTF8) :] + parsed_encoding = "utf-8" + m = self._coding_re.match(text.decode("utf-8", "ignore")) + if m is not None and m.group(1) != "utf-8": raise exceptions.CompileException( "Found utf-8 BOM in file, with conflicting " "magic encoding comment of '%s'" % m.group(1), - text.decode('utf-8', 'ignore'), - 0, 0, filename) + text.decode("utf-8", "ignore"), + 0, + 0, + filename, + ) else: - m = self._coding_re.match(text.decode('utf-8', 'ignore')) + m = self._coding_re.match(text.decode("utf-8", "ignore")) if m: parsed_encoding = m.group(1) else: - parsed_encoding = known_encoding or 'ascii' + parsed_encoding = known_encoding or "ascii" if decode_raw: try: text = text.decode(parsed_encoding) except UnicodeDecodeError: raise exceptions.CompileException( - "Unicode decode operation of encoding '%s' failed" % - parsed_encoding, - text.decode('utf-8', 'ignore'), - 0, 0, filename) + "Unicode decode operation of encoding '%s' failed" + % parsed_encoding, + text.decode("utf-8", "ignore"), + 0, + 0, + filename, + ) return parsed_encoding, text def parse(self): self.encoding, self.text = self.decode_raw_stream( - self.text, - not self.disable_unicode, - self.encoding, - self.filename) + self.text, not self.disable_unicode, self.encoding, self.filename + ) for preproc in self.preprocessor: self.text = preproc(self.text) @@ -232,7 +253,7 @@ class Lexer(object): self.textlength = len(self.text) - while (True): + while True: if self.match_position > self.textlength: break @@ -258,20 +279,24 @@ class Lexer(object): raise exceptions.CompileException("assertion failed") if len(self.tag): - raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % - self.tag[-1].keyword, - **self.exception_kwargs) + raise exceptions.SyntaxException( + "Unclosed tag: <%%%s>" % self.tag[-1].keyword, + **self.exception_kwargs + ) if len(self.control_line): raise exceptions.SyntaxException( - "Unterminated control keyword: '%s'" % - self.control_line[-1].keyword, + "Unterminated control keyword: '%s'" + % self.control_line[-1].keyword, self.text, self.control_line[-1].lineno, - self.control_line[-1].pos, self.filename) + self.control_line[-1].pos, + self.filename, + ) return self.template def match_tag_start(self): - match = self.match(r''' + match = self.match( + r""" \<% # opening tag ([\w\.\:]+) # keyword @@ -283,9 +308,9 @@ class Lexer(object): (/)?> # closing - ''', - - re.I | re.S | re.X) + """, + re.I | re.S | re.X, + ) if match: keyword, attr, isend = match.groups() @@ -293,22 +318,23 @@ class 
Lexer(object): attributes = {} if attr: for att in re.findall( - r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr): + r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr + ): key, val1, val2 = att text = val1 or val2 - text = text.replace('\r\n', '\n') + text = text.replace("\r\n", "\n") attributes[key] = text self.append_node(parsetree.Tag, keyword, attributes) if isend: self.tag.pop() else: - if keyword == 'text': - match = self.match(r'(.*?)(?=\</%text>)', re.S) + if keyword == "text": + match = self.match(r"(.*?)(?=\</%text>)", re.S) if not match: raise exceptions.SyntaxException( - "Unclosed tag: <%%%s>" % - self.tag[-1].keyword, - **self.exception_kwargs) + "Unclosed tag: <%%%s>" % self.tag[-1].keyword, + **self.exception_kwargs + ) self.append_node(parsetree.Text, match.group(1)) return self.match_tag_end() return True @@ -316,25 +342,27 @@ class Lexer(object): return False def match_tag_end(self): - match = self.match(r'\</%[\t ]*(.+?)[\t ]*>') + match = self.match(r"\</%[\t ]*(.+?)[\t ]*>") if match: if not len(self.tag): raise exceptions.SyntaxException( - "Closing tag without opening tag: </%%%s>" % - match.group(1), - **self.exception_kwargs) + "Closing tag without opening tag: </%%%s>" + % match.group(1), + **self.exception_kwargs + ) elif self.tag[-1].keyword != match.group(1): raise exceptions.SyntaxException( - "Closing tag </%%%s> does not match tag: <%%%s>" % - (match.group(1), self.tag[-1].keyword), - **self.exception_kwargs) + "Closing tag </%%%s> does not match tag: <%%%s>" + % (match.group(1), self.tag[-1].keyword), + **self.exception_kwargs + ) self.tag.pop() return True else: return False def match_end(self): - match = self.match(r'\Z', re.S) + match = self.match(r"\Z", re.S) if match: string = match.group() if string: @@ -345,7 +373,8 @@ class Lexer(object): return False def match_text(self): - match = self.match(r""" + match = self.match( + r""" (.*?) 
# anything, followed by: ( (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based @@ -360,7 +389,9 @@ class Lexer(object): (\\\r?\n) # an escaped newline - throw away | \Z # end of string - )""", re.X | re.S) + )""", + re.X | re.S, + ) if match: text = match.group(1) @@ -374,14 +405,17 @@ class Lexer(object): match = self.match(r"<%(!)?") if match: line, pos = self.matched_lineno, self.matched_charpos - text, end = self.parse_until_text(False, r'%>') + text, end = self.parse_until_text(False, r"%>") # the trailing newline helps # compiler.parse() not complain about indentation text = adjust_whitespace(text) + "\n" self.append_node( parsetree.Code, text, - match.group(1) == '!', lineno=line, pos=pos) + match.group(1) == "!", + lineno=line, + pos=pos, + ) return True else: return False @@ -390,16 +424,19 @@ class Lexer(object): match = self.match(r"\${") if match: line, pos = self.matched_lineno, self.matched_charpos - text, end = self.parse_until_text(True, r'\|', r'}') - if end == '|': - escapes, end = self.parse_until_text(True, r'}') + text, end = self.parse_until_text(True, r"\|", r"}") + if end == "|": + escapes, end = self.parse_until_text(True, r"}") else: escapes = "" - text = text.replace('\r\n', '\n') + text = text.replace("\r\n", "\n") self.append_node( parsetree.Expression, - text, escapes.strip(), - lineno=line, pos=pos) + text, + escapes.strip(), + lineno=line, + pos=pos, + ) return True else: return False @@ -407,31 +444,35 @@ class Lexer(object): def match_control_line(self): match = self.match( r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)" - r"(?:\r?\n|\Z)", re.M) + r"(?:\r?\n|\Z)", + re.M, + ) if match: operator = match.group(1) text = match.group(2) - if operator == '%': - m2 = re.match(r'(end)?(\w+)\s*(.*)', text) + if operator == "%": + m2 = re.match(r"(end)?(\w+)\s*(.*)", text) if not m2: raise exceptions.SyntaxException( - "Invalid control line: '%s'" % - text, - **self.exception_kwargs) + "Invalid control line: '%s'" % text, + **self.exception_kwargs + ) isend, keyword = m2.group(1, 2) - isend = (isend is not None) + isend = isend is not None if isend: if not len(self.control_line): raise exceptions.SyntaxException( - "No starting keyword '%s' for '%s'" % - (keyword, text), - **self.exception_kwargs) + "No starting keyword '%s' for '%s'" + % (keyword, text), + **self.exception_kwargs + ) elif self.control_line[-1].keyword != keyword: raise exceptions.SyntaxException( - "Keyword '%s' doesn't match keyword '%s'" % - (text, self.control_line[-1].keyword), - **self.exception_kwargs) + "Keyword '%s' doesn't match keyword '%s'" + % (text, self.control_line[-1].keyword), + **self.exception_kwargs + ) self.append_node(parsetree.ControlLine, keyword, isend, text) else: self.append_node(parsetree.Comment, text) diff --git a/server/www/packages/packages-linux/x64/mako/lookup.py b/server/www/packages/packages-linux/x64/mako/lookup.py index 0d3f304..93558b2 100644 --- a/server/www/packages/packages-linux/x64/mako/lookup.py +++ b/server/www/packages/packages-linux/x64/mako/lookup.py @@ -1,14 +1,16 @@ # mako/lookup.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import os -import stat import posixpath import re -from mako import exceptions, util +import stat + +from mako import exceptions +from mako import util from mako.template import Template try: @@ -151,41 +153,41 @@ class 
TemplateLookup(TemplateCollection): """ - def __init__(self, - directories=None, - module_directory=None, - filesystem_checks=True, - collection_size=-1, - format_exceptions=False, - error_handler=None, - disable_unicode=False, - bytestring_passthrough=False, - output_encoding=None, - encoding_errors='strict', + def __init__( + self, + directories=None, + module_directory=None, + filesystem_checks=True, + collection_size=-1, + format_exceptions=False, + error_handler=None, + disable_unicode=False, + bytestring_passthrough=False, + output_encoding=None, + encoding_errors="strict", + cache_args=None, + cache_impl="beaker", + cache_enabled=True, + cache_type=None, + cache_dir=None, + cache_url=None, + modulename_callable=None, + module_writer=None, + default_filters=None, + buffer_filters=(), + strict_undefined=False, + imports=None, + future_imports=None, + enable_loop=True, + input_encoding=None, + preprocessor=None, + lexer_cls=None, + include_error_handler=None, + ): - cache_args=None, - cache_impl='beaker', - cache_enabled=True, - cache_type=None, - cache_dir=None, - cache_url=None, - - modulename_callable=None, - module_writer=None, - default_filters=None, - buffer_filters=(), - strict_undefined=False, - imports=None, - future_imports=None, - enable_loop=True, - input_encoding=None, - preprocessor=None, - lexer_cls=None, - include_error_handler=None): - - self.directories = [posixpath.normpath(d) for d in - util.to_list(directories, ()) - ] + self.directories = [ + posixpath.normpath(d) for d in util.to_list(directories, ()) + ] self.module_directory = module_directory self.modulename_callable = modulename_callable self.filesystem_checks = filesystem_checks @@ -195,34 +197,34 @@ class TemplateLookup(TemplateCollection): cache_args = {} # transfer deprecated cache_* args if cache_dir: - cache_args.setdefault('dir', cache_dir) + cache_args.setdefault("dir", cache_dir) if cache_url: - cache_args.setdefault('url', cache_url) + cache_args.setdefault("url", cache_url) if cache_type: - cache_args.setdefault('type', cache_type) + cache_args.setdefault("type", cache_type) self.template_args = { - 'format_exceptions': format_exceptions, - 'error_handler': error_handler, - 'include_error_handler': include_error_handler, - 'disable_unicode': disable_unicode, - 'bytestring_passthrough': bytestring_passthrough, - 'output_encoding': output_encoding, - 'cache_impl': cache_impl, - 'encoding_errors': encoding_errors, - 'input_encoding': input_encoding, - 'module_directory': module_directory, - 'module_writer': module_writer, - 'cache_args': cache_args, - 'cache_enabled': cache_enabled, - 'default_filters': default_filters, - 'buffer_filters': buffer_filters, - 'strict_undefined': strict_undefined, - 'imports': imports, - 'future_imports': future_imports, - 'enable_loop': enable_loop, - 'preprocessor': preprocessor, - 'lexer_cls': lexer_cls + "format_exceptions": format_exceptions, + "error_handler": error_handler, + "include_error_handler": include_error_handler, + "disable_unicode": disable_unicode, + "bytestring_passthrough": bytestring_passthrough, + "output_encoding": output_encoding, + "cache_impl": cache_impl, + "encoding_errors": encoding_errors, + "input_encoding": input_encoding, + "module_directory": module_directory, + "module_writer": module_writer, + "cache_args": cache_args, + "cache_enabled": cache_enabled, + "default_filters": default_filters, + "buffer_filters": buffer_filters, + "strict_undefined": strict_undefined, + "imports": imports, + "future_imports": future_imports, + 
"enable_loop": enable_loop, + "preprocessor": preprocessor, + "lexer_cls": lexer_cls, } if collection_size == -1: @@ -248,17 +250,18 @@ class TemplateLookup(TemplateCollection): else: return self._collection[uri] except KeyError: - u = re.sub(r'^\/+', '', uri) - for dir in self.directories: + u = re.sub(r"^\/+", "", uri) + for dir_ in self.directories: # make sure the path seperators are posix - os.altsep is empty # on POSIX and cannot be used. - dir = dir.replace(os.path.sep, posixpath.sep) - srcfile = posixpath.normpath(posixpath.join(dir, u)) + dir_ = dir_.replace(os.path.sep, posixpath.sep) + srcfile = posixpath.normpath(posixpath.join(dir_, u)) if os.path.isfile(srcfile): return self._load(srcfile, uri) else: raise exceptions.TopLevelLookupException( - "Cant locate template for uri %r" % uri) + "Cant locate template for uri %r" % uri + ) def adjust_uri(self, uri, relativeto): """Adjust the given ``uri`` based on the given relative URI.""" @@ -267,12 +270,13 @@ class TemplateLookup(TemplateCollection): if key in self._uri_cache: return self._uri_cache[key] - if uri[0] != '/': + if uri[0] != "/": if relativeto is not None: v = self._uri_cache[key] = posixpath.join( - posixpath.dirname(relativeto), uri) + posixpath.dirname(relativeto), uri + ) else: - v = self._uri_cache[key] = '/' + uri + v = self._uri_cache[key] = "/" + uri else: v = self._uri_cache[key] = uri return v @@ -295,9 +299,9 @@ class TemplateLookup(TemplateCollection): """ filename = posixpath.normpath(filename) - for dir in self.directories: - if filename[0:len(dir)] == dir: - return filename[len(dir):] + for dir_ in self.directories: + if filename[0 : len(dir_)] == dir_: + return filename[len(dir_) :] else: return None @@ -320,7 +324,8 @@ class TemplateLookup(TemplateCollection): filename=posixpath.normpath(filename), lookup=self, module_filename=module_filename, - **self.template_args) + **self.template_args + ) return template except: # if compilation fails etc, ensure @@ -337,8 +342,7 @@ class TemplateLookup(TemplateCollection): try: template_stat = os.stat(template.filename) - if template.module._modified_time < \ - template_stat[stat.ST_MTIME]: + if template.module._modified_time < template_stat[stat.ST_MTIME]: self._collection.pop(uri, None) return self._load(template.filename, uri) else: @@ -346,7 +350,8 @@ class TemplateLookup(TemplateCollection): except OSError: self._collection.pop(uri, None) raise exceptions.TemplateLookupException( - "Cant locate template for uri %r" % uri) + "Cant locate template for uri %r" % uri + ) def put_string(self, uri, text): """Place a new :class:`.Template` object into this @@ -355,10 +360,8 @@ class TemplateLookup(TemplateCollection): """ self._collection[uri] = Template( - text, - lookup=self, - uri=uri, - **self.template_args) + text, lookup=self, uri=uri, **self.template_args + ) def put_template(self, uri, template): """Place a new :class:`.Template` object into this diff --git a/server/www/packages/packages-linux/x64/mako/parsetree.py b/server/www/packages/packages-linux/x64/mako/parsetree.py index e129916..2881da1 100644 --- a/server/www/packages/packages-linux/x64/mako/parsetree.py +++ b/server/www/packages/packages-linux/x64/mako/parsetree.py @@ -1,14 +1,19 @@ # mako/parsetree.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """defines the parse tree components for Mako 
templates.""" -from mako import exceptions, ast, util, filters, compat import re +from mako import ast +from mako import compat +from mako import exceptions +from mako import filters +from mako import util + class Node(object): @@ -22,8 +27,12 @@ class Node(object): @property def exception_kwargs(self): - return {'source': self.source, 'lineno': self.lineno, - 'pos': self.pos, 'filename': self.filename} + return { + "source": self.source, + "lineno": self.lineno, + "pos": self.pos, + "filename": self.filename, + } def get_children(self): return [] @@ -42,7 +51,7 @@ class TemplateNode(Node): """a 'container' node that stores the overall collection of nodes.""" def __init__(self, filename): - super(TemplateNode, self).__init__('', 0, 0, filename) + super(TemplateNode, self).__init__("", 0, 0, filename) self.nodes = [] self.page_attributes = {} @@ -52,7 +61,8 @@ class TemplateNode(Node): def __repr__(self): return "TemplateNode(%s, %r)" % ( util.sorted_dict_repr(self.page_attributes), - self.nodes) + self.nodes, + ) class ControlLine(Node): @@ -74,7 +84,7 @@ class ControlLine(Node): self.text = text self.keyword = keyword self.isend = isend - self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with'] + self.is_primary = keyword in ["for", "if", "while", "try", "with"] self.nodes = [] if self.isend: self._declared_identifiers = [] @@ -98,9 +108,9 @@ class ControlLine(Node): for this ControlLine""" return keyword in { - 'if': set(['else', 'elif']), - 'try': set(['except', 'finally']), - 'for': set(['else']) + "if": set(["else", "elif"]), + "try": set(["except", "finally"]), + "for": set(["else"]), }.get(self.keyword, []) def __repr__(self): @@ -108,7 +118,7 @@ class ControlLine(Node): self.keyword, self.text, self.isend, - (self.lineno, self.pos) + (self.lineno, self.pos), ) @@ -158,7 +168,7 @@ class Code(Node): return "Code(%r, %r, %r)" % ( self.text, self.ismodule, - (self.lineno, self.pos) + (self.lineno, self.pos), ) @@ -208,7 +218,7 @@ class Expression(Node): return "Expression(%r, %r, %r)" % ( self.text, self.escapes_code.args, - (self.lineno, self.pos) + (self.lineno, self.pos), ) @@ -219,45 +229,55 @@ class _TagMeta(type): _classmap = {} - def __init__(cls, clsname, bases, dict): - if getattr(cls, '__keyword__', None) is not None: + def __init__(cls, clsname, bases, dict_): + if getattr(cls, "__keyword__", None) is not None: cls._classmap[cls.__keyword__] = cls - super(_TagMeta, cls).__init__(clsname, bases, dict) + super(_TagMeta, cls).__init__(clsname, bases, dict_) def __call__(cls, keyword, attributes, **kwargs): if ":" in keyword: - ns, defname = keyword.split(':') - return type.__call__(CallNamespaceTag, ns, defname, - attributes, **kwargs) + ns, defname = keyword.split(":") + return type.__call__( + CallNamespaceTag, ns, defname, attributes, **kwargs + ) try: cls = _TagMeta._classmap[keyword] except KeyError: raise exceptions.CompileException( "No such tag: '%s'" % keyword, - source=kwargs['source'], - lineno=kwargs['lineno'], - pos=kwargs['pos'], - filename=kwargs['filename'] + source=kwargs["source"], + lineno=kwargs["lineno"], + pos=kwargs["pos"], + filename=kwargs["filename"], ) return type.__call__(cls, keyword, attributes, **kwargs) class Tag(compat.with_metaclass(_TagMeta, Node)): - """abstract base class for tags. 
- <%sometag/> + e.g.:: - <%someothertag> - stuff - </%someothertag> + <%sometag/> + + <%someothertag> + stuff + </%someothertag> """ + __keyword__ = None - def __init__(self, keyword, attributes, expressions, - nonexpressions, required, **kwargs): + def __init__( + self, + keyword, + attributes, + expressions, + nonexpressions, + required, + **kwargs + ): r"""construct a new Tag instance. this constructor not called directly, and is only called @@ -284,9 +304,10 @@ class Tag(compat.with_metaclass(_TagMeta, Node)): missing = [r for r in required if r not in self.parsed_attributes] if len(missing): raise exceptions.CompileException( - "Missing attribute(s): %s" % - ",".join([repr(m) for m in missing]), - **self.exception_kwargs) + "Missing attribute(s): %s" + % ",".join([repr(m) for m in missing]), + **self.exception_kwargs + ) self.parent = None self.nodes = [] @@ -302,36 +323,40 @@ class Tag(compat.with_metaclass(_TagMeta, Node)): for key in self.attributes: if key in expressions: expr = [] - for x in re.compile(r'(\${.+?})', - re.S).split(self.attributes[key]): - m = re.compile(r'^\${(.+?)}$', re.S).match(x) + for x in re.compile(r"(\${.+?})", re.S).split( + self.attributes[key] + ): + m = re.compile(r"^\${(.+?)}$", re.S).match(x) if m: - code = ast.PythonCode(m.group(1).rstrip(), - **self.exception_kwargs) + code = ast.PythonCode( + m.group(1).rstrip(), **self.exception_kwargs + ) # we aren't discarding "declared_identifiers" here, # which we do so that list comprehension-declared # variables aren't counted. As yet can't find a # condition that requires it here. - undeclared_identifiers = \ - undeclared_identifiers.union( - code.undeclared_identifiers) - expr.append('(%s)' % m.group(1)) + undeclared_identifiers = undeclared_identifiers.union( + code.undeclared_identifiers + ) + expr.append("(%s)" % m.group(1)) else: if x: expr.append(repr(x)) - self.parsed_attributes[key] = " + ".join(expr) or repr('') + self.parsed_attributes[key] = " + ".join(expr) or repr("") elif key in nonexpressions: - if re.search(r'\${.+?}', self.attributes[key]): + if re.search(r"\${.+?}", self.attributes[key]): raise exceptions.CompileException( "Attibute '%s' in tag '%s' does not allow embedded " "expressions" % (key, self.keyword), - **self.exception_kwargs) + **self.exception_kwargs + ) self.parsed_attributes[key] = repr(self.attributes[key]) else: raise exceptions.CompileException( - "Invalid attribute for tag '%s': '%s'" % - (self.keyword, key), - **self.exception_kwargs) + "Invalid attribute for tag '%s': '%s'" + % (self.keyword, key), + **self.exception_kwargs + ) self.expression_undeclared_identifiers = undeclared_identifiers def declared_identifiers(self): @@ -341,56 +366,64 @@ class Tag(compat.with_metaclass(_TagMeta, Node)): def __repr__(self): - return "%s(%r, %s, %r, %r)" % (self.__class__.__name__, - self.keyword, - util.sorted_dict_repr(self.attributes), - (self.lineno, self.pos), - self.nodes - ) + return "%s(%r, %s, %r, %r)" % ( + self.__class__.__name__, + self.keyword, + util.sorted_dict_repr(self.attributes), + (self.lineno, self.pos), + self.nodes, + ) class IncludeTag(Tag): - __keyword__ = 'include' + __keyword__ = "include" def __init__(self, keyword, attributes, **kwargs): super(IncludeTag, self).__init__( keyword, attributes, - ('file', 'import', 'args'), - (), ('file',), **kwargs) + ("file", "import", "args"), + (), + ("file",), + **kwargs + ) self.page_args = ast.PythonCode( - "__DUMMY(%s)" % attributes.get('args', ''), - **self.exception_kwargs) + "__DUMMY(%s)" % 
attributes.get("args", ""), **self.exception_kwargs + ) def declared_identifiers(self): return [] def undeclared_identifiers(self): - identifiers = self.page_args.undeclared_identifiers.\ - difference(set(["__DUMMY"])).\ - difference(self.page_args.declared_identifiers) - return identifiers.union(super(IncludeTag, self). - undeclared_identifiers()) + identifiers = self.page_args.undeclared_identifiers.difference( + set(["__DUMMY"]) + ).difference(self.page_args.declared_identifiers) + return identifiers.union( + super(IncludeTag, self).undeclared_identifiers() + ) class NamespaceTag(Tag): - __keyword__ = 'namespace' + __keyword__ = "namespace" def __init__(self, keyword, attributes, **kwargs): super(NamespaceTag, self).__init__( - keyword, attributes, - ('file',), - ('name', 'inheritable', - 'import', 'module'), - (), **kwargs) + keyword, + attributes, + ("file",), + ("name", "inheritable", "import", "module"), + (), + **kwargs + ) - self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self)))) - if 'name' not in attributes and 'import' not in attributes: + self.name = attributes.get("name", "__anon_%s" % hex(abs(id(self)))) + if "name" not in attributes and "import" not in attributes: raise exceptions.CompileException( "'name' and/or 'import' attributes are required " "for <%namespace>", - **self.exception_kwargs) - if 'file' in attributes and 'module' in attributes: + **self.exception_kwargs + ) + if "file" in attributes and "module" in attributes: raise exceptions.CompileException( "<%namespace> may only have one of 'file' or 'module'", **self.exception_kwargs @@ -401,51 +434,51 @@ class NamespaceTag(Tag): class TextTag(Tag): - __keyword__ = 'text' + __keyword__ = "text" def __init__(self, keyword, attributes, **kwargs): super(TextTag, self).__init__( - keyword, - attributes, (), - ('filter'), (), **kwargs) + keyword, attributes, (), ("filter"), (), **kwargs + ) self.filter_args = ast.ArgumentList( - attributes.get('filter', ''), - **self.exception_kwargs) + attributes.get("filter", ""), **self.exception_kwargs + ) def undeclared_identifiers(self): - return self.filter_args.\ - undeclared_identifiers.\ - difference(filters.DEFAULT_ESCAPES.keys()).union( - self.expression_undeclared_identifiers - ) + return self.filter_args.undeclared_identifiers.difference( + filters.DEFAULT_ESCAPES.keys() + ).union(self.expression_undeclared_identifiers) class DefTag(Tag): - __keyword__ = 'def' + __keyword__ = "def" def __init__(self, keyword, attributes, **kwargs): - expressions = ['buffered', 'cached'] + [ - c for c in attributes if c.startswith('cache_')] + expressions = ["buffered", "cached"] + [ + c for c in attributes if c.startswith("cache_") + ] super(DefTag, self).__init__( keyword, attributes, expressions, - ('name', 'filter', 'decorator'), - ('name',), - **kwargs) - name = attributes['name'] - if re.match(r'^[\w_]+$', name): + ("name", "filter", "decorator"), + ("name",), + **kwargs + ) + name = attributes["name"] + if re.match(r"^[\w_]+$", name): raise exceptions.CompileException( - "Missing parenthesis in %def", - **self.exception_kwargs) - self.function_decl = ast.FunctionDecl("def " + name + ":pass", - **self.exception_kwargs) + "Missing parenthesis in %def", **self.exception_kwargs + ) + self.function_decl = ast.FunctionDecl( + "def " + name + ":pass", **self.exception_kwargs + ) self.name = self.function_decl.funcname - self.decorator = attributes.get('decorator', '') + self.decorator = attributes.get("decorator", "") self.filter_args = ast.ArgumentList( - 
attributes.get('filter', ''), - **self.exception_kwargs) + attributes.get("filter", ""), **self.exception_kwargs + ) is_anonymous = False is_block = False @@ -463,51 +496,58 @@ class DefTag(Tag): def undeclared_identifiers(self): res = [] for c in self.function_decl.defaults: - res += list(ast.PythonCode(c, **self.exception_kwargs). - undeclared_identifiers) - return set(res).union( - self.filter_args. - undeclared_identifiers. - difference(filters.DEFAULT_ESCAPES.keys()) - ).union( - self.expression_undeclared_identifiers - ).difference( - self.function_decl.allargnames + res += list( + ast.PythonCode( + c, **self.exception_kwargs + ).undeclared_identifiers + ) + return ( + set(res) + .union( + self.filter_args.undeclared_identifiers.difference( + filters.DEFAULT_ESCAPES.keys() + ) + ) + .union(self.expression_undeclared_identifiers) + .difference(self.function_decl.allargnames) ) class BlockTag(Tag): - __keyword__ = 'block' + __keyword__ = "block" def __init__(self, keyword, attributes, **kwargs): - expressions = ['buffered', 'cached', 'args'] + [ - c for c in attributes if c.startswith('cache_')] + expressions = ["buffered", "cached", "args"] + [ + c for c in attributes if c.startswith("cache_") + ] super(BlockTag, self).__init__( keyword, attributes, expressions, - ('name', 'filter', 'decorator'), + ("name", "filter", "decorator"), (), - **kwargs) - name = attributes.get('name') - if name and not re.match(r'^[\w_]+$', name): + **kwargs + ) + name = attributes.get("name") + if name and not re.match(r"^[\w_]+$", name): raise exceptions.CompileException( "%block may not specify an argument signature", - **self.exception_kwargs) - if not name and attributes.get('args', None): - raise exceptions.CompileException( - "Only named %blocks may specify args", **self.exception_kwargs ) - self.body_decl = ast.FunctionArgs(attributes.get('args', ''), - **self.exception_kwargs) + if not name and attributes.get("args", None): + raise exceptions.CompileException( + "Only named %blocks may specify args", **self.exception_kwargs + ) + self.body_decl = ast.FunctionArgs( + attributes.get("args", ""), **self.exception_kwargs + ) self.name = name - self.decorator = attributes.get('decorator', '') + self.decorator = attributes.get("decorator", "") self.filter_args = ast.ArgumentList( - attributes.get('filter', ''), - **self.exception_kwargs) + attributes.get("filter", ""), **self.exception_kwargs + ) is_block = True @@ -517,7 +557,7 @@ class BlockTag(Tag): @property def funcname(self): - return self.name or "__M_anon_%d" % (self.lineno, ) + return self.name or "__M_anon_%d" % (self.lineno,) def get_argument_expressions(self, **kw): return self.body_decl.get_argument_expressions(**kw) @@ -526,91 +566,100 @@ class BlockTag(Tag): return self.body_decl.allargnames def undeclared_identifiers(self): - return (self.filter_args. - undeclared_identifiers. 
- difference(filters.DEFAULT_ESCAPES.keys()) - ).union(self.expression_undeclared_identifiers) + return ( + self.filter_args.undeclared_identifiers.difference( + filters.DEFAULT_ESCAPES.keys() + ) + ).union(self.expression_undeclared_identifiers) class CallTag(Tag): - __keyword__ = 'call' + __keyword__ = "call" def __init__(self, keyword, attributes, **kwargs): - super(CallTag, self).__init__(keyword, attributes, - ('args'), ('expr',), ('expr',), **kwargs) - self.expression = attributes['expr'] + super(CallTag, self).__init__( + keyword, attributes, ("args"), ("expr",), ("expr",), **kwargs + ) + self.expression = attributes["expr"] self.code = ast.PythonCode(self.expression, **self.exception_kwargs) - self.body_decl = ast.FunctionArgs(attributes.get('args', ''), - **self.exception_kwargs) + self.body_decl = ast.FunctionArgs( + attributes.get("args", ""), **self.exception_kwargs + ) def declared_identifiers(self): return self.code.declared_identifiers.union(self.body_decl.allargnames) def undeclared_identifiers(self): - return self.code.undeclared_identifiers.\ - difference(self.code.declared_identifiers) + return self.code.undeclared_identifiers.difference( + self.code.declared_identifiers + ) class CallNamespaceTag(Tag): - def __init__(self, namespace, defname, attributes, **kwargs): super(CallNamespaceTag, self).__init__( namespace + ":" + defname, attributes, - tuple(attributes.keys()) + ('args', ), + tuple(attributes.keys()) + ("args",), (), (), - **kwargs) + **kwargs + ) self.expression = "%s.%s(%s)" % ( namespace, defname, - ",".join(["%s=%s" % (k, v) for k, v in - self.parsed_attributes.items() - if k != 'args']) + ",".join( + [ + "%s=%s" % (k, v) + for k, v in self.parsed_attributes.items() + if k != "args" + ] + ), ) self.code = ast.PythonCode(self.expression, **self.exception_kwargs) self.body_decl = ast.FunctionArgs( - attributes.get('args', ''), - **self.exception_kwargs) + attributes.get("args", ""), **self.exception_kwargs + ) def declared_identifiers(self): return self.code.declared_identifiers.union(self.body_decl.allargnames) def undeclared_identifiers(self): - return self.code.undeclared_identifiers.\ - difference(self.code.declared_identifiers) + return self.code.undeclared_identifiers.difference( + self.code.declared_identifiers + ) class InheritTag(Tag): - __keyword__ = 'inherit' + __keyword__ = "inherit" def __init__(self, keyword, attributes, **kwargs): super(InheritTag, self).__init__( - keyword, attributes, - ('file',), (), ('file',), **kwargs) + keyword, attributes, ("file",), (), ("file",), **kwargs + ) class PageTag(Tag): - __keyword__ = 'page' + __keyword__ = "page" def __init__(self, keyword, attributes, **kwargs): - expressions = \ - ['cached', 'args', 'expression_filter', 'enable_loop'] + \ - [c for c in attributes if c.startswith('cache_')] + expressions = [ + "cached", + "args", + "expression_filter", + "enable_loop", + ] + [c for c in attributes if c.startswith("cache_")] super(PageTag, self).__init__( - keyword, - attributes, - expressions, - (), - (), - **kwargs) - self.body_decl = ast.FunctionArgs(attributes.get('args', ''), - **self.exception_kwargs) + keyword, attributes, expressions, (), (), **kwargs + ) + self.body_decl = ast.FunctionArgs( + attributes.get("args", ""), **self.exception_kwargs + ) self.filter_args = ast.ArgumentList( - attributes.get('expression_filter', ''), - **self.exception_kwargs) + attributes.get("expression_filter", ""), **self.exception_kwargs + ) def declared_identifiers(self): return self.body_decl.allargnames diff --git 
a/server/www/packages/packages-linux/x64/mako/pygen.py b/server/www/packages/packages-linux/x64/mako/pygen.py index 8514e02..603676d 100644 --- a/server/www/packages/packages-linux/x64/mako/pygen.py +++ b/server/www/packages/packages-linux/x64/mako/pygen.py @@ -1,5 +1,5 @@ # mako/pygen.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -7,11 +7,11 @@ """utilities for generating and formatting literal Python code.""" import re + from mako import exceptions class PythonPrinter(object): - def __init__(self, stream): # indentation counter self.indent = 0 @@ -54,14 +54,16 @@ class PythonPrinter(object): self.stream.write("\n" * num) self._update_lineno(num) - def write_indented_block(self, block): + def write_indented_block(self, block, starting_lineno=None): """print a line or lines of python which already contain indentation. The indentation of the total block of lines will be adjusted to that of the current indent level.""" self.in_indent_lines = False - for l in re.split(r'\r?\n', block): + for i, l in enumerate(re.split(r"\r?\n", block)): self.line_buffer.append(l) + if starting_lineno is not None: + self.start_source(starting_lineno + i) self._update_lineno(1) def writelines(self, *lines): @@ -83,21 +85,18 @@ class PythonPrinter(object): self.in_indent_lines = True if ( - line is None or - re.match(r"^\s*#", line) or - re.match(r"^\s*$", line) + line is None + or re.match(r"^\s*#", line) + or re.match(r"^\s*$", line) ): hastext = False else: hastext = True - is_comment = line and len(line) and line[0] == '#' + is_comment = line and len(line) and line[0] == "#" # see if this line should decrease the indentation level - if ( - not is_comment and - (not hastext or self._is_unindentor(line)) - ): + if not is_comment and (not hastext or self._is_unindentor(line)): if self.indent > 0: self.indent -= 1 @@ -106,7 +105,8 @@ class PythonPrinter(object): # module wont compile. if len(self.indent_detail) == 0: raise exceptions.SyntaxException( - "Too many whitespace closures") + "Too many whitespace closures" + ) self.indent_detail.pop() if line is None: @@ -136,8 +136,9 @@ class PythonPrinter(object): # its not a "compound" keyword. but lets also # test for valid Python keywords that might be indenting us, # else assume its a non-indenting line - m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", - line) + m2 = re.match( + r"^\s*(def|class|else|elif|except|finally)", line + ) if m2: self.indent += 1 self.indent_detail.append(indentor) @@ -189,14 +190,15 @@ class PythonPrinter(object): # return False - def _indent_line(self, line, stripspace=''): + def _indent_line(self, line, stripspace=""): """indent the given line according to the current indent level. 
stripspace is a string of space that will be truncated from the start of the line before indenting.""" - return re.sub(r"^%s" % stripspace, self.indentstring - * self.indent, line) + return re.sub( + r"^%s" % stripspace, self.indentstring * self.indent, line + ) def _reset_multi_line_flags(self): """reset the flags which would indicate we are in a backslashed @@ -214,7 +216,7 @@ class PythonPrinter(object): # a literal multiline string with unfortunately placed # whitespace - current_state = (self.backslashed or self.triplequoted) + current_state = self.backslashed or self.triplequoted if re.search(r"\\$", line): self.backslashed = True @@ -251,7 +253,7 @@ def adjust_whitespace(text): (backslashed, triplequoted) = (0, 1) def in_multi_line(line): - start_state = (state[backslashed] or state[triplequoted]) + start_state = state[backslashed] or state[triplequoted] if re.search(r"\\$", line): state[backslashed] = True @@ -261,7 +263,7 @@ def adjust_whitespace(text): def match(reg, t): m = re.match(reg, t) if m: - return m, t[len(m.group(0)):] + return m, t[len(m.group(0)) :] else: return None, t @@ -273,7 +275,7 @@ def adjust_whitespace(text): else: m, line = match(r".*?(?=%s|$)" % state[triplequoted], line) else: - m, line = match(r'#', line) + m, line = match(r"#", line) if m: return start_state @@ -286,13 +288,13 @@ def adjust_whitespace(text): return start_state - def _indent_line(line, stripspace=''): - return re.sub(r"^%s" % stripspace, '', line) + def _indent_line(line, stripspace=""): + return re.sub(r"^%s" % stripspace, "", line) lines = [] stripspace = None - for line in re.split(r'\r?\n', text): + for line in re.split(r"\r?\n", text): if in_multi_line(line): lines.append(line) else: diff --git a/server/www/packages/packages-linux/x64/mako/pyparser.py b/server/www/packages/packages-linux/x64/mako/pyparser.py index 15d0da6..e41c304 100644 --- a/server/www/packages/packages-linux/x64/mako/pyparser.py +++ b/server/www/packages/packages-linux/x64/mako/pyparser.py @@ -1,5 +1,5 @@ # mako/pyparser.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -10,46 +10,52 @@ Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler module is used. 
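For orientation on the API being reformatted in these pygen.py hunks: PythonPrinter.writeline()/writelines() derive indentation from Python block keywords, and a None line pops one indent level (per the writeline() logic above). A minimal sketch of that behavior; the expected output is inferred from the code shown in this diff, not from Mako's documentation:

    import sys
    from mako.pygen import PythonPrinter

    printer = PythonPrinter(sys.stdout)
    printer.writelines(
        "def greet(name):",        # trailing colon pushes an indent level
        "return 'hi, %s' % name",  # emitted one level deep
        None,                      # None closes the whitespace block
    )
    # expected output:
    #   def greet(name):
    #       return 'hi, %s' % name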
""" -from mako import exceptions, util, compat -from mako.compat import arg_stringname import operator +import _ast + +from mako import _ast_util +from mako import compat +from mako import exceptions +from mako import util +from mako.compat import arg_stringname + if compat.py3k: # words that cannot be assigned to (notably # smaller than the total keys in __builtins__) - reserved = set(['True', 'False', 'None', 'print']) + reserved = set(["True", "False", "None", "print"]) # the "id" attribute on a function node - arg_id = operator.attrgetter('arg') + arg_id = operator.attrgetter("arg") else: # words that cannot be assigned to (notably # smaller than the total keys in __builtins__) - reserved = set(['True', 'False', 'None']) + reserved = set(["True", "False", "None"]) # the "id" attribute on a function node - arg_id = operator.attrgetter('id') + arg_id = operator.attrgetter("id") -import _ast util.restore__ast(_ast) -from mako import _ast_util -def parse(code, mode='exec', **exception_kwargs): +def parse(code, mode="exec", **exception_kwargs): """Parse an expression into AST""" try: - return _ast_util.parse(code, '', mode) + return _ast_util.parse(code, "", mode) except Exception: raise exceptions.SyntaxException( - "(%s) %s (%r)" % ( + "(%s) %s (%r)" + % ( compat.exception_as().__class__.__name__, compat.exception_as(), - code[0:50] - ), **exception_kwargs) + code[0:50], + ), + **exception_kwargs + ) class FindIdentifiers(_ast_util.NodeVisitor): - def __init__(self, listener, **exception_kwargs): self.in_function = False self.in_assign_targets = False @@ -119,9 +125,9 @@ class FindIdentifiers(_ast_util.NodeVisitor): self.in_function = True local_ident_stack = self.local_ident_stack - self.local_ident_stack = local_ident_stack.union([ - arg_id(arg) for arg in self._expand_tuples(node.args.args) - ]) + self.local_ident_stack = local_ident_stack.union( + [arg_id(arg) for arg in self._expand_tuples(node.args.args)] + ) if islambda: self.visit(node.body) else: @@ -146,9 +152,11 @@ class FindIdentifiers(_ast_util.NodeVisitor): # this is eqiuvalent to visit_AssName in # compiler self._add_declared(node.id) - elif node.id not in reserved and node.id \ - not in self.listener.declared_identifiers and node.id \ - not in self.local_ident_stack: + elif ( + node.id not in reserved + and node.id not in self.listener.declared_identifiers + and node.id not in self.local_ident_stack + ): self.listener.undeclared_identifiers.add(node.id) def visit_Import(self, node): @@ -156,24 +164,25 @@ class FindIdentifiers(_ast_util.NodeVisitor): if name.asname is not None: self._add_declared(name.asname) else: - self._add_declared(name.name.split('.')[0]) + self._add_declared(name.name.split(".")[0]) def visit_ImportFrom(self, node): for name in node.names: if name.asname is not None: self._add_declared(name.asname) else: - if name.name == '*': + if name.name == "*": raise exceptions.CompileException( "'import *' is not supported, since all identifier " "names must be explicitly declared. Please use the " "form 'from import , , " - "...' instead.", **self.exception_kwargs) + "...' 
instead.", + **self.exception_kwargs + ) self._add_declared(name.name) class FindTuple(_ast_util.NodeVisitor): - def __init__(self, listener, code_factory, **exception_kwargs): self.listener = listener self.exception_kwargs = exception_kwargs @@ -184,16 +193,17 @@ class FindTuple(_ast_util.NodeVisitor): p = self.code_factory(n, **self.exception_kwargs) self.listener.codeargs.append(p) self.listener.args.append(ExpressionGenerator(n).value()) - self.listener.declared_identifiers = \ - self.listener.declared_identifiers.union( - p.declared_identifiers) - self.listener.undeclared_identifiers = \ - self.listener.undeclared_identifiers.union( - p.undeclared_identifiers) + ldi = self.listener.declared_identifiers + self.listener.declared_identifiers = ldi.union( + p.declared_identifiers + ) + lui = self.listener.undeclared_identifiers + self.listener.undeclared_identifiers = lui.union( + p.undeclared_identifiers + ) class ParseFunc(_ast_util.NodeVisitor): - def __init__(self, listener, **exception_kwargs): self.listener = listener self.exception_kwargs = exception_kwargs @@ -224,10 +234,9 @@ class ParseFunc(_ast_util.NodeVisitor): class ExpressionGenerator(object): - def __init__(self, astnode): - self.generator = _ast_util.SourceGenerator(' ' * 4) + self.generator = _ast_util.SourceGenerator(" " * 4) self.generator.visit(astnode) def value(self): - return ''.join(self.generator.result) + return "".join(self.generator.result) diff --git a/server/www/packages/packages-linux/x64/mako/runtime.py b/server/www/packages/packages-linux/x64/mako/runtime.py index 769541c..0e7149b 100644 --- a/server/www/packages/packages-linux/x64/mako/runtime.py +++ b/server/www/packages/packages-linux/x64/mako/runtime.py @@ -1,5 +1,5 @@ # mako/runtime.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -7,10 +7,14 @@ """provides runtime services for templates, including Context, Namespace, and various helper functions.""" -from mako import exceptions, util, compat -from mako.compat import compat_builtins +import functools import sys +from mako import compat +from mako import exceptions +from mako import util +from mako.compat import compat_builtins + class Context(object): @@ -34,18 +38,19 @@ class Context(object): # "capture" function which proxies to the # generic "capture" function - self._data['capture'] = compat.partial(capture, self) + self._data["capture"] = functools.partial(capture, self) # "caller" stack used by def calls with content - self.caller_stack = self._data['caller'] = CallerStack() + self.caller_stack = self._data["caller"] = CallerStack() def _set_with_template(self, t): self._with_template = t illegal_names = t.reserved_names.intersection(self._data) if illegal_names: raise exceptions.NameConflictError( - "Reserved words passed to render(): %s" % - ", ".join(illegal_names)) + "Reserved words passed to render(): %s" + % ", ".join(illegal_names) + ) @property def lookup(self): @@ -177,14 +182,13 @@ class Context(object): c = self._copy() x = c._data - x.pop('self', None) - x.pop('parent', None) - x.pop('next', None) + x.pop("self", None) + x.pop("parent", None) + x.pop("next", None) return c class CallerStack(list): - def __init__(self): self.nextcaller = None @@ -231,6 +235,7 @@ class Undefined(object): def __bool__(self): return False + UNDEFINED = Undefined() STOP_RENDERING = "" @@ -342,7 +347,6 @@ 
class LoopContext(object): class _NSAttr(object): - def __init__(self, parent): self.__parent = parent @@ -373,9 +377,15 @@ class Namespace(object): """ - def __init__(self, name, context, - callables=None, inherits=None, - populate_self=True, calling_uri=None): + def __init__( + self, + name, + context, + callables=None, + inherits=None, + populate_self=True, + calling_uri=None, + ): self.name = name self.context = context self.inherits = inherits @@ -473,9 +483,12 @@ class Namespace(object): if key in self.context.namespaces: return self.context.namespaces[key] else: - ns = TemplateNamespace(uri, self.context._copy(), - templateuri=uri, - calling_uri=self._templateuri) + ns = TemplateNamespace( + uri, + self.context._copy(), + templateuri=uri, + calling_uri=self._templateuri, + ) self.context.namespaces[key] = ns return ns @@ -518,7 +531,7 @@ class Namespace(object): def _populate(self, d, l): for ident in l: - if ident == '*': + if ident == "*": for (k, v) in self._get_star(): d[k] = v else: @@ -536,8 +549,8 @@ class Namespace(object): val = getattr(self.inherits, key) else: raise AttributeError( - "Namespace '%s' has no member '%s'" % - (self.name, key)) + "Namespace '%s' has no member '%s'" % (self.name, key) + ) setattr(self, key, val) return val @@ -546,9 +559,17 @@ class TemplateNamespace(Namespace): """A :class:`.Namespace` specific to a :class:`.Template` instance.""" - def __init__(self, name, context, template=None, templateuri=None, - callables=None, inherits=None, - populate_self=True, calling_uri=None): + def __init__( + self, + name, + context, + template=None, + templateuri=None, + callables=None, + inherits=None, + populate_self=True, + calling_uri=None, + ): self.name = name self.context = context self.inherits = inherits @@ -556,8 +577,7 @@ class TemplateNamespace(Namespace): self.callables = dict([(c.__name__, c) for c in callables]) if templateuri is not None: - self.template = _lookup_template(context, templateuri, - calling_uri) + self.template = _lookup_template(context, templateuri, calling_uri) self._templateuri = self.template.module._template_uri elif template is not None: self.template = template @@ -566,9 +586,9 @@ class TemplateNamespace(Namespace): raise TypeError("'template' argument is required.") if populate_self: - lclcallable, lclcontext = \ - _populate_self_namespace(context, self.template, - self_ns=self) + lclcallable, lclcontext = _populate_self_namespace( + context, self.template, self_ns=self + ) @property def module(self): @@ -606,7 +626,8 @@ class TemplateNamespace(Namespace): def get(key): callable_ = self.template._get_def_callable(key) - return compat.partial(callable_, self.context) + return functools.partial(callable_, self.context) + for k in self.template.module._exports: yield (k, get(k)) @@ -615,14 +636,14 @@ class TemplateNamespace(Namespace): val = self.callables[key] elif self.template.has_def(key): callable_ = self.template._get_def_callable(key) - val = compat.partial(callable_, self.context) + val = functools.partial(callable_, self.context) elif self.inherits: val = getattr(self.inherits, key) else: raise AttributeError( - "Namespace '%s' has no member '%s'" % - (self.name, key)) + "Namespace '%s' has no member '%s'" % (self.name, key) + ) setattr(self, key, val) return val @@ -631,9 +652,16 @@ class ModuleNamespace(Namespace): """A :class:`.Namespace` specific to a Python module instance.""" - def __init__(self, name, context, module, - callables=None, inherits=None, - populate_self=True, calling_uri=None): + def __init__( + 
self, + name, + context, + module, + callables=None, + inherits=None, + populate_self=True, + calling_uri=None, + ): self.name = name self.context = context self.inherits = inherits @@ -641,7 +669,7 @@ class ModuleNamespace(Namespace): self.callables = dict([(c.__name__, c) for c in callables]) mod = __import__(module) - for token in module.split('.')[1:]: + for token in module.split(".")[1:]: mod = getattr(mod, token) self.module = mod @@ -657,23 +685,23 @@ class ModuleNamespace(Namespace): for key in self.callables: yield (key, self.callables[key]) for key in dir(self.module): - if key[0] != '_': + if key[0] != "_": callable_ = getattr(self.module, key) - if compat.callable(callable_): - yield key, compat.partial(callable_, self.context) + if callable(callable_): + yield key, functools.partial(callable_, self.context) def __getattr__(self, key): if key in self.callables: val = self.callables[key] elif hasattr(self.module, key): callable_ = getattr(self.module, key) - val = compat.partial(callable_, self.context) + val = functools.partial(callable_, self.context) elif self.inherits: val = getattr(self.inherits, key) else: raise AttributeError( - "Namespace '%s' has no member '%s'" % - (self.name, key)) + "Namespace '%s' has no member '%s'" % (self.name, key) + ) setattr(self, key, val) return val @@ -692,6 +720,7 @@ def supports_caller(func): return func(context, *args, **kwargs) finally: context.caller_stack._pop_frame() + return wrap_stackframe @@ -703,7 +732,7 @@ def capture(context, callable_, *args, **kwargs): """ - if not compat.callable(callable_): + if not callable(callable_): raise exceptions.RuntimeException( "capture() function expects a callable as " "its argument (i.e. capture(func, *args, **kwargs))" @@ -721,13 +750,16 @@ def _decorate_toplevel(fn): def go(context, *args, **kw): def y(*args, **kw): return render_fn(context, *args, **kw) + try: y.__name__ = render_fn.__name__[7:] except TypeError: # < Python 2.4 pass return fn(y)(context, *args, **kw) + return go + return decorate_render @@ -737,7 +769,9 @@ def _decorate_inline(context, fn): def go(*args, **kw): return dec(context, *args, **kw) + return go + return decorate_render @@ -747,8 +781,8 @@ def _include_file(context, uri, calling_uri, **kwargs): template = _lookup_template(context, uri, calling_uri) (callable_, ctx) = _populate_self_namespace( - context._clean_inheritance_tokens(), - template) + context._clean_inheritance_tokens(), template + ) kwargs = _kwargs_for_include(callable_, context._data, **kwargs) if template.include_error_handler: try: @@ -769,23 +803,25 @@ def _inherit_from(context, uri, calling_uri): if uri is None: return None template = _lookup_template(context, uri, calling_uri) - self_ns = context['self'] + self_ns = context["self"] ih = self_ns while ih.inherits is not None: ih = ih.inherits - lclcontext = context._locals({'next': ih}) - ih.inherits = TemplateNamespace("self:%s" % template.uri, - lclcontext, - template=template, - populate_self=False) - context._data['parent'] = lclcontext._data['local'] = ih.inherits - callable_ = getattr(template.module, '_mako_inherit', None) + lclcontext = context._locals({"next": ih}) + ih.inherits = TemplateNamespace( + "self:%s" % template.uri, + lclcontext, + template=template, + populate_self=False, + ) + context._data["parent"] = lclcontext._data["local"] = ih.inherits + callable_ = getattr(template.module, "_mako_inherit", None) if callable_ is not None: ret = callable_(template, lclcontext) if ret: return ret - gen_ns = getattr(template.module, 
'_mako_generate_namespaces', None) + gen_ns = getattr(template.module, "_mako_generate_namespaces", None) if gen_ns is not None: gen_ns(context) return (template.callable_, lclcontext) @@ -795,8 +831,9 @@ def _lookup_template(context, uri, relativeto): lookup = context._with_template.lookup if lookup is None: raise exceptions.TemplateLookupException( - "Template '%s' has no TemplateLookup associated" % - context._with_template.uri) + "Template '%s' has no TemplateLookup associated" + % context._with_template.uri + ) uri = lookup.adjust_uri(uri, relativeto) try: return lookup.get_template(uri) @@ -806,11 +843,14 @@ def _lookup_template(context, uri, relativeto): def _populate_self_namespace(context, template, self_ns=None): if self_ns is None: - self_ns = TemplateNamespace('self:%s' % template.uri, - context, template=template, - populate_self=False) - context._data['self'] = context._data['local'] = self_ns - if hasattr(template.module, '_mako_inherit'): + self_ns = TemplateNamespace( + "self:%s" % template.uri, + context, + template=template, + populate_self=False, + ) + context._data["self"] = context._data["local"] = self_ns + if hasattr(template.module, "_mako_inherit"): ret = template.module._mako_inherit(template, context) if ret: return ret @@ -829,18 +869,24 @@ def _render(template, callable_, args, data, as_unicode=False): buf = util.FastEncodingBuffer( as_unicode=as_unicode, encoding=template.output_encoding, - errors=template.encoding_errors) + errors=template.encoding_errors, + ) context = Context(buf, **data) context._outputting_as_unicode = as_unicode context._set_with_template(template) - _render_context(template, callable_, context, *args, - **_kwargs_for_callable(callable_, data)) + _render_context( + template, + callable_, + context, + *args, + **_kwargs_for_callable(callable_, data) + ) return context._pop_buffer().getvalue() def _kwargs_for_callable(callable_, data): - argspec = compat.inspect_func_args(callable_) + argspec = compat.inspect_getargspec(callable_) # for normal pages, **pageargs is usually present if argspec[2]: return data @@ -849,22 +895,23 @@ def _kwargs_for_callable(callable_, data): namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None] kwargs = {} for arg in namedargs: - if arg != 'context' and arg in data and arg not in kwargs: + if arg != "context" and arg in data and arg not in kwargs: kwargs[arg] = data[arg] return kwargs def _kwargs_for_include(callable_, data, **kwargs): - argspec = compat.inspect_func_args(callable_) + argspec = compat.inspect_getargspec(callable_) namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None] for arg in namedargs: - if arg != 'context' and arg in data and arg not in kwargs: + if arg != "context" and arg in data and arg not in kwargs: kwargs[arg] = data[arg] return kwargs def _render_context(tmpl, callable_, context, *args, **kwargs): import mako.template as template + # create polymorphic 'self' namespace for this # template with possibly updated context if not isinstance(tmpl, template.DefTemplate): @@ -886,8 +933,9 @@ def _exec_template(callable_, context, args=None, kwargs=None): be interpreted here. 
""" template = context._with_template - if template is not None and \ - (template.format_exceptions or template.error_handler): + if template is not None and ( + template.format_exceptions or template.error_handler + ): try: callable_(context, *args, **kwargs) except Exception: @@ -908,11 +956,15 @@ def _render_error(template, context, error): error_template = exceptions.html_error_template() if context._outputting_as_unicode: context._buffer_stack[:] = [ - util.FastEncodingBuffer(as_unicode=True)] + util.FastEncodingBuffer(as_unicode=True) + ] else: - context._buffer_stack[:] = [util.FastEncodingBuffer( - error_template.output_encoding, - error_template.encoding_errors)] + context._buffer_stack[:] = [ + util.FastEncodingBuffer( + error_template.output_encoding, + error_template.encoding_errors, + ) + ] context._set_with_template(error_template) error_template.render_context(context, error=error) diff --git a/server/www/packages/packages-linux/x64/mako/template.py b/server/www/packages/packages-linux/x64/mako/template.py index 329632c..937d15b 100644 --- a/server/www/packages/packages-linux/x64/mako/template.py +++ b/server/www/packages/packages-linux/x64/mako/template.py @@ -1,5 +1,5 @@ # mako/template.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -7,8 +7,7 @@ """Provides the Template class, a facade for parsing, generating and executing template strings, as well as template runtime operations.""" -from mako.lexer import Lexer -from mako import runtime, util, exceptions, codegen, cache, compat +import json import os import re import shutil @@ -18,6 +17,14 @@ import tempfile import types import weakref +from mako import cache +from mako import codegen +from mako import compat +from mako import exceptions +from mako import runtime +from mako import util +from mako.lexer import Lexer + class Template(object): @@ -230,41 +237,43 @@ class Template(object): lexer_cls = Lexer - def __init__(self, - text=None, - filename=None, - uri=None, - format_exceptions=False, - error_handler=None, - lookup=None, - output_encoding=None, - encoding_errors='strict', - module_directory=None, - cache_args=None, - cache_impl='beaker', - cache_enabled=True, - cache_type=None, - cache_dir=None, - cache_url=None, - module_filename=None, - input_encoding=None, - disable_unicode=False, - module_writer=None, - bytestring_passthrough=False, - default_filters=None, - buffer_filters=(), - strict_undefined=False, - imports=None, - future_imports=None, - enable_loop=True, - preprocessor=None, - lexer_cls=None, - include_error_handler=None): + def __init__( + self, + text=None, + filename=None, + uri=None, + format_exceptions=False, + error_handler=None, + lookup=None, + output_encoding=None, + encoding_errors="strict", + module_directory=None, + cache_args=None, + cache_impl="beaker", + cache_enabled=True, + cache_type=None, + cache_dir=None, + cache_url=None, + module_filename=None, + input_encoding=None, + disable_unicode=False, + module_writer=None, + bytestring_passthrough=False, + default_filters=None, + buffer_filters=(), + strict_undefined=False, + imports=None, + future_imports=None, + enable_loop=True, + preprocessor=None, + lexer_cls=None, + include_error_handler=None, + ): if uri: - self.module_id = re.sub(r'\W', "_", uri) + self.module_id = re.sub(r"\W", "_", uri) self.uri = uri elif filename: - self.module_id = 
re.sub(r'\W', "_", filename) + self.module_id = re.sub(r"\W", "_", filename) drive, path = os.path.splitdrive(filename) path = os.path.normpath(path).replace(os.path.sep, "/") self.uri = path @@ -278,9 +287,10 @@ class Template(object): u_norm = os.path.normpath(u_norm) if u_norm.startswith(".."): raise exceptions.TemplateLookupException( - "Template uri \"%s\" is invalid - " + 'Template uri "%s" is invalid - ' "it cannot be relative outside " - "of the root path." % self.uri) + "of the root path." % self.uri + ) self.input_encoding = input_encoding self.output_encoding = output_encoding @@ -293,17 +303,18 @@ class Template(object): if compat.py3k and disable_unicode: raise exceptions.UnsupportedError( - "Mako for Python 3 does not " - "support disabling Unicode") + "Mako for Python 3 does not " "support disabling Unicode" + ) elif output_encoding and disable_unicode: raise exceptions.UnsupportedError( "output_encoding must be set to " - "None when disable_unicode is used.") + "None when disable_unicode is used." + ) if default_filters is None: if compat.py3k or self.disable_unicode: - self.default_filters = ['str'] + self.default_filters = ["str"] else: - self.default_filters = ['unicode'] + self.default_filters = ["unicode"] else: self.default_filters = default_filters self.buffer_filters = buffer_filters @@ -320,7 +331,7 @@ class Template(object): (code, module) = _compile_text(self, text, filename) self._code = code self._source = text - ModuleInfo(module, None, self, filename, code, text) + ModuleInfo(module, None, self, filename, code, text, uri) elif filename is not None: # if template filename and a module directory, load # a filesystem-based module file, generating if needed @@ -329,8 +340,7 @@ class Template(object): elif module_directory is not None: path = os.path.abspath( os.path.join( - os.path.normpath(module_directory), - u_norm + ".py" + os.path.normpath(module_directory), u_norm + ".py" ) ) else: @@ -338,7 +348,8 @@ class Template(object): module = self._compile_from_file(path, filename) else: raise exceptions.RuntimeException( - "Template requires text or filename") + "Template requires text or filename" + ) self.module = module self.filename = filename @@ -351,8 +362,12 @@ class Template(object): self.module_directory = module_directory self._setup_cache_args( - cache_impl, cache_enabled, cache_args, - cache_type, cache_dir, cache_url + cache_impl, + cache_enabled, + cache_args, + cache_type, + cache_dir, + cache_url, ) @util.memoized_property @@ -360,11 +375,17 @@ class Template(object): if self.enable_loop: return codegen.RESERVED_NAMES else: - return codegen.RESERVED_NAMES.difference(['loop']) + return codegen.RESERVED_NAMES.difference(["loop"]) - def _setup_cache_args(self, - cache_impl, cache_enabled, cache_args, - cache_type, cache_dir, cache_url): + def _setup_cache_args( + self, + cache_impl, + cache_enabled, + cache_args, + cache_type, + cache_dir, + cache_url, + ): self.cache_impl = cache_impl self.cache_enabled = cache_enabled if cache_args: @@ -374,49 +395,42 @@ class Template(object): # transfer deprecated cache_* args if cache_type: - self.cache_args['type'] = cache_type + self.cache_args["type"] = cache_type if cache_dir: - self.cache_args['dir'] = cache_dir + self.cache_args["dir"] = cache_dir if cache_url: - self.cache_args['url'] = cache_url + self.cache_args["url"] = cache_url def _compile_from_file(self, path, filename): if path is not None: util.verify_directory(os.path.dirname(path)) filemtime = os.stat(filename)[stat.ST_MTIME] - if not 
os.path.exists(path) or \ - os.stat(path)[stat.ST_MTIME] < filemtime: + if ( + not os.path.exists(path) + or os.stat(path)[stat.ST_MTIME] < filemtime + ): data = util.read_file(filename) _compile_module_file( - self, - data, - filename, - path, - self.module_writer) + self, data, filename, path, self.module_writer + ) module = compat.load_module(self.module_id, path) del sys.modules[self.module_id] if module._magic_number != codegen.MAGIC_NUMBER: data = util.read_file(filename) _compile_module_file( - self, - data, - filename, - path, - self.module_writer) + self, data, filename, path, self.module_writer + ) module = compat.load_module(self.module_id, path) del sys.modules[self.module_id] - ModuleInfo(module, path, self, filename, None, None) + ModuleInfo(module, path, self, filename, None, None, None) else: # template filename and no module directory, compile code # in memory data = util.read_file(filename) - code, module = _compile_text( - self, - data, - filename) + code, module = _compile_text(self, data, filename) self._source = None self._code = code - ModuleInfo(module, None, self, filename, code, None) + ModuleInfo(module, None, self, filename, code, None, None) return module @property @@ -437,15 +451,15 @@ class Template(object): @property def cache_dir(self): - return self.cache_args['dir'] + return self.cache_args["dir"] @property def cache_url(self): - return self.cache_args['url'] + return self.cache_args["url"] @property def cache_type(self): - return self.cache_args['type'] + return self.cache_args["type"] def render(self, *args, **data): """Render the output of this template as a string. @@ -464,11 +478,9 @@ class Template(object): def render_unicode(self, *args, **data): """Render the output of this template as a unicode object.""" - return runtime._render(self, - self.callable_, - args, - data, - as_unicode=True) + return runtime._render( + self, self.callable_, args, data, as_unicode=True + ) def render_context(self, context, *args, **kwargs): """Render this :class:`.Template` with the given context. @@ -476,13 +488,9 @@ class Template(object): The data is written to the context's buffer. """ - if getattr(context, '_with_template', None) is None: + if getattr(context, "_with_template", None) is None: context._set_with_template(self) - runtime._render_context(self, - self.callable_, - context, - *args, - **kwargs) + runtime._render_context(self, self.callable_, context, *args, **kwargs) def has_def(self, name): return hasattr(self.module, "render_%s" % name) @@ -498,7 +506,7 @@ class Template(object): .. 
versionadded:: 1.0.4 """ - return [i[7:] for i in dir(self.module) if i[:7] == 'render_'] + return [i[7:] for i in dir(self.module) if i[:7] == "render_"] def _get_def_callable(self, name): return getattr(self.module, "render_%s" % name) @@ -526,28 +534,30 @@ class ModuleTemplate(Template): """ - def __init__(self, module, - module_filename=None, - template=None, - template_filename=None, - module_source=None, - template_source=None, - output_encoding=None, - encoding_errors='strict', - disable_unicode=False, - bytestring_passthrough=False, - format_exceptions=False, - error_handler=None, - lookup=None, - cache_args=None, - cache_impl='beaker', - cache_enabled=True, - cache_type=None, - cache_dir=None, - cache_url=None, - include_error_handler=None, - ): - self.module_id = re.sub(r'\W', "_", module._template_uri) + def __init__( + self, + module, + module_filename=None, + template=None, + template_filename=None, + module_source=None, + template_source=None, + output_encoding=None, + encoding_errors="strict", + disable_unicode=False, + bytestring_passthrough=False, + format_exceptions=False, + error_handler=None, + lookup=None, + cache_args=None, + cache_impl="beaker", + cache_enabled=True, + cache_type=None, + cache_dir=None, + cache_url=None, + include_error_handler=None, + ): + self.module_id = re.sub(r"\W", "_", module._template_uri) self.uri = module._template_uri self.input_encoding = module._source_encoding self.output_encoding = output_encoding @@ -558,21 +568,25 @@ class ModuleTemplate(Template): if compat.py3k and disable_unicode: raise exceptions.UnsupportedError( - "Mako for Python 3 does not " - "support disabling Unicode") + "Mako for Python 3 does not " "support disabling Unicode" + ) elif output_encoding and disable_unicode: raise exceptions.UnsupportedError( "output_encoding must be set to " - "None when disable_unicode is used.") + "None when disable_unicode is used." + ) self.module = module self.filename = template_filename - ModuleInfo(module, - module_filename, - self, - template_filename, - module_source, - template_source) + ModuleInfo( + module, + module_filename, + self, + template_filename, + module_source, + template_source, + module._template_uri, + ) self.callable_ = self.module.render_body self.format_exceptions = format_exceptions @@ -580,8 +594,12 @@ class ModuleTemplate(Template): self.include_error_handler = include_error_handler self.lookup = lookup self._setup_cache_args( - cache_impl, cache_enabled, cache_args, - cache_type, cache_dir, cache_url + cache_impl, + cache_enabled, + cache_args, + cache_type, + cache_dir, + cache_url, ) @@ -614,20 +632,25 @@ class ModuleInfo(object): source code based on a module's identifier. 
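Among the methods reflowed above, list_defs() (added in Mako 1.0.4 per the versionadded note) simply strips the render_ prefix from the generated module's callables. A short usage sketch, assuming this vendored Mako is importable; the template body itself should show up as 'body':

    from mako.template import Template

    t = Template('<%def name="header()">hello</%def>body text')
    print(sorted(t.list_defs()))  # should print: ['body', 'header']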
""" + _modules = weakref.WeakValueDictionary() - def __init__(self, - module, - module_filename, - template, - template_filename, - module_source, - template_source): + def __init__( + self, + module, + module_filename, + template, + template_filename, + module_source, + template_source, + template_uri, + ): self.module = module self.module_filename = module_filename self.template_filename = template_filename self.module_source = module_source self.template_source = template_source + self.template_uri = template_uri self._modules[module.__name__] = template._mmarker = self if module_filename: self._modules[module_filename] = self @@ -635,15 +658,15 @@ class ModuleInfo(object): @classmethod def get_module_source_metadata(cls, module_source, full_line_map=False): source_map = re.search( - r"__M_BEGIN_METADATA(.+?)__M_END_METADATA", - module_source, re.S).group(1) - source_map = compat.json.loads(source_map) - source_map['line_map'] = dict( - (int(k), int(v)) - for k, v in source_map['line_map'].items()) + r"__M_BEGIN_METADATA(.+?)__M_END_METADATA", module_source, re.S + ).group(1) + source_map = json.loads(source_map) + source_map["line_map"] = dict( + (int(k), int(v)) for k, v in source_map["line_map"].items() + ) if full_line_map: - f_line_map = source_map['full_line_map'] = [] - line_map = source_map['line_map'] + f_line_map = source_map["full_line_map"] = [] + line_map = source_map["line_map"] curr_templ_line = 1 for mod_line in range(1, max(line_map)): @@ -662,10 +685,12 @@ class ModuleInfo(object): @property def source(self): if self.template_source is not None: - if self.module._source_encoding and \ - not isinstance(self.template_source, compat.text_type): + if self.module._source_encoding and not isinstance( + self.template_source, compat.text_type + ): return self.template_source.decode( - self.module._source_encoding) + self.module._source_encoding + ) else: return self.template_source else: @@ -677,38 +702,46 @@ class ModuleInfo(object): def _compile(template, text, filename, generate_magic_comment): - lexer = template.lexer_cls(text, - filename, - disable_unicode=template.disable_unicode, - input_encoding=template.input_encoding, - preprocessor=template.preprocessor) + lexer = template.lexer_cls( + text, + filename, + disable_unicode=template.disable_unicode, + input_encoding=template.input_encoding, + preprocessor=template.preprocessor, + ) node = lexer.parse() - source = codegen.compile(node, - template.uri, - filename, - default_filters=template.default_filters, - buffer_filters=template.buffer_filters, - imports=template.imports, - future_imports=template.future_imports, - source_encoding=lexer.encoding, - generate_magic_comment=generate_magic_comment, - disable_unicode=template.disable_unicode, - strict_undefined=template.strict_undefined, - enable_loop=template.enable_loop, - reserved_names=template.reserved_names) + source = codegen.compile( + node, + template.uri, + filename, + default_filters=template.default_filters, + buffer_filters=template.buffer_filters, + imports=template.imports, + future_imports=template.future_imports, + source_encoding=lexer.encoding, + generate_magic_comment=generate_magic_comment, + disable_unicode=template.disable_unicode, + strict_undefined=template.strict_undefined, + enable_loop=template.enable_loop, + reserved_names=template.reserved_names, + ) return source, lexer def _compile_text(template, text, filename): identifier = template.module_id - source, lexer = _compile(template, text, filename, - 
generate_magic_comment=template.disable_unicode) + source, lexer = _compile( + template, + text, + filename, + generate_magic_comment=template.disable_unicode, + ) cid = identifier if not compat.py3k and isinstance(cid, compat.text_type): cid = cid.encode() module = types.ModuleType(cid) - code = compile(source, cid, 'exec') + code = compile(source, cid, "exec") # this exec() works for 2.4->3.3. exec(code, module.__dict__, module.__dict__) @@ -716,11 +749,12 @@ def _compile_text(template, text, filename): def _compile_module_file(template, text, filename, outputpath, module_writer): - source, lexer = _compile(template, text, filename, - generate_magic_comment=True) + source, lexer = _compile( + template, text, filename, generate_magic_comment=True + ) if isinstance(source, compat.text_type): - source = source.encode(lexer.encoding or 'ascii') + source = source.encode(lexer.encoding or "ascii") if module_writer: module_writer(source, outputpath) @@ -737,9 +771,9 @@ def _compile_module_file(template, text, filename, outputpath, module_writer): def _get_module_info_from_callable(callable_): if compat.py3k: - return _get_module_info(callable_.__globals__['__name__']) + return _get_module_info(callable_.__globals__["__name__"]) else: - return _get_module_info(callable_.func_globals['__name__']) + return _get_module_info(callable_.func_globals["__name__"]) def _get_module_info(filename): diff --git a/server/www/packages/packages-linux/x64/mako/util.py b/server/www/packages/packages-linux/x64/mako/util.py index 2f089ff..07f7531 100644 --- a/server/www/packages/packages-linux/x64/mako/util.py +++ b/server/www/packages/packages-linux/x64/mako/util.py @@ -1,15 +1,17 @@ # mako/util.py -# Copyright (C) 2006-2016 the Mako authors and contributors +# Copyright 2006-2019 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -import re -import collections import codecs -import os -from mako import compat +import collections import operator +import os +import re +import timeit + +from mako import compat def update_wrapper(decorated, fn): @@ -19,7 +21,6 @@ def update_wrapper(decorated, fn): class PluginLoader(object): - def __init__(self, group): self.group = group self.impls = {} @@ -29,16 +30,16 @@ class PluginLoader(object): return self.impls[name]() else: import pkg_resources - for impl in pkg_resources.iter_entry_points( - self.group, - name): + + for impl in pkg_resources.iter_entry_points(self.group, name): self.impls[name] = impl.load return impl.load() else: from mako import exceptions + raise exceptions.RuntimeException( - "Can't load plugin %s %s" % - (self.group, name)) + "Can't load plugin %s %s" % (self.group, name) + ) def register(self, name, modulepath, objname): def load(): @@ -46,18 +47,19 @@ class PluginLoader(object): for token in modulepath.split(".")[1:]: mod = getattr(mod, token) return getattr(mod, objname) + self.impls[name] = load -def verify_directory(dir): +def verify_directory(dir_): """create and/or verify a filesystem directory.""" tries = 0 - while not os.path.exists(dir): + while not os.path.exists(dir_): try: tries += 1 - os.makedirs(dir, compat.octal("0775")) + os.makedirs(dir_, compat.octal("0775")) except: if tries > 5: raise @@ -109,11 +111,15 @@ class memoized_instancemethod(object): def oneshot(*args, **kw): result = self.fget(obj, *args, **kw) - memo = lambda *a, **kw: result + + def memo(*a, **kw): + return result + memo.__name__ = self.__name__ memo.__doc__ = 
self.__doc__ obj.__dict__[self.__name__] = memo return result + oneshot.__name__ = self.__name__ oneshot.__doc__ = self.__doc__ return oneshot @@ -137,13 +143,13 @@ class FastEncodingBuffer(object): """a very rudimentary buffer that is faster than StringIO, but doesn't crash on unicode data like cStringIO.""" - def __init__(self, encoding=None, errors='strict', as_unicode=False): + def __init__(self, encoding=None, errors="strict", as_unicode=False): self.data = collections.deque() self.encoding = encoding if as_unicode: - self.delim = compat.u('') + self.delim = compat.u("") else: - self.delim = '' + self.delim = "" self.as_unicode = as_unicode self.errors = errors self.write = self.data.append @@ -154,8 +160,9 @@ class FastEncodingBuffer(object): def getvalue(self): if self.encoding: - return self.delim.join(self.data).encode(self.encoding, - self.errors) + return self.delim.join(self.data).encode( + self.encoding, self.errors + ) else: return self.delim.join(self.data) @@ -171,22 +178,21 @@ class LRUCache(dict): """ class _Item(object): - def __init__(self, key, value): self.key = key self.value = value - self.timestamp = compat.time_func() + self.timestamp = timeit.default_timer() def __repr__(self): return repr(self.value) - def __init__(self, capacity, threshold=.5): + def __init__(self, capacity, threshold=0.5): self.capacity = capacity self.threshold = threshold def __getitem__(self, key): item = dict.__getitem__(self, key) - item.timestamp = compat.time_func() + item.timestamp = timeit.default_timer() return item.value def values(self): @@ -210,9 +216,12 @@ class LRUCache(dict): def _manage_size(self): while len(self) > self.capacity + self.capacity * self.threshold: - bytime = sorted(dict.values(self), - key=operator.attrgetter('timestamp'), reverse=True) - for item in bytime[self.capacity:]: + bytime = sorted( + dict.values(self), + key=operator.attrgetter("timestamp"), + reverse=True, + ) + for item in bytime[self.capacity :]: try: del self[item.key] except KeyError: @@ -220,10 +229,11 @@ class LRUCache(dict): # broke in on us. 
loop around and try again break + # Regexp to match python magic encoding line _PYTHON_MAGIC_COMMENT_re = re.compile( - r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', - re.VERBOSE) + r"[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)", re.VERBOSE +) def parse_encoding(fp): @@ -242,13 +252,14 @@ line1 = fp.readline() has_bom = line1.startswith(codecs.BOM_UTF8) if has_bom: - line1 = line1[len(codecs.BOM_UTF8):] + line1 = line1[len(codecs.BOM_UTF8) :] - m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore')) + m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode("ascii", "ignore")) if not m: try: import parser - parser.suite(line1.decode('ascii', 'ignore')) + + parser.suite(line1.decode("ascii", "ignore")) except (ImportError, SyntaxError): # Either it's a real syntax error, in which case the source # is not valid python source, or line2 is a continuation of @@ -258,14 +269,16 @@ else: line2 = fp.readline() m = _PYTHON_MAGIC_COMMENT_re.match( - line2.decode('ascii', 'ignore')) + line2.decode("ascii", "ignore") + ) if has_bom: if m: raise SyntaxError( "python refuses to compile code with both a UTF8" - " byte-order-mark and a magic encoding comment") - return 'utf_8' + " byte-order-mark and a magic encoding comment" + ) + return "utf_8" elif m: return m.group(1) else: @@ -289,10 +302,11 @@ def restore__ast(_ast): """Attempt to restore the required classes to the _ast module if it appears to be missing them """ - if hasattr(_ast, 'AST'): + if hasattr(_ast, "AST"): return _ast.PyCF_ONLY_AST = 2 << 9 - m = compile("""\ + m = compile( + """\ def foo(): pass class Bar(object): pass if False: pass import os from os import * import os.path (a, b, c) = 1, 2, 3 [a, b, c] = 1, 2, 3 foo(*args, **kwargs) baz = 'mako' baz and 'foo' or 'bar' (mako is baz == baz) is not baz != mako mako > baz < mako >= baz <= mako -mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST) +mako in baz not in mako""", + "<unknown>", + "exec", + _ast.PyCF_ONLY_AST, + ) _ast.Module = type(m) for cls in _ast.Module.__mro__: - if cls.__name__ == 'mod': + if cls.__name__ == "mod": _ast.mod = cls - elif cls.__name__ == 'AST': + elif cls.__name__ == "AST": _ast.AST = cls _ast.FunctionDef = type(m.body[0]) @@ -361,7 +379,7 @@ mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST) _ast.NotIn = type(m.body[12].value.ops[1]) -def read_file(path, mode='rb'): +def read_file(path, mode="rb"): fp = open(path, mode) try: data = fp.read() diff --git a/server/www/packages/packages-linux/x64/psutil/__init__.py b/server/www/packages/packages-linux/x64/psutil/__init__.py index c2a83fb..3f38058 100644 --- a/server/www/packages/packages-linux/x64/psutil/__init__.py +++ b/server/www/packages/packages-linux/x64/psutil/__init__.py @@ -17,7 +17,7 @@ sensors) in Python. Supported platforms: - Sun Solaris - AIX -Works with Python versions from 2.6 to 3.X. +Works with Python versions from 2.6 to 3.4+.
""" from __future__ import division @@ -25,12 +25,12 @@ from __future__ import division import collections import contextlib import datetime -import errno import functools import os import signal import subprocess import sys +import threading import time try: import pwd @@ -43,6 +43,8 @@ from ._common import memoize from ._common import memoize_when_activated from ._common import wrap_numbers as _wrap_numbers from ._compat import long +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._compat import PY3 as _PY3 from ._common import STATUS_DEAD @@ -86,12 +88,6 @@ from ._common import POSIX # NOQA from ._common import SUNOS from ._common import WINDOWS -from ._exceptions import AccessDenied -from ._exceptions import Error -from ._exceptions import NoSuchProcess -from ._exceptions import TimeoutExpired -from ._exceptions import ZombieProcess - if LINUX: # This is public API and it will be retrieved from _pslinux.py # via sys.modules. @@ -151,6 +147,10 @@ elif WINDOWS: from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA from ._pswindows import CONN_DELETE_TCB # NOQA + from ._pswindows import IOPRIO_VERYLOW # NOQA + from ._pswindows import IOPRIO_LOW # NOQA + from ._pswindows import IOPRIO_NORMAL # NOQA + from ._pswindows import IOPRIO_HIGH # NOQA elif MACOS: from . import _psosx as _psplatform @@ -211,23 +211,26 @@ __all__ = [ "pid_exists", "pids", "process_iter", "wait_procs", # proc "virtual_memory", "swap_memory", # memory "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu - "cpu_stats", # "cpu_freq", + "cpu_stats", # "cpu_freq", "getloadavg" "net_io_counters", "net_connections", "net_if_addrs", # network "net_if_stats", "disk_io_counters", "disk_partitions", "disk_usage", # disk # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors "users", "boot_time", # others ] + + __all__.extend(_psplatform.__extra__all__) __author__ = "Giampaolo Rodola'" -__version__ = "5.4.8" +__version__ = "5.6.5" version_info = tuple([int(num) for num in __version__.split('.')]) + +_timer = getattr(time, 'monotonic', time.time) AF_LINK = _psplatform.AF_LINK POWER_TIME_UNLIMITED = _common.POWER_TIME_UNLIMITED POWER_TIME_UNKNOWN = _common.POWER_TIME_UNKNOWN _TOTAL_PHYMEM = None -_timer = getattr(time, 'monotonic', time.time) - +_LOWEST_PID = None # Sanity check in case the user messed up with psutil installation # or did something weird with sys.path. In this case we might end @@ -251,6 +254,112 @@ if (int(__version__.replace('.', '')) != raise ImportError(msg) +# ===================================================================== +# --- Exceptions +# ===================================================================== + + +class Error(Exception): + """Base exception class. All other psutil exceptions inherit + from this one. + """ + + def __init__(self, msg=""): + Exception.__init__(self, msg) + self.msg = msg + + def __repr__(self): + ret = "psutil.%s %s" % (self.__class__.__name__, self.msg) + return ret.strip() + + __str__ = __repr__ + + +class NoSuchProcess(Error): + """Exception raised when a process with a certain PID doesn't + or no longer exists. 
+ """ + + def __init__(self, pid, name=None, msg=None): + Error.__init__(self, msg) + self.pid = pid + self.name = name + self.msg = msg + if msg is None: + if name: + details = "(pid=%s, name=%s)" % (self.pid, repr(self.name)) + else: + details = "(pid=%s)" % self.pid + self.msg = "process no longer exists " + details + + +class ZombieProcess(NoSuchProcess): + """Exception raised when querying a zombie process. This is + raised on macOS, BSD and Solaris only, and not always: depending + on the query the OS may be able to succeed anyway. + On Linux all zombie processes are querable (hence this is never + raised). Windows doesn't have zombie processes. + """ + + def __init__(self, pid, name=None, ppid=None, msg=None): + NoSuchProcess.__init__(self, msg) + self.pid = pid + self.ppid = ppid + self.name = name + self.msg = msg + if msg is None: + args = ["pid=%s" % pid] + if name: + args.append("name=%s" % repr(self.name)) + if ppid: + args.append("ppid=%s" % self.ppid) + details = "(%s)" % ", ".join(args) + self.msg = "process still exists but it's a zombie " + details + + +class AccessDenied(Error): + """Exception raised when permission to perform an action is denied.""" + + def __init__(self, pid=None, name=None, msg=None): + Error.__init__(self, msg) + self.pid = pid + self.name = name + self.msg = msg + if msg is None: + if (pid is not None) and (name is not None): + self.msg = "(pid=%s, name=%s)" % (pid, repr(name)) + elif (pid is not None): + self.msg = "(pid=%s)" % self.pid + else: + self.msg = "" + + +class TimeoutExpired(Error): + """Raised on Process.wait(timeout) if timeout expires and process + is still alive. + """ + + def __init__(self, seconds, pid=None, name=None): + Error.__init__(self, "timeout after %s seconds" % seconds) + self.seconds = seconds + self.pid = pid + self.name = name + if (pid is not None) and (name is not None): + self.msg += " (pid=%s, name=%s)" % (pid, repr(name)) + elif (pid is not None): + self.msg += " (pid=%s)" % self.pid + + +# Push exception classes into platform specific module namespace. +_psplatform.NoSuchProcess = NoSuchProcess +_psplatform.ZombieProcess = ZombieProcess +_psplatform.AccessDenied = AccessDenied +_psplatform.TimeoutExpired = TimeoutExpired +if POSIX: + from . import _psposix + _psposix.TimeoutExpired = TimeoutExpired + + # ===================================================================== # --- Utils # ===================================================================== @@ -352,7 +461,7 @@ class Process(object): self._create_time = None self._gone = False self._hash = None - self._oneshot_inctx = False + self._lock = threading.RLock() # used for caching on Windows only (on POSIX ppid may change) self._ppid = None # platform-specific modules define an _psplatform.Process @@ -456,40 +565,45 @@ class Process(object): ... >>> """ - if self._oneshot_inctx: - # NOOP: this covers the use case where the user enters the - # context twice. Since as_dict() internally uses oneshot() - # I expect that the code below will be a pretty common - # "mistake" that the user will make, so let's guard - # against that: - # - # >>> with p.oneshot(): - # ... p.as_dict() - # ... 
- yield - else: - self._oneshot_inctx = True - try: - # cached in case cpu_percent() is used - self.cpu_times.cache_activate() - # cached in case memory_percent() is used - self.memory_info.cache_activate() - # cached in case parent() is used - self.ppid.cache_activate() - # cached in case username() is used - if POSIX: - self.uids.cache_activate() - # specific implementation cache - self._proc.oneshot_enter() + with self._lock: + if hasattr(self, "_cache"): + # NOOP: this covers the use case where the user enters the + # context twice: + # + # >>> with p.oneshot(): + # ... with p.oneshot(): + # ... + # + # Also, since as_dict() internally uses oneshot() + # I expect that the code below will be a pretty common + # "mistake" that the user will make, so let's guard + # against that: + # + # >>> with p.oneshot(): + # ... p.as_dict() + # ... yield - finally: - self.cpu_times.cache_deactivate() - self.memory_info.cache_deactivate() - self.ppid.cache_deactivate() - if POSIX: - self.uids.cache_deactivate() - self._proc.oneshot_exit() - self._oneshot_inctx = False + else: + try: + # cached in case cpu_percent() is used + self.cpu_times.cache_activate(self) + # cached in case memory_percent() is used + self.memory_info.cache_activate(self) + # cached in case parent() is used + self.ppid.cache_activate(self) + # cached in case username() is used + if POSIX: + self.uids.cache_activate(self) + # specific implementation cache + self._proc.oneshot_enter() + yield + finally: + self.cpu_times.cache_deactivate(self) + self.memory_info.cache_deactivate(self) + self.ppid.cache_deactivate(self) + if POSIX: + self.uids.cache_deactivate(self) + self._proc.oneshot_exit() def as_dict(self, attrs=None, ad_value=None): """Utility method returning process information as a @@ -540,6 +654,9 @@ class Process(object): checking whether PID has been reused. If no parent is known return None. """ + lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0] + if self.pid == lowest_pid: + return None ppid = self.ppid() if ppid is not None: ctime = self.create_time() @@ -551,6 +668,17 @@ class Process(object): except NoSuchProcess: pass + def parents(self): + """Return the parents of this process as a list of Process + instances. If no parents are known return an empty list. + """ + parents = [] + proc = self.parent() + while proc is not None: + parents.append(proc) + proc = proc.parent() + return parents + def is_running(self): """Return whether this process is running. It also checks if PID has been reused by another process in @@ -799,9 +927,6 @@ class Process(object): (and set). (Windows, Linux and BSD only). """ - # Automatically remove duplicates both on get and - # set (for get it's not really necessary, it's - # just for extra safety). if cpus is None: return list(set(self._proc.cpu_affinity_get())) else: @@ -825,7 +950,7 @@ class Process(object): """ return self._proc.cpu_num() - # Linux, macOS and Windows only + # Linux, macOS, Windows, Solaris, AIX if hasattr(_psplatform.Process, "environ"): def environ(self): @@ -1095,7 +1220,6 @@ class Process(object): return (value / float(total_phymem)) * 100 if hasattr(_psplatform.Process, "memory_maps"): - # Available everywhere except OpenBSD and NetBSD. def memory_maps(self, grouped=True): """Return process' mapped memory regions as a list of namedtuples whose fields are variable depending on the platform. 
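The rewritten oneshot() above trades the old _oneshot_inctx flag for a per-process threading.RLock plus a hasattr(self, "_cache") check, making re-entry (and as_dict() inside the block) a no-op while serializing concurrent use across threads. Usage is unchanged; a minimal sketch of the cached-read pattern its docstring describes:

    import psutil

    p = psutil.Process()  # defaults to the current process
    with p.oneshot():
        # these reads are served from the caches activated on entry
        info = (p.name(), p.cpu_times().user, p.memory_info().rss)
    print(info)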
@@ -1167,18 +1291,16 @@ class Process(object): "calling process (os.getpid()) instead of PID 0") try: os.kill(self.pid, sig) - except OSError as err: - if err.errno == errno.ESRCH: - if OPENBSD and pid_exists(self.pid): - # We do this because os.kill() lies in case of - # zombie processes. - raise ZombieProcess(self.pid, self._name, self._ppid) - else: - self._gone = True - raise NoSuchProcess(self.pid, self._name) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) - raise + except ProcessLookupError: + if OPENBSD and pid_exists(self.pid): + # We do this because os.kill() lies in case of + # zombie processes. + raise ZombieProcess(self.pid, self._name, self._ppid) + else: + self._gone = True + raise NoSuchProcess(self.pid, self._name) + except PermissionError: + raise AccessDenied(self.pid, self._name) @_assert_pid_not_reused def send_signal(self, sig): @@ -1298,7 +1420,7 @@ class Popen(Process): http://bugs.python.org/issue6973. For a complete documentation refer to: - http://docs.python.org/library/subprocess.html + http://docs.python.org/3/library/subprocess.html """ def __init__(self, *args, **kwargs): @@ -1354,7 +1476,7 @@ class Popen(Process): _as_dict_attrnames = set( [x for x in dir(Process) if not x.startswith('_') and x not in ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait', - 'is_running', 'as_dict', 'parent', 'children', 'rlimit', + 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit', 'memory_info_ex', 'oneshot']]) @@ -1365,7 +1487,10 @@ _as_dict_attrnames = set( def pids(): """Return a list of current running PIDs.""" - return _psplatform.pids() + global _LOWEST_PID + ret = sorted(_psplatform.pids()) + _LOWEST_PID = ret[0] + return ret def pid_exists(pid): @@ -1387,6 +1512,7 @@ def pid_exists(pid): _pmap = {} +_lock = threading.Lock() def process_iter(attrs=None, ad_value=None): @@ -1414,21 +1540,26 @@ def process_iter(attrs=None, ad_value=None): proc = Process(pid) if attrs is not None: proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value) - _pmap[proc.pid] = proc + with _lock: + _pmap[proc.pid] = proc return proc def remove(pid): - _pmap.pop(pid, None) + with _lock: + _pmap.pop(pid, None) a = set(pids()) b = set(_pmap.keys()) new_pids = a - b gone_pids = b - a - for pid in gone_pids: remove(pid) - for pid, proc in sorted(list(_pmap.items()) + - list(dict.fromkeys(new_pids).items())): + + with _lock: + ls = sorted(list(_pmap.items()) + + list(dict.fromkeys(new_pids).items())) + + for pid, proc in ls: try: if proc is None: # new process yield add(pid) @@ -1885,6 +2016,17 @@ if hasattr(_psplatform, "cpu_freq"): __all__.append("cpu_freq") +if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"): + # Perform this hasattr check once on import time to either use the + # platform based code or proxy straight from the os module. 
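# Side note on the rewrite above: with PEP 3151 (Python 3.3+) the errno
# comparisons collapse into OSError subclasses, which the patched
# signal-sending path now catches directly (psutil's _compat module
# backports them for Python 2). The stdlib-only equivalence:
import os
import signal

def send(pid, sig=signal.SIGTERM):
    try:
        os.kill(pid, sig)
    except ProcessLookupError:    # was: err.errno == errno.ESRCH
        print("no such process: %s" % pid)
    except PermissionError:       # was: err.errno in (EPERM, EACCES)
        print("not allowed to signal: %s" % pid)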
+ if hasattr(os, "getloadavg"): + getloadavg = os.getloadavg + else: + getloadavg = _psplatform.getloadavg + + __all__.append("getloadavg") + + # ===================================================================== # --- system memory related functions # ===================================================================== @@ -1910,7 +2052,7 @@ def virtual_memory(): - used: memory used, calculated differently depending on the platform and designed for informational purposes only: - macOS: active + inactive + wired + macOS: active + wired BSD: active + wired + cached Linux: total - free @@ -2306,19 +2448,16 @@ if WINDOWS: def test(): # pragma: no cover - """List info of all currently running processes emulating ps aux - output. - """ + from ._common import bytes2human + from ._compat import get_terminal_size + today_day = datetime.date.today() - templ = "%-10s %5s %4s %7s %7s %-13s %5s %7s %s" - attrs = ['pid', 'memory_percent', 'name', 'cpu_times', 'create_time', - 'memory_info'] - if POSIX: - attrs.append('uids') - attrs.append('terminal') - print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "TTY", "START", "TIME", - "COMMAND")) - for p in process_iter(attrs=attrs, ad_value=''): + templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s" + attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times', + 'create_time', 'memory_info', 'status', 'nice', 'username'] + print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", + "STATUS", "START", "TIME", "CMDLINE")) + for p in process_iter(attrs, ad_value=None): if p.info['create_time']: ctime = datetime.datetime.fromtimestamp(p.info['create_time']) if ctime.date() == today_day: @@ -2327,30 +2466,46 @@ def test(): # pragma: no cover ctime = ctime.strftime("%b%d") else: ctime = '' - cputime = time.strftime("%M:%S", - time.localtime(sum(p.info['cpu_times']))) - try: - user = p.username() - except Error: - user = '' - if WINDOWS and '\\' in user: + if p.info['cpu_times']: + cputime = time.strftime("%M:%S", + time.localtime(sum(p.info['cpu_times']))) + else: + cputime = '' + + user = p.info['username'] or '' + if not user and POSIX: + try: + user = p.uids()[0] + except Error: + pass + if user and WINDOWS and '\\' in user: user = user.split('\\')[1] - vms = p.info['memory_info'] and \ - int(p.info['memory_info'].vms / 1024) or '?' - rss = p.info['memory_info'] and \ - int(p.info['memory_info'].rss / 1024) or '?' - memp = p.info['memory_percent'] and \ - round(p.info['memory_percent'], 1) or '?' 
- print(templ % ( + user = user[:9] + vms = bytes2human(p.info['memory_info'].vms) if \ + p.info['memory_info'] is not None else '' + rss = bytes2human(p.info['memory_info'].rss) if \ + p.info['memory_info'] is not None else '' + memp = round(p.info['memory_percent'], 1) if \ + p.info['memory_percent'] is not None else '' + nice = int(p.info['nice']) if p.info['nice'] else '' + if p.info['cmdline']: + cmdline = ' '.join(p.info['cmdline']) + else: + cmdline = p.info['name'] + status = p.info['status'][:5] if p.info['status'] else '' + + line = templ % ( user[:10], p.info['pid'], memp, vms, rss, - p.info.get('terminal', '') or '?', + nice, + status, ctime, cputime, - p.info['name'].strip() or '?')) + cmdline) + print(line[:get_terminal_size()[0]]) del memoize, memoize_when_activated, division, deprecated_method diff --git a/server/www/packages/packages-linux/x64/psutil/_common.py b/server/www/packages/packages-linux/x64/psutil/_common.py index bee9579..126d9d6 100644 --- a/server/www/packages/packages-linux/x64/psutil/_common.py +++ b/server/www/packages/packages-linux/x64/psutil/_common.py @@ -64,6 +64,7 @@ __all__ = [ 'conn_tmap', 'deprecated_method', 'isfile_strict', 'memoize', 'parse_environ_block', 'path_exists_strict', 'usage_percent', 'supports_ipv6', 'sockfam_to_enum', 'socktype_to_enum', "wrap_numbers", + 'bytes2human', 'conn_to_ntuple', ] @@ -256,8 +257,6 @@ if AF_UNIX is not None: "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]), }) -del AF_INET, AF_UNIX, SOCK_STREAM, SOCK_DGRAM - # =================================================================== # --- utils @@ -267,12 +266,12 @@ del AF_INET, AF_UNIX, SOCK_STREAM, SOCK_DGRAM def usage_percent(used, total, round_=None): """Calculate percentage usage of 'used' against 'total'.""" try: - ret = (used / total) * 100 + ret = (float(used) / total) * 100 except ZeroDivisionError: - ret = 0.0 if isinstance(used, float) or isinstance(total, float) else 0 - if round_ is not None: - return round(ret, round_) + return 0.0 else: + if round_ is not None: + ret = round(ret, round_) return ret @@ -327,7 +326,7 @@ def memoize_when_activated(fun): 1 >>> >>> # activated - >>> foo.cache_activate() + >>> foo.cache_activate(self) >>> foo() 1 >>> foo() @@ -336,26 +335,30 @@ def memoize_when_activated(fun): """ @functools.wraps(fun) def wrapper(self): - if not wrapper.cache_activated: + try: + # case 1: we previously entered oneshot() ctx + ret = self._cache[fun] + except AttributeError: + # case 2: we never entered oneshot() ctx return fun(self) - else: - try: - ret = cache[fun] - except KeyError: - ret = cache[fun] = fun(self) - return ret + except KeyError: + # case 3: we entered oneshot() ctx but there's no cache + # for this entry yet + ret = self._cache[fun] = fun(self) + return ret - def cache_activate(): - """Activate cache.""" - wrapper.cache_activated = True + def cache_activate(proc): + """Activate cache. Expects a Process instance. 
Cache will be + stored as a "_cache" instance attribute.""" + proc._cache = {} - def cache_deactivate(): + def cache_deactivate(proc): """Deactivate and clear cache.""" - wrapper.cache_activated = False - cache.clear() + try: + del proc._cache + except AttributeError: + pass - cache = {} - wrapper.cache_activated = False wrapper.cache_activate = cache_activate wrapper.cache_deactivate = cache_deactivate return wrapper @@ -442,7 +445,7 @@ def sockfam_to_enum(num): else: # pragma: no cover try: return socket.AddressFamily(num) - except (ValueError, AttributeError): + except ValueError: return num @@ -454,11 +457,30 @@ def socktype_to_enum(num): return num else: # pragma: no cover try: - return socket.AddressType(num) - except (ValueError, AttributeError): + return socket.SocketKind(num) + except ValueError: return num +def conn_to_ntuple(fd, fam, type_, laddr, raddr, status, status_map, pid=None): + """Convert a raw connection tuple to a proper ntuple.""" + if fam in (socket.AF_INET, AF_INET6): + if laddr: + laddr = addr(*laddr) + if raddr: + raddr = addr(*raddr) + if type_ == socket.SOCK_STREAM and fam in (AF_INET, AF_INET6): + status = status_map.get(status, CONN_NONE) + else: + status = CONN_NONE # ignore whatever C returned to us + fam = sockfam_to_enum(fam) + type_ = socktype_to_enum(type_) + if pid is None: + return pconn(fd, fam, type_, laddr, raddr, status) + else: + return sconn(fd, fam, type_, laddr, raddr, status, pid) + + def deprecated_method(replacement): """A decorator which can be used to mark a method as deprecated 'replcement' is the method name which will be called instead. @@ -471,7 +493,7 @@ def deprecated_method(replacement): @functools.wraps(fun) def inner(self, *args, **kwargs): - warnings.warn(msg, category=FutureWarning, stacklevel=2) + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) return getattr(self, replacement)(*args, **kwargs) return inner return outer @@ -594,3 +616,36 @@ def open_text(fname, **kwargs): kwargs.setdefault('encoding', ENCODING) kwargs.setdefault('errors', ENCODING_ERRS) return open(fname, "rt", **kwargs) + + +def bytes2human(n, format="%(value).1f%(symbol)s"): + """Used by various scripts. 
See: + http://goo.gl/zeJZl + + >>> bytes2human(10000) + '9.8K' + >>> bytes2human(100001221) + '95.4M' + """ + symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') + prefix = {} + for i, s in enumerate(symbols[1:]): + prefix[s] = 1 << (i + 1) * 10 + for symbol in reversed(symbols[1:]): + if n >= prefix[symbol]: + value = float(n) / prefix[symbol] + return format % locals() + return format % dict(symbol=symbols[0], value=n) + + +def get_procfs_path(): + """Return updated psutil.PROCFS_PATH constant.""" + return sys.modules['psutil'].PROCFS_PATH + + +if PY3: + def decode(s): + return s.decode(encoding=ENCODING, errors=ENCODING_ERRS) +else: + def decode(s): + return s diff --git a/server/www/packages/packages-linux/x64/psutil/_compat.py b/server/www/packages/packages-linux/x64/psutil/_compat.py index 08aefe4..07ab909 100644 --- a/server/www/packages/packages-linux/x64/psutil/_compat.py +++ b/server/www/packages/packages-linux/x64/psutil/_compat.py @@ -5,12 +5,15 @@ """Module which provides compatibility with older Python versions.""" import collections +import errno import functools import os import sys __all__ = ["PY3", "long", "xrange", "unicode", "basestring", "u", "b", - "lru_cache", "which"] + "lru_cache", "which", "get_terminal_size", + "FileNotFoundError", "PermissionError", "ProcessLookupError", + "InterruptedError", "ChildProcessError", "FileExistsError"] PY3 = sys.version_info[0] == 3 @@ -38,6 +41,73 @@ else: return s +# --- exceptions + + +if PY3: + FileNotFoundError = FileNotFoundError # NOQA + PermissionError = PermissionError # NOQA + ProcessLookupError = ProcessLookupError # NOQA + InterruptedError = InterruptedError # NOQA + ChildProcessError = ChildProcessError # NOQA + FileExistsError = FileExistsError # NOQA +else: + # https://github.com/PythonCharmers/python-future/blob/exceptions/ + # src/future/types/exceptions/pep3151.py + + def instance_checking_exception(base_exception=Exception): + def wrapped(instance_checker): + class TemporaryClass(base_exception): + + def __init__(self, *args, **kwargs): + if len(args) == 1 and isinstance(args[0], TemporaryClass): + unwrap_me = args[0] + for attr in dir(unwrap_me): + if not attr.startswith('__'): + setattr(self, attr, getattr(unwrap_me, attr)) + else: + super(TemporaryClass, self).__init__(*args, **kwargs) + + class __metaclass__(type): + def __instancecheck__(cls, inst): + return instance_checker(inst) + + def __subclasscheck__(cls, classinfo): + value = sys.exc_info()[1] + return isinstance(value, cls) + + TemporaryClass.__name__ = instance_checker.__name__ + TemporaryClass.__doc__ = instance_checker.__doc__ + return TemporaryClass + + return wrapped + + @instance_checking_exception(EnvironmentError) + def FileNotFoundError(inst): + return getattr(inst, 'errno', object()) == errno.ENOENT + + @instance_checking_exception(EnvironmentError) + def ProcessLookupError(inst): + return getattr(inst, 'errno', object()) == errno.ESRCH + + @instance_checking_exception(EnvironmentError) + def PermissionError(inst): + return getattr(inst, 'errno', object()) in ( + errno.EACCES, errno.EPERM) + + @instance_checking_exception(EnvironmentError) + def InterruptedError(inst): + return getattr(inst, 'errno', object()) == errno.EINTR + + @instance_checking_exception(EnvironmentError) + def ChildProcessError(inst): + return getattr(inst, 'errno', object()) == errno.ECHILD + + @instance_checking_exception(EnvironmentError) + def FileExistsError(inst): + return getattr(inst, 'errno', object()) == errno.EEXIST + + # --- stdlib additions @@ -239,3 
+309,24 @@ except ImportError: if _access_check(name, mode): return name return None + + +# python 3.3 +try: + from shutil import get_terminal_size +except ImportError: + def get_terminal_size(fallback=(80, 24)): + try: + import fcntl + import termios + import struct + except ImportError: + return fallback + else: + try: + # This should work on Linux. + res = struct.unpack( + 'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234')) + return (res[1], res[0]) + except Exception: + return fallback diff --git a/server/www/packages/packages-linux/x64/psutil/_exceptions.py b/server/www/packages/packages-linux/x64/psutil/_exceptions.py deleted file mode 100644 index 6dbbd28..0000000 --- a/server/www/packages/packages-linux/x64/psutil/_exceptions.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - - -class Error(Exception): - """Base exception class. All other psutil exceptions inherit - from this one. - """ - - def __init__(self, msg=""): - Exception.__init__(self, msg) - self.msg = msg - - def __repr__(self): - ret = "psutil.%s %s" % (self.__class__.__name__, self.msg) - return ret.strip() - - __str__ = __repr__ - - -class NoSuchProcess(Error): - """Exception raised when a process with a certain PID doesn't - or no longer exists. - """ - - def __init__(self, pid, name=None, msg=None): - Error.__init__(self, msg) - self.pid = pid - self.name = name - self.msg = msg - if msg is None: - if name: - details = "(pid=%s, name=%s)" % (self.pid, repr(self.name)) - else: - details = "(pid=%s)" % self.pid - self.msg = "process no longer exists " + details - - -class ZombieProcess(NoSuchProcess): - """Exception raised when querying a zombie process. This is - raised on macOS, BSD and Solaris only, and not always: depending - on the query the OS may be able to succeed anyway. - On Linux all zombie processes are querable (hence this is never - raised). Windows doesn't have zombie processes. - """ - - def __init__(self, pid, name=None, ppid=None, msg=None): - NoSuchProcess.__init__(self, msg) - self.pid = pid - self.ppid = ppid - self.name = name - self.msg = msg - if msg is None: - args = ["pid=%s" % pid] - if name: - args.append("name=%s" % repr(self.name)) - if ppid: - args.append("ppid=%s" % self.ppid) - details = "(%s)" % ", ".join(args) - self.msg = "process still exists but it's a zombie " + details - - -class AccessDenied(Error): - """Exception raised when permission to perform an action is denied.""" - - def __init__(self, pid=None, name=None, msg=None): - Error.__init__(self, msg) - self.pid = pid - self.name = name - self.msg = msg - if msg is None: - if (pid is not None) and (name is not None): - self.msg = "(pid=%s, name=%s)" % (pid, repr(name)) - elif (pid is not None): - self.msg = "(pid=%s)" % self.pid - else: - self.msg = "" - - -class TimeoutExpired(Error): - """Raised on Process.wait(timeout) if timeout expires and process - is still alive. 
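# Side note on the get_terminal_size() fallback above: TIOCGWINSZ fills a
# struct winsize whose first two shorts are (rows, cols), hence the
# (res[1], res[0]) swap to match shutil's (columns, lines) order. A minimal
# POSIX-only check, guarded so it degrades cleanly off a tty:
import struct

try:
    import fcntl
    import termios
    raw = fcntl.ioctl(1, termios.TIOCGWINSZ, b'\x00' * 4)
    rows, cols = struct.unpack('hh', raw)
    print("terminal is %d columns x %d lines" % (cols, rows))
except Exception:
    print("no tty (or no fcntl/termios); use the 80x24 fallback")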
- """ - - def __init__(self, seconds, pid=None, name=None): - Error.__init__(self, "timeout after %s seconds" % seconds) - self.seconds = seconds - self.pid = pid - self.name = name - if (pid is not None) and (name is not None): - self.msg += " (pid=%s, name=%s)" % (pid, repr(name)) - elif (pid is not None): - self.msg += " (pid=%s)" % self.pid diff --git a/server/www/packages/packages-linux/x64/psutil/_psaix.py b/server/www/packages/packages-linux/x64/psutil/_psaix.py index 7ba212d..79e3be1 100644 --- a/server/www/packages/packages-linux/x64/psutil/_psaix.py +++ b/server/www/packages/packages-linux/x64/psutil/_psaix.py @@ -6,31 +6,29 @@ """AIX platform implementation.""" -import errno +import functools import glob import os import re import subprocess import sys from collections import namedtuple -from socket import AF_INET from . import _common from . import _psposix from . import _psutil_aix as cext from . import _psutil_posix as cext_posix -from ._common import AF_INET6 +from ._common import conn_to_ntuple +from ._common import get_procfs_path from ._common import memoize_when_activated from ._common import NIC_DUPLEX_FULL from ._common import NIC_DUPLEX_HALF from ._common import NIC_DUPLEX_UNKNOWN -from ._common import sockfam_to_enum -from ._common import socktype_to_enum from ._common import usage_percent +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._compat import PY3 -from ._exceptions import AccessDenied -from ._exceptions import NoSuchProcess -from ._exceptions import ZombieProcess __extra__all__ = ["PROCFS_PATH"] @@ -42,6 +40,8 @@ __extra__all__ = ["PROCFS_PATH"] HAS_THREADS = hasattr(cext, "proc_threads") +HAS_NET_IO_COUNTERS = hasattr(cext, "net_io_counters") +HAS_PROC_IO_COUNTERS = hasattr(cext, "proc_io_counters") PAGE_SIZE = os.sysconf('SC_PAGE_SIZE') AF_LINK = cext_posix.AF_LINK @@ -79,6 +79,13 @@ proc_info_map = dict( status=6, ttynr=7) +# These objects get set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +NoSuchProcess = None +ZombieProcess = None +AccessDenied = None +TimeoutExpired = None + # ===================================================================== # --- named tuples @@ -93,21 +100,6 @@ pfullmem = pmem scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) # psutil.virtual_memory() svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) -# psutil.Process.memory_maps(grouped=True) -pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked']) -# psutil.Process.memory_maps(grouped=False) -pmmap_ext = namedtuple( - 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) - - -# ===================================================================== -# --- utils -# ===================================================================== - - -def get_procfs_path(): - """Return updated psutil.PROCFS_PATH constant.""" - return sys.modules['psutil'].PROCFS_PATH # ===================================================================== @@ -212,7 +204,9 @@ def disk_partitions(all=False): net_if_addrs = cext_posix.net_if_addrs -net_io_counters = cext.net_io_counters + +if HAS_NET_IO_COUNTERS: + net_io_counters = cext.net_io_counters def net_connections(kind, _pid=-1): @@ -225,27 +219,17 @@ def net_connections(kind, _pid=-1): % (kind, ', '.join([repr(x) for x in cmap]))) families, types = _common.conn_tmap[kind] rawlist = cext.net_connections(_pid) - ret = set() + ret = [] for 
item in rawlist: fd, fam, type_, laddr, raddr, status, pid = item if fam not in families: continue if type_ not in types: continue - status = TCP_STATUSES[status] - if fam in (AF_INET, AF_INET6): - if laddr: - laddr = _common.addr(*laddr) - if raddr: - raddr = _common.addr(*raddr) - fam = sockfam_to_enum(fam) - type_ = socktype_to_enum(type_) - if _pid == -1: - nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid) - else: - nt = _common.pconn(fd, fam, type_, laddr, raddr, status) - ret.add(nt) - return list(ret) + nt = conn_to_ntuple(fd, fam, type_, laddr, raddr, status, + TCP_STATUSES, pid=pid if _pid == -1 else None) + ret.append(nt) + return ret def net_if_stats(): @@ -328,33 +312,27 @@ def wrap_exceptions(fun): """Call callable into a try/except clause and translate ENOENT, EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. """ - + @functools.wraps(fun) def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) - except EnvironmentError as err: - # support for private module import - if (NoSuchProcess is None or AccessDenied is None or - ZombieProcess is None): - raise + except (FileNotFoundError, ProcessLookupError): # ENOENT (no such file or directory) gets raised on open(). # ESRCH (no such process) can get raised on read() if # process is gone in meantime. - if err.errno in (errno.ENOENT, errno.ESRCH): - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - else: - raise ZombieProcess(self.pid, self._name, self._ppid) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) - raise + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + except PermissionError: + raise AccessDenied(self.pid, self._name) return wrapper class Process(object): """Wrapper class around underlying C implementation.""" - __slots__ = ["pid", "_name", "_ppid", "_procfs_path"] + __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] def __init__(self, pid): self.pid = pid @@ -363,23 +341,19 @@ class Process(object): self._procfs_path = get_procfs_path() def oneshot_enter(self): - self._proc_name_and_args.cache_activate() - self._proc_basic_info.cache_activate() - self._proc_cred.cache_activate() + self._proc_basic_info.cache_activate(self) + self._proc_cred.cache_activate(self) def oneshot_exit(self): - self._proc_name_and_args.cache_deactivate() - self._proc_basic_info.cache_deactivate() - self._proc_cred.cache_deactivate() - - @memoize_when_activated - def _proc_name_and_args(self): - return cext.proc_name_and_args(self.pid, self._procfs_path) + self._proc_basic_info.cache_deactivate(self) + self._proc_cred.cache_deactivate(self) + @wrap_exceptions @memoize_when_activated def _proc_basic_info(self): return cext.proc_basic_info(self.pid, self._procfs_path) + @wrap_exceptions @memoize_when_activated def _proc_cred(self): return cext.proc_cred(self.pid, self._procfs_path) @@ -388,22 +362,25 @@ class Process(object): def name(self): if self.pid == 0: return "swapper" - # note: this is limited to 15 characters - return self._proc_name_and_args()[0].rstrip("\x00") + # note: max 16 characters + return cext.proc_name(self.pid, self._procfs_path).rstrip("\x00") @wrap_exceptions def exe(self): # there is no way to get executable path in AIX other than to guess, # and guessing is more complex than what's in the wrapping class - exe = self.cmdline()[0] + cmdline = self.cmdline() + if not cmdline: + return '' + exe = cmdline[0] if 
os.path.sep in exe: # relative or absolute path if not os.path.isabs(exe): # if cwd has changed, we're out of luck - this may be wrong! exe = os.path.abspath(os.path.join(self.cwd(), exe)) if (os.path.isabs(exe) and - os.path.isfile(exe) and - os.access(exe, os.X_OK)): + os.path.isfile(exe) and + os.access(exe, os.X_OK)): return exe # not found, move to search in PATH using basename only exe = os.path.basename(exe) @@ -411,13 +388,17 @@ class Process(object): for path in os.environ["PATH"].split(":"): possible_exe = os.path.abspath(os.path.join(path, exe)) if (os.path.isfile(possible_exe) and - os.access(possible_exe, os.X_OK)): + os.access(possible_exe, os.X_OK)): return possible_exe return '' @wrap_exceptions def cmdline(self): - return self._proc_name_and_args()[1].split(' ') + return cext.proc_args(self.pid) + + @wrap_exceptions + def environ(self): + return cext.proc_environ(self.pid) @wrap_exceptions def create_time(self): @@ -503,11 +484,9 @@ class Process(object): try: result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid)) return result.rstrip('/') - except OSError as err: - if err.errno == errno.ENOENT: - os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD - return None - raise + except FileNotFoundError: + os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD + return None @wrap_exceptions def memory_info(self): @@ -561,14 +540,15 @@ class Process(object): def wait(self, timeout=None): return _psposix.wait_pid(self.pid, timeout, self._name) - @wrap_exceptions - def io_counters(self): - try: - rc, wc, rb, wb = cext.proc_io_counters(self.pid) - except OSError: - # if process is terminated, proc_io_counters returns OSError - # instead of NSP - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - raise - return _common.pio(rc, wc, rb, wb) + if HAS_PROC_IO_COUNTERS: + @wrap_exceptions + def io_counters(self): + try: + rc, wc, rb, wb = cext.proc_io_counters(self.pid) + except OSError: + # if process is terminated, proc_io_counters returns OSError + # instead of NSP + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + raise + return _common.pio(rc, wc, rb, wb) diff --git a/server/www/packages/packages-linux/x64/psutil/_psbsd.py b/server/www/packages/packages-linux/x64/psutil/_psbsd.py index c2896cb..2f41dc0 100644 --- a/server/www/packages/packages-linux/x64/psutil/_psbsd.py +++ b/server/www/packages/packages-linux/x64/psutil/_psbsd.py @@ -10,26 +10,25 @@ import functools import os import xml.etree.ElementTree as ET from collections import namedtuple -from socket import AF_INET +from collections import defaultdict from . import _common from . import _psposix from . import _psutil_bsd as cext from . 
import _psutil_posix as cext_posix -from ._common import AF_INET6 from ._common import conn_tmap +from ._common import conn_to_ntuple from ._common import FREEBSD from ._common import memoize from ._common import memoize_when_activated from ._common import NETBSD from ._common import OPENBSD -from ._common import sockfam_to_enum -from ._common import socktype_to_enum from ._common import usage_percent +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._compat import which -from ._exceptions import AccessDenied -from ._exceptions import NoSuchProcess -from ._exceptions import ZombieProcess + __extra__all__ = [] @@ -136,6 +135,13 @@ kinfo_proc_map = dict( name=24, ) +# These objects get set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +NoSuchProcess = None +ZombieProcess = None +AccessDenied = None +TimeoutExpired = None + # ===================================================================== # --- named tuples @@ -394,22 +400,8 @@ def net_connections(kind): fd, fam, type, laddr, raddr, status, pid = item # TODO: apply filter at C level if fam in families and type in types: - try: - status = TCP_STATUSES[status] - except KeyError: - # XXX: Not sure why this happens. I saw this occurring - # with IPv6 sockets opened by 'vim'. Those sockets - # have a very short lifetime so maybe the kernel - # can't initialize their status? - status = TCP_STATUSES[cext.PSUTIL_CONN_NONE] - if fam in (AF_INET, AF_INET6): - if laddr: - laddr = _common.addr(*laddr) - if raddr: - raddr = _common.addr(*raddr) - fam = sockfam_to_enum(fam) - type = socktype_to_enum(type) - nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid) + nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, + TCP_STATUSES, pid) ret.add(nt) return list(ret) @@ -437,6 +429,47 @@ if FREEBSD: secsleft = minsleft * 60 return _common.sbattery(percent, secsleft, power_plugged) + def sensors_temperatures(): + "Return CPU cores temperatures if available, else an empty dict." + ret = defaultdict(list) + num_cpus = cpu_count_logical() + for cpu in range(num_cpus): + try: + current, high = cext.sensors_cpu_temperature(cpu) + if high <= 0: + high = None + name = "Core %s" % cpu + ret["coretemp"].append( + _common.shwtemp(name, current, high, high)) + except NotImplementedError: + pass + + return ret + + def cpu_freq(): + """Return frequency metrics for CPUs. As of Dec 2018 only + CPU 0 appears to be supported by FreeBSD and all other cores + match the frequency of CPU 0. + """ + ret = [] + num_cpus = cpu_count_logical() + for cpu in range(num_cpus): + try: + current, available_freq = cext.cpu_frequency(cpu) + except NotImplementedError: + continue + if available_freq: + try: + min_freq = int(available_freq.split(" ")[-1].split("/")[0]) + except(IndexError, ValueError): + min_freq = None + try: + max_freq = int(available_freq.split(" ")[0].split("/")[0]) + except(IndexError, ValueError): + max_freq = None + ret.append(_common.scpufreq(current, min_freq, max_freq)) + return ret + # ===================================================================== # --- other system functions @@ -505,6 +538,14 @@ else: pid_exists = _psposix.pid_exists +def is_zombie(pid): + try: + st = cext.proc_oneshot_info(pid)[kinfo_proc_map['status']] + return st == cext.SZOMB + except Exception: + return False + + def wrap_exceptions(fun): """Decorator which translates bare OSError exceptions into NoSuchProcess and AccessDenied. 
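# Parsing sketch (not part of the patch) for the FreeBSD cpu_freq() above.
# The "available_freq" string is assumed to look like the dev.cpu.N.freq_levels
# sysctl: descending "MHz/milliwatts" pairs, so max comes from the first pair
# and min from the last. The sample value here is made up.
levels = "2700/25000 2400/22000 1800/15000 150/1000"

max_freq = int(levels.split(" ")[0].split("/")[0])    # -> 2700
min_freq = int(levels.split(" ")[-1].split("/")[0])   # -> 150
print("min=%d MHz max=%d MHz" % (min_freq, max_freq))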
@@ -513,19 +554,19 @@ def wrap_exceptions(fun): def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) - except OSError as err: + except ProcessLookupError: + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + except PermissionError: + raise AccessDenied(self.pid, self._name) + except OSError: if self.pid == 0: if 0 in pids(): raise AccessDenied(self.pid, self._name) else: raise - if err.errno == errno.ESRCH: - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - else: - raise ZombieProcess(self.pid, self._name, self._ppid) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) raise return wrapper @@ -535,30 +576,35 @@ def wrap_exceptions_procfs(inst): """Same as above, for routines relying on reading /proc fs.""" try: yield - except EnvironmentError as err: + except (ProcessLookupError, FileNotFoundError): # ENOENT (no such file or directory) gets raised on open(). # ESRCH (no such process) can get raised on read() if # process is gone in meantime. - if err.errno in (errno.ENOENT, errno.ESRCH): - if not pid_exists(inst.pid): - raise NoSuchProcess(inst.pid, inst._name) - else: - raise ZombieProcess(inst.pid, inst._name, inst._ppid) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(inst.pid, inst._name) - raise + if not pid_exists(inst.pid): + raise NoSuchProcess(inst.pid, inst._name) + else: + raise ZombieProcess(inst.pid, inst._name, inst._ppid) + except PermissionError: + raise AccessDenied(inst.pid, inst._name) class Process(object): """Wrapper class around underlying C implementation.""" - __slots__ = ["pid", "_name", "_ppid"] + __slots__ = ["pid", "_name", "_ppid", "_cache"] def __init__(self, pid): self.pid = pid self._name = None self._ppid = None + def _assert_alive(self): + """Raise NSP if the process disappeared on us.""" + # For those C function who do not raise NSP, possibly returning + # incorrect or incomplete result. + cext.proc_name(self.pid) + + @wrap_exceptions @memoize_when_activated def oneshot(self): """Retrieves multiple process info in one shot as a raw tuple.""" @@ -567,10 +613,10 @@ class Process(object): return ret def oneshot_enter(self): - self.oneshot.cache_activate() + self.oneshot.cache_activate(self) def oneshot_exit(self): - self.oneshot.cache_deactivate() + self.oneshot.cache_deactivate(self) @wrap_exceptions def name(self): @@ -612,10 +658,14 @@ class Process(object): return cext.proc_cmdline(self.pid) except OSError as err: if err.errno == errno.EINVAL: - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - else: + if is_zombie(self.pid): raise ZombieProcess(self.pid, self._name, self._ppid) + elif not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name, self._ppid) + else: + # XXX: this happens with unicode tests. It means the C + # routine is unable to decode invalid unicode chars. + return [] else: raise else: @@ -705,10 +755,7 @@ class Process(object): ntuple = _common.pthread(thread_id, utime, stime) retlist.append(ntuple) if OPENBSD: - # On OpenBSD the underlying C function does not raise NSP - # in case the process is gone (and the returned list may - # incomplete). 
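# Distilled version (not part of the patch) of the translation pattern used
# by wrap_exceptions() / wrap_exceptions_procfs() above: let the OSError
# subclasses escape the raw call, then re-raise them as library-level errors.
# `translate_errors`, `MyLookupError` and `read_blob` are made-up names.
import functools

class MyLookupError(Exception):
    pass

def translate_errors(fun):
    @functools.wraps(fun)            # preserve fun's __name__ / __doc__
    def wrapper(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except (FileNotFoundError, ProcessLookupError):
            raise MyLookupError("target disappeared")
    return wrapper

@translate_errors
def read_blob(path):
    with open(path, "rb") as f:
        return f.read()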
- self.name() # raise NSP if the process disappeared on us + self._assert_alive() return retlist @wrap_exceptions @@ -719,29 +766,16 @@ class Process(object): if NETBSD: families, types = conn_tmap[kind] - ret = set() + ret = [] rawlist = cext.net_connections(self.pid) for item in rawlist: fd, fam, type, laddr, raddr, status, pid = item assert pid == self.pid if fam in families and type in types: - try: - status = TCP_STATUSES[status] - except KeyError: - status = TCP_STATUSES[cext.PSUTIL_CONN_NONE] - if fam in (AF_INET, AF_INET6): - if laddr: - laddr = _common.addr(*laddr) - if raddr: - raddr = _common.addr(*raddr) - fam = sockfam_to_enum(fam) - type = socktype_to_enum(type) - nt = _common.pconn(fd, fam, type, laddr, raddr, status) - ret.add(nt) - # On NetBSD the underlying C function does not raise NSP - # in case the process is gone (and the returned list may - # incomplete). - self.name() # raise NSP if the process disappeared on us + nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, + TCP_STATUSES) + ret.append(nt) + self._assert_alive() return list(ret) families, types = conn_tmap[kind] @@ -749,21 +783,13 @@ class Process(object): ret = [] for item in rawlist: fd, fam, type, laddr, raddr, status = item - if fam in (AF_INET, AF_INET6): - if laddr: - laddr = _common.addr(*laddr) - if raddr: - raddr = _common.addr(*raddr) - fam = sockfam_to_enum(fam) - type = socktype_to_enum(type) - status = TCP_STATUSES[status] - nt = _common.pconn(fd, fam, type, laddr, raddr, status) + nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, + TCP_STATUSES) ret.append(nt) + if OPENBSD: - # On OpenBSD the underlying C function does not raise NSP - # in case the process is gone (and the returned list may - # incomplete). - self.name() # raise NSP if the process disappeared on us + self._assert_alive() + return ret @wrap_exceptions @@ -800,10 +826,7 @@ class Process(object): # it into None if OPENBSD and self.pid == 0: return None # ...else it would raise EINVAL - elif NETBSD: - with wrap_exceptions_procfs(self): - return os.readlink("/proc/%s/cwd" % self.pid) - elif HAS_PROC_OPEN_FILES: + elif NETBSD or HAS_PROC_OPEN_FILES: # FreeBSD < 8 does not support functions based on # kinfo_getfile() and kinfo_getvmmap() return cext.proc_cwd(self.pid) or None @@ -839,9 +862,7 @@ class Process(object): """Return the number of file descriptors opened by this process.""" ret = cext.proc_num_fds(self.pid) if NETBSD: - # On NetBSD the underlying C function does not raise NSP - # in case the process is gone. - self.name() # raise NSP if the process disappeared on us + self._assert_alive() return ret else: num_fds = _not_implemented diff --git a/server/www/packages/packages-linux/x64/psutil/_pslinux.py b/server/www/packages/packages-linux/x64/psutil/_pslinux.py index b775d39..d29ccc8 100644 --- a/server/www/packages/packages-linux/x64/psutil/_pslinux.py +++ b/server/www/packages/packages-linux/x64/psutil/_pslinux.py @@ -25,8 +25,8 @@ from . import _common from . import _psposix from . import _psutil_linux as cext from . 
import _psutil_posix as cext_posix -from ._common import ENCODING -from ._common import ENCODING_ERRS +from ._common import decode +from ._common import get_procfs_path from ._common import isfile_strict from ._common import memoize from ._common import memoize_when_activated @@ -41,11 +41,10 @@ from ._common import supports_ipv6 from ._common import usage_percent from ._compat import b from ._compat import basestring -from ._compat import long +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._compat import PY3 -from ._exceptions import AccessDenied -from ._exceptions import NoSuchProcess -from ._exceptions import ZombieProcess if sys.version_info >= (3, 4): import enum @@ -74,6 +73,7 @@ POWER_SUPPLY_PATH = "/sys/class/power_supply" HAS_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid()) HAS_PRLIMIT = hasattr(cext, "linux_prlimit") HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get") +HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get") _DEFAULT = object() # RLIMIT_* constants, not guaranteed to be present on all kernels @@ -161,6 +161,13 @@ TCP_STATUSES = { "0B": _common.CONN_CLOSING } +# These objects get set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +NoSuchProcess = None +ZombieProcess = None +AccessDenied = None +TimeoutExpired = None + # ===================================================================== # --- named tuples @@ -197,6 +204,10 @@ pmmap_ext = namedtuple( pio = namedtuple('pio', ['read_count', 'write_count', 'read_bytes', 'write_bytes', 'read_chars', 'write_chars']) +# psutil.Process.cpu_times() +pcputimes = namedtuple('pcputimes', + ['user', 'system', 'children_user', 'children_system', + 'iowait']) # ===================================================================== @@ -204,19 +215,6 @@ pio = namedtuple('pio', ['read_count', 'write_count', # ===================================================================== -if PY3: - def decode(s): - return s.decode(encoding=ENCODING, errors=ENCODING_ERRS) -else: - def decode(s): - return s - - -def get_procfs_path(): - """Return updated psutil.PROCFS_PATH constant.""" - return sys.modules['psutil'].PROCFS_PATH - - def readlink(path): """Wrapper around os.readlink().""" assert isinstance(path, basestring), path @@ -623,6 +621,17 @@ def cpu_count_logical(): def cpu_count_physical(): """Return the number of physical cores in the system.""" + # Method #1 + core_ids = set() + for path in glob.glob( + "/sys/devices/system/cpu/cpu[0-9]*/topology/core_id"): + with open_binary(path) as f: + core_ids.add(int(f.read())) + result = len(core_ids) + if result != 0: + return result + + # Method #2 mapping = {} current_info = {} with open_binary('%s/cpuinfo' % get_procfs_path()) as f: @@ -642,8 +651,8 @@ def cpu_count_physical(): key, value = line.split(b'\t:', 1) current_info[key] = int(value) - # mimic os.cpu_count() - return sum(mapping.values()) or None + result = sum(mapping.values()) + return result or None # mimic os.cpu_count() def cpu_stats(): @@ -667,30 +676,26 @@ def cpu_stats(): ctx_switches, interrupts, soft_interrupts, syscalls) -if os.path.exists("/sys/devices/system/cpu/cpufreq") or \ +if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or \ os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"): def cpu_freq(): """Return frequency metrics for all CPUs. Contrarily to other OSes, Linux updates these values in real-time. 
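# Standalone sketch (not part of the patch) of "Method #1" above. core_id
# alone can repeat across sockets, so this stricter variant keys on
# (physical_package_id, core_id), assuming the usual Linux sysfs topology
# layout. Returns None off Linux, mimicking os.cpu_count().
import glob

def physical_cores():
    cores = set()
    for topo in glob.glob("/sys/devices/system/cpu/cpu[0-9]*/topology"):
        try:
            with open(topo + "/physical_package_id") as f:
                pkg = int(f.read())
            with open(topo + "/core_id") as f:
                core = int(f.read())
        except (IOError, OSError):
            continue
        cores.add((pkg, core))
    return len(cores) or None

print(physical_cores())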
""" - # scaling_* files seem preferable to cpuinfo_*, see: - # http://unix.stackexchange.com/a/87537/168884 - ret = [] - ls = glob.glob("/sys/devices/system/cpu/cpufreq/policy*") - if ls: - # Sort the list so that '10' comes after '2'. This should - # ensure the CPU order is consistent with other CPU functions - # having a 'percpu' argument and returning results for multiple - # CPUs (cpu_times(), cpu_percent(), cpu_times_percent()). - ls.sort(key=lambda x: int(os.path.basename(x)[6:])) - else: - # https://github.com/giampaolo/psutil/issues/981 - ls = glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq") - ls.sort(key=lambda x: int(re.search('[0-9]+', x).group(0))) + def get_path(num): + for p in ("/sys/devices/system/cpu/cpufreq/policy%s" % num, + "/sys/devices/system/cpu/cpu%s/cpufreq" % num): + if os.path.exists(p): + return p - pjoin = os.path.join - for path in ls: + ret = [] + for n in range(cpu_count_logical()): + path = get_path(n) + if not path: + continue + + pjoin = os.path.join curr = cat(pjoin(path, "scaling_cur_freq"), fallback=None) if curr is None: # Likely an old RedHat, see: @@ -715,9 +720,15 @@ elif os.path.exists("/proc/cpuinfo"): for line in f: if line.lower().startswith(b'cpu mhz'): key, value = line.split(b'\t:', 1) - ret.append(_common.scpufreq(float(value), None, None)) + ret.append(_common.scpufreq(float(value), 0., 0.)) return ret +else: + def cpu_freq(): + """Dummy implementation when none of the above files are present. + """ + return [] + # ===================================================================== # --- network @@ -769,17 +780,16 @@ class Connections: for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)): try: inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd)) - except OSError as err: + except (FileNotFoundError, ProcessLookupError): # ENOENT == file which is gone in the meantime; # os.stat('/proc/%s' % self.pid) will be done later # to force NSP (if it's the case) - if err.errno in (errno.ENOENT, errno.ESRCH): - continue - elif err.errno == errno.EINVAL: + continue + except OSError as err: + if err.errno == errno.EINVAL: # not a link continue - else: - raise + raise else: if inode.startswith('socket:['): # the process is using a socket @@ -792,7 +802,7 @@ class Connections: for pid in pids(): try: inodes.update(self.get_proc_inodes(pid)) - except OSError as err: + except (FileNotFoundError, ProcessLookupError, PermissionError): # os.listdir() is gonna raise a lot of access denied # exceptions in case of unprivileged user; that's fine # as we'll just end up returning a connection with PID @@ -800,9 +810,7 @@ class Connections: # Both netstat -an and lsof does the same so it's # unlikely we can do any better. # ENOENT just means a PID disappeared on us. 
- if err.errno not in ( - errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES): - raise + continue return inodes @staticmethod @@ -930,7 +938,7 @@ class Connections: path = tokens[-1] else: path = "" - type_ = int(type_) + type_ = _common.socktype_to_enum(int(type_)) # XXX: determining the remote endpoint of a # UNIX socket on Linux is not possible, see: # https://serverfault.com/questions/252723/ @@ -1155,13 +1163,13 @@ def disk_partitions(all=False): fstypes.add("zfs") # See: https://github.com/giampaolo/psutil/issues/1307 - if procfs_path == "/proc": - mtab_path = os.path.realpath("/etc/mtab") + if procfs_path == "/proc" and os.path.isfile('/etc/mtab'): + mounts_path = os.path.realpath("/etc/mtab") else: - mtab_path = os.path.realpath("%s/self/mounts" % procfs_path) + mounts_path = os.path.realpath("%s/self/mounts" % procfs_path) retlist = [] - partitions = cext.disk_partitions(mtab_path) + partitions = cext.disk_partitions(mounts_path) for partition in partitions: device, mountpoint, fstype, opts = partition if device == 'none': @@ -1487,11 +1495,10 @@ def ppid_map(): try: with open_binary("%s/%s/stat" % (procfs_path, pid)) as f: data = f.read() - except EnvironmentError as err: + except (FileNotFoundError, ProcessLookupError): # Note: we should be able to access /stat for all processes # aka it's unlikely we'll bump into EPERM, which is good. - if err.errno not in (errno.ENOENT, errno.ESRCH): - raise + pass else: rpar = data.rfind(b')') dset = data[rpar + 2:].split() @@ -1508,16 +1515,12 @@ def wrap_exceptions(fun): def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) - except EnvironmentError as err: - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) - # ESRCH (no such process) can be raised on read() if - # process is gone in the meantime. - if err.errno == errno.ESRCH: - raise NoSuchProcess(self.pid, self._name) - # ENOENT (no such file or directory) can be raised on open(). - if err.errno == errno.ENOENT and not os.path.exists("%s/%s" % ( - self._procfs_path, self.pid)): + except PermissionError: + raise AccessDenied(self.pid, self._name) + except ProcessLookupError: + raise NoSuchProcess(self.pid, self._name) + except FileNotFoundError: + if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)): raise NoSuchProcess(self.pid, self._name) # Note: zombies will keep existing under /proc until they're # gone so there's no way to distinguish them in here. @@ -1528,7 +1531,7 @@ def wrap_exceptions(fun): class Process(object): """Linux process implementation.""" - __slots__ = ["pid", "_name", "_ppid", "_procfs_path"] + __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] def __init__(self, pid): self.pid = pid @@ -1536,13 +1539,20 @@ class Process(object): self._ppid = None self._procfs_path = get_procfs_path() + def _assert_alive(self): + """Raise NSP if the process disappeared on us.""" + # For those C function who do not raise NSP, possibly returning + # incorrect or incomplete result. + os.stat('%s/%s' % (self._procfs_path, self.pid)) + + @wrap_exceptions @memoize_when_activated def _parse_stat_file(self): - """Parse /proc/{pid}/stat file. Return a list of fields where - process name is in position 0. + """Parse /proc/{pid}/stat file and return a dict with various + process info. Using "man proc" as a reference: where "man proc" refers to - position N, always substract 2 (e.g starttime pos 22 in - 'man proc' == pos 20 in the list returned here). 
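# Worked example (not part of the patch) for the _parse_stat_file() rewrite
# in this hunk. The process name in /proc/<pid>/stat is parenthesized and may
# itself contain spaces or ')' characters, which is why the parser anchors on
# the LAST ')' before splitting the remaining fields. The sample line is
# fabricated and truncated to the first few of the 50+ real fields:
data = b"28232 (evil) name) R 1 28232 28232 34821 0 4194304"

rpar = data.rfind(b')')
name = data[data.find(b'(') + 1:rpar]   # -> b'evil) name'
fields = data[rpar + 2:].split()        # -> [b'R', b'1', ...]
print(name, fields[0], fields[1])       # name, status, ppid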
+ position N always substract 3 (e.g ppid position 4 in + 'man proc' == position 1 in here). The return value is cached in case oneshot() ctx manager is in use. """ @@ -1553,9 +1563,24 @@ class Process(object): # the first occurrence of "(" and the last occurence of ")". rpar = data.rfind(b')') name = data[data.find(b'(') + 1:rpar] - others = data[rpar + 2:].split() - return [name] + others + fields = data[rpar + 2:].split() + ret = {} + ret['name'] = name + ret['status'] = fields[0] + ret['ppid'] = fields[1] + ret['ttynr'] = fields[4] + ret['utime'] = fields[11] + ret['stime'] = fields[12] + ret['children_utime'] = fields[13] + ret['children_stime'] = fields[14] + ret['create_time'] = fields[19] + ret['cpu_num'] = fields[36] + ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks' + + return ret + + @wrap_exceptions @memoize_when_activated def _read_status_file(self): """Read /proc/{pid}/stat file and return its content. @@ -1565,6 +1590,7 @@ class Process(object): with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f: return f.read() + @wrap_exceptions @memoize_when_activated def _read_smaps_file(self): with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid), @@ -1572,18 +1598,18 @@ class Process(object): return f.read().strip() def oneshot_enter(self): - self._parse_stat_file.cache_activate() - self._read_status_file.cache_activate() - self._read_smaps_file.cache_activate() + self._parse_stat_file.cache_activate(self) + self._read_status_file.cache_activate(self) + self._read_smaps_file.cache_activate(self) def oneshot_exit(self): - self._parse_stat_file.cache_deactivate() - self._read_status_file.cache_deactivate() - self._read_smaps_file.cache_deactivate() + self._parse_stat_file.cache_deactivate(self) + self._read_status_file.cache_deactivate(self) + self._read_smaps_file.cache_deactivate(self) @wrap_exceptions def name(self): - name = self._parse_stat_file()[0] + name = self._parse_stat_file()['name'] if PY3: name = decode(name) # XXX - gets changed later and probably needs refactoring @@ -1592,21 +1618,19 @@ class Process(object): def exe(self): try: return readlink("%s/%s/exe" % (self._procfs_path, self.pid)) - except OSError as err: - if err.errno in (errno.ENOENT, errno.ESRCH): - # no such file error; might be raised also if the - # path actually exists for system processes with - # low pids (about 0-20) - if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)): - return "" + except (FileNotFoundError, ProcessLookupError): + # no such file error; might be raised also if the + # path actually exists for system processes with + # low pids (about 0-20) + if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)): + return "" + else: + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) else: - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - else: - raise ZombieProcess(self.pid, self._name, self._ppid) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) - raise + raise ZombieProcess(self.pid, self._name, self._ppid) + except PermissionError: + raise AccessDenied(self.pid, self._name) @wrap_exceptions def cmdline(self): @@ -1625,7 +1649,7 @@ class Process(object): sep = '\x00' if data.endswith('\x00') else ' ' if data.endswith(sep): data = data[:-1] - return [x for x in data.split(sep)] + return data.split(sep) @wrap_exceptions def environ(self): @@ -1635,13 +1659,14 @@ class Process(object): @wrap_exceptions def terminal(self): - tty_nr = 
int(self._parse_stat_file()[5]) + tty_nr = int(self._parse_stat_file()['ttynr']) tmap = _psposix.get_terminal_map() try: return tmap[tty_nr] except KeyError: return None + # May not be available on old kernels. if os.path.exists('/proc/%s/io' % os.getpid()): @wrap_exceptions def io_counters(self): @@ -1673,24 +1698,21 @@ class Process(object): except KeyError as err: raise ValueError("%r field was not found in %s; found fields " "are %r" % (err[0], fname, fields)) - else: - def io_counters(self): - raise NotImplementedError("couldn't find /proc/%s/io (kernel " - "too old?)" % self.pid) @wrap_exceptions def cpu_times(self): values = self._parse_stat_file() - utime = float(values[12]) / CLOCK_TICKS - stime = float(values[13]) / CLOCK_TICKS - children_utime = float(values[14]) / CLOCK_TICKS - children_stime = float(values[15]) / CLOCK_TICKS - return _common.pcputimes(utime, stime, children_utime, children_stime) + utime = float(values['utime']) / CLOCK_TICKS + stime = float(values['stime']) / CLOCK_TICKS + children_utime = float(values['children_utime']) / CLOCK_TICKS + children_stime = float(values['children_stime']) / CLOCK_TICKS + iowait = float(values['blkio_ticks']) / CLOCK_TICKS + return pcputimes(utime, stime, children_utime, children_stime, iowait) @wrap_exceptions def cpu_num(self): """What CPU the process is on.""" - return int(self._parse_stat_file()[37]) + return int(self._parse_stat_file()['cpu_num']) @wrap_exceptions def wait(self, timeout=None): @@ -1698,14 +1720,14 @@ class Process(object): @wrap_exceptions def create_time(self): - values = self._parse_stat_file() + ctime = float(self._parse_stat_file()['create_time']) # According to documentation, starttime is in field 21 and the # unit is jiffies (clock ticks). # We first divide it for clock ticks and then add uptime returning # seconds since the epoch, in UTC. # Also use cached value if available. bt = BOOT_TIME or boot_time() - return (float(values[20]) / CLOCK_TICKS) + bt + return (ctime / CLOCK_TICKS) + bt @wrap_exceptions def memory_info(self): @@ -1767,6 +1789,9 @@ class Process(object): """Return process's mapped memory regions as a list of named tuples. Fields are explained in 'man proc'; here is an updated (Apr 2012) version: http://goo.gl/fmebo + + /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if + CONFIG_MMU kernel configuration option is not enabled. """ def get_blocks(lines, current_block): data = {} @@ -1827,25 +1852,16 @@ class Process(object): )) return ls - else: # pragma: no cover - def memory_maps(self): - raise NotImplementedError( - "/proc/%s/smaps does not exist on kernels < 2.6.14 or " - "if CONFIG_MMU kernel configuration option is not " - "enabled." 
% self.pid) - @wrap_exceptions def cwd(self): try: return readlink("%s/%s/cwd" % (self._procfs_path, self.pid)) - except OSError as err: + except (FileNotFoundError, ProcessLookupError): # https://github.com/giampaolo/psutil/issues/986 - if err.errno in (errno.ENOENT, errno.ESRCH): - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - else: - raise ZombieProcess(self.pid, self._name, self._ppid) - raise + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) @wrap_exceptions def num_ctx_switches(self, @@ -1881,13 +1897,11 @@ class Process(object): try: with open_binary(fname) as f: st = f.read().strip() - except IOError as err: - if err.errno == errno.ENOENT: - # no such file or directory; it means thread - # disappeared on us - hit_enoent = True - continue - raise + except FileNotFoundError: + # no such file or directory; it means thread + # disappeared on us + hit_enoent = True + continue # ignore the first two values ("pid (exe)") st = st[st.find(b')') + 2:] values = st.split(b' ') @@ -1896,8 +1910,7 @@ class Process(object): ntuple = _common.pthread(int(thread_id), utime, stime) retlist.append(ntuple) if hit_enoent: - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (self._procfs_path, self.pid)) + self._assert_alive() return retlist @wrap_exceptions @@ -1913,38 +1926,41 @@ class Process(object): def nice_set(self, value): return cext_posix.setpriority(self.pid, value) - @wrap_exceptions - def cpu_affinity_get(self): - return cext.proc_cpu_affinity_get(self.pid) + # starting from CentOS 6. + if HAS_CPU_AFFINITY: - def _get_eligible_cpus( - self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")): - # See: https://github.com/giampaolo/psutil/issues/956 - data = self._read_status_file() - match = _re.findall(data) - if match: - return list(range(int(match[0][0]), int(match[0][1]) + 1)) - else: - return list(range(len(per_cpu_times()))) + @wrap_exceptions + def cpu_affinity_get(self): + return cext.proc_cpu_affinity_get(self.pid) - @wrap_exceptions - def cpu_affinity_set(self, cpus): - try: - cext.proc_cpu_affinity_set(self.pid, cpus) - except (OSError, ValueError) as err: - if isinstance(err, ValueError) or err.errno == errno.EINVAL: - eligible_cpus = self._get_eligible_cpus() - all_cpus = tuple(range(len(per_cpu_times()))) - for cpu in cpus: - if cpu not in all_cpus: - raise ValueError( - "invalid CPU number %r; choose between %s" % ( - cpu, eligible_cpus)) - if cpu not in eligible_cpus: - raise ValueError( - "CPU number %r is not eligible; choose " - "between %s" % (cpu, eligible_cpus)) - raise + def _get_eligible_cpus( + self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")): + # See: https://github.com/giampaolo/psutil/issues/956 + data = self._read_status_file() + match = _re.findall(data) + if match: + return list(range(int(match[0][0]), int(match[0][1]) + 1)) + else: + return list(range(len(per_cpu_times()))) + + @wrap_exceptions + def cpu_affinity_set(self, cpus): + try: + cext.proc_cpu_affinity_set(self.pid, cpus) + except (OSError, ValueError) as err: + if isinstance(err, ValueError) or err.errno == errno.EINVAL: + eligible_cpus = self._get_eligible_cpus() + all_cpus = tuple(range(len(per_cpu_times()))) + for cpu in cpus: + if cpu not in all_cpus: + raise ValueError( + "invalid CPU number %r; choose between %s" % ( + cpu, eligible_cpus)) + if cpu not in eligible_cpus: + raise ValueError( + "CPU number %r is not eligible; choose " + "between %s" % (cpu, 
eligible_cpus)) + raise # only starting from kernel 2.6.13 if HAS_PROC_IO_PRIORITY: @@ -1958,35 +1974,12 @@ class Process(object): @wrap_exceptions def ionice_set(self, ioclass, value): - if value is not None: - if not PY3 and not isinstance(value, (int, long)): - msg = "value argument is not an integer (gor %r)" % value - raise TypeError(msg) - if not 0 <= value <= 7: - raise ValueError( - "value argument range expected is between 0 and 7") - - if ioclass in (IOPRIO_CLASS_NONE, None): - if value: - msg = "can't specify value with IOPRIO_CLASS_NONE " \ - "(got %r)" % value - raise ValueError(msg) - ioclass = IOPRIO_CLASS_NONE + if value is None: value = 0 - elif ioclass == IOPRIO_CLASS_IDLE: - if value: - msg = "can't specify value with IOPRIO_CLASS_IDLE " \ - "(got %r)" % value - raise ValueError(msg) - value = 0 - elif ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE): - if value is None: - # TODO: add comment explaining why this is 4 (?) - value = 4 - else: - # otherwise we would get OSError(EVINAL) - raise ValueError("invalid ioclass argument %r" % ioclass) - + if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE): + raise ValueError("%r ioclass accepts no value" % ioclass) + if value < 0 or value > 7: + raise ValueError("value not in 0-7 range") return cext.proc_ioprio_set(self.pid, ioclass, value) if HAS_PRLIMIT: @@ -2020,7 +2013,7 @@ class Process(object): @wrap_exceptions def status(self): - letter = self._parse_stat_file()[1] + letter = self._parse_stat_file()['status'] if PY3: letter = letter.decode() # XXX is '?' legit? (we're not supposed to return it anyway) @@ -2035,16 +2028,15 @@ class Process(object): file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd) try: path = readlink(file) - except OSError as err: + except (FileNotFoundError, ProcessLookupError): # ENOENT == file which is gone in the meantime - if err.errno in (errno.ENOENT, errno.ESRCH): - hit_enoent = True - continue - elif err.errno == errno.EINVAL: + hit_enoent = True + continue + except OSError as err: + if err.errno == errno.EINVAL: # not a link continue - else: - raise + raise else: # If path is not an absolute there's no way to tell # whether it's a regular file or not, so we skip it. @@ -2058,29 +2050,23 @@ class Process(object): with open_binary(file) as f: pos = int(f.readline().split()[1]) flags = int(f.readline().split()[1], 8) - except IOError as err: - if err.errno == errno.ENOENT: - # fd gone in the meantime; does not - # necessarily mean the process disappeared - # on us. 
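# Behavioral sketch (not part of the patch) of the simplified ionice_set()
# above: the value check is now uniform (0-7) and only the RT/BE classes
# accept a value at all. The constants and stand-in function are illustrative
# only; the real method ends in cext.proc_ioprio_set().
IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE = \
    0, 1, 2, 3

def ionice_set(ioclass, value):
    if value is None:
        value = 0
    if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE):
        raise ValueError("%r ioclass accepts no value" % ioclass)
    if value < 0 or value > 7:
        raise ValueError("value not in 0-7 range")
    return (ioclass, value)

print(ionice_set(IOPRIO_CLASS_BE, 4))       # -> (2, 4)
print(ionice_set(IOPRIO_CLASS_IDLE, None))  # -> (3, 0)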
- hit_enoent = True - else: - raise + except FileNotFoundError: + # fd gone in the meantime; process may + # still be alive + hit_enoent = True else: mode = file_flags_to_mode(flags) ntuple = popenfile( path, int(fd), int(pos), mode, flags) retlist.append(ntuple) if hit_enoent: - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (self._procfs_path, self.pid)) + self._assert_alive() return retlist @wrap_exceptions def connections(self, kind='inet'): ret = _connections.retrieve(kind, self.pid) - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (self._procfs_path, self.pid)) + self._assert_alive() return ret @wrap_exceptions @@ -2089,7 +2075,7 @@ class Process(object): @wrap_exceptions def ppid(self): - return int(self._parse_stat_file()[2]) + return int(self._parse_stat_file()['ppid']) @wrap_exceptions def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')): diff --git a/server/www/packages/packages-linux/x64/psutil/_psosx.py b/server/www/packages/packages-linux/x64/psutil/_psosx.py index 94e22bc..7f28447 100644 --- a/server/www/packages/packages-linux/x64/psutil/_psosx.py +++ b/server/www/packages/packages-linux/x64/psutil/_psosx.py @@ -8,24 +8,20 @@ import contextlib import errno import functools import os -from socket import AF_INET from collections import namedtuple from . import _common from . import _psposix from . import _psutil_osx as cext from . import _psutil_posix as cext_posix -from ._common import AF_INET6 from ._common import conn_tmap +from ._common import conn_to_ntuple from ._common import isfile_strict from ._common import memoize_when_activated from ._common import parse_environ_block -from ._common import sockfam_to_enum -from ._common import socktype_to_enum +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._common import usage_percent -from ._exceptions import AccessDenied -from ._exceptions import NoSuchProcess -from ._exceptions import ZombieProcess __extra__all__ = [] @@ -87,6 +83,13 @@ pidtaskinfo_map = dict( volctxsw=7, ) +# These objects get set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +NoSuchProcess = None +ZombieProcess = None +AccessDenied = None +TimeoutExpired = None + # ===================================================================== # --- named tuples @@ -103,13 +106,6 @@ svmem = namedtuple( pmem = namedtuple('pmem', ['rss', 'vms', 'pfaults', 'pageins']) # psutil.Process.memory_full_info() pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', )) -# psutil.Process.memory_maps(grouped=True) -pmmap_grouped = namedtuple( - 'pmmap_grouped', - 'path rss private swapped dirtied ref_count shadow_depth') -# psutil.Process.memory_maps(grouped=False) -pmmap_ext = namedtuple( - 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) # ===================================================================== @@ -340,12 +336,10 @@ def wrap_exceptions(fun): def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) - except OSError as err: - if err.errno == errno.ESRCH: - raise NoSuchProcess(self.pid, self._name) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) - raise + except ProcessLookupError: + raise NoSuchProcess(self.pid, self._name) + except PermissionError: + raise AccessDenied(self.pid, self._name) except cext.ZombieProcessError: raise ZombieProcess(self.pid, self._name, self._ppid) return wrapper @@ -380,13 +374,14 @@ def catch_zombie(proc): class 
Process(object): """Wrapper class around underlying C implementation.""" - __slots__ = ["pid", "_name", "_ppid"] + __slots__ = ["pid", "_name", "_ppid", "_cache"] def __init__(self, pid): self.pid = pid self._name = None self._ppid = None + @wrap_exceptions @memoize_when_activated def _get_kinfo_proc(self): # Note: should work with all PIDs without permission issues. @@ -394,6 +389,7 @@ class Process(object): assert len(ret) == len(kinfo_proc_map) return ret + @wrap_exceptions @memoize_when_activated def _get_pidtaskinfo(self): # Note: should work for PIDs owned by user only. @@ -403,12 +399,12 @@ class Process(object): return ret def oneshot_enter(self): - self._get_kinfo_proc.cache_activate() - self._get_pidtaskinfo.cache_activate() + self._get_kinfo_proc.cache_activate(self) + self._get_pidtaskinfo.cache_activate(self) def oneshot_exit(self): - self._get_kinfo_proc.cache_deactivate() - self._get_pidtaskinfo.cache_deactivate() + self._get_kinfo_proc.cache_deactivate(self) + self._get_pidtaskinfo.cache_deactivate(self) @wrap_exceptions def name(self): @@ -530,15 +526,8 @@ class Process(object): ret = [] for item in rawlist: fd, fam, type, laddr, raddr, status = item - status = TCP_STATUSES[status] - fam = sockfam_to_enum(fam) - type = socktype_to_enum(type) - if fam in (AF_INET, AF_INET6): - if laddr: - laddr = _common.addr(*laddr) - if raddr: - raddr = _common.addr(*raddr) - nt = _common.pconn(fd, fam, type, laddr, raddr, status) + nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, + TCP_STATUSES) ret.append(nt) return ret @@ -577,7 +566,3 @@ class Process(object): ntuple = _common.pthread(thread_id, utime, stime) retlist.append(ntuple) return retlist - - @wrap_exceptions - def memory_maps(self): - return cext.proc_memory_maps(self.pid) diff --git a/server/www/packages/packages-linux/x64/psutil/_psposix.py b/server/www/packages/packages-linux/x64/psutil/_psposix.py index 9c3fac2..2457022 100644 --- a/server/www/packages/packages-linux/x64/psutil/_psposix.py +++ b/server/www/packages/packages-linux/x64/psutil/_psposix.py @@ -4,7 +4,6 @@ """Routines common to all posix systems.""" -import errno import glob import os import sys @@ -13,14 +12,23 @@ import time from ._common import memoize from ._common import sdiskusage from ._common import usage_percent +from ._compat import ChildProcessError +from ._compat import FileNotFoundError +from ._compat import InterruptedError +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._compat import PY3 from ._compat import unicode -from ._exceptions import TimeoutExpired __all__ = ['pid_exists', 'wait_pid', 'disk_usage', 'get_terminal_map'] +# This object gets set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +TimeoutExpired = None + + def pid_exists(pid): """Check whether pid exists in the current process table.""" if pid == 0: @@ -32,19 +40,13 @@ def pid_exists(pid): return True try: os.kill(pid, 0) - except OSError as err: - if err.errno == errno.ESRCH: - # ESRCH == No such process - return False - elif err.errno == errno.EPERM: - # EPERM clearly means there's a process to deny access to - return True - else: - # According to "man 2 kill" possible error values are - # (EINVAL, EPERM, ESRCH) therefore we should never get - # here. If we do let's be explicit in considering this - # an error. 
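The pid_exists() hunk being rewritten here reduces, on Python 3, to the classic kill(pid, 0) probe; on Python 2 the patch pulls equivalent exception aliases from psutil._compat. As a self-contained sketch:

import os

def pid_exists(pid):
    """Check whether pid exists in the current process table."""
    if pid == 0:
        # On POSIX kill(0, 0) signals our own process group,
        # so treat PID 0 as always present.
        return True
    try:
        os.kill(pid, 0)          # signal 0: existence/permission probe
    except ProcessLookupError:   # ESRCH: no such process
        return False
    except PermissionError:      # EPERM: it exists, we can't signal it
        return True
    else:
        return True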
- raise err + except ProcessLookupError: + return False + except PermissionError: + # EPERM clearly means there's a process to deny access to + return True + # According to "man 2 kill" possible error values are + # (EINVAL, EPERM, ESRCH) else: return True @@ -80,24 +82,20 @@ def wait_pid(pid, timeout=None, proc_name=None): while True: try: retpid, status = waitcall() - except OSError as err: - if err.errno == errno.EINTR: - delay = check_timeout(delay) - continue - elif err.errno == errno.ECHILD: - # This has two meanings: - # - pid is not a child of os.getpid() in which case - # we keep polling until it's gone - # - pid never existed in the first place - # In both cases we'll eventually return None as we - # can't determine its exit status code. - while True: - if pid_exists(pid): - delay = check_timeout(delay) - else: - return - else: - raise + except InterruptedError: + delay = check_timeout(delay) + except ChildProcessError: + # This has two meanings: + # - pid is not a child of os.getpid() in which case + # we keep polling until it's gone + # - pid never existed in the first place + # In both cases we'll eventually return None as we + # can't determine its exit status code. + while True: + if pid_exists(pid): + delay = check_timeout(delay) + else: + return else: if retpid == 0: # WNOHANG was used, pid is still running @@ -176,7 +174,6 @@ def get_terminal_map(): assert name not in ret, name try: ret[os.stat(name).st_rdev] = name - except OSError as err: - if err.errno != errno.ENOENT: - raise + except FileNotFoundError: + pass return ret diff --git a/server/www/packages/packages-linux/x64/psutil/_pssunos.py b/server/www/packages/packages-linux/x64/psutil/_pssunos.py index e2f33a3..2aa2a86 100644 --- a/server/www/packages/packages-linux/x64/psutil/_pssunos.py +++ b/server/www/packages/packages-linux/x64/psutil/_pssunos.py @@ -5,6 +5,7 @@ """Sun OS Solaris platform implementation.""" import errno +import functools import os import socket import subprocess @@ -17,16 +18,17 @@ from . import _psposix from . import _psutil_posix as cext_posix from . 
import _psutil_sunos as cext from ._common import AF_INET6 +from ._common import get_procfs_path from ._common import isfile_strict from ._common import memoize_when_activated from ._common import sockfam_to_enum from ._common import socktype_to_enum from ._common import usage_percent from ._compat import b +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError from ._compat import PY3 -from ._exceptions import AccessDenied -from ._exceptions import NoSuchProcess -from ._exceptions import ZombieProcess __extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"] @@ -85,6 +87,13 @@ proc_info_map = dict( gid=10, egid=11) +# These objects get set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +NoSuchProcess = None +ZombieProcess = None +AccessDenied = None +TimeoutExpired = None + # ===================================================================== # --- named tuples @@ -109,16 +118,6 @@ pmmap_ext = namedtuple( 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) -# ===================================================================== -# --- utils -# ===================================================================== - - -def get_procfs_path(): - """Return updated psutil.PROCFS_PATH constant.""" - return sys.modules['psutil'].PROCFS_PATH - - # ===================================================================== # --- memory # ===================================================================== @@ -266,6 +265,7 @@ def net_connections(kind, _pid=-1): continue if type_ not in types: continue + # TODO: refactor and use _common.conn_to_ntuple. if fam in (AF_INET, AF_INET6): if laddr: laddr = _common.addr(*laddr) @@ -341,26 +341,26 @@ def wrap_exceptions(fun): """Call callable into a try/except clause and translate ENOENT, EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. """ - + @functools.wraps(fun) def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) - except EnvironmentError as err: + except (FileNotFoundError, ProcessLookupError): + # ENOENT (no such file or directory) gets raised on open(). + # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + except PermissionError: + raise AccessDenied(self.pid, self._name) + except OSError: if self.pid == 0: if 0 in pids(): raise AccessDenied(self.pid, self._name) else: raise - # ENOENT (no such file or directory) gets raised on open(). - # ESRCH (no such process) can get raised on read() if - # process is gone in meantime. 
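This Solaris wrap_exceptions rewrite follows the same translation scheme used in the Linux and macOS hunks: builtin OSError subclasses in, psutil exceptions out. A sketch of the decorator under simplified assumptions (the exception classes here are stand-ins; per the patch and issue #1402, the real ones are injected into each module from __init__.py at import time):

import functools
import os

class NoSuchProcess(Exception): pass
class ZombieProcess(Exception): pass
class AccessDenied(Exception): pass

def pid_exists(pid):
    # Minimal probe, see the fuller sketch earlier in this patch.
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False
    except PermissionError:
        return True
    return True

def wrap_exceptions(fun):
    # ENOENT (no such file or directory) gets raised on open();
    # ESRCH (no such process) can get raised on read() if the
    # process is gone in the meantime.
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except (FileNotFoundError, ProcessLookupError):
            if not pid_exists(self.pid):
                raise NoSuchProcess(self.pid)
            raise ZombieProcess(self.pid)  # /proc entry gone, pid alive
        except PermissionError:
            raise AccessDenied(self.pid)
    return wrapper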
- if err.errno in (errno.ENOENT, errno.ESRCH): - if not pid_exists(self.pid): - raise NoSuchProcess(self.pid, self._name) - else: - raise ZombieProcess(self.pid, self._name, self._ppid) - if err.errno in (errno.EPERM, errno.EACCES): - raise AccessDenied(self.pid, self._name) raise return wrapper @@ -368,7 +368,7 @@ def wrap_exceptions(fun): class Process(object): """Wrapper class around underlying C implementation.""" - __slots__ = ["pid", "_name", "_ppid", "_procfs_path"] + __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] def __init__(self, pid): self.pid = pid @@ -376,32 +376,38 @@ class Process(object): self._ppid = None self._procfs_path = get_procfs_path() + def _assert_alive(self): + """Raise NSP if the process disappeared on us.""" + # For those C function who do not raise NSP, possibly returning + # incorrect or incomplete result. + os.stat('%s/%s' % (self._procfs_path, self.pid)) + def oneshot_enter(self): - self._proc_name_and_args.cache_activate() - self._proc_basic_info.cache_activate() - self._proc_cred.cache_activate() + self._proc_name_and_args.cache_activate(self) + self._proc_basic_info.cache_activate(self) + self._proc_cred.cache_activate(self) def oneshot_exit(self): - self._proc_name_and_args.cache_deactivate() - self._proc_basic_info.cache_deactivate() - self._proc_cred.cache_deactivate() + self._proc_name_and_args.cache_deactivate(self) + self._proc_basic_info.cache_deactivate(self) + self._proc_cred.cache_deactivate(self) + @wrap_exceptions @memoize_when_activated def _proc_name_and_args(self): return cext.proc_name_and_args(self.pid, self._procfs_path) + @wrap_exceptions @memoize_when_activated def _proc_basic_info(self): ret = cext.proc_basic_info(self.pid, self._procfs_path) assert len(ret) == len(proc_info_map) return ret + @wrap_exceptions @memoize_when_activated def _proc_cred(self): - @wrap_exceptions - def proc_cred(self): - return cext.proc_cred(self.pid, self._procfs_path) - return proc_cred(self) + return cext.proc_cred(self.pid, self._procfs_path) @wrap_exceptions def name(self): @@ -512,14 +518,11 @@ class Process(object): try: return os.readlink( '%s/%d/path/%d' % (procfs_path, self.pid, x)) - except OSError as err: - if err.errno == errno.ENOENT: - hit_enoent = True - continue - raise + except FileNotFoundError: + hit_enoent = True + continue if hit_enoent: - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (procfs_path, self.pid)) + self._assert_alive() @wrap_exceptions def cwd(self): @@ -530,11 +533,9 @@ class Process(object): procfs_path = self._procfs_path try: return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid)) - except OSError as err: - if err.errno == errno.ENOENT: - os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD - return None - raise + except FileNotFoundError: + os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD + return None @wrap_exceptions def memory_info(self): @@ -581,8 +582,7 @@ class Process(object): nt = _common.pthread(tid, utime, stime) ret.append(nt) if hit_enoent: - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (procfs_path, self.pid)) + self._assert_alive() return ret @wrap_exceptions @@ -596,18 +596,14 @@ class Process(object): if os.path.islink(path): try: file = os.readlink(path) - except OSError as err: - # ENOENT == file which is gone in the meantime - if err.errno == errno.ENOENT: - hit_enoent = True - continue - raise + except FileNotFoundError: + hit_enoent = True + continue else: if isfile_strict(file): 
retlist.append(_common.popenfile(file, int(fd))) if hit_enoent: - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (procfs_path, self.pid)) + self._assert_alive() return retlist def _get_unix_sockets(self, pid): @@ -707,8 +703,7 @@ class Process(object): raise retlist.append((addr, perm, name, rss, anon, locked)) if hit_enoent: - # raise NSP if the process disappeared on us - os.stat('%s/%s' % (procfs_path, self.pid)) + self._assert_alive() return retlist @wrap_exceptions diff --git a/server/www/packages/packages-linux/x64/psutil/_psutil_linux.cpython-37m-x86_64-linux-gnu.so b/server/www/packages/packages-linux/x64/psutil/_psutil_linux.cpython-37m-x86_64-linux-gnu.so index 26278e2..3ff94fa 100755 Binary files a/server/www/packages/packages-linux/x64/psutil/_psutil_linux.cpython-37m-x86_64-linux-gnu.so and b/server/www/packages/packages-linux/x64/psutil/_psutil_linux.cpython-37m-x86_64-linux-gnu.so differ diff --git a/server/www/packages/packages-linux/x64/psutil/_psutil_posix.cpython-37m-x86_64-linux-gnu.so b/server/www/packages/packages-linux/x64/psutil/_psutil_posix.cpython-37m-x86_64-linux-gnu.so index 8916dd8..6f33c93 100755 Binary files a/server/www/packages/packages-linux/x64/psutil/_psutil_posix.cpython-37m-x86_64-linux-gnu.so and b/server/www/packages/packages-linux/x64/psutil/_psutil_posix.cpython-37m-x86_64-linux-gnu.so differ diff --git a/server/www/packages/packages-linux/x64/psutil/_pswindows.py b/server/www/packages/packages-linux/x64/psutil/_pswindows.py index b938d42..636b0af 100644 --- a/server/www/packages/packages-linux/x64/psutil/_pswindows.py +++ b/server/www/packages/packages-linux/x64/psutil/_pswindows.py @@ -27,29 +27,25 @@ except ImportError as err: # but if we get here it means this this was a wheel (or exe). 
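Throughout this patch, cache_activate()/cache_deactivate() gain a self argument and "_cache" joins each Process __slots__: the oneshot() cache moves from the decorator onto the instance. Roughly how such a per-instance memoizer can be structured (a sketch, not psutil's actual _common.memoize_when_activated):

import functools

def memoize_when_activated(fun):
    # Results live in self._cache, created by cache_activate(self) and
    # dropped by cache_deactivate(self); outside that window every call
    # goes straight through.
    @functools.wraps(fun)
    def wrapper(self):
        try:
            return self._cache[fun]
        except AttributeError:      # cache not active: call through
            return fun(self)
        except KeyError:            # active but not populated yet
            ret = self._cache[fun] = fun(self)
            return ret

    def _activate(proc):
        proc._cache = {}

    def _deactivate(proc):
        try:
            del proc._cache
        except AttributeError:
            pass

    wrapper.cache_activate = _activate
    wrapper.cache_deactivate = _deactivate
    return wrapper

class Proc(object):
    __slots__ = ["pid", "_cache"]   # "_cache" slot mirrors the patch

    def __init__(self, pid):
        self.pid = pid

    @memoize_when_activated
    def info(self):
        return {"pid": self.pid}    # stands in for an expensive read

p = Proc(1)
p.info()                        # uncached call
Proc.info.cache_activate(p)     # oneshot_enter()
p.info(); p.info()              # second call served from p._cache
Proc.info.cache_deactivate(p)   # oneshot_exit()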
msg = "this Windows version is too old (< Windows Vista); " msg += "psutil 3.4.2 is the latest version which supports Windows " - msg += "2000, XP and 2003 server; it may be possible that psutil " - msg += "will work if compiled from sources though" + msg += "2000, XP and 2003 server" raise RuntimeError(msg) else: raise from ._common import conn_tmap +from ._common import conn_to_ntuple from ._common import ENCODING from ._common import ENCODING_ERRS from ._common import isfile_strict +from ._common import memoize from ._common import memoize_when_activated from ._common import parse_environ_block -from ._common import sockfam_to_enum -from ._common import socktype_to_enum from ._common import usage_percent from ._compat import long from ._compat import lru_cache from ._compat import PY3 from ._compat import unicode from ._compat import xrange -from ._exceptions import AccessDenied -from ._exceptions import NoSuchProcess -from ._exceptions import TimeoutExpired from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS from ._psutil_windows import HIGH_PRIORITY_CLASS @@ -66,11 +62,14 @@ else: # http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx __extra__all__ = [ "win_service_iter", "win_service_get", + # Process priority "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", - "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", - "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS", - "CONN_DELETE_TCB", - "AF_LINK", + "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", "NORMAL_PRIORITY_CLASS", + "REALTIME_PRIORITY_CLASS", + # IO priority + "IOPRIO_VERYLOW", "IOPRIO_LOW", "IOPRIO_NORMAL", "IOPRIO_HIGH", + # others + "CONN_DELETE_TCB", "AF_LINK", ] @@ -79,11 +78,9 @@ __extra__all__ = [ # ===================================================================== CONN_DELETE_TCB = "DELETE_TCB" -ACCESS_DENIED_ERRSET = frozenset([errno.EPERM, errno.EACCES, - cext.ERROR_ACCESS_DENIED]) -NO_SUCH_SERVICE_ERRSET = frozenset([cext.ERROR_INVALID_NAME, - cext.ERROR_SERVICE_DOES_NOT_EXIST]) HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_io_priority_get") +HAS_GETLOADAVG = hasattr(cext, "getloadavg") +ERROR_PARTIAL_COPY = 299 if enum is None: @@ -119,6 +116,19 @@ if enum is not None: globals().update(Priority.__members__) +if enum is None: + IOPRIO_VERYLOW = 0 + IOPRIO_LOW = 1 + IOPRIO_NORMAL = 2 + IOPRIO_HIGH = 3 +else: + class IOPriority(enum.IntEnum): + IOPRIO_VERYLOW = 0 + IOPRIO_LOW = 1 + IOPRIO_NORMAL = 2 + IOPRIO_HIGH = 3 + globals().update(IOPriority.__members__) + pinfo_map = dict( num_handles=0, ctx_switches=1, @@ -144,6 +154,35 @@ pinfo_map = dict( mem_private=21, ) +# These objects get set on "import psutil" from the __init__.py +# file, see: https://github.com/giampaolo/psutil/issues/1402 +NoSuchProcess = None +ZombieProcess = None +AccessDenied = None +TimeoutExpired = None + +# More values at: https://stackoverflow.com/a/20804735/376587 +WIN_10 = (10, 0) +WIN_8 = (6, 2) +WIN_7 = (6, 1) +WIN_SERVER_2008 = (6, 0) +WIN_VISTA = (6, 0) +WIN_SERVER_2003 = (5, 2) +WIN_XP = (5, 1) + + +@lru_cache() +def get_winver(): + """Usage: + >>> if get_winver() <= WIN_VISTA: + ... ... 
+ """ + wv = sys.getwindowsversion() + return (wv.major, wv.minor) + + +IS_WIN_XP = get_winver() < WIN_VISTA + # ===================================================================== # --- named tuples @@ -204,6 +243,11 @@ def py2_strencode(s): return s.encode(ENCODING, ENCODING_ERRS) +@memoize +def getpagesize(): + return cext.getpagesize() + + # ===================================================================== # --- memory # ===================================================================== @@ -310,6 +354,23 @@ def cpu_freq(): return [_common.scpufreq(float(curr), min_, float(max_))] +if HAS_GETLOADAVG: + _loadavg_inititialized = False + + def getloadavg(): + """Return the number of processes in the system run queue averaged + over the last 1, 5, and 15 minutes respectively as a tuple""" + global _loadavg_inititialized + + if not _loadavg_inititialized: + cext.init_loadavg_counter() + _loadavg_inititialized = True + + # Drop to 2 decimal points which is what Linux does + raw_loads = cext.getloadavg() + return tuple([round(load, 2) for load in raw_loads]) + + # ===================================================================== # --- network # ===================================================================== @@ -327,17 +388,8 @@ def net_connections(kind, _pid=-1): ret = set() for item in rawlist: fd, fam, type, laddr, raddr, status, pid = item - if laddr: - laddr = _common.addr(*laddr) - if raddr: - raddr = _common.addr(*raddr) - status = TCP_STATUSES[status] - fam = sockfam_to_enum(fam) - type = socktype_to_enum(type) - if _pid == -1: - nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid) - else: - nt = _common.pconn(fd, fam, type, laddr, raddr, status) + nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, TCP_STATUSES, + pid=pid if _pid == -1 else None) ret.add(nt) return list(ret) @@ -502,14 +554,14 @@ class WindowsService(object): """ try: yield - except WindowsError as err: - if err.errno in ACCESS_DENIED_ERRSET: + except OSError as err: + if is_permission_err(err): raise AccessDenied( pid=None, name=self._name, msg="service %r is not querable (not enough privileges)" % self._name) - elif err.errno in NO_SUCH_SERVICE_ERRSET or \ - err.winerror in NO_SUCH_SERVICE_ERRSET: + elif err.winerror in (cext.ERROR_INVALID_NAME, + cext.ERROR_SERVICE_DOES_NOT_EXIST): raise NoSuchProcess( pid=None, name=self._name, msg="service %r does not exist)" % self._name) @@ -626,27 +678,68 @@ pid_exists = cext.pid_exists ppid_map = cext.ppid_map # used internally by Process.children() +def is_permission_err(exc): + """Return True if this is a permission error.""" + assert isinstance(exc, OSError), exc + # On Python 2 OSError doesn't always have 'winerror'. Sometimes + # it does, in which case the original exception was WindowsError + # (which is a subclass of OSError). + return exc.errno in (errno.EPERM, errno.EACCES) or \ + getattr(exc, "winerror", -1) in (cext.ERROR_ACCESS_DENIED, + cext.ERROR_PRIVILEGE_NOT_HELD) + + +def convert_oserror(exc, pid=None, name=None): + """Convert OSError into NoSuchProcess or AccessDenied.""" + assert isinstance(exc, OSError), exc + if is_permission_err(exc): + return AccessDenied(pid=pid, name=name) + if exc.errno == errno.ESRCH: + return NoSuchProcess(pid=pid, name=name) + raise exc + + def wrap_exceptions(fun): - """Decorator which translates bare OSError and WindowsError - exceptions into NoSuchProcess and AccessDenied. 
- """ + """Decorator which converts OSError into NoSuchProcess or AccessDenied.""" @functools.wraps(fun) def wrapper(self, *args, **kwargs): try: return fun(self, *args, **kwargs) except OSError as err: - if err.errno in ACCESS_DENIED_ERRSET: - raise AccessDenied(self.pid, self._name) - if err.errno == errno.ESRCH: - raise NoSuchProcess(self.pid, self._name) - raise + raise convert_oserror(err, pid=self.pid, name=self._name) + return wrapper + + +def retry_error_partial_copy(fun): + """Workaround for https://github.com/giampaolo/psutil/issues/875. + See: https://stackoverflow.com/questions/4457745#4457745 + """ + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + delay = 0.0001 + times = 33 + for x in range(times): # retries for roughly 1 second + try: + return fun(self, *args, **kwargs) + except WindowsError as _: + err = _ + if err.winerror == ERROR_PARTIAL_COPY: + time.sleep(delay) + delay = min(delay * 2, 0.04) + continue + else: + raise + else: + msg = "%s retried %s times, converted to AccessDenied as it's " \ + "still returning %r" % (fun, times, err) + raise AccessDenied(pid=self.pid, name=self._name, msg=msg) return wrapper class Process(object): """Wrapper class around underlying C implementation.""" - __slots__ = ["pid", "_name", "_ppid"] + __slots__ = ["pid", "_name", "_ppid", "_cache"] def __init__(self, pid): self.pid = pid @@ -656,11 +749,12 @@ class Process(object): # --- oneshot() stuff def oneshot_enter(self): - self.oneshot_info.cache_activate() + self.oneshot_info.cache_activate(self) def oneshot_exit(self): - self.oneshot_info.cache_deactivate() + self.oneshot_info.cache_deactivate(self) + @wrap_exceptions @memoize_when_activated def oneshot_info(self): """Return multiple information about this process as a @@ -691,25 +785,41 @@ class Process(object): @wrap_exceptions def exe(self): - # Note: os.path.exists(path) may return False even if the file - # is there, see: - # http://stackoverflow.com/questions/3112546/os-path-exists-lies - - # see https://github.com/giampaolo/psutil/issues/414 - # see https://github.com/giampaolo/psutil/issues/528 - if self.pid in (0, 4): - raise AccessDenied(self.pid, self._name) - return py2_strencode(convert_dos_path(cext.proc_exe(self.pid))) + # Dual implementation, see: + # https://github.com/giampaolo/psutil/pull/1413 + if not IS_WIN_XP: + exe = cext.proc_exe(self.pid) + else: + if self.pid in (0, 4): + # https://github.com/giampaolo/psutil/issues/414 + # https://github.com/giampaolo/psutil/issues/528 + raise AccessDenied(self.pid, self._name) + exe = cext.proc_exe(self.pid) + exe = convert_dos_path(exe) + return py2_strencode(exe) @wrap_exceptions + @retry_error_partial_copy def cmdline(self): - ret = cext.proc_cmdline(self.pid) + if cext.WINVER >= cext.WINDOWS_8_1: + # PEB method detects cmdline changes but requires more + # privileges: https://github.com/giampaolo/psutil/pull/1398 + try: + ret = cext.proc_cmdline(self.pid, use_peb=True) + except OSError as err: + if is_permission_err(err): + ret = cext.proc_cmdline(self.pid, use_peb=False) + else: + raise + else: + ret = cext.proc_cmdline(self.pid, use_peb=True) if PY3: return ret else: return [py2_strencode(s) for s in ret] @wrap_exceptions + @retry_error_partial_copy def environ(self): ustr = cext.proc_environ(self.pid) if ustr and not PY3: @@ -726,7 +836,7 @@ class Process(object): try: return cext.proc_memory_info(self.pid) except OSError as err: - if err.errno in ACCESS_DENIED_ERRSET: + if is_permission_err(err): # TODO: the C ext can probably be refactored in order # 
to get this from cext.proc_info() info = self.oneshot_info() @@ -758,6 +868,7 @@ class Process(object): def memory_full_info(self): basic_mem = self.memory_info() uss = cext.proc_memory_uss(self.pid) + uss *= getpagesize() return pfullmem(*basic_mem + (uss, )) def memory_maps(self): @@ -766,11 +877,7 @@ class Process(object): except OSError as err: # XXX - can't use wrap_exceptions decorator as we're # returning a generator; probably needs refactoring. - if err.errno in ACCESS_DENIED_ERRSET: - raise AccessDenied(self.pid, self._name) - if err.errno == errno.ESRCH: - raise NoSuchProcess(self.pid, self._name) - raise + raise convert_oserror(err, self.pid, self._name) else: for addr, perm, path, rss in raw: path = convert_dos_path(path) @@ -846,7 +953,7 @@ class Process(object): try: return cext.proc_create_time(self.pid) except OSError as err: - if err.errno in ACCESS_DENIED_ERRSET: + if is_permission_err(err): return self.oneshot_info()[pinfo_map['create_time']] raise @@ -868,24 +975,24 @@ class Process(object): try: user, system = cext.proc_cpu_times(self.pid) except OSError as err: - if err.errno in ACCESS_DENIED_ERRSET: - info = self.oneshot_info() - user = info[pinfo_map['user_time']] - system = info[pinfo_map['kernel_time']] - else: + if not is_permission_err(err): raise + info = self.oneshot_info() + user = info[pinfo_map['user_time']] + system = info[pinfo_map['kernel_time']] # Children user/system times are not retrievable (set to 0). return _common.pcputimes(user, system, 0.0, 0.0) @wrap_exceptions def suspend(self): - return cext.proc_suspend(self.pid) + cext.proc_suspend_or_resume(self.pid, True) @wrap_exceptions def resume(self): - return cext.proc_resume(self.pid) + cext.proc_suspend_or_resume(self.pid, False) @wrap_exceptions + @retry_error_partial_copy def cwd(self): if self.pid in (0, 4): raise AccessDenied(self.pid, self._name) @@ -932,35 +1039,36 @@ class Process(object): if HAS_PROC_IO_PRIORITY: @wrap_exceptions def ionice_get(self): - return cext.proc_io_priority_get(self.pid) + ret = cext.proc_io_priority_get(self.pid) + if enum is not None: + ret = IOPriority(ret) + return ret @wrap_exceptions - def ionice_set(self, value, _): - if _: - raise TypeError("set_proc_ionice() on Windows takes only " - "1 argument (2 given)") - if value not in (2, 1, 0): - raise ValueError("value must be 2 (normal), 1 (low) or 0 " - "(very low); got %r" % value) - return cext.proc_io_priority_set(self.pid, value) + def ionice_set(self, ioclass, value): + if value: + raise TypeError("value argument not accepted on Windows") + if ioclass not in (IOPRIO_VERYLOW, IOPRIO_LOW, IOPRIO_NORMAL, + IOPRIO_HIGH): + raise ValueError("%s is not a valid priority" % ioclass) + cext.proc_io_priority_set(self.pid, ioclass) @wrap_exceptions def io_counters(self): try: ret = cext.proc_io_counters(self.pid) except OSError as err: - if err.errno in ACCESS_DENIED_ERRSET: - info = self.oneshot_info() - ret = ( - info[pinfo_map['io_rcount']], - info[pinfo_map['io_wcount']], - info[pinfo_map['io_rbytes']], - info[pinfo_map['io_wbytes']], - info[pinfo_map['io_count_others']], - info[pinfo_map['io_bytes_others']], - ) - else: + if not is_permission_err(err): raise + info = self.oneshot_info() + ret = ( + info[pinfo_map['io_rcount']], + info[pinfo_map['io_wcount']], + info[pinfo_map['io_rbytes']], + info[pinfo_map['io_wbytes']], + info[pinfo_map['io_count_others']], + info[pinfo_map['io_bytes_others']], + ) return pio(*ret) @wrap_exceptions @@ -1008,7 +1116,7 @@ class Process(object): try: return 
cext.proc_num_handles(self.pid) except OSError as err: - if err.errno in ACCESS_DENIED_ERRSET: + if is_permission_err(err): return self.oneshot_info()[pinfo_map['num_handles']] raise diff --git a/server/www/packages/packages-linux/x64/pyasn1/__init__.py b/server/www/packages/packages-linux/x64/pyasn1/__init__.py index e2e4c5c..5a56a70 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/__init__.py +++ b/server/www/packages/packages-linux/x64/pyasn1/__init__.py @@ -1,7 +1,7 @@ import sys # https://www.python.org/dev/peps/pep-0396/ -__version__ = '0.4.4' +__version__ = '0.4.8' if sys.version_info[:2] < (2, 4): raise RuntimeError('PyASN1 requires Python 2.4 or later') diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/ber/decoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/ber/decoder.py index a27b3e0..5ff485f 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/ber/decoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/ber/decoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import debug @@ -18,6 +18,8 @@ from pyasn1.type import useful __all__ = ['decode'] +LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER) + noValue = base.noValue @@ -70,6 +72,10 @@ class ExplicitTagDecoder(AbstractSimpleDecoder): value, _ = decodeFun(head, asn1Spec, tagSet, length, **options) + if LOG: + LOG('explicit tag container carries %d octets of trailing payload ' + '(will be lost!): %s' % (len(_), debug.hexdump(_))) + return value, tail def indefLenValueDecoder(self, substrate, asn1Spec, @@ -120,7 +126,8 @@ class BooleanDecoder(IntegerDecoder): protoComponent = univ.Boolean(0) def _createComponent(self, asn1Spec, tagSet, value, **options): - return IntegerDecoder._createComponent(self, asn1Spec, tagSet, value and 1 or 0, **options) + return IntegerDecoder._createComponent( + self, asn1Spec, tagSet, value and 1 or 0, **options) class BitStringDecoder(AbstractSimpleDecoder): @@ -134,8 +141,8 @@ class BitStringDecoder(AbstractSimpleDecoder): head, tail = substrate[:length], substrate[length:] if substrateFun: - return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), - substrate, length) + return substrateFun(self._createComponent( + asn1Spec, tagSet, noValue, **options), substrate, length) if not head: raise error.PyAsn1Error('Empty BIT STRING substrate') @@ -148,12 +155,17 @@ class BitStringDecoder(AbstractSimpleDecoder): 'Trailing bits overflow %s' % trailingBits ) - value = self.protoComponent.fromOctetString(head[1:], internalFormat=True, padding=trailingBits) + value = self.protoComponent.fromOctetString( + head[1:], internalFormat=True, padding=trailingBits) return self._createComponent(asn1Spec, tagSet, value, **options), tail if not self.supportConstructedForm: - raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__) + raise error.PyAsn1Error('Constructed encoding form prohibited ' + 'at %s' % self.__class__.__name__) + + if LOG: + LOG('assembling constructed serialization') # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector @@ -234,6 +246,9 @@ class OctetStringDecoder(AbstractSimpleDecoder): if not self.supportConstructedForm: raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__) + if LOG: + LOG('assembling 
constructed serialization') + # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector @@ -267,7 +282,9 @@ class OctetStringDecoder(AbstractSimpleDecoder): allowEoo=True, **options) if component is eoo.endOfOctets: break + header += component + else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' @@ -374,59 +391,90 @@ class RealDecoder(AbstractSimpleDecoder): if fo & 0x80: # binary encoding if not head: raise error.PyAsn1Error("Incomplete floating-point value") + + if LOG: + LOG('decoding binary encoded REAL') + n = (fo & 0x03) + 1 + if n == 4: n = oct2int(head[0]) head = head[1:] + eo, head = head[:n], head[n:] + if not eo or not head: raise error.PyAsn1Error('Real exponent screwed') + e = oct2int(eo[0]) & 0x80 and -1 or 0 + while eo: # exponent e <<= 8 e |= oct2int(eo[0]) eo = eo[1:] + b = fo >> 4 & 0x03 # base bits + if b > 2: raise error.PyAsn1Error('Illegal Real base') + if b == 1: # encbase = 8 e *= 3 + elif b == 2: # encbase = 16 e *= 4 p = 0 + while head: # value p <<= 8 p |= oct2int(head[0]) head = head[1:] + if fo & 0x40: # sign bit p = -p + sf = fo >> 2 & 0x03 # scale bits p *= 2 ** sf value = (p, 2, e) + elif fo & 0x40: # infinite value + if LOG: + LOG('decoding infinite REAL') + value = fo & 0x01 and '-inf' or 'inf' + elif fo & 0xc0 == 0: # character encoding if not head: raise error.PyAsn1Error("Incomplete floating-point value") + + if LOG: + LOG('decoding character encoded REAL') + try: if fo & 0x3 == 0x1: # NR1 value = (int(head), 10, 0) + elif fo & 0x3 == 0x2: # NR2 value = float(head) + elif fo & 0x3 == 0x3: # NR3 value = float(head) + else: raise error.SubstrateUnderrunError( 'Unknown NR (tag %s)' % fo ) + except ValueError: raise error.SubstrateUnderrunError( 'Bad character Real syntax' ) + else: raise error.SubstrateUnderrunError( 'Unknown encoding (tag %s)' % fo ) + return self._createComponent(asn1Spec, tagSet, value, **options), tail @@ -447,10 +495,12 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): def _decodeComponents(self, substrate, tagSet=None, decodeFun=None, **options): components = [] componentTypes = set() + while substrate: component, substrate = decodeFun(substrate, **options) if component is eoo.endOfOctets: break + components.append(component) componentTypes.add(component.tagSet) @@ -460,6 +510,7 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): # * otherwise -> likely SEQUENCE OF/SET OF if len(componentTypes) > 1: protoComponent = self.protoRecordComponent + else: protoComponent = self.protoSequenceComponent @@ -469,6 +520,10 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): tagSet=tag.TagSet(protoComponent.tagSet.baseTag, *tagSet.superTags) ) + if LOG: + LOG('guessed %r container type (pass `asn1Spec` to guide the ' + 'decoder)' % asn1Object) + for idx, component in enumerate(components): asn1Object.setComponentByPosition( idx, component, @@ -490,8 +545,10 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): if substrateFun is not None: if asn1Spec is not None: asn1Object = asn1Spec.clone() + elif self.protoComponent is not None: asn1Object = self.protoComponent.clone(tagSet=tagSet) + else: asn1Object = self.protoRecordComponent, self.protoSequenceComponent @@ -501,11 +558,16 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): asn1Object, trailing = self._decodeComponents( head, tagSet=tagSet, decodeFun=decodeFun, **options ) + if trailing: - raise error.PyAsn1Error('Unused 
trailing %d octets encountered' % len(trailing)) + if LOG: + LOG('Unused trailing %d octets encountered: %s' % ( + len(trailing), debug.hexdump(trailing))) + return asn1Object, tail asn1Object = asn1Spec.clone() + asn1Object.clear() if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId): @@ -514,21 +576,31 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): isSetType = asn1Spec.typeId == univ.Set.typeId isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault + if LOG: + LOG('decoding %sdeterministic %s type %r chosen by type ID' % ( + not isDeterministic and 'non-' or '', isSetType and 'SET' or '', + asn1Spec)) + seenIndices = set() idx = 0 while head: if not namedTypes: componentType = None + elif isSetType: componentType = namedTypes.tagMapUnique + else: try: if isDeterministic: componentType = namedTypes[idx].asn1Object + elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: componentType = namedTypes.getTagMapNearPosition(idx) + else: componentType = namedTypes[idx].asn1Object + except IndexError: raise error.PyAsn1Error( 'Excessive components decoded at %r' % (asn1Spec,) @@ -539,6 +611,7 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): if not isDeterministic and namedTypes: if isSetType: idx = namedTypes.getPositionByType(component.effectiveTagSet) + elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx) @@ -551,14 +624,25 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): seenIndices.add(idx) idx += 1 + if LOG: + LOG('seen component indices %s' % seenIndices) + if namedTypes: if not namedTypes.requiredComponents.issubset(seenIndices): - raise error.PyAsn1Error('ASN.1 object %s has uninitialized components' % asn1Object.__class__.__name__) + raise error.PyAsn1Error( + 'ASN.1 object %s has uninitialized ' + 'components' % asn1Object.__class__.__name__) if namedTypes.hasOpenTypes: openTypes = options.get('openTypes', {}) + if LOG: + LOG('user-specified open types map:') + + for k, v in openTypes.items(): + LOG('%s -> %r' % (k, v)) + if openTypes or options.get('decodeOpenTypes', False): for idx, namedType in enumerate(namedTypes.namedTypes): @@ -577,27 +661,67 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): except KeyError: + if LOG: + LOG('default open types map of component ' + '"%s.%s" governed by component "%s.%s"' + ':' % (asn1Object.__class__.__name__, + namedType.name, + asn1Object.__class__.__name__, + namedType.openType.name)) + + for k, v in namedType.openType.items(): + LOG('%s -> %r' % (k, v)) + try: openType = namedType.openType[governingValue] except KeyError: + if LOG: + LOG('failed to resolve open type by governing ' + 'value %r' % (governingValue,)) continue - component, rest = decodeFun( - asn1Object.getComponentByPosition(idx).asOctets(), - asn1Spec=openType - ) + if LOG: + LOG('resolved open type %r by governing ' + 'value %r' % (openType, governingValue)) - asn1Object.setComponentByPosition(idx, component) + containerValue = asn1Object.getComponentByPosition(idx) + + if containerValue.typeId in ( + univ.SetOf.typeId, univ.SequenceOf.typeId): + + for pos, containerElement in enumerate( + containerValue): + + component, rest = decodeFun( + containerValue[pos].asOctets(), + asn1Spec=openType, **options + ) + + containerValue[pos] = component + + else: + component, rest = decodeFun( + asn1Object.getComponentByPosition(idx).asOctets(), + asn1Spec=openType, **options + ) + + 
asn1Object.setComponentByPosition(idx, component) else: - asn1Object.verifySizeSpec() + inconsistency = asn1Object.isInconsistent + if inconsistency: + raise inconsistency else: asn1Object = asn1Spec.clone() + asn1Object.clear() componentType = asn1Spec.componentType + if LOG: + LOG('decoding type %r chosen by given `asn1Spec`' % componentType) + idx = 0 while head: @@ -607,6 +731,7 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): verifyConstraints=False, matchTags=False, matchConstraints=False ) + idx += 1 return asn1Object, tail @@ -621,8 +746,10 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): if substrateFun is not None: if asn1Spec is not None: asn1Object = asn1Spec.clone() + elif self.protoComponent is not None: asn1Object = self.protoComponent.clone(tagSet=tagSet) + else: asn1Object = self.protoRecordComponent, self.protoSequenceComponent @@ -630,10 +757,12 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): if asn1Spec is None: return self._decodeComponents( - substrate, tagSet=tagSet, decodeFun=decodeFun, allowEoo=True, **options + substrate, tagSet=tagSet, decodeFun=decodeFun, + **dict(options, allowEoo=True) ) asn1Object = asn1Spec.clone() + asn1Object.clear() if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId): @@ -642,21 +771,31 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): isSetType = asn1Object.typeId == univ.Set.typeId isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault + if LOG: + LOG('decoding %sdeterministic %s type %r chosen by type ID' % ( + not isDeterministic and 'non-' or '', isSetType and 'SET' or '', + asn1Spec)) + seenIndices = set() idx = 0 while substrate: if len(namedTypes) <= idx: asn1Spec = None + elif isSetType: asn1Spec = namedTypes.tagMapUnique + else: try: if isDeterministic: asn1Spec = namedTypes[idx].asn1Object + elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: asn1Spec = namedTypes.getTagMapNearPosition(idx) + else: asn1Spec = namedTypes[idx].asn1Object + except IndexError: raise error.PyAsn1Error( 'Excessive components decoded at %r' % (asn1Object,) @@ -686,13 +825,22 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): 'No EOO seen before substrate ends' ) + if LOG: + LOG('seen component indices %s' % seenIndices) + if namedTypes: if not namedTypes.requiredComponents.issubset(seenIndices): raise error.PyAsn1Error('ASN.1 object %s has uninitialized components' % asn1Object.__class__.__name__) - if namedTypes.hasOpenTypes: + if namedTypes.hasOpenTypes: - openTypes = options.get('openTypes', None) + openTypes = options.get('openTypes', {}) + + if LOG: + LOG('user-specified open types map:') + + for k, v in openTypes.items(): + LOG('%s -> %r' % (k, v)) if openTypes or options.get('decodeOpenTypes', False): @@ -712,28 +860,68 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): except KeyError: + if LOG: + LOG('default open types map of component ' + '"%s.%s" governed by component "%s.%s"' + ':' % (asn1Object.__class__.__name__, + namedType.name, + asn1Object.__class__.__name__, + namedType.openType.name)) + + for k, v in namedType.openType.items(): + LOG('%s -> %r' % (k, v)) + try: openType = namedType.openType[governingValue] except KeyError: + if LOG: + LOG('failed to resolve open type by governing ' + 'value %r' % (governingValue,)) continue - component, rest = decodeFun( - asn1Object.getComponentByPosition(idx).asOctets(), - asn1Spec=openType, allowEoo=True - ) + if LOG: + LOG('resolved 
open type %r by governing ' + 'value %r' % (openType, governingValue)) - if component is not eoo.endOfOctets: - asn1Object.setComponentByPosition(idx, component) + containerValue = asn1Object.getComponentByPosition(idx) + + if containerValue.typeId in ( + univ.SetOf.typeId, univ.SequenceOf.typeId): + + for pos, containerElement in enumerate( + containerValue): + + component, rest = decodeFun( + containerValue[pos].asOctets(), + asn1Spec=openType, **dict(options, allowEoo=True) + ) + + containerValue[pos] = component + + else: + component, rest = decodeFun( + asn1Object.getComponentByPosition(idx).asOctets(), + asn1Spec=openType, **dict(options, allowEoo=True) + ) + + if component is not eoo.endOfOctets: + asn1Object.setComponentByPosition(idx, component) else: - asn1Object.verifySizeSpec() + inconsistency = asn1Object.isInconsistent + if inconsistency: + raise inconsistency else: asn1Object = asn1Spec.clone() + asn1Object.clear() componentType = asn1Spec.componentType + if LOG: + LOG('decoding type %r chosen by given `asn1Spec`' % componentType) + idx = 0 while substrate: @@ -747,7 +935,9 @@ class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): verifyConstraints=False, matchTags=False, matchConstraints=False ) + idx += 1 + else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' @@ -794,18 +984,25 @@ class ChoiceDecoder(AbstractConstructedDecoder): if asn1Spec is None: asn1Object = self.protoComponent.clone(tagSet=tagSet) + else: asn1Object = asn1Spec.clone() if substrateFun: return substrateFun(asn1Object, substrate, length) - if asn1Object.tagSet == tagSet: # explicitly tagged Choice + if asn1Object.tagSet == tagSet: + if LOG: + LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,)) + component, head = decodeFun( head, asn1Object.componentTagMap, **options ) else: + if LOG: + LOG('decoding %s as untagged CHOICE' % (tagSet,)) + component, head = decodeFun( head, asn1Object.componentTagMap, tagSet, length, state, **options @@ -813,6 +1010,9 @@ class ChoiceDecoder(AbstractConstructedDecoder): effectiveTagSet = component.effectiveTagSet + if LOG: + LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet)) + asn1Object.setComponentByType( effectiveTagSet, component, verifyConstraints=False, @@ -834,18 +1034,26 @@ class ChoiceDecoder(AbstractConstructedDecoder): if substrateFun: return substrateFun(asn1Object, substrate, length) - if asn1Object.tagSet == tagSet: # explicitly tagged Choice + if asn1Object.tagSet == tagSet: + if LOG: + LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,)) + component, substrate = decodeFun( substrate, asn1Object.componentType.tagMapUnique, **options ) + # eat up EOO marker eooMarker, substrate = decodeFun( substrate, allowEoo=True, **options ) + if eooMarker is not eoo.endOfOctets: raise error.PyAsn1Error('No EOO seen before substrate ends') else: + if LOG: + LOG('decoding %s as untagged CHOICE' % (tagSet,)) + component, substrate = decodeFun( substrate, asn1Object.componentType.tagMapUnique, tagSet, length, state, **options @@ -853,6 +1061,9 @@ class ChoiceDecoder(AbstractConstructedDecoder): effectiveTagSet = component.effectiveTagSet + if LOG: + LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet)) + asn1Object.setComponentByType( effectiveTagSet, component, verifyConstraints=False, @@ -870,13 +1081,25 @@ class AnyDecoder(AbstractSimpleDecoder): tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): - if asn1Spec is None or asn1Spec is 
not None and tagSet != asn1Spec.tagSet: + if asn1Spec is None: + isUntagged = True + + elif asn1Spec.__class__ is tagmap.TagMap: + isUntagged = tagSet not in asn1Spec.tagMap + + else: + isUntagged = tagSet != asn1Spec.tagSet + + if isUntagged: fullSubstrate = options['fullSubstrate'] # untagged Any container, recover inner header substrate length += len(fullSubstrate) - len(substrate) substrate = fullSubstrate + if LOG: + LOG('decoding as untagged ANY, substrate %s' % debug.hexdump(substrate)) + if substrateFun: return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length) @@ -889,15 +1112,31 @@ class AnyDecoder(AbstractSimpleDecoder): tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): - if asn1Spec is not None and tagSet == asn1Spec.tagSet: + if asn1Spec is None: + isTagged = False + + elif asn1Spec.__class__ is tagmap.TagMap: + isTagged = tagSet in asn1Spec.tagMap + + else: + isTagged = tagSet == asn1Spec.tagSet + + if isTagged: # tagged Any type -- consume header substrate header = null + + if LOG: + LOG('decoding as tagged ANY') + else: fullSubstrate = options['fullSubstrate'] # untagged Any, recover header substrate header = fullSubstrate[:-len(substrate)] + if LOG: + LOG('decoding as untagged ANY, header substrate %s' % debug.hexdump(header)) + # Any components do not inherit initial tag asn1Spec = self.protoComponent @@ -905,6 +1144,9 @@ class AnyDecoder(AbstractSimpleDecoder): asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options) return substrateFun(asn1Object, header + substrate, length + len(header)) + if LOG: + LOG('assembling constructed serialization') + # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector @@ -914,13 +1156,17 @@ class AnyDecoder(AbstractSimpleDecoder): allowEoo=True, **options) if component is eoo.endOfOctets: break + header += component + else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' ) + if substrateFun: return header, substrate + else: return self._createComponent(asn1Spec, tagSet, header, **options), substrate @@ -1045,7 +1291,7 @@ for typeDecoder in tagMap.values(): class Decoder(object): defaultErrorState = stErrorCondition - # defaultErrorState = stDumpRawValue + #defaultErrorState = stDumpRawValue defaultRawDecoder = AnyDecoder() supportIndefLength = True @@ -1063,21 +1309,16 @@ class Decoder(object): decodeFun=None, substrateFun=None, **options): - if debug.logger & debug.flagDecoder: - logger = debug.logger - else: - logger = None - - if logger: - logger('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate))) + if LOG: + LOG('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate))) allowEoo = options.pop('allowEoo', False) # Look for end-of-octets sentinel if allowEoo and self.supportIndefLength: if substrate[:2] == self.__eooSentinel: - if logger: - logger('end-of-octets sentinel found') + if LOG: + LOG('end-of-octets sentinel found') return eoo.endOfOctets, substrate[2:] value = noValue @@ -1090,26 +1331,32 @@ class Decoder(object): fullSubstrate = substrate while state is not stStop: + if state is stDecodeTag: if not substrate: raise error.SubstrateUnderrunError( 'Short octet stream on tag decoding' ) + # Decode tag isShortTag = True firstOctet = substrate[0] substrate = 
substrate[1:] + try: lastTag = tagCache[firstOctet] + except KeyError: integerTag = oct2int(firstOctet) tagClass = integerTag & 0xC0 tagFormat = integerTag & 0x20 tagId = integerTag & 0x1F + if tagId == 0x1F: isShortTag = False lengthOctetIdx = 0 tagId = 0 + try: while True: integerTag = oct2int(substrate[lengthOctetIdx]) @@ -1118,42 +1365,55 @@ class Decoder(object): tagId |= (integerTag & 0x7F) if not integerTag & 0x80: break + substrate = substrate[lengthOctetIdx:] + except IndexError: raise error.SubstrateUnderrunError( 'Short octet stream on long tag decoding' ) + lastTag = tag.Tag( tagClass=tagClass, tagFormat=tagFormat, tagId=tagId ) + if isShortTag: # cache short tags tagCache[firstOctet] = lastTag + if tagSet is None: if isShortTag: try: tagSet = tagSetCache[firstOctet] + except KeyError: # base tag not recovered tagSet = tag.TagSet((), lastTag) tagSetCache[firstOctet] = tagSet else: tagSet = tag.TagSet((), lastTag) + else: tagSet = lastTag + tagSet + state = stDecodeLength - if logger: - logger('tag decoded into %s, decoding length' % tagSet) + + if LOG: + LOG('tag decoded into %s, decoding length' % tagSet) + if state is stDecodeLength: # Decode length if not substrate: raise error.SubstrateUnderrunError( 'Short octet stream on length decoding' ) + firstOctet = oct2int(substrate[0]) + if firstOctet < 128: size = 1 length = firstOctet + elif firstOctet > 128: size = firstOctet & 0x7F # encoded in size bytes @@ -1164,28 +1424,36 @@ class Decoder(object): raise error.SubstrateUnderrunError( '%s<%s at %s' % (size, len(encodedLength), tagSet) ) + length = 0 for lengthOctet in encodedLength: length <<= 8 length |= lengthOctet size += 1 + else: size = 1 length = -1 substrate = substrate[size:] + if length == -1: if not self.supportIndefLength: raise error.PyAsn1Error('Indefinite length encoding not supported by this codec') + else: if len(substrate) < length: raise error.SubstrateUnderrunError('%d-octet short' % (length - len(substrate))) + state = stGetValueDecoder - if logger: - logger('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length]))) + + if LOG: + LOG('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length]))) + if state is stGetValueDecoder: if asn1Spec is None: state = stGetValueDecoderByTag + else: state = stGetValueDecoderByAsn1Spec # @@ -1207,41 +1475,55 @@ class Decoder(object): if state is stGetValueDecoderByTag: try: concreteDecoder = tagMap[tagSet] + except KeyError: concreteDecoder = None + if concreteDecoder: state = stDecodeValue + else: try: concreteDecoder = tagMap[tagSet[:1]] + except KeyError: concreteDecoder = None + if concreteDecoder: state = stDecodeValue else: state = stTryAsExplicitTag - if logger: - logger('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "", state is stDecodeValue and 'value' or 'as explicit tag')) + + if LOG: + LOG('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "", state is stDecodeValue and 'value' or 'as explicit tag')) debug.scope.push(concreteDecoder is None and '?' 
or concreteDecoder.protoComponent.__class__.__name__) + if state is stGetValueDecoderByAsn1Spec: + if asn1Spec.__class__ is tagmap.TagMap: try: chosenSpec = asn1Spec[tagSet] + except KeyError: chosenSpec = None - if logger: - logger('candidate ASN.1 spec is a map of:') + + if LOG: + LOG('candidate ASN.1 spec is a map of:') + for firstOctet, v in asn1Spec.presentTypes.items(): - logger(' %s -> %s' % (firstOctet, v.__class__.__name__)) + LOG(' %s -> %s' % (firstOctet, v.__class__.__name__)) + if asn1Spec.skipTypes: - logger('but neither of: ') + LOG('but neither of: ') for firstOctet, v in asn1Spec.skipTypes.items(): - logger(' %s -> %s' % (firstOctet, v.__class__.__name__)) - logger('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '' or chosenSpec.prettyPrintType(), tagSet)) + LOG(' %s -> %s' % (firstOctet, v.__class__.__name__)) + LOG('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '' or chosenSpec.prettyPrintType(), tagSet)) + elif tagSet == asn1Spec.tagSet or tagSet in asn1Spec.tagMap: chosenSpec = asn1Spec - if logger: - logger('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__) + if LOG: + LOG('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__) + else: chosenSpec = None @@ -1249,29 +1531,38 @@ class Decoder(object): try: # ambiguous type or just faster codec lookup concreteDecoder = typeMap[chosenSpec.typeId] - if logger: - logger('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,)) + + if LOG: + LOG('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,)) + except KeyError: # use base type for codec lookup to recover untagged types baseTagSet = tag.TagSet(chosenSpec.tagSet.baseTag, chosenSpec.tagSet.baseTag) try: # base type or tagged subtype concreteDecoder = tagMap[baseTagSet] - if logger: - logger('value decoder chosen by base %s' % (baseTagSet,)) + + if LOG: + LOG('value decoder chosen by base %s' % (baseTagSet,)) + except KeyError: concreteDecoder = None + if concreteDecoder: asn1Spec = chosenSpec state = stDecodeValue + else: state = stTryAsExplicitTag + else: concreteDecoder = None state = stTryAsExplicitTag - if logger: - logger('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "", state is stDecodeValue and 'value' or 'as explicit tag')) + + if LOG: + LOG('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "", state is stDecodeValue and 'value' or 'as explicit tag')) debug.scope.push(chosenSpec is None and '?' 
or chosenSpec.__class__.__name__) + if state is stDecodeValue: if not options.get('recursiveFlag', True) and not substrateFun: # deprecate this substrateFun = lambda a, b, c: (a, b[:c]) @@ -1285,6 +1576,7 @@ class Decoder(object): self, substrateFun, **options ) + else: value, substrate = concreteDecoder.valueDecoder( substrate, asn1Spec, @@ -1293,33 +1585,44 @@ class Decoder(object): **options ) - if logger: - logger('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, isinstance(value, base.Asn1Item) and value.prettyPrint() or value, substrate and debug.hexdump(substrate) or '')) + if LOG: + LOG('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, isinstance(value, base.Asn1Item) and value.prettyPrint() or value, substrate and debug.hexdump(substrate) or '')) state = stStop break + if state is stTryAsExplicitTag: - if tagSet and tagSet[0].tagFormat == tag.tagFormatConstructed and tagSet[0].tagClass != tag.tagClassUniversal: + if (tagSet and + tagSet[0].tagFormat == tag.tagFormatConstructed and + tagSet[0].tagClass != tag.tagClassUniversal): # Assume explicit tagging concreteDecoder = explicitTagDecoder state = stDecodeValue + else: concreteDecoder = None state = self.defaultErrorState - if logger: - logger('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "", state is stDecodeValue and 'value' or 'as failure')) + + if LOG: + LOG('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "", state is stDecodeValue and 'value' or 'as failure')) + if state is stDumpRawValue: concreteDecoder = self.defaultRawDecoder - if logger: - logger('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__) + + if LOG: + LOG('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__) + state = stDecodeValue + if state is stErrorCondition: raise error.PyAsn1Error( '%s not in asn1Spec: %r' % (tagSet, asn1Spec) ) - if logger: + + if LOG: debug.scope.pop() - logger('decoder left scope %s, call completed' % debug.scope) + LOG('decoder left scope %s, call completed' % debug.scope) + return value, substrate @@ -1349,7 +1652,7 @@ class Decoder(object): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError #: On decoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/ber/encoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/ber/encoder.py index 0094b22..778aa86 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/ber/encoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/ber/encoder.py @@ -1,9 +1,11 @@ # # This file is part of pyasn1 software. 
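The decoder's "Raises" documentation above now names ~pyasn1.error.SubstrateUnderrunError alongside PyAsn1Error. A minimal sketch (not part of the diff) of what that looks like from calling code, assuming the vendored pyasn1 behaves like upstream 0.4.x:

    from pyasn1 import error
    from pyasn1.codec.ber import decoder

    try:
        # INTEGER header declaring 4 payload octets, but only 1 supplied
        decoder.decode(b'\x02\x04\x01')
    except error.SubstrateUnderrunError as exc:
        print('truncated substrate:', exc)   # subclass of PyAsn1Error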
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # +import sys + from pyasn1 import debug from pyasn1 import error from pyasn1.codec.ber import eoo @@ -17,6 +19,8 @@ from pyasn1.type import useful __all__ = ['encode'] +LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER) + class AbstractItemEncoder(object): supportIndefLenMode = True @@ -31,29 +35,39 @@ class AbstractItemEncoder(object): encodedTag = tagClass | tagFormat if isConstructed: encodedTag |= tag.tagFormatConstructed + if tagId < 31: return encodedTag | tagId, + else: substrate = tagId & 0x7f, + tagId >>= 7 + while tagId: substrate = (0x80 | (tagId & 0x7f),) + substrate tagId >>= 7 + return (encodedTag | 0x1F,) + substrate def encodeLength(self, length, defMode): if not defMode and self.supportIndefLenMode: return (0x80,) + if length < 0x80: return length, + else: substrate = () while length: substrate = (length & 0xff,) + substrate length >>= 8 + substrateLen = len(substrate) + if substrateLen > 126: raise error.PyAsn1Error('Length octets overflow (%d)' % substrateLen) + return (0x80 | substrateLen,) + substrate def encodeValue(self, value, asn1Spec, encodeFun, **options): @@ -75,26 +89,51 @@ class AbstractItemEncoder(object): defMode = options.get('defMode', True) + substrate = null + for idx, singleTag in enumerate(tagSet.superTags): defModeOverride = defMode # base tag? if not idx: - substrate, isConstructed, isOctets = self.encodeValue( - value, asn1Spec, encodeFun, **options - ) + try: + substrate, isConstructed, isOctets = self.encodeValue( + value, asn1Spec, encodeFun, **options + ) + + except error.PyAsn1Error: + exc = sys.exc_info() + raise error.PyAsn1Error( + 'Error encoding %r: %s' % (value, exc[1])) + + if LOG: + LOG('encoded %svalue %s into %s' % ( + isConstructed and 'constructed ' or '', value, substrate + )) if not substrate and isConstructed and options.get('ifNotEmpty', False): return substrate - # primitive form implies definite mode if not isConstructed: defModeOverride = True + if LOG: + LOG('overridden encoding mode into definitive for primitive type') + header = self.encodeTag(singleTag, isConstructed) + + if LOG: + LOG('encoded %stag %s into %s' % ( + isConstructed and 'constructed ' or '', + singleTag, debug.hexdump(ints2octs(header)))) + header += self.encodeLength(len(substrate), defModeOverride) + if LOG: + LOG('encoded %s octets (tag + payload) into %s' % ( + len(substrate), debug.hexdump(ints2octs(header)))) + if isOctets: substrate = ints2octs(header) + substrate @@ -131,6 +170,11 @@ class IntegerEncoder(AbstractItemEncoder): def encodeValue(self, value, asn1Spec, encodeFun, **options): if value == 0: + if LOG: + LOG('encoding %spayload for zero INTEGER' % ( + self.supportCompactZero and 'no ' or '' + )) + # de-facto way to encode zero if self.supportCompactZero: return (), False, False @@ -157,11 +201,15 @@ class BitStringEncoder(AbstractItemEncoder): substrate = alignedValue.asOctets() return int2oct(len(substrate) * 8 - valueLength) + substrate, False, True + if LOG: + LOG('encoding into up to %s-octet chunks' % maxChunkSize) + baseTag = value.tagSet.baseTag # strip off explicit tags if baseTag: tagSet = tag.TagSet(baseTag, baseTag) + else: tagSet = tag.TagSet() @@ -195,44 +243,47 @@ class OctetStringEncoder(AbstractItemEncoder): if not maxChunkSize or len(substrate) <= maxChunkSize: return substrate, False, True - else: + if LOG: + LOG('encoding into up to %s-octet chunks' % maxChunkSize) - # strip 
off explicit tags for inner chunks + # strip off explicit tags for inner chunks - if asn1Spec is None: - baseTag = value.tagSet.baseTag + if asn1Spec is None: + baseTag = value.tagSet.baseTag - # strip off explicit tags - if baseTag: - tagSet = tag.TagSet(baseTag, baseTag) - else: - tagSet = tag.TagSet() + # strip off explicit tags + if baseTag: + tagSet = tag.TagSet(baseTag, baseTag) - asn1Spec = value.clone(tagSet=tagSet) + else: + tagSet = tag.TagSet() - elif not isOctetsType(value): - baseTag = asn1Spec.tagSet.baseTag + asn1Spec = value.clone(tagSet=tagSet) - # strip off explicit tags - if baseTag: - tagSet = tag.TagSet(baseTag, baseTag) - else: - tagSet = tag.TagSet() + elif not isOctetsType(value): + baseTag = asn1Spec.tagSet.baseTag - asn1Spec = asn1Spec.clone(tagSet=tagSet) + # strip off explicit tags + if baseTag: + tagSet = tag.TagSet(baseTag, baseTag) - pos = 0 - substrate = null + else: + tagSet = tag.TagSet() - while True: - chunk = value[pos:pos + maxChunkSize] - if not chunk: - break + asn1Spec = asn1Spec.clone(tagSet=tagSet) - substrate += encodeFun(chunk, asn1Spec, **options) - pos += maxChunkSize + pos = 0 + substrate = null - return substrate, True, True + while True: + chunk = value[pos:pos + maxChunkSize] + if not chunk: + break + + substrate += encodeFun(chunk, asn1Spec, **options) + pos += maxChunkSize + + return substrate, True, True class NullEncoder(AbstractItemEncoder): @@ -268,8 +319,10 @@ class ObjectIdentifierEncoder(AbstractItemEncoder): oid = (second + 80,) + oid[2:] else: raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,)) + elif first == 2: oid = (second + 80,) + oid[2:] + else: raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,)) @@ -280,15 +333,19 @@ class ObjectIdentifierEncoder(AbstractItemEncoder): if 0 <= subOid <= 127: # Optimize for the common case octets += (subOid,) + elif subOid > 127: # Pack large Sub-Object IDs res = (subOid & 0x7f,) subOid >>= 7 + while subOid: res = (0x80 | (subOid & 0x7f),) + res subOid >>= 7 + # Add packed Sub-Object ID to resulted Object ID octets += res + else: raise error.PyAsn1Error('Negative OID arc %s at %s' % (subOid, value)) @@ -304,12 +361,16 @@ class RealEncoder(AbstractItemEncoder): ms, es = 1, 1 if m < 0: ms = -1 # mantissa sign + if e < 0: - es = -1 # exponenta sign + es = -1 # exponent sign + m *= ms + if encbase == 8: m *= 2 ** (abs(e) % 3 * es) e = abs(e) // 3 * es + elif encbase == 16: m *= 2 ** (abs(e) % 4 * es) e = abs(e) // 4 * es @@ -320,6 +381,7 @@ class RealEncoder(AbstractItemEncoder): e -= 1 continue break + return ms, int(m), encbase, e def _chooseEncBase(self, value): @@ -327,23 +389,32 @@ class RealEncoder(AbstractItemEncoder): encBase = [2, 8, 16] if value.binEncBase in encBase: return self._dropFloatingPoint(m, value.binEncBase, e) + elif self.binEncBase in encBase: return self._dropFloatingPoint(m, self.binEncBase, e) - # auto choosing base 2/8/16 + + # auto choosing base 2/8/16 mantissa = [m, m, m] - exponenta = [e, e, e] + exponent = [e, e, e] sign = 1 encbase = 2 e = float('inf') + for i in range(3): (sign, mantissa[i], encBase[i], - exponenta[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponenta[i]) - if abs(exponenta[i]) < abs(e) or (abs(exponenta[i]) == abs(e) and mantissa[i] < m): - e = exponenta[i] + exponent[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponent[i]) + + if abs(exponent[i]) < abs(e) or (abs(exponent[i]) == abs(e) and mantissa[i] < m): + e = exponent[i] m = int(mantissa[i]) encbase = encBase[i] + + if LOG: + 
LOG('automatically chosen REAL encoding base %s, sign %s, mantissa %s, ' + 'exponent %s' % (encbase, sign, m, e)) + return sign, m, encbase, e def encodeValue(self, value, asn1Spec, encodeFun, **options): @@ -352,69 +423,98 @@ class RealEncoder(AbstractItemEncoder): if value.isPlusInf: return (0x40,), False, False + if value.isMinusInf: return (0x41,), False, False + m, b, e = value + if not m: return null, False, True + if b == 10: + if LOG: + LOG('encoding REAL into character form') + return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), False, True + elif b == 2: fo = 0x80 # binary encoding ms, m, encbase, e = self._chooseEncBase(value) + if ms < 0: # mantissa sign fo |= 0x40 # sign bit - # exponenta & mantissa normalization + + # exponent & mantissa normalization if encbase == 2: while m & 0x1 == 0: m >>= 1 e += 1 + elif encbase == 8: while m & 0x7 == 0: m >>= 3 e += 1 fo |= 0x10 + else: # encbase = 16 while m & 0xf == 0: m >>= 4 e += 1 fo |= 0x20 + sf = 0 # scale factor + while m & 0x1 == 0: m >>= 1 sf += 1 + if sf > 3: raise error.PyAsn1Error('Scale factor overflow') # bug if raised + fo |= sf << 2 eo = null if e == 0 or e == -1: eo = int2oct(e & 0xff) + else: while e not in (0, -1): eo = int2oct(e & 0xff) + eo e >>= 8 + if e == 0 and eo and oct2int(eo[0]) & 0x80: eo = int2oct(0) + eo + if e == -1 and eo and not (oct2int(eo[0]) & 0x80): eo = int2oct(0xff) + eo + n = len(eo) if n > 0xff: raise error.PyAsn1Error('Real exponent overflow') + if n == 1: pass + elif n == 2: fo |= 1 + elif n == 3: fo |= 2 + else: fo |= 3 eo = int2oct(n & 0xff) + eo + po = null + while m: po = int2oct(m & 0xff) + po m >>= 8 + substrate = int2oct(fo) + eo + po + return substrate, False, True + else: raise error.PyAsn1Error('Prohibited Real base %s' % b) @@ -428,9 +528,18 @@ class SequenceEncoder(AbstractItemEncoder): substrate = null + omitEmptyOptionals = options.get( + 'omitEmptyOptionals', self.omitEmptyOptionals) + + if LOG: + LOG('%sencoding empty OPTIONAL components' % ( + omitEmptyOptionals and 'not ' or '')) + if asn1Spec is None: # instance of ASN.1 schema - value.verifySizeSpec() + inconsistency = value.isInconsistent + if inconsistency: + raise inconsistency namedTypes = value.componentType @@ -439,23 +548,44 @@ class SequenceEncoder(AbstractItemEncoder): namedType = namedTypes[idx] if namedType.isOptional and not component.isValue: - continue + if LOG: + LOG('not encoding OPTIONAL component %r' % (namedType,)) + continue if namedType.isDefaulted and component == namedType.asn1Object: - continue + if LOG: + LOG('not encoding DEFAULT component %r' % (namedType,)) + continue - if self.omitEmptyOptionals: + if omitEmptyOptionals: options.update(ifNotEmpty=namedType.isOptional) - chunk = encodeFun(component, asn1Spec, **options) - # wrap open type blob if needed if namedTypes and namedType.openType: - wrapType = namedType.asn1Object - if wrapType.tagSet and not wrapType.isSameTypeWith(component): - chunk = encodeFun(chunk, wrapType, **options) - substrate += chunk + wrapType = namedType.asn1Object + + if wrapType.typeId in ( + univ.SetOf.typeId, univ.SequenceOf.typeId): + + substrate += encodeFun( + component, asn1Spec, + **dict(options, wrapType=wrapType.componentType)) + + else: + chunk = encodeFun(component, asn1Spec, **options) + + if wrapType.isSameTypeWith(component): + substrate += chunk + + else: + substrate += encodeFun(chunk, wrapType, **options) + + if LOG: + LOG('wrapped with wrap type %r' % (wrapType,)) + + else: + substrate += encodeFun(component, asn1Spec, **options) else: # 
bare Python value + ASN.1 schema @@ -465,43 +595,87 @@ class SequenceEncoder(AbstractItemEncoder): component = value[namedType.name] except KeyError: - raise error.PyAsn1Error('Component name "%s" not found in %r' % (namedType.name, value)) + raise error.PyAsn1Error('Component name "%s" not found in %r' % ( + namedType.name, value)) if namedType.isOptional and namedType.name not in value: + if LOG: + LOG('not encoding OPTIONAL component %r' % (namedType,)) continue if namedType.isDefaulted and component == namedType.asn1Object: + if LOG: + LOG('not encoding DEFAULT component %r' % (namedType,)) continue - if self.omitEmptyOptionals: + if omitEmptyOptionals: options.update(ifNotEmpty=namedType.isOptional) - chunk = encodeFun(component, asn1Spec[idx], **options) + componentSpec = namedType.asn1Object # wrap open type blob if needed if namedType.openType: - wrapType = namedType.asn1Object - if wrapType.tagSet and not wrapType.isSameTypeWith(component): - chunk = encodeFun(chunk, wrapType, **options) - substrate += chunk + if componentSpec.typeId in ( + univ.SetOf.typeId, univ.SequenceOf.typeId): + + substrate += encodeFun( + component, componentSpec, + **dict(options, wrapType=componentSpec.componentType)) + + else: + chunk = encodeFun(component, componentSpec, **options) + + if componentSpec.isSameTypeWith(component): + substrate += chunk + + else: + substrate += encodeFun(chunk, componentSpec, **options) + + if LOG: + LOG('wrapped with wrap type %r' % (componentSpec,)) + + else: + substrate += encodeFun(component, componentSpec, **options) return substrate, True, True class SequenceOfEncoder(AbstractItemEncoder): - def encodeValue(self, value, asn1Spec, encodeFun, **options): + def _encodeComponents(self, value, asn1Spec, encodeFun, **options): + if asn1Spec is None: - value.verifySizeSpec() + inconsistency = value.isInconsistent + if inconsistency: + raise inconsistency + else: asn1Spec = asn1Spec.componentType - substrate = null + chunks = [] + + wrapType = options.pop('wrapType', None) for idx, component in enumerate(value): - substrate += encodeFun(value[idx], asn1Spec, **options) + chunk = encodeFun(component, asn1Spec, **options) - return substrate, True, True + if (wrapType is not None and + not wrapType.isSameTypeWith(component)): + # wrap encoded value with wrapper container (e.g. 
ANY) + chunk = encodeFun(chunk, wrapType, **options) + + if LOG: + LOG('wrapped with wrap type %r' % (wrapType,)) + + chunks.append(chunk) + + return chunks + + def encodeValue(self, value, asn1Spec, encodeFun, **options): + chunks = self._encodeComponents( + value, asn1Spec, encodeFun, **options) + + return null.join(chunks), True, True class ChoiceEncoder(AbstractItemEncoder): @@ -620,13 +794,8 @@ class Encoder(object): raise error.PyAsn1Error('Value %r is not ASN.1 type instance ' 'and "asn1Spec" not given' % (value,)) - if debug.logger & debug.flagEncoder: - logger = debug.logger - else: - logger = None - - if logger: - logger('encoder called in %sdef mode, chunk size %s for ' + if LOG: + LOG('encoder called in %sdef mode, chunk size %s for ' 'type %s, value:\n%s' % (not options.get('defMode', True) and 'in' or '', options.get('maxChunkSize', 0), asn1Spec is None and value.prettyPrintType() or asn1Spec.prettyPrintType(), value)) if self.fixedDefLengthMode is not None: @@ -639,8 +808,8 @@ class Encoder(object): try: concreteEncoder = self.__typeMap[typeId] - if logger: - logger('using value codec %s chosen by type ID %s' % (concreteEncoder.__class__.__name__, typeId)) + if LOG: + LOG('using value codec %s chosen by type ID %s' % (concreteEncoder.__class__.__name__, typeId)) except KeyError: if asn1Spec is None: @@ -657,13 +826,13 @@ class Encoder(object): except KeyError: raise error.PyAsn1Error('No encoder for %r (%s)' % (value, tagSet)) - if logger: - logger('using value codec %s chosen by tagSet %s' % (concreteEncoder.__class__.__name__, tagSet)) + if LOG: + LOG('using value codec %s chosen by tagSet %s' % (concreteEncoder.__class__.__name__, tagSet)) substrate = concreteEncoder.encode(value, asn1Spec, self, **options) - if logger: - logger('codec %s built %s octets of substrate: %s\nencoder completed' % (concreteEncoder, len(substrate), debug.hexdump(substrate))) + if LOG: + LOG('codec %s built %s octets of substrate: %s\nencoder completed' % (concreteEncoder, len(substrate), debug.hexdump(substrate))) return substrate @@ -684,7 +853,7 @@ class Encoder(object): #: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative #: #: defMode: :py:class:`bool` -#: If `False`, produces indefinite length encoding +#: If :obj:`False`, produces indefinite length encoding #: #: maxChunkSize: :py:class:`int` #: Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size) @@ -696,7 +865,7 @@ class Encoder(object): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error #: On encoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/ber/eoo.py b/server/www/packages/packages-linux/x64/pyasn1/codec/ber/eoo.py index d4cd827..48eb859 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/ber/eoo.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/ber/eoo.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. 
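The module-level LOG introduced above replaces the per-call logger lookup; debug.setLogger() now pushes the active logger into every module registered through debug.registerLoggee(). A usage sketch (names are the public pyasn1.debug API):

    from pyasn1 import debug
    from pyasn1.codec.ber import encoder
    from pyasn1.type import univ

    debug.setLogger(debug.Debug('encoder'))  # LOG in ber/encoder.py becomes active
    substrate = encoder.encode(univ.Integer(12))

    debug.setLogger(None)                    # loggees fall back to DEBUG_NONE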
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1.type import base @@ -10,7 +10,7 @@ from pyasn1.type import tag __all__ = ['endOfOctets'] -class EndOfOctets(base.AbstractSimpleAsn1Item): +class EndOfOctets(base.SimpleAsn1Type): defaultValue = 0 tagSet = tag.initTagSet( tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00) diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/cer/decoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/cer/decoder.py index 66572ec..3e86fd0 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/cer/decoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/cer/decoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import error @@ -87,7 +87,7 @@ class Decoder(decoder.Decoder): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError #: On decoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/cer/encoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/cer/encoder.py index 768d3c1..935b696 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/cer/encoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/cer/encoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import error @@ -31,17 +31,20 @@ class RealEncoder(encoder.RealEncoder): # specialized GeneralStringEncoder here class TimeEncoderMixIn(object): - zchar, = str2octs('Z') - pluschar, = str2octs('+') - minuschar, = str2octs('-') - commachar, = str2octs(',') - minLength = 12 - maxLength = 19 + Z_CHAR = ord('Z') + PLUS_CHAR = ord('+') + MINUS_CHAR = ord('-') + COMMA_CHAR = ord(',') + DOT_CHAR = ord('.') + ZERO_CHAR = ord('0') + + MIN_LENGTH = 12 + MAX_LENGTH = 19 def encodeValue(self, value, asn1Spec, encodeFun, **options): - # Encoding constraints: + # CER encoding constraints: # - minutes are mandatory, seconds are optional - # - subseconds must NOT be zero + # - sub-seconds must NOT be zero / no meaningless zeros # - no hanging fraction dot # - time in UTC (Z) # - only dot is allowed for fractions @@ -49,20 +52,46 @@ class TimeEncoderMixIn(object): if asn1Spec is not None: value = asn1Spec.clone(value) - octets = value.asOctets() + numbers = value.asNumbers() - if not self.minLength < len(octets) < self.maxLength: - raise error.PyAsn1Error('Length constraint violated: %r' % value) + if self.PLUS_CHAR in numbers or self.MINUS_CHAR in numbers: + raise error.PyAsn1Error('Must be UTC time: %r' % value) - if self.pluschar in octets or self.minuschar in octets: - raise error.PyAsn1Error('Must be UTC time: %r' % octets) + if numbers[-1] != self.Z_CHAR: + raise error.PyAsn1Error('Missing "Z" time zone specifier: %r' % value) - if octets[-1] != self.zchar: - raise error.PyAsn1Error('Missing "Z" time zone specifier: %r' % octets) - - if self.commachar in octets: + if self.COMMA_CHAR in numbers: raise error.PyAsn1Error('Comma in fractions disallowed: %r' % value) + if self.DOT_CHAR in numbers: + + isModified = False + + numbers = list(numbers) + + searchIndex = min(numbers.index(self.DOT_CHAR) + 4, 
len(numbers) - 1) + + while numbers[searchIndex] != self.DOT_CHAR: + if numbers[searchIndex] == self.ZERO_CHAR: + del numbers[searchIndex] + isModified = True + + searchIndex -= 1 + + searchIndex += 1 + + if searchIndex < len(numbers): + if numbers[searchIndex] == self.Z_CHAR: + # drop hanging comma + del numbers[searchIndex - 1] + isModified = True + + if isModified: + value = value.clone(numbers) + + if not self.MIN_LENGTH < len(numbers) < self.MAX_LENGTH: + raise error.PyAsn1Error('Length constraint violated: %r' % value) + options.update(maxChunkSize=1000) return encoder.OctetStringEncoder.encodeValue( @@ -71,13 +100,44 @@ class TimeEncoderMixIn(object): class GeneralizedTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder): - minLength = 12 - maxLength = 19 + MIN_LENGTH = 12 + MAX_LENGTH = 20 class UTCTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder): - minLength = 10 - maxLength = 14 + MIN_LENGTH = 10 + MAX_LENGTH = 14 + + +class SetOfEncoder(encoder.SequenceOfEncoder): + def encodeValue(self, value, asn1Spec, encodeFun, **options): + chunks = self._encodeComponents( + value, asn1Spec, encodeFun, **options) + + # sort by serialised and padded components + if len(chunks) > 1: + zero = str2octs('\x00') + maxLen = max(map(len, chunks)) + paddedChunks = [ + (x.ljust(maxLen, zero), x) for x in chunks + ] + paddedChunks.sort(key=lambda x: x[0]) + + chunks = [x[1] for x in paddedChunks] + + return null.join(chunks), True, True + + +class SequenceOfEncoder(encoder.SequenceOfEncoder): + def encodeValue(self, value, asn1Spec, encodeFun, **options): + + if options.get('ifNotEmpty', False) and not len(value): + return null, True, True + + chunks = self._encodeComponents( + value, asn1Spec, encodeFun, **options) + + return null.join(chunks), True, True class SetEncoder(encoder.SequenceEncoder): @@ -109,7 +169,9 @@ class SetEncoder(encoder.SequenceEncoder): if asn1Spec is None: # instance of ASN.1 schema - value.verifySizeSpec() + inconsistency = value.isInconsistent + if inconsistency: + raise inconsistency namedTypes = value.componentType @@ -168,55 +230,10 @@ class SetEncoder(encoder.SequenceEncoder): return substrate, True, True -class SetOfEncoder(encoder.SequenceOfEncoder): - def encodeValue(self, value, asn1Spec, encodeFun, **options): - if asn1Spec is None: - value.verifySizeSpec() - else: - asn1Spec = asn1Spec.componentType - - components = [encodeFun(x, asn1Spec, **options) - for x in value] - - # sort by serialised and padded components - if len(components) > 1: - zero = str2octs('\x00') - maxLen = max(map(len, components)) - paddedComponents = [ - (x.ljust(maxLen, zero), x) for x in components - ] - paddedComponents.sort(key=lambda x: x[0]) - - components = [x[1] for x in paddedComponents] - - substrate = null.join(components) - - return substrate, True, True - - class SequenceEncoder(encoder.SequenceEncoder): omitEmptyOptionals = True -class SequenceOfEncoder(encoder.SequenceOfEncoder): - def encodeValue(self, value, asn1Spec, encodeFun, **options): - - if options.get('ifNotEmpty', False) and not len(value): - return null, True, True - - if asn1Spec is None: - value.verifySizeSpec() - else: - asn1Spec = asn1Spec.componentType - - substrate = null - - for idx, component in enumerate(value): - substrate += encodeFun(value[idx], asn1Spec, **options) - - return substrate, True, True - - tagMap = encoder.tagMap.copy() tagMap.update({ univ.Boolean.tagSet: BooleanEncoder(), @@ -269,7 +286,7 @@ class Encoder(encoder.Encoder): #: #: Raises #: ------ -#: 
:py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error #: On encoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/der/decoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/der/decoder.py index f67d025..1a13fdb 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/der/decoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/der/decoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1.codec.cer import decoder @@ -67,7 +67,7 @@ class Decoder(decoder.Decoder): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError #: On decoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/der/encoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/der/encoder.py index 756d9fe..90e982d 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/der/encoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/der/encoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import error @@ -82,7 +82,7 @@ class Encoder(encoder.Encoder): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error #: On encoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/native/decoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/native/decoder.py index 78fcda6..104b92e 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/native/decoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/native/decoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. 
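With the reworked CER/DER SetOfEncoder above, components are serialised first and then ordered by their padded encodings, so insertion order no longer affects the output. A quick sketch, assuming upstream 0.4.x semantics:

    from pyasn1.codec.der import encoder as der_encoder
    from pyasn1.type import univ

    a = univ.SetOf(componentType=univ.Integer())
    a.extend([3, 1, 2])

    b = univ.SetOf(componentType=univ.Integer())
    b.extend([1, 2, 3])

    # X.690 requires SET OF components in ascending encoding order
    assert der_encoder.encode(a) == der_encoder.encode(b)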
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import debug @@ -14,6 +14,8 @@ from pyasn1.type import useful __all__ = ['decode'] +LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER) + class AbstractScalarDecoder(object): def __call__(self, pyObject, asn1Spec, decodeFun=None, **options): @@ -136,13 +138,10 @@ class Decoder(object): self.__typeMap = typeMap def __call__(self, pyObject, asn1Spec, **options): - if debug.logger & debug.flagDecoder: - logger = debug.logger - else: - logger = None - if logger: + + if LOG: debug.scope.push(type(pyObject).__name__) - logger('decoder called at scope %s, working with type %s' % (debug.scope, type(pyObject).__name__)) + LOG('decoder called at scope %s, working with type %s' % (debug.scope, type(pyObject).__name__)) if asn1Spec is None or not isinstance(asn1Spec, base.Asn1Item): raise error.PyAsn1Error('asn1Spec is not valid (should be an instance of an ASN.1 Item, not %s)' % asn1Spec.__class__.__name__) @@ -159,13 +158,13 @@ class Decoder(object): except KeyError: raise error.PyAsn1Error('Unknown ASN.1 tag %s' % asn1Spec.tagSet) - if logger: - logger('calling decoder %s on Python type %s <%s>' % (type(valueDecoder).__name__, type(pyObject).__name__, repr(pyObject))) + if LOG: + LOG('calling decoder %s on Python type %s <%s>' % (type(valueDecoder).__name__, type(pyObject).__name__, repr(pyObject))) value = valueDecoder(pyObject, asn1Spec, self, **options) - if logger: - logger('decoder %s produced ASN.1 type %s <%s>' % (type(valueDecoder).__name__, type(value).__name__, repr(value))) + if LOG: + LOG('decoder %s produced ASN.1 type %s <%s>' % (type(valueDecoder).__name__, type(value).__name__, repr(value))) debug.scope.pop() return value @@ -196,7 +195,7 @@ class Decoder(object): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error #: On decoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/codec/native/encoder.py b/server/www/packages/packages-linux/x64/pyasn1/codec/native/encoder.py index 0956191..4318abd 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/codec/native/encoder.py +++ b/server/www/packages/packages-linux/x64/pyasn1/codec/native/encoder.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. 
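The native decoder whose logging is reworked above turns plain Python objects into ASN.1 objects and always needs an explicit asn1Spec. A minimal sketch:

    from pyasn1.codec.native import decoder
    from pyasn1.type import univ

    asn1Int = decoder.decode(12, asn1Spec=univ.Integer())
    assert isinstance(asn1Int, univ.Integer) and asn1Int == 12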
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # try: @@ -20,6 +20,8 @@ from pyasn1.type import useful __all__ = ['encode'] +LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER) + class AbstractItemEncoder(object): def encode(self, value, encodeFun, **options): @@ -70,7 +72,9 @@ class SetEncoder(AbstractItemEncoder): protoDict = dict def encode(self, value, encodeFun, **options): - value.verifySizeSpec() + inconsistency = value.isInconsistent + if inconsistency: + raise inconsistency namedTypes = value.componentType substrate = self.protoDict() @@ -88,7 +92,9 @@ class SequenceEncoder(SetEncoder): class SequenceOfEncoder(AbstractItemEncoder): def encode(self, value, encodeFun, **options): - value.verifySizeSpec() + inconsistency = value.isInconsistent + if inconsistency: + raise inconsistency return [encodeFun(x, **options) for x in value] @@ -180,14 +186,9 @@ class Encoder(object): if not isinstance(value, base.Asn1Item): raise error.PyAsn1Error('value is not valid (should be an instance of an ASN.1 Item)') - if debug.logger & debug.flagEncoder: - logger = debug.logger - else: - logger = None - - if logger: + if LOG: debug.scope.push(type(value).__name__) - logger('encoder called for type %s <%s>' % (type(value).__name__, value.prettyPrint())) + LOG('encoder called for type %s <%s>' % (type(value).__name__, value.prettyPrint())) tagSet = value.tagSet @@ -204,13 +205,13 @@ class Encoder(object): except KeyError: raise error.PyAsn1Error('No encoder for %s' % (value,)) - if logger: - logger('using value codec %s chosen by %s' % (concreteEncoder.__class__.__name__, tagSet)) + if LOG: + LOG('using value codec %s chosen by %s' % (concreteEncoder.__class__.__name__, tagSet)) pyObject = concreteEncoder.encode(value, self, **options) - if logger: - logger('encoder %s produced: %s' % (type(concreteEncoder).__name__, repr(pyObject))) + if LOG: + LOG('encoder %s produced: %s' % (type(concreteEncoder).__name__, repr(pyObject))) debug.scope.pop() return pyObject @@ -238,7 +239,7 @@ class Encoder(object): #: #: Raises #: ------ -#: :py:class:`~pyasn1.error.PyAsn1Error` +#: ~pyasn1.error.PyAsn1Error #: On encoding errors #: #: Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/compat/binary.py b/server/www/packages/packages-linux/x64/pyasn1/compat/binary.py index c38a650..addbdc9 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/compat/binary.py +++ b/server/www/packages/packages-linux/x64/pyasn1/compat/binary.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from sys import version_info diff --git a/server/www/packages/packages-linux/x64/pyasn1/compat/calling.py b/server/www/packages/packages-linux/x64/pyasn1/compat/calling.py index c60b50d..778a3d1 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/compat/calling.py +++ b/server/www/packages/packages-linux/x64/pyasn1/compat/calling.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. 
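The native encoder is the inverse operation: it renders ASN.1 objects as plain Python types (Integer becomes int, Sequence becomes a dict-like mapping). A sketch, assuming upstream 0.4.x behaviour:

    from pyasn1.codec.native import encoder
    from pyasn1.type import univ, namedtype

    class Pair(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('first', univ.Integer()),
            namedtype.NamedType('second', univ.Integer())
        )

    pair = Pair()
    pair['first'] = 1
    pair['second'] = 2

    assert encoder.encode(pair) == {'first': 1, 'second': 2}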
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from sys import version_info diff --git a/server/www/packages/packages-linux/x64/pyasn1/compat/dateandtime.py b/server/www/packages/packages-linux/x64/pyasn1/compat/dateandtime.py index 27526ad..5e471bf 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/compat/dateandtime.py +++ b/server/www/packages/packages-linux/x64/pyasn1/compat/dateandtime.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import time diff --git a/server/www/packages/packages-linux/x64/pyasn1/compat/integer.py b/server/www/packages/packages-linux/x64/pyasn1/compat/integer.py index bb3d099..4b31791 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/compat/integer.py +++ b/server/www/packages/packages-linux/x64/pyasn1/compat/integer.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import sys diff --git a/server/www/packages/packages-linux/x64/pyasn1/compat/octets.py b/server/www/packages/packages-linux/x64/pyasn1/compat/octets.py index a06db5d..99d23bb 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/compat/octets.py +++ b/server/www/packages/packages-linux/x64/pyasn1/compat/octets.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from sys import version_info diff --git a/server/www/packages/packages-linux/x64/pyasn1/compat/string.py b/server/www/packages/packages-linux/x64/pyasn1/compat/string.py index 4d8a045..b9bc8c3 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/compat/string.py +++ b/server/www/packages/packages-linux/x64/pyasn1/compat/string.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from sys import version_info diff --git a/server/www/packages/packages-linux/x64/pyasn1/debug.py b/server/www/packages/packages-linux/x64/pyasn1/debug.py index ab72fa8..8707aa8 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/debug.py +++ b/server/www/packages/packages-linux/x64/pyasn1/debug.py @@ -1,10 +1,11 @@ # # This file is part of pyasn1 software. 
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import logging +import sys from pyasn1 import __version__ from pyasn1 import error @@ -12,18 +13,20 @@ from pyasn1.compat.octets import octs2ints __all__ = ['Debug', 'setLogger', 'hexdump'] -flagNone = 0x0000 -flagEncoder = 0x0001 -flagDecoder = 0x0002 -flagAll = 0xffff +DEBUG_NONE = 0x0000 +DEBUG_ENCODER = 0x0001 +DEBUG_DECODER = 0x0002 +DEBUG_ALL = 0xffff -flagMap = { - 'none': flagNone, - 'encoder': flagEncoder, - 'decoder': flagDecoder, - 'all': flagAll +FLAG_MAP = { + 'none': DEBUG_NONE, + 'encoder': DEBUG_ENCODER, + 'decoder': DEBUG_DECODER, + 'all': DEBUG_ALL } +LOGGEE_MAP = {} + class Printer(object): # noinspection PyShadowingNames @@ -66,7 +69,7 @@ class Debug(object): defaultPrinter = Printer() def __init__(self, *flags, **options): - self._flags = flagNone + self._flags = DEBUG_NONE if 'loggerName' in options: # route our logs to parent logger @@ -89,9 +92,9 @@ class Debug(object): flag = flag[1:] try: if inverse: - self._flags &= ~flagMap[flag] + self._flags &= ~FLAG_MAP[flag] else: - self._flags |= flagMap[flag] + self._flags |= FLAG_MAP[flag] except KeyError: raise error.PyAsn1Error('bad debug flag %s' % flag) @@ -109,17 +112,26 @@ class Debug(object): def __rand__(self, flag): return flag & self._flags - -logger = 0 +_LOG = DEBUG_NONE def setLogger(userLogger): - global logger + global _LOG if userLogger: - logger = userLogger + _LOG = userLogger else: - logger = 0 + _LOG = DEBUG_NONE + + # Update registered logging clients + for module, (name, flags) in LOGGEE_MAP.items(): + setattr(module, name, _LOG & flags and _LOG or DEBUG_NONE) + + +def registerLoggee(module, name='LOG', flags=DEBUG_NONE): + LOGGEE_MAP[sys.modules[module]] = name, flags + setLogger(_LOG) + return _LOG def hexdump(octets): diff --git a/server/www/packages/packages-linux/x64/pyasn1/error.py b/server/www/packages/packages-linux/x64/pyasn1/error.py index c05e65c..4f48db2 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/error.py +++ b/server/www/packages/packages-linux/x64/pyasn1/error.py @@ -1,29 +1,75 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # class PyAsn1Error(Exception): - """Create pyasn1 exception object + """Base pyasn1 exception - The `PyAsn1Error` exception represents generic, usually fatal, error. + `PyAsn1Error` is the base exception class (based on + :class:`Exception`) that represents all possible ASN.1 related + errors. """ class ValueConstraintError(PyAsn1Error): - """Create pyasn1 exception object + """ASN.1 type constraints violation exception The `ValueConstraintError` exception indicates an ASN.1 value constraint violation. + + It might happen on value object instantiation (for scalar types) or on + serialization (for constructed types). """ class SubstrateUnderrunError(PyAsn1Error): - """Create pyasn1 exception object + """ASN.1 data structure deserialization error The `SubstrateUnderrunError` exception indicates insufficient serialised - data on input of a deserialisation routine. + data on input of a de-serialization codec. """ + + +class PyAsn1UnicodeError(PyAsn1Error, UnicodeError): + """Unicode text processing error + + The `PyAsn1UnicodeError` exception is a base class for errors relating to + unicode text de/serialization. 
+ + Apart from inheriting from :class:`PyAsn1Error`, it also inherits from + :class:`UnicodeError` to help the caller catching unicode-related errors. + """ + def __init__(self, message, unicode_error=None): + if isinstance(unicode_error, UnicodeError): + UnicodeError.__init__(self, *unicode_error.args) + PyAsn1Error.__init__(self, message) + + +class PyAsn1UnicodeDecodeError(PyAsn1UnicodeError, UnicodeDecodeError): + """Unicode text decoding error + + The `PyAsn1UnicodeDecodeError` exception represents a failure to + deserialize unicode text. + + Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits + from :class:`UnicodeDecodeError` to help the caller catching unicode-related + errors. + """ + + +class PyAsn1UnicodeEncodeError(PyAsn1UnicodeError, UnicodeEncodeError): + """Unicode text encoding error + + The `PyAsn1UnicodeEncodeError` exception represents a failure to + serialize unicode text. + + Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits + from :class:`UnicodeEncodeError` to help the caller catching + unicode-related errors. + """ + + diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/base.py b/server/www/packages/packages-linux/x64/pyasn1/type/base.py index adaab22..994f1c9 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/base.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/base.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import sys @@ -12,7 +12,8 @@ from pyasn1.type import constraint from pyasn1.type import tag from pyasn1.type import tagmap -__all__ = ['Asn1Item', 'Asn1ItemBase', 'AbstractSimpleAsn1Item', 'AbstractConstructedAsn1Item'] +__all__ = ['Asn1Item', 'Asn1Type', 'SimpleAsn1Type', + 'ConstructedAsn1Type'] class Asn1Item(object): @@ -25,7 +26,17 @@ class Asn1Item(object): return Asn1Item._typeCounter -class Asn1ItemBase(Asn1Item): +class Asn1Type(Asn1Item): + """Base class for all classes representing ASN.1 types. + + In the user code, |ASN.1| class is normally used only for telling + ASN.1 objects from others. + + Note + ---- + For as long as ASN.1 is concerned, a way to compare ASN.1 types + is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods. + """ #: Set or return a :py:class:`~pyasn1.type.tag.TagSet` object representing #: ASN.1 tag(s) associated with |ASN.1| type. tagSet = tag.TagSet() @@ -91,8 +102,8 @@ class Asn1ItemBase(Asn1Item): Returns ------- : :class:`bool` - :class:`True` if *other* is |ASN.1| type, - :class:`False` otherwise. + :obj:`True` if *other* is |ASN.1| type, + :obj:`False` otherwise. """ return (self is other or (not matchTags or self.tagSet == other.tagSet) and @@ -115,8 +126,8 @@ class Asn1ItemBase(Asn1Item): Returns ------- : :class:`bool` - :class:`True` if *other* is a subtype of |ASN.1| type, - :class:`False` otherwise. + :obj:`True` if *other* is a subtype of |ASN.1| type, + :obj:`False` otherwise. """ return (not matchTags or (self.tagSet.isSuperTagSetOf(other.tagSet)) and @@ -146,9 +157,13 @@ class Asn1ItemBase(Asn1Item): def getSubtypeSpec(self): return self.subtypeSpec + # backward compatibility def hasValue(self): return self.isValue +# Backward compatibility +Asn1ItemBase = Asn1Type + class NoValue(object): """Create a singleton instance of NoValue class. 
@@ -221,19 +236,31 @@ class NoValue(object): raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % attr) def __repr__(self): - return '<%s object at 0x%x>' % (self.__class__.__name__, id(self)) + return '<%s object>' % self.__class__.__name__ noValue = NoValue() -# Base class for "simple" ASN.1 objects. These are immutable. -class AbstractSimpleAsn1Item(Asn1ItemBase): +class SimpleAsn1Type(Asn1Type): + """Base class for all simple classes representing ASN.1 types. + + ASN.1 distinguishes types by their ability to hold other objects. + Scalar types are known as *simple* in ASN.1. + + In the user code, |ASN.1| class is normally used only for telling + ASN.1 objects from others. + + Note + ---- + For as long as ASN.1 is concerned, a way to compare ASN.1 types + is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods. + """ #: Default payload value defaultValue = noValue def __init__(self, value=noValue, **kwargs): - Asn1ItemBase.__init__(self, **kwargs) + Asn1Type.__init__(self, **kwargs) if value is noValue: value = self.defaultValue else: @@ -248,19 +275,18 @@ class AbstractSimpleAsn1Item(Asn1ItemBase): self._value = value def __repr__(self): - representation = '%s %s object at 0x%x' % ( - self.__class__.__name__, self.isValue and 'value' or 'schema', id(self) - ) + representation = '%s %s object' % ( + self.__class__.__name__, self.isValue and 'value' or 'schema') for attr, value in self.readOnly.items(): if value: - representation += ' %s %s' % (attr, value) + representation += ', %s %s' % (attr, value) if self.isValue: value = self.prettyPrint() if len(value) > 32: value = value[:16] + '...' + value[-16:] - representation += ' payload [%s]' % value + representation += ', payload [%s]' % value return '<%s>' % representation @@ -296,17 +322,18 @@ class AbstractSimpleAsn1Item(Asn1ItemBase): def isValue(self): """Indicate that |ASN.1| object represents ASN.1 value. - If *isValue* is `False` then this object represents just ASN.1 schema. + If *isValue* is :obj:`False` then this object represents just + ASN.1 schema. - If *isValue* is `True` then, in addition to its ASN.1 schema features, - this object can also be used like a Python built-in object (e.g. `int`, - `str`, `dict` etc.). + If *isValue* is :obj:`True` then, in addition to its ASN.1 schema + features, this object can also be used like a Python built-in object + (e.g. :class:`int`, :class:`str`, :class:`dict` etc.). Returns ------- : :class:`bool` - :class:`False` if object represents just ASN.1 schema. - :class:`True` if object represents ASN.1 schema and can be used as a normal value. + :obj:`False` if object represents just ASN.1 schema. + :obj:`True` if object represents ASN.1 schema and can be used as a normal value. Note ---- @@ -343,10 +370,10 @@ class AbstractSimpleAsn1Item(Asn1ItemBase): value = self._value - initilaizers = self.readOnly.copy() - initilaizers.update(kwargs) + initializers = self.readOnly.copy() + initializers.update(kwargs) - return self.__class__(value, **initilaizers) + return self.__class__(value, **initializers) def subtype(self, value=noValue, **kwargs): """Create a specialization of |ASN.1| schema or value object. 
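The schema-versus-value distinction that isValue formalizes, in two lines (standard pyasn1 API):

    from pyasn1.type import univ

    schema = univ.Integer()    # schema object: carries type info, no payload
    assert not schema.isValue

    value = schema.clone(12)   # value object: also usable as a Python int
    assert value.isValue and value + 1 == 13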
@@ -425,10 +452,12 @@ class AbstractSimpleAsn1Item(Asn1ItemBase): def prettyPrint(self, scope=0): return self.prettyOut(self._value) - # noinspection PyUnusedLocal def prettyPrintType(self, scope=0): return '%s -> %s' % (self.tagSet, self.__class__.__name__) +# Backward compatibility +AbstractSimpleAsn1Item = SimpleAsn1Type + # # Constructed types: # * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice @@ -449,67 +478,102 @@ class AbstractSimpleAsn1Item(Asn1ItemBase): # -class AbstractConstructedAsn1Item(Asn1ItemBase): +class ConstructedAsn1Type(Asn1Type): + """Base class for all constructed classes representing ASN.1 types. - #: If `True`, requires exact component type matching, + ASN.1 distinguishes types by their ability to hold other objects. + Those "nesting" types are known as *constructed* in ASN.1. + + In the user code, |ASN.1| class is normally used only for telling + ASN.1 objects from others. + + Note + ---- + For as long as ASN.1 is concerned, a way to compare ASN.1 types + is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods. + """ + + #: If :obj:`True`, requires exact component type matching, #: otherwise subtype relation is only enforced strictConstraints = False componentType = None - sizeSpec = None + + # backward compatibility, unused + sizeSpec = constraint.ConstraintsIntersection() def __init__(self, **kwargs): readOnly = { 'componentType': self.componentType, + # backward compatibility, unused 'sizeSpec': self.sizeSpec } + + # backward compatibility: preserve legacy sizeSpec support + kwargs = self._moveSizeSpec(**kwargs) + readOnly.update(kwargs) - Asn1ItemBase.__init__(self, **readOnly) + Asn1Type.__init__(self, **readOnly) - self._componentValues = [] + def _moveSizeSpec(self, **kwargs): + # backward compatibility, unused + sizeSpec = kwargs.pop('sizeSpec', self.sizeSpec) + if sizeSpec: + subtypeSpec = kwargs.pop('subtypeSpec', self.subtypeSpec) + if subtypeSpec: + subtypeSpec = sizeSpec + + else: + subtypeSpec += sizeSpec + + kwargs['subtypeSpec'] = subtypeSpec + + return kwargs def __repr__(self): - representation = '%s %s object at 0x%x' % ( - self.__class__.__name__, self.isValue and 'value' or 'schema', id(self) + representation = '%s %s object' % ( + self.__class__.__name__, self.isValue and 'value' or 'schema' ) for attr, value in self.readOnly.items(): if value is not noValue: - representation += ' %s=%r' % (attr, value) + representation += ', %s=%r' % (attr, value) - if self.isValue and self._componentValues: - representation += ' payload [%s]' % ', '.join([repr(x) for x in self._componentValues]) + if self.isValue and self.components: + representation += ', payload [%s]' % ', '.join( + [repr(x) for x in self.components]) return '<%s>' % representation def __eq__(self, other): - return self is other and True or self._componentValues == other + return self is other or self.components == other def __ne__(self, other): - return self._componentValues != other + return self.components != other def __lt__(self, other): - return self._componentValues < other + return self.components < other def __le__(self, other): - return self._componentValues <= other + return self.components <= other def __gt__(self, other): - return self._componentValues > other + return self.components > other def __ge__(self, other): - return self._componentValues >= other + return self.components >= other if sys.version_info[0] <= 2: def __nonzero__(self): - return self._componentValues and True or False + return bool(self.components) else: def __bool__(self): 
- return self._componentValues and True or False + return bool(self.components) - def __len__(self): - return len(self._componentValues) + @property + def components(self): + raise error.PyAsn1Error('Method not implemented') def _cloneComponentValues(self, myClone, cloneValueFlag): pass @@ -535,15 +599,14 @@ class AbstractConstructedAsn1Item(Asn1ItemBase): Note ---- Due to the mutable nature of the |ASN.1| object, even if no arguments - are supplied, new |ASN.1| object will always be created as a shallow - copy of `self`. + are supplied, a new |ASN.1| object will be created and returned. """ cloneValueFlag = kwargs.pop('cloneValueFlag', False) - initilaizers = self.readOnly.copy() - initilaizers.update(kwargs) + initializers = self.readOnly.copy() + initializers.update(kwargs) - clone = self.__class__(**initilaizers) + clone = self.__class__(**initializers) if cloneValueFlag: self._cloneComponentValues(clone, cloneValueFlag) @@ -588,9 +651,8 @@ class AbstractConstructedAsn1Item(Asn1ItemBase): Note ---- - Due to the immutable nature of the |ASN.1| object, if no arguments - are supplied, no new |ASN.1| object will be created and `self` will - be returned instead. + Due to the mutable nature of the |ASN.1| object, even if no arguments + are supplied, a new |ASN.1| object will be created and returned. """ initializers = self.readOnly.copy() @@ -615,9 +677,6 @@ class AbstractConstructedAsn1Item(Asn1ItemBase): return clone - def verifySizeSpec(self): - self.sizeSpec(self) - def getComponentByPosition(self, idx): raise error.PyAsn1Error('Method not implemented') @@ -631,9 +690,6 @@ class AbstractConstructedAsn1Item(Asn1ItemBase): self[k] = kwargs[k] return self - def clear(self): - self._componentValues = [] - # backward compatibility def setDefaultComponents(self): @@ -641,3 +697,11 @@ class AbstractConstructedAsn1Item(Asn1ItemBase): def getComponentType(self): return self.componentType + + # backward compatibility, unused + def verifySizeSpec(self): + self.subtypeSpec(self) + + + # Backward compatibility +AbstractConstructedAsn1Item = ConstructedAsn1Type diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/char.py b/server/www/packages/packages-linux/x64/pyasn1/type/char.py index 493badb..06074da 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/char.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/char.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import sys @@ -21,21 +21,27 @@ noValue = univ.noValue class AbstractCharacterString(univ.OctetString): """Creates |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`. - When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, + its objects are immutable and duck-type Python 2 :class:`str` or Python 3 + :class:`bytes`. When used in octet-stream context, |ASN.1| type assumes + "|encoding|" encoding. Keyword Args ------------ value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object - unicode object (Python 2) or string (Python 3), alternatively string - (Python 2) or bytes (Python 3) representing octet-stream of serialised - unicode string (note `encoding` parameter) or |ASN.1| class instance. 
+ :class:`unicode` object (Python 2) or :class:`str` (Python 3), + alternatively :class:`str` (Python 2) or :class:`bytes` (Python 3) + representing octet-stream of serialised unicode string + (note `encoding` parameter) or |ASN.1| class instance. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. encoding: :py:class:`str` Unicode codec ID to encode/decode :class:`unicode` (Python 2) or @@ -44,7 +50,7 @@ class AbstractCharacterString(univ.OctetString): Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. """ @@ -55,8 +61,10 @@ class AbstractCharacterString(univ.OctetString): return self._value.encode(self.encoding) except UnicodeEncodeError: - raise error.PyAsn1Error( - "Can't encode string '%s' with codec %s" % (self._value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeEncodeError( + "Can't encode string '%s' with codec " + "%s" % (self._value, self.encoding), exc ) def __unicode__(self): @@ -76,8 +84,10 @@ class AbstractCharacterString(univ.OctetString): return unicode(value) except (UnicodeDecodeError, LookupError): - raise error.PyAsn1Error( - "Can't decode string '%s' with codec %s" % (value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeDecodeError( + "Can't decode string '%s' with codec " + "%s" % (value, self.encoding), exc ) def asOctets(self, padding=True): @@ -95,8 +105,10 @@ class AbstractCharacterString(univ.OctetString): try: return self._value.encode(self.encoding) except UnicodeEncodeError: - raise error.PyAsn1Error( - "Can't encode string '%s' with codec %s" % (self._value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeEncodeError( + "Can't encode string '%s' with codec " + "%s" % (self._value, self.encoding), exc ) def prettyIn(self, value): @@ -113,8 +125,10 @@ class AbstractCharacterString(univ.OctetString): return str(value) except (UnicodeDecodeError, LookupError): - raise error.PyAsn1Error( - "Can't decode string '%s' with codec %s" % (value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeDecodeError( + "Can't decode string '%s' with codec " + "%s" % (value, self.encoding), exc ) def asOctets(self, padding=True): diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/constraint.py b/server/www/packages/packages-linux/x64/pyasn1/type/constraint.py index a704331..8f152e9 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/constraint.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/constraint.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # # Original concept and code by Mike C. Fletcher. 
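With the exception chaining added to char.py above, a failed text conversion can be caught either as a pyasn1 error or as a plain UnicodeError. A sketch of the Python 3 path, assuming upstream 0.4.x behaviour:

    from pyasn1 import error
    from pyasn1.type import char

    try:
        char.IA5String(b'\xff\xfe')    # not decodable as us-ascii
    except error.PyAsn1UnicodeDecodeError as exc:
        print('pyasn1 error:', exc)    # also isinstance(exc, UnicodeDecodeError)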
@@ -37,10 +37,11 @@ class AbstractConstraint(object): ) def __repr__(self): - representation = '%s object at 0x%x' % (self.__class__.__name__, id(self)) + representation = '%s object' % (self.__class__.__name__) if self._values: - representation += ' consts %s' % ', '.join([repr(x) for x in self._values]) + representation += ', consts %s' % ', '.join( + [repr(x) for x in self._values]) return '<%s>' % representation @@ -102,12 +103,17 @@ class SingleValueConstraint(AbstractConstraint): The SingleValueConstraint satisfies any value that is present in the set of permitted values. + Objects of this type are iterable (emitting constraint values) and + can act as operands for some arithmetic operations e.g. addition + and subtraction. The latter can be used for combining multiple + SingleValueConstraint objects into one. + The SingleValueConstraint object can be applied to any ASN.1 type. Parameters ---------- - \*values: :class:`int` + *values: :class:`int` Full set of values permitted by this constraint object. Examples @@ -136,6 +142,23 @@ class SingleValueConstraint(AbstractConstraint): if value not in self._set: raise error.ValueConstraintError(value) + # Constrains can be merged or reduced + + def __contains__(self, item): + return item in self._set + + def __iter__(self): + return iter(self._set) + + def __sub__(self, constraint): + return self.__class__(*(self._set.difference(constraint))) + + def __add__(self, constraint): + return self.__class__(*(self._set.union(constraint))) + + def __sub__(self, constraint): + return self.__class__(*(self._set.difference(constraint))) + class ContainedSubtypeConstraint(AbstractConstraint): """Create a ContainedSubtypeConstraint object. @@ -149,7 +172,7 @@ class ContainedSubtypeConstraint(AbstractConstraint): Parameters ---------- - \*values: + *values: Full set of values and constraint objects permitted by this constraint object. @@ -304,17 +327,21 @@ class PermittedAlphabetConstraint(SingleValueConstraint): string for as long as all its characters are present in the set of permitted characters. + Objects of this type are iterable (emitting constraint values) and + can act as operands for some arithmetic operations e.g. addition + and subtraction. + The PermittedAlphabetConstraint object can only be applied to the :ref:`character ASN.1 types ` such as :class:`~pyasn1.type.char.IA5String`. Parameters ---------- - \*alphabet: :class:`str` + *alphabet: :class:`str` Full set of characters permitted by this constraint object. - Examples - -------- + Example + ------- .. code-block:: python class BooleanValue(IA5String): @@ -331,6 +358,42 @@ class PermittedAlphabetConstraint(SingleValueConstraint): # this will raise ValueConstraintError garbage = BooleanValue('TAF') + + ASN.1 `FROM ... EXCEPT ...` clause can be modelled by combining multiple + PermittedAlphabetConstraint objects into one: + + Example + ------- + .. code-block:: python + + class Lipogramme(IA5String): + ''' + ASN.1 specification: + + Lipogramme ::= + IA5String (FROM (ALL EXCEPT ("e"|"E"))) + ''' + subtypeSpec = ( + PermittedAlphabetConstraint(*string.printable) - + PermittedAlphabetConstraint('e', 'E') + ) + + # this will succeed + lipogramme = Lipogramme('A work of fiction?') + + # this will raise ValueConstraintError + lipogramme = Lipogramme('Eel') + + Note + ---- + Although `ConstraintsExclusion` object could seemingly be used for this + purpose, practically, for it to work, it needs to represent its operand + constraints as sets and intersect one with the other. 
That would require + insight into the constraint values (and their types) that are otherwise + hidden inside the constraint object. + + Therefore it's more practical to model the `EXCEPT` clause at + the `PermittedAlphabetConstraint` level instead. """ def _setValues(self, values): self._values = values @@ -341,6 +401,151 @@ class PermittedAlphabetConstraint(SingleValueConstraint): raise error.ValueConstraintError(value) +class ComponentPresentConstraint(AbstractConstraint): + """Create a ComponentPresentConstraint object. + + The ComponentPresentConstraint is only satisfied when the value + is not `None`. + + The ComponentPresentConstraint object is typically used with + `WithComponentsConstraint`. + + Examples + -------- + .. code-block:: python + + present = ComponentPresentConstraint() + + # this will succeed + present('whatever') + + # this will raise ValueConstraintError + present(None) + """ + def _setValues(self, values): + self._values = ('',) + + if values: + raise error.PyAsn1Error('No arguments expected') + + def _testValue(self, value, idx): + if value is None: + raise error.ValueConstraintError( + 'Component is not present') + + +class ComponentAbsentConstraint(AbstractConstraint): + """Create a ComponentAbsentConstraint object. + + The ComponentAbsentConstraint is only satisfied when the value + is `None`. + + The ComponentAbsentConstraint object is typically used with + `WithComponentsConstraint`. + + Examples + -------- + .. code-block:: python + + absent = ComponentAbsentConstraint() + + # this will succeed + absent(None) + + # this will raise ValueConstraintError + absent('whatever') + """ + def _setValues(self, values): + self._values = ('',) + + if values: + raise error.PyAsn1Error('No arguments expected') + + def _testValue(self, value, idx): + if value is not None: + raise error.ValueConstraintError( + 'Component is not absent: %r' % value) + + +class WithComponentsConstraint(AbstractConstraint): + """Create a WithComponentsConstraint object. + + The `WithComponentsConstraint` satisfies any mapping object that has + constrained fields present or absent, as indicated by + `ComponentPresentConstraint` and `ComponentAbsentConstraint` + objects respectively. + + The `WithComponentsConstraint` object is typically applied + to :class:`~pyasn1.type.univ.Set` or + :class:`~pyasn1.type.univ.Sequence` types. + + Parameters + ---------- + *fields: :class:`tuple` + Zero or more tuples of (`field`, `constraint`) indicating constrained + fields. + + Notes + ----- + On top of the primary use of `WithComponentsConstraint` (ensuring presence + or absence of particular components of a :class:`~pyasn1.type.univ.Set` or + :class:`~pyasn1.type.univ.Sequence`), it is also possible to pass any other + constraint objects or their combinations. In the case of scalar fields, these + constraints will be verified in addition to the constraints belonging to + scalar components themselves. However, formally, these additional + constraints do not change the type of these ASN.1 objects. + + Examples + -------- + + ..
code-block:: python + + class Item(Sequence): # Set is similar + ''' + ASN.1 specification: + + Item ::= SEQUENCE { + id INTEGER OPTIONAL, + name OCTET STRING OPTIONAL + } WITH COMPONENTS id PRESENT, name ABSENT | id ABSENT, name PRESENT + ''' + componentType = NamedTypes( + OptionalNamedType('id', Integer()), + OptionalNamedType('name', OctetString()) + ) + withComponents = ConstraintsUnion( + WithComponentsConstraint( + ('id', ComponentPresentConstraint()), + ('name', ComponentAbsentConstraint()) + ), + WithComponentsConstraint( + ('id', ComponentAbsentConstraint()), + ('name', ComponentPresentConstraint()) + ) + ) + + item = Item() + + # This will succeed + item['id'] = 1 + + # This will succeed + item.reset() + item['name'] = 'John' + + # This will fail (on encoding) + item.reset() + item['id'] = 1 + item['name'] = 'John' + """ + def _testValue(self, value, idx): + for field, constraint in self._values: + constraint(value.get(field)) + + def _setValues(self, values): + AbstractConstraint._setValues(self, values) + + # This is a bit kludgy, meaning two op modes within a single constraint class InnerTypeConstraint(AbstractConstraint): """Value must satisfy the type and presence constraints""" @@ -352,7 +557,7 @@ class InnerTypeConstraint(AbstractConstraint): if idx not in self.__multipleTypeConstraint: raise error.ValueConstraintError(value) constraint, status = self.__multipleTypeConstraint[idx] - if status == 'ABSENT': # XXX presense is not checked! + if status == 'ABSENT': # XXX presence is not checked! raise error.ValueConstraintError(value) constraint(value) @@ -380,49 +585,41 @@ class ConstraintsExclusion(AbstractConstraint): Parameters ---------- - constraint: - Constraint or logic operator object. + *constraints: + Constraint or logic operator objects. Examples -------- .. code-block:: python - class Lipogramme(IA5STRING): - ''' - ASN.1 specification: - - Lipogramme ::= - IA5String (FROM (ALL EXCEPT ("e"|"E"))) - ''' + class LuckyNumber(Integer): subtypeSpec = ConstraintsExclusion( - PermittedAlphabetConstraint('e', 'E') + SingleValueConstraint(13) ) # this will succeed - lipogramme = Lipogramme('A work of fiction?') + luckyNumber = LuckyNumber(12) # this will raise ValueConstraintError - lipogramme = Lipogramme('Eel') + luckyNumber = LuckyNumber(13) - Warning - ------- - The above example involving PermittedAlphabetConstraint might - not work due to the way how PermittedAlphabetConstraint works. - The other constraints might work with ConstraintsExclusion - though. + Note + ---- + The `FROM ... EXCEPT ...` ASN.1 clause should be modeled by combining + constraint objects into one. See `PermittedAlphabetConstraint` for more + information. """ def _testValue(self, value, idx): - try: - self._values[0](value, idx) - except error.ValueConstraintError: - return - else: + for constraint in self._values: + try: + constraint(value, idx) + + except error.ValueConstraintError: + continue + raise error.ValueConstraintError(value) def _setValues(self, values): - if len(values) != 1: - raise error.PyAsn1Error('Single constraint expected') - AbstractConstraint._setValues(self, values) @@ -467,7 +664,7 @@ class ConstraintsIntersection(AbstractConstraintSet): Parameters ---------- - \*constraints: + *constraints: Constraint or logic operator objects. Examples @@ -500,8 +697,8 @@ class ConstraintsUnion(AbstractConstraintSet): class ConstraintsUnion(AbstractConstraintSet): """Create a ConstraintsUnion logic operator object.
- The ConstraintsUnion logic operator only succeeds if - *at least a single* operand succeeds. + The ConstraintsUnion logic operator succeeds if + *at least* a single operand succeeds. The ConstraintsUnion object can be applied to any constraint and logic operator objects. @@ -511,7 +708,7 @@ Parameters ---------- - \*constraints: + *constraints: Constraint or logic operator objects. Examples @@ -525,7 +722,7 @@ CapitalOrSmall ::= IA5String (FROM ("A".."Z") | FROM ("a".."z")) ''' - subtypeSpec = ConstraintsIntersection( + subtypeSpec = ConstraintsUnion( PermittedAlphabetConstraint('A', 'Z'), PermittedAlphabetConstraint('a', 'z') ) diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/error.py b/server/www/packages/packages-linux/x64/pyasn1/type/error.py index b2056bd..80fcf3b 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/error.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/error.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1.error import PyAsn1Error diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/namedtype.py b/server/www/packages/packages-linux/x64/pyasn1/type/namedtype.py index f162d19..cbc1429 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/namedtype.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/namedtype.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import sys @@ -49,9 +49,10 @@ class NamedType(object): representation = '%s=%r' % (self.name, self.asn1Object) if self.openType: - representation += ' openType: %r' % self.openType + representation += ', open type %r' % self.openType - return '<%s object at 0x%x type %s>' % (self.__class__.__name__, id(self), representation) + return '<%s object, type %s>' % ( + self.__class__.__name__, representation) def __eq__(self, other): return self.__nameAndType == other @@ -173,7 +174,8 @@ class NamedTypes(object): def __repr__(self): representation = ', '.join(['%r' % x for x in self.__namedTypes]) - return '<%s object at 0x%x types %s>' % (self.__class__.__name__, id(self), representation) + return '<%s object, types %s>' % ( + self.__class__.__name__, representation) def __eq__(self, other): return self.__namedTypes == other @@ -265,18 +267,18 @@ class NamedTypes(object): return nameToPosMap def __computeAmbiguousTypes(self): - ambigiousTypes = {} - partialAmbigiousTypes = () + ambiguousTypes = {} + partialAmbiguousTypes = () for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))): if namedType.isOptional or namedType.isDefaulted: - partialAmbigiousTypes = (namedType,) + partialAmbigiousTypes + partialAmbiguousTypes = (namedType,) + partialAmbiguousTypes else: - partialAmbigiousTypes = (namedType,) - if len(partialAmbigiousTypes) == len(self.__namedTypes): - ambigiousTypes[idx] = self + partialAmbiguousTypes = (namedType,) + if len(partialAmbiguousTypes) == len(self.__namedTypes): + ambiguousTypes[idx] = self else: - ambigiousTypes[idx] = NamedTypes(*partialAmbigiousTypes, **dict(terminal=True)) - return ambigiousTypes + ambiguousTypes[idx] = NamedTypes(*partialAmbiguousTypes, **dict(terminal=True)) + return ambiguousTypes def
getTypeByPosition(self, idx): """Return ASN.1 type object by its position in fields set. @@ -293,7 +295,7 @@ class NamedTypes(object): Raises ------ - : :class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.PyAsn1Error If given position is out of fields range """ try: @@ -317,7 +319,7 @@ class NamedTypes(object): Raises ------ - : :class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.PyAsn1Error If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes* """ try: @@ -341,7 +343,7 @@ class NamedTypes(object): Raises ------ - : :class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.PyAsn1Error If given field name is not present in callee *NamedTypes* """ try: @@ -365,7 +367,7 @@ class NamedTypes(object): Raises ------ - : :class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.PyAsn1Error If *name* is not present or not unique within callee *NamedTypes* """ try: @@ -394,7 +396,7 @@ class NamedTypes(object): Raises ------ - : :class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.PyAsn1Error If given position is out of fields range """ try: @@ -426,7 +428,7 @@ class NamedTypes(object): Raises ------ - : :class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.PyAsn1Error If *tagSet* is not present or not unique within callee *NamedTypes* or *idx* is out of fields range """ try: diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/namedval.py b/server/www/packages/packages-linux/x64/pyasn1/type/namedval.py index 59257e4..4247597 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/namedval.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/namedval.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # # ASN.1 named integers @@ -23,7 +23,7 @@ class NamedValues(object): Parameters ---------- - \*args: variable number of two-element :py:class:`tuple` + *args: variable number of two-element :py:class:`tuple` name: :py:class:`str` Value label @@ -109,7 +109,8 @@ class NamedValues(object): if len(representation) > 64: representation = representation[:32] + '...' + representation[-32:] - return '<%s object 0x%x enums %s>' % (self.__class__.__name__, id(self), representation) + return '<%s object, enums %s>' % ( + self.__class__.__name__, representation) def __eq__(self, other): return dict(self) == other diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/opentype.py b/server/www/packages/packages-linux/x64/pyasn1/type/opentype.py index d14ab34..29645f0 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/opentype.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/opentype.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # @@ -11,11 +11,16 @@ __all__ = ['OpenType'] class OpenType(object): """Create ASN.1 type map indexed by a value - The *DefinedBy* object models the ASN.1 *DEFINED BY* clause which maps - values to ASN.1 types in the context of the ASN.1 SEQUENCE/SET type. + The *OpenType* object models an untyped field of a constructed ASN.1 + type. In ASN.1 syntax it is usually represented by the + `ANY DEFINED BY` clause for scalars, or by the `SET OF ANY DEFINED BY` + and `SEQUENCE OF ANY DEFINED BY` clauses for container types. Typically + used together with :class:`~pyasn1.type.univ.Any` object.
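The OpenType docstring being rewritten here goes on to note that the passed `typeMap` is stored by reference rather than copied. A small sketch of the resulting read-only dict duck-typing (hypothetical map contents):

.. code-block:: python

    from pyasn1.type import opentype, univ

    typeMap = {1: univ.Integer(), 2: univ.OctetString()}
    openType = opentype.OpenType('id', typeMap)

    assert 1 in openType                 # dict-like membership test
    assert list(openType) == [1, 2]      # dict-like iteration over keys

    typeMap[3] = univ.Boolean()          # mutating the original mapping...
    assert 3 in openType                 # ...is visible through the OpenType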
- OpenType objects are duck-type a read-only Python :class:`dict` objects, - however the passed `typeMap` is stored by reference. + OpenType objects duck-type read-only Python :class:`dict` objects, + however the passed `typeMap` is not copied, but stored by reference. + That means the user can manipulate `typeMap` at run time and have the + changes reflected in *OpenType* object behavior. Parameters ---------- @@ -28,12 +33,14 @@ class OpenType(object): Examples -------- + + For untyped scalars: + .. code-block:: python openType = OpenType( - 'id', - {1: Integer(), - 2: OctetString()} + 'id', {1: Integer(), + 2: OctetString()} ) Sequence( componentType=NamedTypes( @@ -41,6 +48,22 @@ NamedType('blob', Any(), openType=openType) ) ) + + For untyped `SET OF` or `SEQUENCE OF` vectors: + + .. code-block:: python + + openType = OpenType( + 'id', {1: Integer(), + 2: OctetString()} + ) + Sequence( + componentType=NamedTypes( + NamedType('id', Integer()), + NamedType('blob', SetOf(componentType=Any()), + openType=openType) + ) + ) """ def __init__(self, name, typeMap=None): diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/tag.py b/server/www/packages/packages-linux/x64/pyasn1/type/tag.py index 95c226f..b88a734 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/tag.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/tag.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import error @@ -64,8 +64,10 @@ class Tag(object): self.__hash = hash(self.__tagClassId) def __repr__(self): - representation = '[%s:%s:%s]' % (self.__tagClass, self.__tagFormat, self.__tagId) - return '<%s object at 0x%x tag %s>' % (self.__class__.__name__, id(self), representation) + representation = '[%s:%s:%s]' % ( + self.__tagClass, self.__tagFormat, self.__tagId) + return '<%s object, tag %s>' % ( + self.__class__.__name__, representation) def __eq__(self, other): return self.__tagClassId == other @@ -199,7 +201,7 @@ class TagSet(object): else: representation = 'untagged' - return '<%s object at 0x%x %s>' % (self.__class__.__name__, id(self), representation) + return '<%s object, %s>' % (self.__class__.__name__, representation) def __add__(self, superTag): return self.__class__(self.__baseTag, *self.__superTags + (superTag,)) @@ -318,7 +320,7 @@ class TagSet(object): Returns ------- : :py:class:`bool` - `True` if callee is a supertype of *tagSet* + :obj:`True` if callee is a supertype of *tagSet* """ if len(tagSet) < self.__lenOfSuperTags: return False diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/tagmap.py b/server/www/packages/packages-linux/x64/pyasn1/type/tagmap.py index a9d237f..6f5163b 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/tagmap.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/tagmap.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software.
# -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import error @@ -56,16 +56,16 @@ class TagMap(object): return iter(self.__presentTypes) def __repr__(self): - representation = '%s object at 0x%x' % (self.__class__.__name__, id(self)) + representation = '%s object' % self.__class__.__name__ if self.__presentTypes: - representation += ' present %s' % repr(self.__presentTypes) + representation += ', present %s' % repr(self.__presentTypes) if self.__skipTypes: - representation += ' skip %s' % repr(self.__skipTypes) + representation += ', skip %s' % repr(self.__skipTypes) if self.__defaultType is not None: - representation += ' default %s' % repr(self.__defaultType) + representation += ', default %s' % repr(self.__defaultType) return '<%s>' % representation diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/univ.py b/server/www/packages/packages-linux/x64/pyasn1/type/univ.py index 898cf25..aa688b2 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/univ.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/univ.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. # -# Copyright (c) 2005-2018, Ilya Etingof +# Copyright (c) 2005-2019, Ilya Etingof # License: http://snmplabs.com/pyasn1/license.html # import math @@ -31,28 +31,32 @@ __all__ = ['Integer', 'Boolean', 'BitString', 'OctetString', 'Null', # "Simple" ASN.1 types (yet incomplete) -class Integer(base.AbstractSimpleAsn1Item): - """Create |ASN.1| type or object. +class Integer(base.SimpleAsn1Type): + """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python :class:`int` objects. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python :class:`int` objects. Keyword Args ------------ value: :class:`int`, :class:`str` or |ASN.1| object - Python integer or string literal or |ASN.1| class instance. + Python :class:`int` or :class:`str` literal or |ASN.1| class + instance. If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. namedValues: :py:class:`~pyasn1.type.namedval.NamedValues` Object representing non-default symbolic aliases for numbers Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. 
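The reworded Integer docstring above leans on the schema-versus-value distinction that comes with the SimpleAsn1Type base; a brief illustration (hypothetical values):

.. code-block:: python

    from pyasn1.type import univ

    age = univ.Integer()   # schema object: no value given yet
    assert not age.isValue

    age = age.clone(42)    # value object: duck-types Python int
    assert int(age) == 42
    assert age + 1 == 43   # arithmetic yields new Integer value objects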
Examples @@ -94,13 +98,13 @@ class Integer(base.AbstractSimpleAsn1Item): namedValues = namedval.NamedValues() # Optimization for faster codec lookup - typeId = base.AbstractSimpleAsn1Item.getTypeId() + typeId = base.SimpleAsn1Type.getTypeId() def __init__(self, value=noValue, **kwargs): if 'namedValues' not in kwargs: kwargs['namedValues'] = self.namedValues - base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs) + base.SimpleAsn1Type.__init__(self, value, **kwargs) def __and__(self, value): return self.clone(self._value & value) @@ -187,7 +191,7 @@ class Integer(base.AbstractSimpleAsn1Item): def __rdivmod__(self, value): return self.clone(divmod(value, self._value)) - __hash__ = base.AbstractSimpleAsn1Item.__hash__ + __hash__ = base.SimpleAsn1Type.__hash__ def __int__(self): return int(self._value) @@ -276,27 +280,31 @@ class Integer(base.AbstractSimpleAsn1Item): class Boolean(Integer): - """Create |ASN.1| type or object. + """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python :class:`int` objects. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python :class:`int` objects. Keyword Args ------------ value: :class:`int`, :class:`str` or |ASN.1| object - Python integer or boolean or string literal or |ASN.1| class instance. + Python :class:`int` or :class:`str` literal or |ASN.1| class + instance. If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. namedValues: :py:class:`~pyasn1.type.namedval.NamedValues` Object representing non-default symbolic aliases for numbers Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples @@ -355,23 +363,27 @@ class SizedInteger(SizedIntegerBase): return self.bitLength -class BitString(base.AbstractSimpleAsn1Item): +class BitString(base.SimpleAsn1Type): """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type both Python :class:`tuple` (as a tuple + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type both Python :class:`tuple` (as a tuple of bits) and :class:`int` objects. Keyword Args ------------ value: :class:`int`, :class:`str` or |ASN.1| object - Python integer or string literal representing binary or hexadecimal - number or sequence of integer bits or |ASN.1| object. + Python :class:`int` or :class:`str` literal representing binary + or hexadecimal number or sequence of integer bits or |ASN.1| object. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation.
namedValues: :py:class:`~pyasn1.type.namedval.NamedValues` Object representing non-default symbolic aliases for numbers @@ -386,7 +398,7 @@ class BitString(base.AbstractSimpleAsn1Item): Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples @@ -432,7 +444,7 @@ class BitString(base.AbstractSimpleAsn1Item): namedValues = namedval.NamedValues() # Optimization for faster codec lookup - typeId = base.AbstractSimpleAsn1Item.getTypeId() + typeId = base.SimpleAsn1Type.getTypeId() defaultBinValue = defaultHexValue = noValue @@ -461,7 +473,7 @@ class BitString(base.AbstractSimpleAsn1Item): if 'namedValues' not in kwargs: kwargs['namedValues'] = self.namedValues - base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs) + base.SimpleAsn1Type.__init__(self, value, **kwargs) def __str__(self): return self.asBinary() @@ -720,24 +732,30 @@ except NameError: # Python 2.4 return True -class OctetString(base.AbstractSimpleAsn1Item): +class OctetString(base.SimpleAsn1Type): """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python 2 :class:`str` or Python 3 :class:`bytes`. - When used in Unicode context, |ASN.1| type assumes "|encoding|" serialisation. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python 2 :class:`str` or + Python 3 :class:`bytes`. When used in Unicode context, |ASN.1| type + assumes "|encoding|" serialisation. Keyword Args ------------ - value: :class:`str`, :class:`bytes` or |ASN.1| object - string (Python 2) or bytes (Python 3), alternatively unicode object - (Python 2) or string (Python 3) representing character string to be - serialised into octets (note `encoding` parameter) or |ASN.1| object. + value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object + :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively + :class:`unicode` object (Python 2) or :class:`str` (Python 3) + representing character string to be serialised into octets + (note `encoding` parameter) or |ASN.1| object. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. encoding: :py:class:`str` Unicode codec ID to encode/decode :class:`unicode` (Python 2) or @@ -754,7 +772,7 @@ class OctetString(base.AbstractSimpleAsn1Item): Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer.
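Since the OctetString hunks here document the per-type `encoding` parameter and route codec failures into the unicode-aware exceptions, a short round-trip sketch (Python 3 semantics, hypothetical text):

.. code-block:: python

    from pyasn1.type import univ

    s = univ.OctetString(u'Wei\u00df', encoding='utf-8')  # text in, octets held
    assert s.asOctets() == b'Wei\xc3\x9f'
    assert str(s) == u'Wei\u00df'   # decoded back with the same codec

    schema = univ.OctetString()     # no value given: schema object
    assert not schema.isValue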
Examples @@ -786,7 +804,7 @@ class OctetString(base.AbstractSimpleAsn1Item): subtypeSpec = constraint.ConstraintsIntersection() # Optimization for faster codec lookup - typeId = base.AbstractSimpleAsn1Item.getTypeId() + typeId = base.SimpleAsn1Type.getTypeId() defaultBinValue = defaultHexValue = noValue encoding = 'iso-8859-1' @@ -816,26 +834,33 @@ class OctetString(base.AbstractSimpleAsn1Item): if 'encoding' not in kwargs: kwargs['encoding'] = self.encoding - base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs) + base.SimpleAsn1Type.__init__(self, value, **kwargs) if sys.version_info[0] <= 2: def prettyIn(self, value): if isinstance(value, str): return value + elif isinstance(value, unicode): try: return value.encode(self.encoding) + except (LookupError, UnicodeEncodeError): - raise error.PyAsn1Error( - "Can't encode string '%s' with codec %s" % (value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeEncodeError( + "Can't encode string '%s' with codec " + "%s" % (value, self.encoding), exc ) + elif isinstance(value, (tuple, list)): try: return ''.join([chr(x) for x in value]) + except ValueError: raise error.PyAsn1Error( "Bad %s initializer '%s'" % (self.__class__.__name__, value) ) + else: return str(value) @@ -847,8 +872,10 @@ class OctetString(base.AbstractSimpleAsn1Item): return self._value.decode(self.encoding) except UnicodeDecodeError: - raise error.PyAsn1Error( - "Can't decode string '%s' with codec %s" % (self._value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeDecodeError( + "Can't decode string '%s' with codec " + "%s" % (self._value, self.encoding), exc ) def asOctets(self): @@ -861,19 +888,26 @@ class OctetString(base.AbstractSimpleAsn1Item): def prettyIn(self, value): if isinstance(value, bytes): return value + elif isinstance(value, str): try: return value.encode(self.encoding) + except UnicodeEncodeError: - raise error.PyAsn1Error( - "Can't encode string '%s' with '%s' codec" % (value, self.encoding) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeEncodeError( + "Can't encode string '%s' with '%s' " + "codec" % (value, self.encoding), exc ) elif isinstance(value, OctetString): # a shortcut, bytes() would work the same way return value.asOctets() - elif isinstance(value, base.AbstractSimpleAsn1Item): # this mostly targets Integer objects + + elif isinstance(value, base.SimpleAsn1Type): # this mostly targets Integer objects return self.prettyIn(str(value)) + elif isinstance(value, (tuple, list)): return self.prettyIn(bytes(value)) + else: return bytes(value) @@ -882,8 +916,11 @@ class OctetString(base.AbstractSimpleAsn1Item): return self._value.decode(self.encoding) except UnicodeDecodeError: - raise error.PyAsn1Error( - "Can't decode string '%s' with '%s' codec at '%s'" % (self._value, self.encoding, self.__class__.__name__) + exc = sys.exc_info()[1] + raise error.PyAsn1UnicodeDecodeError( + "Can't decode string '%s' with '%s' codec at " + "'%s'" % (self._value, self.encoding, + self.__class__.__name__), exc ) def __bytes__(self): @@ -1028,19 +1065,22 @@ class OctetString(base.AbstractSimpleAsn1Item): class Null(OctetString): """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python :class:`str` objects (always empty). + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python :class:`str` objects + (always empty). 
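A quick sketch of the Null schema/value split described above (Null carries no information beyond its presence):

.. code-block:: python

    from pyasn1.type import univ

    n = univ.Null('')        # value object: the only permitted value
    assert n.isValue

    schema = univ.Null()     # schema object until a value is supplied
    assert not schema.isValue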
Keyword Args ------------ - value: :class:`str` or :py:class:`~pyasn1.type.univ.Null` object - Python empty string literal or any object that evaluates to `False` + value: :class:`str` or |ASN.1| object + Python empty :class:`str` literal or any object that evaluates to :obj:`False`. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples @@ -1081,25 +1121,30 @@ else: numericTypes = intTypes + (float,) -class ObjectIdentifier(base.AbstractSimpleAsn1Item): +class ObjectIdentifier(base.SimpleAsn1Type): """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python :class:`tuple` objects (tuple of non-negative integers). + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python :class:`tuple` objects + (tuple of non-negative integers). Keyword Args ------------ value: :class:`tuple`, :class:`str` or |ASN.1| object - Python sequence of :class:`int` or string literal or |ASN.1| object. + Python sequence of :class:`int` or :class:`str` literal or |ASN.1| object. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples @@ -1131,7 +1176,7 @@ class ObjectIdentifier(base.AbstractSimpleAsn1Item): subtypeSpec = constraint.ConstraintsIntersection() # Optimization for faster codec lookup - typeId = base.AbstractSimpleAsn1Item.getTypeId() + typeId = base.SimpleAsn1Type.getTypeId() def __add__(self, other): return self.clone(self._value + other) @@ -1173,8 +1218,8 @@ class ObjectIdentifier(base.AbstractSimpleAsn1Item): Returns ------- : :class:`bool` - :class:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object - or :class:`False` otherwise. + :obj:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object + or :obj:`False` otherwise. """ l = len(self) if l <= len(other): @@ -1214,10 +1259,11 @@ class ObjectIdentifier(base.AbstractSimpleAsn1Item): return '.'.join([str(x) for x in value]) -class Real(base.AbstractSimpleAsn1Item): +class Real(base.SimpleAsn1Type): """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python :class:`float` objects. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python :class:`float` objects. Additionally, |ASN.1| objects behave like a :class:`tuple` in which case its elements are mantissa, base and exponent. @@ -1225,17 +1271,20 @@ class Real(base.AbstractSimpleAsn1Item): ------------ value: :class:`tuple`, :class:`float` or |ASN.1| object Python sequence of :class:`int` (representing mantissa, base and - exponent) or float instance or *Real* class instance. + exponent) or :class:`float` instance or |ASN.1| object.
+ If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples @@ -1278,7 +1327,7 @@ class Real(base.AbstractSimpleAsn1Item): subtypeSpec = constraint.ConstraintsIntersection() # Optimization for faster codec lookup - typeId = base.AbstractSimpleAsn1Item.getTypeId() + typeId = base.SimpleAsn1Type.getTypeId() @staticmethod def __normalizeBase10(value): @@ -1342,8 +1391,8 @@ class Real(base.AbstractSimpleAsn1Item): Returns ------- : :class:`bool` - :class:`True` if calling object represents plus infinity - or :class:`False` otherwise. + :obj:`True` if calling object represents plus infinity + or :obj:`False` otherwise. """ return self._value == self._plusInf @@ -1355,8 +1404,8 @@ class Real(base.AbstractSimpleAsn1Item): Returns ------- : :class:`bool` - :class:`True` if calling object represents minus infinity - or :class:`False` otherwise. + :obj:`True` if calling object represents minus infinity + or :obj:`False` otherwise. """ return self._value == self._minusInf @@ -1479,7 +1528,7 @@ class Real(base.AbstractSimpleAsn1Item): def __bool__(self): return bool(float(self)) - __hash__ = base.AbstractSimpleAsn1Item.__hash__ + __hash__ = base.SimpleAsn1Type.__hash__ def __getitem__(self, idx): if self._value in self._inf: @@ -1500,27 +1549,31 @@ class Real(base.AbstractSimpleAsn1Item): class Enumerated(Integer): - """Create |ASN.1| type or object. + """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python :class:`int` objects. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its + objects are immutable and duck-type Python :class:`int` objects. Keyword Args ------------ value: :class:`int`, :class:`str` or |ASN.1| object - Python integer or string literal or |ASN.1| class instance. + Python :class:`int` or :class:`str` literal or |ASN.1| object. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. namedValues: :py:class:`~pyasn1.type.namedval.NamedValues` Object representing non-default symbolic aliases for numbers Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples @@ -1566,10 +1619,11 @@ class Enumerated(Integer): # "Structured" ASN.1 types -class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): - """Create |ASN.1| type. +class SequenceOfAndSetOfBase(base.ConstructedAsn1Type): + """Create |ASN.1| schema or value object. - |ASN.1| objects are mutable and duck-type Python :class:`list` objects. 
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`, + its objects are mutable and duck-type Python :class:`list` objects. Keyword Args ------------ @@ -1580,10 +1634,9 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) - - sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing collection size constraint + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type can only occur on explicit + `.isInconsistent` call. Examples -------- @@ -1605,12 +1658,14 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): # support positional params for backward compatibility if args: for key, value in zip(('componentType', 'tagSet', - 'subtypeSpec', 'sizeSpec'), args): + 'subtypeSpec'), args): if key in kwargs: raise error.PyAsn1Error('Conflicting positional and keyword params!') kwargs['componentType'] = value - base.AbstractConstructedAsn1Item.__init__(self, **kwargs) + self._componentValues = noValue + + base.ConstructedAsn1Type.__init__(self, **kwargs) # Python list protocol @@ -1628,24 +1683,36 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): except error.PyAsn1Error: raise IndexError(sys.exc_info()[1]) - def clear(self): - self._componentValues = [] - def append(self, value): - self[len(self)] = value + if self._componentValues is noValue: + pos = 0 + + else: + pos = len(self._componentValues) + + self[pos] = value def count(self, value): - return self._componentValues.count(value) + return list(self._componentValues.values()).count(value) def extend(self, values): for value in values: self.append(value) + if self._componentValues is noValue: + self._componentValues = {} + def index(self, value, start=0, stop=None): if stop is None: stop = len(self) + + indices, values = zip(*self._componentValues.items()) + + # TODO: remove when Py2.5 support is gone + values = list(values) + try: - return self._componentValues.index(value, start, stop) + return indices[values.index(value, start, stop)] except error.PyAsn1Error: raise ValueError(sys.exc_info()[1]) @@ -1654,15 +1721,24 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): self._componentValues.reverse() def sort(self, key=None, reverse=False): - self._componentValues.sort(key=key, reverse=reverse) + self._componentValues = dict( + enumerate(sorted(self._componentValues.values(), + key=key, reverse=reverse))) + + def __len__(self): + if self._componentValues is noValue or not self._componentValues: + return 0 + + return max(self._componentValues) + 1 def __iter__(self): - return iter(self._componentValues) + for idx in range(0, len(self)): + yield self.getComponentByPosition(idx) def _cloneComponentValues(self, myClone, cloneValueFlag): - for idx, componentValue in enumerate(self._componentValues): + for idx, componentValue in self._componentValues.items(): if componentValue is not noValue: - if isinstance(componentValue, base.AbstractConstructedAsn1Item): + if isinstance(componentValue, base.ConstructedAsn1Type): myClone.setComponentByPosition( idx, componentValue.clone(cloneValueFlag=cloneValueFlag) ) @@ -1689,8 +1765,8 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): object instead of the requested component. 
instantiate: :class:`bool` - If `True` (default), inner component will be automatically instantiated. - If 'False' either existing component or the `noValue` object will be + If :obj:`True` (default), inner component will be automatically instantiated. + If :obj:`False` either existing component or the :class:`NoValue` object will be returned. Returns @@ -1735,10 +1811,21 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): # returns noValue s.getComponentByPosition(0, instantiate=False) """ + if isinstance(idx, slice): + indices = tuple(range(len(self))) + return [self.getComponentByPosition(subidx, default, instantiate) + for subidx in indices[idx]] + + if idx < 0: + idx = len(self) + idx + if idx < 0: + raise error.PyAsn1Error( + 'SequenceOf/SetOf index is out of range') + try: componentValue = self._componentValues[idx] - except IndexError: + except (KeyError, error.PyAsn1Error): if not instantiate: return default @@ -1773,15 +1860,16 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. + If `value` is not given, schema object will be set as a component. verifyConstraints: :class:`bool` - If `False`, skip constraints validation + If :obj:`False`, skip constraints validation matchTags: :class:`bool` - If `False`, skip component tags matching + If :obj:`False`, skip component tags matching matchConstraints: :class:`bool` - If `False`, skip component constraints matching + If :obj:`False`, skip component constraints matching Returns ------- @@ -1789,51 +1877,75 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): Raises ------ - IndexError: + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error + On constraint violation or bad initializer + IndexError When idx > len(self) """ + if isinstance(idx, slice): + indices = tuple(range(len(self))) + startIdx = indices and indices[idx][0] or 0 + for subIdx, subValue in enumerate(value): + self.setComponentByPosition( + startIdx + subIdx, subValue, verifyConstraints, + matchTags, matchConstraints) + return self + + if idx < 0: + idx = len(self) + idx + if idx < 0: + raise error.PyAsn1Error( + 'SequenceOf/SetOf index is out of range') + componentType = self.componentType - try: - currentValue = self._componentValues[idx] - except IndexError: - currentValue = noValue + if self._componentValues is noValue: + componentValues = {} - if len(self._componentValues) < idx: - raise error.PyAsn1Error('Component index out of range') + else: + componentValues = self._componentValues + + currentValue = componentValues.get(idx, noValue) if value is noValue: if componentType is not None: value = componentType.clone() + elif currentValue is noValue: raise error.PyAsn1Error('Component type not defined') + elif not isinstance(value, base.Asn1Item): - if componentType is not None and isinstance(componentType, base.AbstractSimpleAsn1Item): + if (componentType is not None and + isinstance(componentType, base.SimpleAsn1Type)): value = componentType.clone(value=value) - elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item): + + elif (currentValue is not noValue and + isinstance(currentValue, base.SimpleAsn1Type)): value = currentValue.clone(value=value) + else: - raise error.PyAsn1Error('Non-ASN.1 value %r and undefined component type at %r' % (value, self)) - elif 
componentType is not None: - if self.strictConstraints: - if not componentType.isSameTypeWith(value, matchTags, matchConstraints): - raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType)) - else: - if not componentType.isSuperTypeOf(value, matchTags, matchConstraints): - raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType)) + raise error.PyAsn1Error( + 'Non-ASN.1 value %r and undefined component' + ' type at %r' % (value, self)) - if verifyConstraints and value.isValue: - try: - self.subtypeSpec(value, idx) + elif componentType is not None and (matchTags or matchConstraints): + subtypeChecker = ( + self.strictConstraints and + componentType.isSameTypeWith or + componentType.isSuperTypeOf) - except error.PyAsn1Error: - exType, exValue, exTb = sys.exc_info() - raise exType('%s at %s' % (exValue, self.__class__.__name__)) + if not subtypeChecker(value, verifyConstraints and matchTags, + verifyConstraints and matchConstraints): + # TODO: we should wrap componentType with UnnamedType to carry + # additional properties associated with componentType + if componentType.typeId != Any.typeId: + raise error.PyAsn1Error( + 'Component value is tag-incompatible: %r vs ' + '%r' % (value, componentType)) - if currentValue is noValue: - self._componentValues.append(value) - else: - self._componentValues[idx] = value + componentValues[idx] = value + + self._componentValues = componentValues return self @@ -1842,16 +1954,44 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): if self.componentType is not None: return self.componentType.tagMap + @property + def components(self): + return [self._componentValues[idx] + for idx in sorted(self._componentValues)] + + def clear(self): + """Remove all components and become an empty |ASN.1| value object. + + Has the same effect on |ASN.1| object as it does on :class:`list` + built-in. + """ + self._componentValues = {} + return self + + def reset(self): + """Remove all components and become a |ASN.1| schema object. + + See :meth:`isValue` property for more information on the + distinction between value and schema objects. + """ + self._componentValues = noValue + return self + def prettyPrint(self, scope=0): scope += 1 representation = self.__class__.__name__ + ':\n' - for idx, componentValue in enumerate(self._componentValues): + + if not self.isValue: + return representation + + for idx, componentValue in enumerate(self): representation += ' ' * scope if (componentValue is noValue and self.componentType is not None): representation += '' else: representation += componentValue.prettyPrint(scope) + return representation def prettyPrintType(self, scope=0): @@ -1867,17 +2007,17 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): def isValue(self): """Indicate that |ASN.1| object represents ASN.1 value. - If *isValue* is `False` then this object represents just ASN.1 schema. + If *isValue* is :obj:`False` then this object represents just ASN.1 schema. - If *isValue* is `True` then, in addition to its ASN.1 schema features, - this object can also be used like a Python built-in object (e.g. `int`, - `str`, `dict` etc.). + If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features, + this object can also be used like a Python built-in object + (e.g. :class:`int`, :class:`str`, :class:`dict` etc.). Returns ------- : :class:`bool` - :class:`False` if object represents just ASN.1 schema. 
- :class:`True` if object represents ASN.1 schema and can be used as a normal value. + :obj:`False` if object represents just ASN.1 schema. + :obj:`True` if object represents ASN.1 schema and can be used as a normal value. Note ---- @@ -1890,12 +2030,53 @@ class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item): The PyASN1 value objects can **additionally** participate in many operations involving regular Python objects (e.g. arithmetic, comprehension etc). """ - for componentValue in self._componentValues: + if self._componentValues is noValue: + return False + + if len(self._componentValues) != len(self): + return False + + for componentValue in self._componentValues.values(): if componentValue is noValue or not componentValue.isValue: return False return True + @property + def isInconsistent(self): + """Run necessary checks to ensure |ASN.1| object consistency. + + Default action is to verify |ASN.1| object against constraints imposed + by `subtypeSpec`. + + Raises + ------ + :py:class:`~pyasn1.error.PyAsn1Error` on any inconsistencies found + """ + if self.componentType is noValue or not self.subtypeSpec: + return False + + if self._componentValues is noValue: + return True + + mapping = {} + + for idx, value in self._componentValues.items(): + # Absent fields are not in the mapping + if value is noValue: + continue + + mapping[idx] = value + + try: + # Represent SequenceOf/SetOf as a bare dict to constraints chain + self.subtypeSpec(mapping) + + except error.PyAsn1Error: + exc = sys.exc_info()[1] + return exc + + return False class SequenceOf(SequenceOfAndSetOfBase): __doc__ = SequenceOfAndSetOfBase.__doc__ @@ -1916,10 +2097,6 @@ class SequenceOf(SequenceOfAndSetOfBase): #: imposing constraints on |ASN.1| type initialization values. subtypeSpec = constraint.ConstraintsIntersection() - #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - #: object imposing size constraint on |ASN.1| objects - sizeSpec = constraint.ConstraintsIntersection() - # Disambiguation ASN.1 types identification typeId = SequenceOfAndSetOfBase.getTypeId() @@ -1943,18 +2120,15 @@ class SetOf(SequenceOfAndSetOfBase): #: imposing constraints on |ASN.1| type initialization values. subtypeSpec = constraint.ConstraintsIntersection() - #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - #: object imposing size constraint on |ASN.1| objects - sizeSpec = constraint.ConstraintsIntersection() - # Disambiguation ASN.1 types identification typeId = SequenceOfAndSetOfBase.getTypeId() -class SequenceAndSetBase(base.AbstractConstructedAsn1Item): - """Create |ASN.1| type. +class SequenceAndSetBase(base.ConstructedAsn1Type): + """Create |ASN.1| schema or value object. - |ASN.1| objects are mutable and duck-type Python :class:`dict` objects. + |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`, + its objects are mutable and duck-type Python :class:`dict` objects. Keyword Args ------------ @@ -1965,10 +2139,9 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) - - sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing collection size constraint + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type can only occur on explicit + `.isInconsistent` call.
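With sizeSpec folded into subtypeSpec, collection-level constraints are no longer enforced while components are assigned; they surface through the explicit isInconsistent check instead. A sketch under that assumption (hypothetical Packet type; ValueSizeConstraint comes from the same constraint module):

.. code-block:: python

    from pyasn1.type import constraint, univ

    class Packet(univ.SequenceOf):
        componentType = univ.Integer()
        subtypeSpec = constraint.ValueSizeConstraint(1, 2)  # at most two items

    p = Packet()
    p.extend([1, 2, 3])     # accepted: no collection check at mutation time

    exc = p.isInconsistent  # explicit check: False, or the raised exception
    if exc:
        print('constraint violated: %s' % exc)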
Examples -------- @@ -2042,8 +2215,12 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): def __init__(self, **kwargs): - base.AbstractConstructedAsn1Item.__init__(self, **kwargs) + base.ConstructedAsn1Type.__init__(self, **kwargs) self._componentTypeLen = len(self.componentType) + if self._componentTypeLen: + self._componentValues = [] + else: + self._componentValues = noValue self._dynamicNames = self._componentTypeLen or self.DynamicNames() def __getitem__(self, idx): @@ -2086,6 +2263,9 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): else: return key in self._dynamicNames + def __len__(self): + return len(self._componentValues) + def __iter__(self): return iter(self.componentType or self._dynamicNames) @@ -2112,13 +2292,36 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): self[k] = mappingValue[k] def clear(self): + """Remove all components and become an empty |ASN.1| value object. + + Has the same effect on |ASN.1| object as it does on :class:`dict` + built-in. + """ self._componentValues = [] self._dynamicNames = self.DynamicNames() + return self + + def reset(self): + """Remove all components and become a |ASN.1| schema object. + + See :meth:`isValue` property for more information on the + distinction between value and schema objects. + """ + self._componentValues = noValue + self._dynamicNames = self.DynamicNames() + return self + + @property + def components(self): + return self._componentValues def _cloneComponentValues(self, myClone, cloneValueFlag): + if self._componentValues is noValue: + return + for idx, componentValue in enumerate(self._componentValues): if componentValue is not noValue: - if isinstance(componentValue, base.AbstractConstructedAsn1Item): + if isinstance(componentValue, base.ConstructedAsn1Type): myClone.setComponentByPosition( idx, componentValue.clone(cloneValueFlag=cloneValueFlag) ) @@ -2142,14 +2345,16 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): object instead of the requested component. instantiate: :class:`bool` - If `True` (default), inner component will be automatically instantiated. - If 'False' either existing component or the `noValue` object will be - returned. + If :obj:`True` (default), inner component will be automatically + instantiated. + If :obj:`False` either existing component or the :class:`NoValue` + object will be returned. Returns ------- : :py:class:`~pyasn1.type.base.PyAsn1Item` - Instantiate |ASN.1| component type or return existing component value + Instantiate |ASN.1| component type or return existing + component value """ if self._componentTypeLen: idx = self.componentType.getPositionByName(name) @@ -2180,15 +2385,16 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. + If `value` is not given, schema object will be set as a component. verifyConstraints: :class:`bool` - If `False`, skip constraints validation + If :obj:`False`, skip constraints validation matchTags: :class:`bool` - If `False`, skip component tags matching + If :obj:`False`, skip component tags matching matchConstraints: :class:`bool` - If `False`, skip component constraints matching + If :obj:`False`, skip component constraints matching Returns ------- @@ -2226,9 +2432,10 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): object instead of the requested component. 
instantiate: :class:`bool` - If `True` (default), inner component will be automatically instantiated. - If 'False' either existing component or the `noValue` object will be - returned. + If :obj:`True` (default), inner component will be automatically + instantiated. + If :obj:`False` either existing component or the :class:`NoValue` + object will be returned. Returns ------- @@ -2275,7 +2482,11 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): s.getComponentByPosition(0, instantiate=False) """ try: - componentValue = self._componentValues[idx] + if self._componentValues is noValue: + componentValue = noValue + + else: + componentValue = self._componentValues[idx] except IndexError: componentValue = noValue @@ -2317,15 +2528,16 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. + If `value` is not given, schema object will be set as a component. verifyConstraints : :class:`bool` - If `False`, skip constraints validation + If :obj:`False`, skip constraints validation matchTags: :class:`bool` - If `False`, skip component tags matching + If :obj:`False`, skip component tags matching matchConstraints: :class:`bool` - If `False`, skip component constraints matching + If :obj:`False`, skip component constraints matching Returns ------- @@ -2334,8 +2546,14 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): componentType = self.componentType componentTypeLen = self._componentTypeLen + if self._componentValues is noValue: + componentValues = [] + + else: + componentValues = self._componentValues + try: - currentValue = self._componentValues[idx] + currentValue = componentValues[idx] except IndexError: currentValue = noValue @@ -2343,11 +2561,13 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): if componentTypeLen < idx: raise error.PyAsn1Error('component index out of range') - self._componentValues = [noValue] * componentTypeLen + componentValues = [noValue] * componentTypeLen if value is noValue: if componentTypeLen: - value = componentType.getTypeByPosition(idx).clone() + value = componentType.getTypeByPosition(idx) + if isinstance(value, base.ConstructedAsn1Type): + value = value.clone(cloneValueFlag=componentType[idx].isDefaulted) elif currentValue is noValue: raise error.PyAsn1Error('Component type not defined') @@ -2355,64 +2575,61 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): elif not isinstance(value, base.Asn1Item): if componentTypeLen: subComponentType = componentType.getTypeByPosition(idx) - if isinstance(subComponentType, base.AbstractSimpleAsn1Item): + if isinstance(subComponentType, base.SimpleAsn1Type): value = subComponentType.clone(value=value) else: raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__) - elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item): + elif currentValue is not noValue and isinstance(currentValue, base.SimpleAsn1Type): value = currentValue.clone(value=value) else: raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__) - elif (matchTags or matchConstraints) and componentTypeLen: + elif ((verifyConstraints or matchTags or matchConstraints) and + componentTypeLen): subComponentType = componentType.getTypeByPosition(idx) if subComponentType is not noValue: subtypeChecker = 
(self.strictConstraints and subComponentType.isSameTypeWith or subComponentType.isSuperTypeOf) - if not subtypeChecker(value, matchTags, matchConstraints): + if not subtypeChecker(value, verifyConstraints and matchTags, + verifyConstraints and matchConstraints): if not componentType[idx].openType: raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType)) - if verifyConstraints and value.isValue: - try: - self.subtypeSpec(value, idx) - - except error.PyAsn1Error: - exType, exValue, exTb = sys.exc_info() - raise exType('%s at %s' % (exValue, self.__class__.__name__)) - if componentTypeLen or idx in self._dynamicNames: - self._componentValues[idx] = value + componentValues[idx] = value - elif len(self._componentValues) == idx: - self._componentValues.append(value) + elif len(componentValues) == idx: + componentValues.append(value) self._dynamicNames.addField(idx) else: raise error.PyAsn1Error('Component index out of range') + self._componentValues = componentValues + return self @property def isValue(self): """Indicate that |ASN.1| object represents ASN.1 value. - If *isValue* is `False` then this object represents just ASN.1 schema. + If *isValue* is :obj:`False` then this object represents just ASN.1 schema. - If *isValue* is `True` then, in addition to its ASN.1 schema features, - this object can also be used like a Python built-in object (e.g. `int`, - `str`, `dict` etc.). + If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features, + this object can also be used like a Python built-in object (e.g. + :class:`int`, :class:`str`, :class:`dict` etc.). Returns ------- : :class:`bool` - :class:`False` if object represents just ASN.1 schema. - :class:`True` if object represents ASN.1 schema and can be used as a normal value. + :obj:`False` if object represents just ASN.1 schema. + :obj:`True` if object represents ASN.1 schema and can be used as a + normal value. Note ---- @@ -2424,7 +2641,16 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): The PyASN1 value objects can **additionally** participate in many operations involving regular Python objects (e.g. arithmetic, comprehension etc). + + It is sufficient for |ASN.1| objects to have all non-optional and non-defaulted + components being value objects to be considered a value object as a whole. + In other words, even with one or more optional components not turned into + value objects, the |ASN.1| object is still considered a value object. Defaulted + components are normally value objects by default. """ + if self._componentValues is noValue: + return False + componentType = self.componentType if componentType: @@ -2446,6 +2672,44 @@ return True + @property + def isInconsistent(self): + """Run necessary checks to ensure |ASN.1| object consistency. + + Default action is to verify |ASN.1| object against constraints imposed + by `subtypeSpec`. 
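# --- sketch of the optional/defaulted rule stated in the Note above
# (hypothetical Pair type, not from the patch):
from pyasn1.type import univ, namedtype

class Pair(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('a', univ.Integer()),
        namedtype.OptionalNamedType('b', univ.Integer()),
    )

p = Pair()
p['a'] = 1
assert p.isValue    # 'b' is OPTIONAL, so the SEQUENCE already counts as a value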
+ + Raises + ------ + :py:class:`~pyasn1.error.PyAsn1Error` on any inconsistencies found + """ + if self.componentType is noValue or not self.subtypeSpec: + return False + + if self._componentValues is noValue: + return True + + mapping = {} + + for idx, value in enumerate(self._componentValues): + # Absent fields are not in the mapping + if value is noValue: + continue + + name = self.componentType.getNameByPosition(idx) + + mapping[name] = value + + try: + # Represent Sequence/Set as a bare dict to constraints chain + self.subtypeSpec(mapping) + + except error.PyAsn1Error: + exc = sys.exc_info()[1] + return exc + + return False + def prettyPrint(self, scope=0): """Return an object representation string. @@ -2495,7 +2759,6 @@ class SequenceAndSetBase(base.AbstractConstructedAsn1Item): if self._componentTypeLen: return self.componentType[idx].name - class Sequence(SequenceAndSetBase): __doc__ = SequenceAndSetBase.__doc__ @@ -2511,10 +2774,6 @@ class Sequence(SequenceAndSetBase): #: imposing constraints on |ASN.1| type initialization values. subtypeSpec = constraint.ConstraintsIntersection() - #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - #: object imposing constraints on |ASN.1| objects - sizeSpec = constraint.ConstraintsIntersection() - #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`) #: object imposing size constraint on |ASN.1| objects componentType = namedtype.NamedTypes() @@ -2554,10 +2813,6 @@ class Set(SequenceAndSetBase): #: imposing constraints on |ASN.1| type initialization values. subtypeSpec = constraint.ConstraintsIntersection() - #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - #: object imposing constraints on |ASN.1| objects - sizeSpec = constraint.ConstraintsIntersection() - # Disambiguation ASN.1 types identification typeId = SequenceAndSetBase.getTypeId() @@ -2581,9 +2836,10 @@ object instead of the requested component. instantiate: :class:`bool` - If `True` (default), inner component will be automatically instantiated. - If 'False' either existing component or the `noValue` object will be - returned. + If :obj:`True` (default), inner component will be automatically + instantiated. + If :obj:`False` either existing component or the :class:`NoValue` + object will be returned. Returns ------- @@ -2619,18 +2875,19 @@ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. + If `value` is not given, schema object will be set as a component. verifyConstraints : :class:`bool` - If `False`, skip constraints validation + If :obj:`False`, skip constraints validation matchTags: :class:`bool` - If `False`, skip component tags matching + If :obj:`False`, skip component tags matching matchConstraints: :class:`bool` - If `False`, skip component constraints matching + If :obj:`False`, skip component constraints matching innerFlag: :class:`bool` - If `True`, search for matching *tagSet* recursively. + If :obj:`True`, search for matching *tagSet* recursively. Returns ------- @@ -2662,9 +2919,10 @@ class Choice(Set): - """Create |ASN.1| type. + """Create |ASN.1| schema or value object. - |ASN.1| objects are mutable and duck-type Python :class:`dict` objects. 
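# --- usage sketch for the isInconsistent property defined above: it returns
# False when all constraints hold, or the exception object describing the
# violation, so the caller decides whether to raise. `p` is the hypothetical
# Pair value from the previous sketch:
problem = p.isInconsistent
if problem:
    raise problem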
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`, + its objects are mutable and duck-type Python :class:`list` objects. Keyword Args ------------ @@ -2675,10 +2933,9 @@ class Choice(Set): Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) - - sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing collection size constraint + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type can only occur on explicit + `.isInconsistent` call. Examples -------- @@ -2718,11 +2975,7 @@ class Choice(Set): #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object #: imposing constraints on |ASN.1| type initialization values. - subtypeSpec = constraint.ConstraintsIntersection() - - #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - #: object imposing size constraint on |ASN.1| objects - sizeSpec = constraint.ConstraintsIntersection( + subtypeSpec = constraint.ConstraintsIntersection( constraint.ValueSizeConstraint(1, 1) ) @@ -2795,7 +3048,7 @@ class Choice(Set): if self._currentIdx is not None: yield self.componentType[self._currentIdx].getName(), self[self._currentIdx] - def verifySizeSpec(self): + def checkConsistency(self): if self._currentIdx is None: raise error.PyAsn1Error('Component not chosen') @@ -2809,7 +3062,7 @@ class Choice(Set): tagSet = component.effectiveTagSet else: tagSet = component.tagSet - if isinstance(component, base.AbstractConstructedAsn1Item): + if isinstance(component, base.ConstructedAsn1Type): myClone.setComponentByType( tagSet, component.clone(cloneValueFlag=cloneValueFlag) ) @@ -2847,15 +3100,16 @@ class Choice(Set): A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. Once a new value is set to *idx* component, previous value is dropped. + If `value` is not given, schema object will be set as a component. verifyConstraints : :class:`bool` - If `False`, skip constraints validation + If :obj:`False`, skip constraints validation matchTags: :class:`bool` - If `False`, skip component tags matching + If :obj:`False`, skip component tags matching matchConstraints: :class:`bool` - If `False`, skip component constraints matching + If :obj:`False`, skip component constraints matching Returns ------- @@ -2925,17 +3179,18 @@ class Choice(Set): def isValue(self): """Indicate that |ASN.1| object represents ASN.1 value. - If *isValue* is `False` then this object represents just ASN.1 schema. + If *isValue* is :obj:`False` then this object represents just ASN.1 schema. - If *isValue* is `True` then, in addition to its ASN.1 schema features, - this object can also be used like a Python built-in object (e.g. `int`, - `str`, `dict` etc.). + If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features, + this object can also be used like a Python built-in object (e.g. + :class:`int`, :class:`str`, :class:`dict` etc.). Returns ------- : :class:`bool` - :class:`False` if object represents just ASN.1 schema. - :class:`True` if object represents ASN.1 schema and can be used as a normal value. + :obj:`False` if object represents just ASN.1 schema. + :obj:`True` if object represents ASN.1 schema and can be used as a normal + value. 
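# --- sketch: CHOICE keeps its single-alternative rule, now expressed through
# subtypeSpec's ValueSizeConstraint(1, 1) instead of the removed sizeSpec.
# `Either` is a hypothetical type, not from the patch:
from pyasn1.type import univ, namedtype

class Either(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('num', univ.Integer()),
        namedtype.NamedType('text', univ.OctetString()),
    )

c = Either()
c['num'] = 42
c['text'] = 'hi'                # choosing a new alternative drops the old one
assert c.getName() == 'text'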
Note ---- @@ -2957,7 +3212,7 @@ class Choice(Set): def clear(self): self._currentIdx = None - Set.clear(self) + return Set.clear(self) # compatibility stubs @@ -2968,22 +3223,27 @@ class Choice(Set): class Any(OctetString): """Create |ASN.1| schema or value object. - |ASN.1| objects are immutable and duck-type Python 2 :class:`str` or Python 3 - :class:`bytes`. When used in Unicode context, |ASN.1| type assumes "|encoding|" - serialisation. + |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, + its objects are immutable and duck-type Python 2 :class:`str` or Python 3 + :class:`bytes`. When used in Unicode context, |ASN.1| type assumes + "|encoding|" serialisation. Keyword Args ------------ - value: :class:`str`, :class:`bytes` or |ASN.1| object - string (Python 2) or bytes (Python 3), alternatively unicode object - (Python 2) or string (Python 3) representing character string to be - serialised into octets (note `encoding` parameter) or |ASN.1| object. + value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object + :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively + :class:`unicode` object (Python 2) or :class:`str` (Python 3) + representing character string to be serialised into octets (note + `encoding` parameter) or |ASN.1| object. + If `value` is not given, schema object will be created. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` - Object representing non-default ASN.1 subtype constraint(s) + Object representing non-default ASN.1 subtype constraint(s). Constraints + verification for |ASN.1| type occurs automatically on object + instantiation. encoding: :py:class:`str` Unicode codec ID to encode/decode :class:`unicode` (Python 2) or @@ -3000,7 +3260,7 @@ class Any(OctetString): Raises ------ - :py:class:`~pyasn1.error.PyAsn1Error` + ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error On constraint violation or bad initializer. Examples diff --git a/server/www/packages/packages-linux/x64/pyasn1/type/useful.py b/server/www/packages/packages-linux/x64/pyasn1/type/useful.py index 146916d..7536b95 100644 --- a/server/www/packages/packages-linux/x64/pyasn1/type/useful.py +++ b/server/www/packages/packages-linux/x64/pyasn1/type/useful.py @@ -1,7 +1,7 @@ # # This file is part of pyasn1 software. 
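# --- sketch: ANY (documented above) duck-types bytes and wraps any
# pre-serialised ASN.1 content, here produced with pyasn1's BER encoder:
from pyasn1.type import univ
from pyasn1.codec.ber import encoder

blob = univ.Any(encoder.encode(univ.Integer(7)))
assert blob.asOctets() == encoder.encode(univ.Integer(7))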
# -# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com> +# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com> # License: http://snmplabs.com/pyasn1/license.html # import datetime diff --git a/server/www/packages/packages-linux/x64/pymysql/__init__.py b/server/www/packages/packages-linux/x64/pymysql/__init__.py index b79b4b8..0cb5006 100644 --- a/server/www/packages/packages-linux/x64/pymysql/__init__.py +++ b/server/www/packages/packages-linux/x64/pymysql/__init__.py @@ -35,7 +35,7 @@ from .times import ( DateFromTicks, TimeFromTicks, TimestampFromTicks) -VERSION = (0, 9, 2, None) +VERSION = (0, 9, 3, None) if VERSION[3] is not None: VERSION_STRING = "%d.%d.%d_%s" % VERSION else: diff --git a/server/www/packages/packages-linux/x64/pymysql/_auth.py b/server/www/packages/packages-linux/x64/pymysql/_auth.py index bbb742d..199f36c 100644 --- a/server/www/packages/packages-linux/x64/pymysql/_auth.py +++ b/server/www/packages/packages-linux/x64/pymysql/_auth.py @@ -4,14 +4,22 @@ Implements auth methods from ._compat import text_type, PY2 from .constants import CLIENT from .err import OperationalError +from .util import byte2int, int2byte -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization, hashes -from cryptography.hazmat.primitives.asymmetric import padding + +try: + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import serialization, hashes + from cryptography.hazmat.primitives.asymmetric import padding + _have_cryptography = True +except ImportError: + _have_cryptography = False from functools import partial import hashlib +import io import struct +import warnings DEBUG = False @@ -69,6 +77,8 @@ class RandStruct_323(object): def scramble_old_password(password, message): """Scramble for old_password""" + warnings.warn("old password (for MySQL <4.1) is used. Upgrade your password with newer auth method.\n" + "old password support will be removed in future PyMySQL version") hash_pass = _hash_password_323(password) hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323]) hash_pass_n = struct.unpack(">LL", hash_pass) @@ -128,6 +138,8 @@ def sha2_rsa_encrypt(password, salt, public_key): Used for sha256_password and caching_sha2_password. 
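# --- caller-side sketch: the try/except import above makes `cryptography`
# optional, and the guard that follows raises RuntimeError as soon as a SHA-2
# auth plugin needs it. Host and credentials below are placeholders:
import pymysql

try:
    pymysql.connect(host='db.example', user='u', password='p')
except RuntimeError as exc:          # raised by sha2_rsa_encrypt()
    print('cryptography is required for SHA-2 auth:', exc)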
""" + if not _have_cryptography: + raise RuntimeError("cryptography is required for sha256_password or caching_sha2_password") message = _xor_password(password + b'\0', salt) rsa_key = serialization.load_pem_public_key(public_key, default_backend()) return rsa_key.encrypt( diff --git a/server/www/packages/packages-linux/x64/pymysql/charset.py b/server/www/packages/packages-linux/x64/pymysql/charset.py index 968376c..07d8063 100644 --- a/server/www/packages/packages-linux/x64/pymysql/charset.py +++ b/server/www/packages/packages-linux/x64/pymysql/charset.py @@ -18,7 +18,7 @@ class Charset(object): @property def encoding(self): name = self.name - if name == 'utf8mb4': + if name in ('utf8mb4', 'utf8mb3'): return 'utf8' return name @@ -30,18 +30,18 @@ class Charset(object): class Charsets: def __init__(self): self._by_id = {} + self._by_name = {} def add(self, c): self._by_id[c.id] = c + if c.is_default: + self._by_name[c.name] = c def by_id(self, id): return self._by_id[id] def by_name(self, name): - name = name.lower() - for c in self._by_id.values(): - if c.name == name and c.is_default: - return c + return self._by_name.get(name.lower()) _charsets = Charsets() """ @@ -89,7 +89,6 @@ _charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', '')) _charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes')) _charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes')) _charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', '')) -_charsets.add(Charset(35, 'ucs2', 'ucs2_general_ci', 'Yes')) _charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes')) _charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes')) _charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes')) @@ -108,13 +107,9 @@ _charsets.add(Charset(50, 'cp1251', 'cp1251_bin', '')) _charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes')) _charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', '')) _charsets.add(Charset(53, 'macroman', 'macroman_bin', '')) -_charsets.add(Charset(54, 'utf16', 'utf16_general_ci', 'Yes')) -_charsets.add(Charset(55, 'utf16', 'utf16_bin', '')) _charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes')) _charsets.add(Charset(58, 'cp1257', 'cp1257_bin', '')) _charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes')) -_charsets.add(Charset(60, 'utf32', 'utf32_general_ci', 'Yes')) -_charsets.add(Charset(61, 'utf32', 'utf32_bin', '')) _charsets.add(Charset(63, 'binary', 'binary', 'Yes')) _charsets.add(Charset(64, 'armscii8', 'armscii8_bin', '')) _charsets.add(Charset(65, 'ascii', 'ascii_bin', '')) @@ -128,6 +123,7 @@ _charsets.add(Charset(72, 'hp8', 'hp8_bin', '')) _charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', '')) _charsets.add(Charset(74, 'koi8r', 'koi8r_bin', '')) _charsets.add(Charset(75, 'koi8u', 'koi8u_bin', '')) +_charsets.add(Charset(76, 'utf8', 'utf8_tolower_ci', '')) _charsets.add(Charset(77, 'latin2', 'latin2_bin', '')) _charsets.add(Charset(78, 'latin5', 'latin5_bin', '')) _charsets.add(Charset(79, 'latin7', 'latin7_bin', '')) @@ -141,7 +137,6 @@ _charsets.add(Charset(86, 'gb2312', 'gb2312_bin', '')) _charsets.add(Charset(87, 'gbk', 'gbk_bin', '')) _charsets.add(Charset(88, 'sjis', 'sjis_bin', '')) _charsets.add(Charset(89, 'tis620', 'tis620_bin', '')) -_charsets.add(Charset(90, 'ucs2', 'ucs2_bin', '')) _charsets.add(Charset(91, 'ujis', 'ujis_bin', '')) _charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes')) _charsets.add(Charset(93, 'geostd8', 'geostd8_bin', '')) @@ -151,67 +146,6 @@ _charsets.add(Charset(96, 'cp932', 'cp932_bin', '')) 
_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes')) _charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', '')) _charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', '')) -_charsets.add(Charset(101, 'utf16', 'utf16_unicode_ci', '')) -_charsets.add(Charset(102, 'utf16', 'utf16_icelandic_ci', '')) -_charsets.add(Charset(103, 'utf16', 'utf16_latvian_ci', '')) -_charsets.add(Charset(104, 'utf16', 'utf16_romanian_ci', '')) -_charsets.add(Charset(105, 'utf16', 'utf16_slovenian_ci', '')) -_charsets.add(Charset(106, 'utf16', 'utf16_polish_ci', '')) -_charsets.add(Charset(107, 'utf16', 'utf16_estonian_ci', '')) -_charsets.add(Charset(108, 'utf16', 'utf16_spanish_ci', '')) -_charsets.add(Charset(109, 'utf16', 'utf16_swedish_ci', '')) -_charsets.add(Charset(110, 'utf16', 'utf16_turkish_ci', '')) -_charsets.add(Charset(111, 'utf16', 'utf16_czech_ci', '')) -_charsets.add(Charset(112, 'utf16', 'utf16_danish_ci', '')) -_charsets.add(Charset(113, 'utf16', 'utf16_lithuanian_ci', '')) -_charsets.add(Charset(114, 'utf16', 'utf16_slovak_ci', '')) -_charsets.add(Charset(115, 'utf16', 'utf16_spanish2_ci', '')) -_charsets.add(Charset(116, 'utf16', 'utf16_roman_ci', '')) -_charsets.add(Charset(117, 'utf16', 'utf16_persian_ci', '')) -_charsets.add(Charset(118, 'utf16', 'utf16_esperanto_ci', '')) -_charsets.add(Charset(119, 'utf16', 'utf16_hungarian_ci', '')) -_charsets.add(Charset(120, 'utf16', 'utf16_sinhala_ci', '')) -_charsets.add(Charset(128, 'ucs2', 'ucs2_unicode_ci', '')) -_charsets.add(Charset(129, 'ucs2', 'ucs2_icelandic_ci', '')) -_charsets.add(Charset(130, 'ucs2', 'ucs2_latvian_ci', '')) -_charsets.add(Charset(131, 'ucs2', 'ucs2_romanian_ci', '')) -_charsets.add(Charset(132, 'ucs2', 'ucs2_slovenian_ci', '')) -_charsets.add(Charset(133, 'ucs2', 'ucs2_polish_ci', '')) -_charsets.add(Charset(134, 'ucs2', 'ucs2_estonian_ci', '')) -_charsets.add(Charset(135, 'ucs2', 'ucs2_spanish_ci', '')) -_charsets.add(Charset(136, 'ucs2', 'ucs2_swedish_ci', '')) -_charsets.add(Charset(137, 'ucs2', 'ucs2_turkish_ci', '')) -_charsets.add(Charset(138, 'ucs2', 'ucs2_czech_ci', '')) -_charsets.add(Charset(139, 'ucs2', 'ucs2_danish_ci', '')) -_charsets.add(Charset(140, 'ucs2', 'ucs2_lithuanian_ci', '')) -_charsets.add(Charset(141, 'ucs2', 'ucs2_slovak_ci', '')) -_charsets.add(Charset(142, 'ucs2', 'ucs2_spanish2_ci', '')) -_charsets.add(Charset(143, 'ucs2', 'ucs2_roman_ci', '')) -_charsets.add(Charset(144, 'ucs2', 'ucs2_persian_ci', '')) -_charsets.add(Charset(145, 'ucs2', 'ucs2_esperanto_ci', '')) -_charsets.add(Charset(146, 'ucs2', 'ucs2_hungarian_ci', '')) -_charsets.add(Charset(147, 'ucs2', 'ucs2_sinhala_ci', '')) -_charsets.add(Charset(159, 'ucs2', 'ucs2_general_mysql500_ci', '')) -_charsets.add(Charset(160, 'utf32', 'utf32_unicode_ci', '')) -_charsets.add(Charset(161, 'utf32', 'utf32_icelandic_ci', '')) -_charsets.add(Charset(162, 'utf32', 'utf32_latvian_ci', '')) -_charsets.add(Charset(163, 'utf32', 'utf32_romanian_ci', '')) -_charsets.add(Charset(164, 'utf32', 'utf32_slovenian_ci', '')) -_charsets.add(Charset(165, 'utf32', 'utf32_polish_ci', '')) -_charsets.add(Charset(166, 'utf32', 'utf32_estonian_ci', '')) -_charsets.add(Charset(167, 'utf32', 'utf32_spanish_ci', '')) -_charsets.add(Charset(168, 'utf32', 'utf32_swedish_ci', '')) -_charsets.add(Charset(169, 'utf32', 'utf32_turkish_ci', '')) -_charsets.add(Charset(170, 'utf32', 'utf32_czech_ci', '')) -_charsets.add(Charset(171, 'utf32', 'utf32_danish_ci', '')) -_charsets.add(Charset(172, 'utf32', 'utf32_lithuanian_ci', '')) -_charsets.add(Charset(173, 
'utf32', 'utf32_slovak_ci', '')) -_charsets.add(Charset(174, 'utf32', 'utf32_spanish2_ci', '')) -_charsets.add(Charset(175, 'utf32', 'utf32_roman_ci', '')) -_charsets.add(Charset(176, 'utf32', 'utf32_persian_ci', '')) -_charsets.add(Charset(177, 'utf32', 'utf32_esperanto_ci', '')) -_charsets.add(Charset(178, 'utf32', 'utf32_hungarian_ci', '')) -_charsets.add(Charset(179, 'utf32', 'utf32_sinhala_ci', '')) _charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', '')) _charsets.add(Charset(193, 'utf8', 'utf8_icelandic_ci', '')) _charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', '')) @@ -232,6 +166,10 @@ _charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', '')) _charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', '')) _charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', '')) _charsets.add(Charset(211, 'utf8', 'utf8_sinhala_ci', '')) +_charsets.add(Charset(212, 'utf8', 'utf8_german2_ci', '')) +_charsets.add(Charset(213, 'utf8', 'utf8_croatian_ci', '')) +_charsets.add(Charset(214, 'utf8', 'utf8_unicode_520_ci', '')) +_charsets.add(Charset(215, 'utf8', 'utf8_vietnamese_ci', '')) _charsets.add(Charset(223, 'utf8', 'utf8_general_mysql500_ci', '')) _charsets.add(Charset(224, 'utf8mb4', 'utf8mb4_unicode_ci', '')) _charsets.add(Charset(225, 'utf8mb4', 'utf8mb4_icelandic_ci', '')) @@ -257,14 +195,18 @@ _charsets.add(Charset(244, 'utf8mb4', 'utf8mb4_german2_ci', '')) _charsets.add(Charset(245, 'utf8mb4', 'utf8mb4_croatian_ci', '')) _charsets.add(Charset(246, 'utf8mb4', 'utf8mb4_unicode_520_ci', '')) _charsets.add(Charset(247, 'utf8mb4', 'utf8mb4_vietnamese_ci', '')) - +_charsets.add(Charset(248, 'gb18030', 'gb18030_chinese_ci', 'Yes')) +_charsets.add(Charset(249, 'gb18030', 'gb18030_bin', '')) +_charsets.add(Charset(250, 'gb18030', 'gb18030_unicode_520_ci', '')) +_charsets.add(Charset(255, 'utf8mb4', 'utf8mb4_0900_ai_ci', '')) charset_by_name = _charsets.by_name charset_by_id = _charsets.by_id +#TODO: remove this def charset_to_encoding(name): """Convert MySQL's charset name to Python's codec name""" - if name == 'utf8mb4': + if name in ('utf8mb4', 'utf8mb3'): return 'utf8' return name diff --git a/server/www/packages/packages-linux/x64/pymysql/connections.py b/server/www/packages/packages-linux/x64/pymysql/connections.py index 1e580d2..2e4122b 100644 --- a/server/www/packages/packages-linux/x64/pymysql/connections.py +++ b/server/www/packages/packages-linux/x64/pymysql/connections.py @@ -88,7 +88,7 @@ TEXT_TYPES = { } -DEFAULT_CHARSET = 'utf8mb4' # TODO: change to utf8mb4 +DEFAULT_CHARSET = 'utf8mb4' MAX_PACKET_LEN = 2**24-1 @@ -152,7 +152,6 @@ class Connection(object): (default: 10, min: 1, max: 31536000) :param ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. - For now the capath and cipher arguments are not supported. :param read_default_group: Group to read from in the configuration file. :param compress: Not supported :param named_pipe: Not supported @@ -295,15 +294,15 @@ class Connection(object): self._affected_rows = 0 self.host_info = "Not connected" - #: specified autocommit mode. None means use server default. + # specified autocommit mode. None means use server default. self.autocommit_mode = autocommit if conv is None: conv = converters.conversions # Need for MySQLdb compatibility. 
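# --- quick check of the charset.py changes above: by_name() is now a plain
# dict lookup over default collations, and utf8mb3 joins utf8mb4 in mapping
# to Python's utf8 codec (sketch, not from the patch):
from pymysql.charset import charset_by_name

assert charset_by_name('utf8mb4').encoding == 'utf8'
assert charset_by_name('no-such-charset') is None    # dict .get() semantics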
- self.encoders = dict([(k, v) for (k, v) in conv.items() if type(k) is not int]) - self.decoders = dict([(k, v) for (k, v) in conv.items() if type(k) is int]) + self.encoders = {k: v for (k, v) in conv.items() if type(k) is not int} + self.decoders = {k: v for (k, v) in conv.items() if type(k) is int} self.sql_mode = sql_mode self.init_command = init_command self.max_allowed_packet = max_allowed_packet @@ -316,10 +315,9 @@ class Connection(object): '_pid': str(os.getpid()), '_client_version': VERSION_STRING, } + if program_name: self._connect_attrs["program_name"] = program_name - elif sys.argv: - self._connect_attrs["program_name"] = sys.argv[0] if defer_connect: self._sock = None @@ -494,6 +492,9 @@ class Connection(object): def __enter__(self): """Context manager that returns a Cursor""" + warnings.warn( + "Context manager API of Connection object is deprecated; Use conn.begin()", + DeprecationWarning) return self.cursor() def __exit__(self, exc, value, traceback): @@ -696,6 +697,10 @@ class Connection(object): raise err.OperationalError( CR.CR_SERVER_LOST, "Lost connection to MySQL server during query (%s)" % (e,)) + except BaseException: + # Don't convert unknown exception to MySQLError. + self._force_close() + raise if len(data) < num_bytes: self._force_close() raise err.OperationalError( @@ -804,7 +809,11 @@ class Connection(object): authresp = b'' plugin_name = None - if self._auth_plugin_name in ('', 'mysql_native_password'): + if self._auth_plugin_name == '': + plugin_name = b'' + authresp = _auth.scramble_native_password(self.password, self.salt) + elif self._auth_plugin_name == 'mysql_native_password': + plugin_name = b'mysql_native_password' authresp = _auth.scramble_native_password(self.password, self.salt) elif self._auth_plugin_name == 'caching_sha2_password': plugin_name = b'caching_sha2_password' @@ -842,9 +851,9 @@ class Connection(object): if self.server_capabilities & CLIENT.CONNECT_ATTRS: connect_attrs = b'' for k, v in self._connect_attrs.items(): - k = k.encode('utf8') + k = k.encode('utf-8') connect_attrs += struct.pack('B', len(k)) + k - v = v.encode('utf8') + v = v.encode('utf-8') connect_attrs += struct.pack('B', len(v)) + v data += struct.pack('B', len(connect_attrs)) + connect_attrs diff --git a/server/www/packages/packages-linux/x64/pymysql/converters.py b/server/www/packages/packages-linux/x64/pymysql/converters.py index bf1db9d..ce2be06 100644 --- a/server/www/packages/packages-linux/x64/pymysql/converters.py +++ b/server/www/packages/packages-linux/x64/pymysql/converters.py @@ -354,21 +354,6 @@ def through(x): convert_bit = through -def convert_characters(connection, field, data): - field_charset = charset_by_id(field.charsetnr).name - encoding = charset_to_encoding(field_charset) - if field.flags & FLAG.SET: - return convert_set(data.decode(encoding)) - if field.flags & FLAG.BINARY: - return data - - if connection.use_unicode: - data = data.decode(encoding) - elif connection.charset != field_charset: - data = data.decode(encoding) - data = data.encode(connection.encoding) - return data - encoders = { bool: escape_bool, int: escape_int, diff --git a/server/www/packages/packages-linux/x64/pymysql/cursors.py b/server/www/packages/packages-linux/x64/pymysql/cursors.py index cc16998..a6d645d 100644 --- a/server/www/packages/packages-linux/x64/pymysql/cursors.py +++ b/server/www/packages/packages-linux/x64/pymysql/cursors.py @@ -122,9 +122,9 @@ class Cursor(object): return tuple(conn.literal(arg) for arg in args) elif isinstance(args, dict): if PY2: - args = 
dict((ensure_bytes(key), ensure_bytes(val)) for - (key, val) in args.items()) - return dict((key, conn.literal(val)) for (key, val) in args.items()) + args = {ensure_bytes(key): ensure_bytes(val) for + (key, val) in args.items()} + return {key: conn.literal(val) for (key, val) in args.items()} else: # If it's not a dictionary let's try escaping it anyways. # Worst case it will throw a Value error diff --git a/server/www/packages/packages-linux/x64/pymysql/util.py b/server/www/packages/packages-linux/x64/pymysql/util.py index 3e82ac7..04683f8 100644 --- a/server/www/packages/packages-linux/x64/pymysql/util.py +++ b/server/www/packages/packages-linux/x64/pymysql/util.py @@ -11,12 +11,3 @@ def byte2int(b): def int2byte(i): return struct.pack("!B", i) - -def join_bytes(bs): - if len(bs) == 0: - return "" - else: - rv = bs[0] - for b in bs[1:]: - rv += b - return rv diff --git a/server/www/packages/packages-linux/x64/qrcode/main.py b/server/www/packages/packages-linux/x64/qrcode/main.py index 1e164f1..e46a9b9 100644 --- a/server/www/packages/packages-linux/x64/qrcode/main.py +++ b/server/www/packages/packages-linux/x64/qrcode/main.py @@ -33,6 +33,7 @@ def _check_mask_pattern(mask_pattern): raise ValueError( "Mask pattern should be in range(8) (got %s)" % mask_pattern) + class QRCode: def __init__(self, version=None, diff --git a/server/www/packages/packages-linux/x64/qrcode/release.py b/server/www/packages/packages-linux/x64/qrcode/release.py index abbabb4..4cc4c19 100644 --- a/server/www/packages/packages-linux/x64/qrcode/release.py +++ b/server/www/packages/packages-linux/x64/qrcode/release.py @@ -12,7 +12,6 @@ def update_manpage(data): Update the version in the manpage document. """ if data['name'] != 'qrcode': - print('no qrcode') return base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) diff --git a/server/www/packages/packages-linux/x64/qrcode/util.py b/server/www/packages/packages-linux/x64/qrcode/util.py index a9652f7..231b85e 100644 --- a/server/www/packages/packages-linux/x64/qrcode/util.py +++ b/server/www/packages/packages-linux/x64/qrcode/util.py @@ -33,7 +33,7 @@ MODE_SIZE_LARGE = { } ALPHA_NUM = six.b('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:') -RE_ALPHA_NUM = re.compile(six.b('^[') + re.escape(ALPHA_NUM) + six.b(']*\Z')) +RE_ALPHA_NUM = re.compile(six.b('^[') + re.escape(ALPHA_NUM) + six.b(r']*\Z')) # The number of bits for numeric delimited data lengths. NUMBER_LENGTH = {3: 10, 2: 7, 1: 4} @@ -344,12 +344,17 @@ def optimal_data_chunks(data, minimum=4): :param minimum: The minimum number of bytes in a row to split as a chunk. 
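# --- usage sketch for optimal_data_chunks() as patched below (it now also
# handles inputs no longer than `minimum`); the input bytes are illustrative:
from qrcode.util import optimal_data_chunks, MODE_NUMBER

chunks = list(optimal_data_chunks(b'ABCDEF0123456789'))
assert any(c.mode == MODE_NUMBER for c in chunks)   # the digit run is split out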
""" data = to_bytestring(data) - re_repeat = ( - six.b('{') + six.text_type(minimum).encode('ascii') + six.b(',}')) - num_pattern = re.compile(six.b('\d') + re_repeat) + num_pattern = six.b(r'\d') + alpha_pattern = six.b('[') + re.escape(ALPHA_NUM) + six.b(']') + if len(data) <= minimum: + num_pattern = re.compile(six.b('^') + num_pattern + six.b('+$')) + alpha_pattern = re.compile(six.b('^') + alpha_pattern + six.b('+$')) + else: + re_repeat = ( + six.b('{') + six.text_type(minimum).encode('ascii') + six.b(',}')) + num_pattern = re.compile(num_pattern + re_repeat) + alpha_pattern = re.compile(alpha_pattern + re_repeat) num_bits = _optimal_split(data, num_pattern) - alpha_pattern = re.compile( - six.b('[') + re.escape(ALPHA_NUM) + six.b(']') + re_repeat) for is_num, chunk in num_bits: if is_num: yield QRData(chunk, mode=MODE_NUMBER, check_data=False) diff --git a/server/www/packages/packages-linux/x64/six.py b/server/www/packages/packages-linux/x64/six.py index 6bf4fd3..357e624 100644 --- a/server/www/packages/packages-linux/x64/six.py +++ b/server/www/packages/packages-linux/x64/six.py @@ -1,4 +1,4 @@ -# Copyright (c) 2010-2017 Benjamin Peterson +# Copyright (c) 2010-2019 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,7 @@ import sys import types __author__ = "Benjamin Peterson " -__version__ = "1.11.0" +__version__ = "1.13.0" # Useful for very coarse version differentiation. @@ -255,8 +255,10 @@ _moved_attributes = [ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), + MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), @@ -637,6 +639,7 @@ if PY3: import io StringIO = io.StringIO BytesIO = io.BytesIO + del io _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" @@ -824,7 +827,15 @@ def with_metaclass(meta, *bases): class metaclass(type): def __new__(cls, name, this_bases, d): - return meta(name, bases, d) + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d['__orig_bases__'] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) @classmethod def __prepare__(cls, name, this_bases): @@ -844,10 +855,71 @@ def add_metaclass(metaclass): orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) + if hasattr(cls, '__qualname__'): + orig_vars['__qualname__'] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper +def ensure_binary(s, encoding='utf-8', errors='strict'): + """Coerce **s** to six.binary_type. 
+ + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, text_type): + return s.encode(encoding, errors) + elif isinstance(s, binary_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding='utf-8', errors='strict'): + """Coerce *s* to `str`. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + if PY2 and isinstance(s, text_type): + s = s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + s = s.decode(encoding, errors) + return s + + +def ensure_text(s, encoding='utf-8', errors='strict'): + """Coerce *s* to six.text_type. + + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + + def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. diff --git a/server/www/packages/packages-linux/x64/tornado/__init__.py b/server/www/packages/packages-linux/x64/tornado/__init__.py index b269cf7..a1094a6 100644 --- a/server/www/packages/packages-linux/x64/tornado/__init__.py +++ b/server/www/packages/packages-linux/x64/tornado/__init__.py @@ -15,8 +15,6 @@ """The Tornado web server and tools.""" -from __future__ import absolute_import, division, print_function - # version is a human-readable version number. # version_info is a four-tuple for programmatic comparison. The first @@ -24,5 +22,5 @@ from __future__ import absolute_import, division, print_function # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been incremented) -version = "5.1.1" -version_info = (5, 1, 1, 0) +version = "6.0.3" +version_info = (6, 0, 3, 0) diff --git a/server/www/packages/packages-linux/x64/tornado/_locale_data.py b/server/www/packages/packages-linux/x64/tornado/_locale_data.py index a2c5039..91416d9 100644 --- a/server/www/packages/packages-linux/x64/tornado/_locale_data.py +++ b/server/www/packages/packages-linux/x64/tornado/_locale_data.py @@ -16,8 +16,6 @@ """Data used by the tornado.locale module.""" -from __future__ import absolute_import, division, print_function - LOCALE_NAMES = { "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"}, diff --git a/server/www/packages/packages-linux/x64/tornado/auth.py b/server/www/packages/packages-linux/x64/tornado/auth.py index b79ad14..db6d290 100644 --- a/server/www/packages/packages-linux/x64/tornado/auth.py +++ b/server/www/packages/packages-linux/x64/tornado/auth.py @@ -54,93 +54,29 @@ Example usage for Google OAuth: .. testoutput:: :hide: - -.. versionchanged:: 4.0 - All of the callback interfaces in this module are now guaranteed - to run their callback with an argument of ``None`` on error. - Previously some functions would do this while others would simply - terminate the request on their own. This change also ensures that - errors are more consistently reported through the ``Future`` interfaces. 
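# --- sketch: the ensure_* helpers added in the six diff above normalise
# str/bytes across Python 2 and 3; on Python 3 they behave as follows:
import six

assert six.ensure_binary('abc') == b'abc'   # text -> bytes
assert six.ensure_str(b'abc') == 'abc'      # bytes -> native str
assert six.ensure_text(b'abc') == u'abc'    # bytes -> text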
""" -from __future__ import absolute_import, division, print_function - import base64 import binascii -import functools import hashlib import hmac import time +import urllib.parse import uuid -import warnings -from tornado.concurrent import (Future, _non_deprecated_return_future, - future_set_exc_info, chain_future, - future_set_result_unless_cancelled) -from tornado import gen from tornado import httpclient from tornado import escape from tornado.httputil import url_concat -from tornado.log import gen_log -from tornado.stack_context import ExceptionStackContext, wrap -from tornado.util import unicode_type, ArgReplacer, PY3 +from tornado.util import unicode_type +from tornado.web import RequestHandler -if PY3: - import urllib.parse as urlparse - import urllib.parse as urllib_parse - long = int -else: - import urlparse - import urllib as urllib_parse +from typing import List, Any, Dict, cast, Iterable, Union, Optional class AuthError(Exception): pass -def _auth_future_to_callback(callback, future): - try: - result = future.result() - except AuthError as e: - gen_log.warning(str(e)) - result = None - callback(result) - - -def _auth_return_future(f): - """Similar to tornado.concurrent.return_future, but uses the auth - module's legacy callback interface. - - Note that when using this decorator the ``callback`` parameter - inside the function will actually be a future. - - .. deprecated:: 5.1 - Will be removed in 6.0. - """ - replacer = ArgReplacer(f, 'callback') - - @functools.wraps(f) - def wrapper(*args, **kwargs): - future = Future() - callback, args, kwargs = replacer.replace(future, args, kwargs) - if callback is not None: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - future.add_done_callback( - wrap(functools.partial(_auth_future_to_callback, callback))) - - def handle_exception(typ, value, tb): - if future.done(): - return False - else: - future_set_exc_info(future, (typ, value, tb)) - return True - with ExceptionStackContext(handle_exception, delay_warning=True): - f(*args, **kwargs) - return future - return wrapper - - class OpenIdMixin(object): """Abstract implementation of OpenID and Attribute Exchange. @@ -148,10 +84,12 @@ class OpenIdMixin(object): * ``_OPENID_ENDPOINT``: the identity provider's URI. """ - @_non_deprecated_return_future - def authenticate_redirect(self, callback_uri=None, - ax_attrs=["name", "email", "language", "username"], - callback=None): + + def authenticate_redirect( + self, + callback_uri: str = None, + ax_attrs: List[str] = ["name", "email", "language", "username"], + ) -> None: """Redirects to the authentication URL for this service. After authentication, the service will redirect back to the given @@ -162,24 +100,22 @@ class OpenIdMixin(object): all those attributes for your app, you can request fewer with the ax_attrs keyword argument. - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. + .. versionchanged:: 6.0 - .. deprecated:: 5.1 - - The ``callback`` argument and returned awaitable will be removed - in Tornado 6.0; this will be an ordinary synchronous function. + The ``callback`` argument was removed and this method no + longer returns an awaitable object. It is now an ordinary + synchronous function. 
""" - callback_uri = callback_uri or self.request.uri + handler = cast(RequestHandler, self) + callback_uri = callback_uri or handler.request.uri + assert callback_uri is not None args = self._openid_args(callback_uri, ax_attrs=ax_attrs) - self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args)) - callback() + endpoint = self._OPENID_ENDPOINT # type: ignore + handler.redirect(endpoint + "?" + urllib.parse.urlencode(args)) - @_auth_return_future - def get_authenticated_user(self, callback, http_client=None): + async def get_authenticated_user( + self, http_client: httpclient.AsyncHTTPClient = None + ) -> Dict[str, Any]: """Fetches the authenticated user data upon redirect. This method should be called by the handler that receives the @@ -190,51 +126,57 @@ class OpenIdMixin(object): The result of this method will generally be used to set a cookie. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned + awaitable object instead. """ + handler = cast(RequestHandler, self) # Verify the OpenID response via direct request to the OP - args = dict((k, v[-1]) for k, v in self.request.arguments.items()) + args = dict( + (k, v[-1]) for k, v in handler.request.arguments.items() + ) # type: Dict[str, Union[str, bytes]] args["openid.mode"] = u"check_authentication" - url = self._OPENID_ENDPOINT + url = self._OPENID_ENDPOINT # type: ignore if http_client is None: http_client = self.get_auth_http_client() - fut = http_client.fetch(url, method="POST", body=urllib_parse.urlencode(args)) - fut.add_done_callback(wrap(functools.partial( - self._on_authentication_verified, callback))) + resp = await http_client.fetch( + url, method="POST", body=urllib.parse.urlencode(args) + ) + return self._on_authentication_verified(resp) - def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): - url = urlparse.urljoin(self.request.full_url(), callback_uri) + def _openid_args( + self, callback_uri: str, ax_attrs: Iterable[str] = [], oauth_scope: str = None + ) -> Dict[str, str]: + handler = cast(RequestHandler, self) + url = urllib.parse.urljoin(handler.request.full_url(), callback_uri) args = { "openid.ns": "http://specs.openid.net/auth/2.0", - "openid.claimed_id": - "http://specs.openid.net/auth/2.0/identifier_select", - "openid.identity": - "http://specs.openid.net/auth/2.0/identifier_select", + "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select", + "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select", "openid.return_to": url, - "openid.realm": urlparse.urljoin(url, '/'), + "openid.realm": urllib.parse.urljoin(url, "/"), "openid.mode": "checkid_setup", } if ax_attrs: - args.update({ - "openid.ns.ax": "http://openid.net/srv/ax/1.0", - "openid.ax.mode": "fetch_request", - }) + args.update( + { + "openid.ns.ax": "http://openid.net/srv/ax/1.0", + "openid.ax.mode": "fetch_request", + } + ) ax_attrs = set(ax_attrs) - required = [] + required = [] # type: List[str] if "name" in ax_attrs: ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) required += ["firstname", "fullname", "lastname"] - args.update({ - "openid.ax.type.firstname": - "http://axschema.org/namePerson/first", - "openid.ax.type.fullname": - "http://axschema.org/namePerson", - "openid.ax.type.lastname": - "http://axschema.org/namePerson/last", - }) + args.update( + { + "openid.ax.type.firstname": 
"http://axschema.org/namePerson/first", + "openid.ax.type.fullname": "http://axschema.org/namePerson", + "openid.ax.type.lastname": "http://axschema.org/namePerson/last", + } + ) known_attrs = { "email": "http://axschema.org/contact/email", "language": "http://axschema.org/pref/language", @@ -245,47 +187,45 @@ class OpenIdMixin(object): required.append(name) args["openid.ax.required"] = ",".join(required) if oauth_scope: - args.update({ - "openid.ns.oauth": - "http://specs.openid.net/extensions/oauth/1.0", - "openid.oauth.consumer": self.request.host.split(":")[0], - "openid.oauth.scope": oauth_scope, - }) + args.update( + { + "openid.ns.oauth": "http://specs.openid.net/extensions/oauth/1.0", + "openid.oauth.consumer": handler.request.host.split(":")[0], + "openid.oauth.scope": oauth_scope, + } + ) return args - def _on_authentication_verified(self, future, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError( - "Error response %s" % e)) - return + def _on_authentication_verified( + self, response: httpclient.HTTPResponse + ) -> Dict[str, Any]: + handler = cast(RequestHandler, self) if b"is_valid:true" not in response.body: - future.set_exception(AuthError( - "Invalid OpenID response: %s" % response.body)) - return + raise AuthError("Invalid OpenID response: %s" % response.body) # Make sure we got back at least an email from attribute exchange ax_ns = None - for name in self.request.arguments: - if name.startswith("openid.ns.") and \ - self.get_argument(name) == u"http://openid.net/srv/ax/1.0": - ax_ns = name[10:] + for key in handler.request.arguments: + if ( + key.startswith("openid.ns.") + and handler.get_argument(key) == u"http://openid.net/srv/ax/1.0" + ): + ax_ns = key[10:] break - def get_ax_arg(uri): + def get_ax_arg(uri: str) -> str: if not ax_ns: return u"" prefix = "openid." + ax_ns + ".type." ax_name = None - for name in self.request.arguments.keys(): - if self.get_argument(name) == uri and name.startswith(prefix): - part = name[len(prefix):] + for name in handler.request.arguments.keys(): + if handler.get_argument(name) == uri and name.startswith(prefix): + part = name[len(prefix) :] ax_name = "openid." + ax_ns + ".value." + part break if not ax_name: return u"" - return self.get_argument(ax_name, u"") + return handler.get_argument(ax_name, u"") email = get_ax_arg("http://axschema.org/contact/email") name = get_ax_arg("http://axschema.org/namePerson") @@ -313,12 +253,12 @@ class OpenIdMixin(object): user["locale"] = locale if username: user["username"] = username - claimed_id = self.get_argument("openid.claimed_id", None) + claimed_id = handler.get_argument("openid.claimed_id", None) if claimed_id: user["claimed_id"] = claimed_id - future_set_result_unless_cancelled(future, user) + return user - def get_auth_http_client(self): + def get_auth_http_client(self) -> httpclient.AsyncHTTPClient: """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than @@ -343,9 +283,13 @@ class OAuthMixin(object): Subclasses must also override the `_oauth_get_user_future` and `_oauth_consumer_token` methods. 
""" - @_non_deprecated_return_future - def authorize_redirect(self, callback_uri=None, extra_params=None, - http_client=None, callback=None): + + async def authorize_redirect( + self, + callback_uri: str = None, + extra_params: Dict[str, Any] = None, + http_client: httpclient.AsyncHTTPClient = None, + ) -> None: """Redirects the user to obtain OAuth authorization for this service. The ``callback_uri`` may be omitted if you have previously @@ -367,35 +311,31 @@ class OAuthMixin(object): Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned + awaitable object instead. """ if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): raise Exception("This service does not support oauth_callback") if http_client is None: http_client = self.get_auth_http_client() + assert http_client is not None if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - fut = http_client.fetch( - self._oauth_request_token_url(callback_uri=callback_uri, - extra_params=extra_params)) - fut.add_done_callback(wrap(functools.partial( - self._on_request_token, - self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback))) + response = await http_client.fetch( + self._oauth_request_token_url( + callback_uri=callback_uri, extra_params=extra_params + ) + ) else: - fut = http_client.fetch(self._oauth_request_token_url()) - fut.add_done_callback( - wrap(functools.partial( - self._on_request_token, self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback))) + response = await http_client.fetch(self._oauth_request_token_url()) + url = self._OAUTH_AUTHORIZE_URL # type: ignore + self._on_request_token(url, callback_uri, response) - @_auth_return_future - def get_authenticated_user(self, callback, http_client=None): + async def get_authenticated_user( + self, http_client: httpclient.AsyncHTTPClient = None + ) -> Dict[str, Any]: """Gets the OAuth authorized user and access token. This method should be called from the handler for your @@ -406,37 +346,45 @@ class OAuthMixin(object): also contain other fields such as ``name``, depending on the service used. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned + awaitable object instead. 
""" - future = callback - request_key = escape.utf8(self.get_argument("oauth_token")) - oauth_verifier = self.get_argument("oauth_verifier", None) - request_cookie = self.get_cookie("_oauth_request_token") + handler = cast(RequestHandler, self) + request_key = escape.utf8(handler.get_argument("oauth_token")) + oauth_verifier = handler.get_argument("oauth_verifier", None) + request_cookie = handler.get_cookie("_oauth_request_token") if not request_cookie: - future.set_exception(AuthError( - "Missing OAuth request token cookie")) - return - self.clear_cookie("_oauth_request_token") + raise AuthError("Missing OAuth request token cookie") + handler.clear_cookie("_oauth_request_token") cookie_key, cookie_secret = [ - base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] + base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|") + ] if cookie_key != request_key: - future.set_exception(AuthError( - "Request token does not match cookie")) - return - token = dict(key=cookie_key, secret=cookie_secret) + raise AuthError("Request token does not match cookie") + token = dict( + key=cookie_key, secret=cookie_secret + ) # type: Dict[str, Union[str, bytes]] if oauth_verifier: token["verifier"] = oauth_verifier if http_client is None: http_client = self.get_auth_http_client() - fut = http_client.fetch(self._oauth_access_token_url(token)) - fut.add_done_callback(wrap(functools.partial(self._on_access_token, callback))) + assert http_client is not None + response = await http_client.fetch(self._oauth_access_token_url(token)) + access_token = _oauth_parse_response(response.body) + user = await self._oauth_get_user_future(access_token) + if not user: + raise AuthError("Error getting user") + user["access_token"] = access_token + return user - def _oauth_request_token_url(self, callback_uri=None, extra_params=None): + def _oauth_request_token_url( + self, callback_uri: str = None, extra_params: Dict[str, Any] = None + ) -> str: + handler = cast(RequestHandler, self) consumer_token = self._oauth_consumer_token() - url = self._OAUTH_REQUEST_TOKEN_URL + url = self._OAUTH_REQUEST_TOKEN_URL # type: ignore args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_signature_method="HMAC-SHA1", @@ -448,8 +396,9 @@ class OAuthMixin(object): if callback_uri == "oob": args["oauth_callback"] = "oob" elif callback_uri: - args["oauth_callback"] = urlparse.urljoin( - self.request.full_url(), callback_uri) + args["oauth_callback"] = urllib.parse.urljoin( + handler.request.full_url(), callback_uri + ) if extra_params: args.update(extra_params) signature = _oauth10a_signature(consumer_token, "GET", url, args) @@ -457,32 +406,35 @@ class OAuthMixin(object): signature = _oauth_signature(consumer_token, "GET", url, args) args["oauth_signature"] = signature - return url + "?" + urllib_parse.urlencode(args) + return url + "?" 
+ urllib.parse.urlencode(args) - def _on_request_token(self, authorize_url, callback_uri, callback, - response_fut): - try: - response = response_fut.result() - except Exception as e: - raise Exception("Could not get request token: %s" % e) + def _on_request_token( + self, + authorize_url: str, + callback_uri: Optional[str], + response: httpclient.HTTPResponse, + ) -> None: + handler = cast(RequestHandler, self) request_token = _oauth_parse_response(response.body) - data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" + - base64.b64encode(escape.utf8(request_token["secret"]))) - self.set_cookie("_oauth_request_token", data) + data = ( + base64.b64encode(escape.utf8(request_token["key"])) + + b"|" + + base64.b64encode(escape.utf8(request_token["secret"])) + ) + handler.set_cookie("_oauth_request_token", data) args = dict(oauth_token=request_token["key"]) if callback_uri == "oob": - self.finish(authorize_url + "?" + urllib_parse.urlencode(args)) - callback() + handler.finish(authorize_url + "?" + urllib.parse.urlencode(args)) return elif callback_uri: - args["oauth_callback"] = urlparse.urljoin( - self.request.full_url(), callback_uri) - self.redirect(authorize_url + "?" + urllib_parse.urlencode(args)) - callback() + args["oauth_callback"] = urllib.parse.urljoin( + handler.request.full_url(), callback_uri + ) + handler.redirect(authorize_url + "?" + urllib.parse.urlencode(args)) - def _oauth_access_token_url(self, request_token): + def _oauth_access_token_url(self, request_token: Dict[str, Any]) -> str: consumer_token = self._oauth_consumer_token() - url = self._OAUTH_ACCESS_TOKEN_URL + url = self._OAUTH_ACCESS_TOKEN_URL # type: ignore args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_token=escape.to_basestring(request_token["key"]), @@ -495,41 +447,31 @@ class OAuthMixin(object): args["oauth_verifier"] = request_token["verifier"] if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - signature = _oauth10a_signature(consumer_token, "GET", url, args, - request_token) + signature = _oauth10a_signature( + consumer_token, "GET", url, args, request_token + ) else: - signature = _oauth_signature(consumer_token, "GET", url, args, - request_token) + signature = _oauth_signature( + consumer_token, "GET", url, args, request_token + ) args["oauth_signature"] = signature - return url + "?" + urllib_parse.urlencode(args) + return url + "?" + urllib.parse.urlencode(args) - def _on_access_token(self, future, response_fut): - try: - response = response_fut.result() - except Exception: - future.set_exception(AuthError("Could not fetch access token")) - return - - access_token = _oauth_parse_response(response.body) - fut = self._oauth_get_user_future(access_token) - fut = gen.convert_yielded(fut) - fut.add_done_callback( - wrap(functools.partial(self._on_oauth_get_user, access_token, future))) - - def _oauth_consumer_token(self): + def _oauth_consumer_token(self) -> Dict[str, Any]: """Subclasses must override this to return their OAuth consumer keys. The return value should be a `dict` with keys ``key`` and ``secret``. """ raise NotImplementedError() - @_non_deprecated_return_future - def _oauth_get_user_future(self, access_token, callback): + async def _oauth_get_user_future( + self, access_token: Dict[str, Any] + ) -> Dict[str, Any]: """Subclasses must override this to get basic information about the user. 
- Should return a `.Future` whose result is a dictionary + Should be a coroutine whose result is a dictionary containing information about the user, which may have been retrieved by using ``access_token`` to make a request to the service. @@ -537,40 +479,23 @@ class OAuthMixin(object): The access token will be added to the returned dictionary to make the result of `get_authenticated_user`. - For backwards compatibility, the callback-based ``_oauth_get_user`` - method is also supported. - .. versionchanged:: 5.1 Subclasses may also define this method with ``async def``. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``_oauth_get_user`` fallback is deprecated and support for it - will be removed in 6.0. + A synchronous fallback to ``_oauth_get_user`` was removed. """ - warnings.warn("_oauth_get_user is deprecated, override _oauth_get_user_future instead", - DeprecationWarning) - # By default, call the old-style _oauth_get_user, but new code - # should override this method instead. - self._oauth_get_user(access_token, callback) - - def _oauth_get_user(self, access_token, callback): raise NotImplementedError() - def _on_oauth_get_user(self, access_token, future, user_future): - if user_future.exception() is not None: - future.set_exception(user_future.exception()) - return - user = user_future.result() - if not user: - future.set_exception(AuthError("Error getting user")) - return - user["access_token"] = access_token - future_set_result_unless_cancelled(future, user) - - def _oauth_request_parameters(self, url, access_token, parameters={}, - method="GET"): + def _oauth_request_parameters( + self, + url: str, + access_token: Dict[str, Any], + parameters: Dict[str, Any] = {}, + method: str = "GET", + ) -> Dict[str, Any]: """Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments @@ -589,15 +514,17 @@ class OAuthMixin(object): args.update(base_args) args.update(parameters) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - signature = _oauth10a_signature(consumer_token, method, url, args, - access_token) + signature = _oauth10a_signature( + consumer_token, method, url, args, access_token + ) else: - signature = _oauth_signature(consumer_token, method, url, args, - access_token) + signature = _oauth_signature( + consumer_token, method, url, args, access_token + ) base_args["oauth_signature"] = escape.to_basestring(signature) return base_args - def get_auth_http_client(self): + def get_auth_http_client(self) -> httpclient.AsyncHTTPClient: """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than @@ -617,10 +544,16 @@ class OAuth2Mixin(object): * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url. * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url. """ - @_non_deprecated_return_future - def authorize_redirect(self, redirect_uri=None, client_id=None, - client_secret=None, extra_params=None, - callback=None, scope=None, response_type="code"): + + def authorize_redirect( + self, + redirect_uri: str = None, + client_id: str = None, + client_secret: str = None, + extra_params: Dict[str, Any] = None, + scope: str = None, + response_type: str = "code", + ) -> None: """Redirects the user to obtain OAuth authorization for this service. 
Some providers require that you register a redirect URL with @@ -629,47 +562,53 @@ class OAuth2Mixin(object): ``get_authenticated_user`` in the handler for your redirect URL to complete the authorization process. - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. + .. versionchanged:: 6.0 - .. deprecated:: 5.1 - - The ``callback`` argument and returned awaitable will be removed - in Tornado 6.0; this will be an ordinary synchronous function. + The ``callback`` argument and returned awaitable were removed; + this is now an ordinary synchronous function. """ - args = { - "redirect_uri": redirect_uri, - "client_id": client_id, - "response_type": response_type - } + handler = cast(RequestHandler, self) + args = {"response_type": response_type} + if redirect_uri is not None: + args["redirect_uri"] = redirect_uri + if client_id is not None: + args["client_id"] = client_id if extra_params: args.update(extra_params) if scope: - args['scope'] = ' '.join(scope) - self.redirect( - url_concat(self._OAUTH_AUTHORIZE_URL, args)) - callback() + args["scope"] = " ".join(scope) + url = self._OAUTH_AUTHORIZE_URL # type: ignore + handler.redirect(url_concat(url, args)) - def _oauth_request_token_url(self, redirect_uri=None, client_id=None, - client_secret=None, code=None, - extra_params=None): - url = self._OAUTH_ACCESS_TOKEN_URL - args = dict( - redirect_uri=redirect_uri, - code=code, - client_id=client_id, - client_secret=client_secret, - ) + def _oauth_request_token_url( + self, + redirect_uri: str = None, + client_id: str = None, + client_secret: str = None, + code: str = None, + extra_params: Dict[str, Any] = None, + ) -> str: + url = self._OAUTH_ACCESS_TOKEN_URL # type: ignore + args = {} # type: Dict[str, str] + if redirect_uri is not None: + args["redirect_uri"] = redirect_uri + if code is not None: + args["code"] = code + if client_id is not None: + args["client_id"] = client_id + if client_secret is not None: + args["client_secret"] = client_secret if extra_params: args.update(extra_params) return url_concat(url, args) - @_auth_return_future - def oauth2_request(self, url, callback, access_token=None, - post_args=None, **args): + async def oauth2_request( + self, + url: str, + access_token: str = None, + post_args: Dict[str, Any] = None, + **args: Any + ) -> Any: """Fetches the given URL with an OAuth2 access token. If the request is a POST, ``post_args`` should be provided. Query @@ -699,10 +638,9 @@ class OAuth2Mixin(object): .. versionadded:: 4.3 - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned awaitable object instead. """ all_args = {} if access_token: @@ -710,25 +648,17 @@ class OAuth2Mixin(object): all_args.update(args) if all_args: - url += "?" + urllib_parse.urlencode(all_args) - callback = wrap(functools.partial(self._on_oauth2_request, callback)) + url += "?" 
+ urllib.parse.urlencode(all_args) http = self.get_auth_http_client() if post_args is not None: - fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) + response = await http.fetch( + url, method="POST", body=urllib.parse.urlencode(post_args) + ) else: - fut = http.fetch(url) - fut.add_done_callback(callback) + response = await http.fetch(url) + return escape.json_decode(response.body) - def _on_oauth2_request(self, future, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError("Error response %s" % e)) - return - - future_set_result_unless_cancelled(future, escape.json_decode(response.body)) - - def get_auth_http_client(self): + def get_auth_http_client(self) -> httpclient.AsyncHTTPClient: """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than @@ -771,6 +701,7 @@ class TwitterMixin(OAuthMixin): and all of the custom Twitter user attributes described at https://dev.twitter.com/docs/api/1.1/get/users/show """ + _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token" _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token" _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize" @@ -778,8 +709,7 @@ class TwitterMixin(OAuthMixin): _OAUTH_NO_CALLBACKS = False _TWITTER_BASE_URL = "https://api.twitter.com/1.1" - @_non_deprecated_return_future - def authenticate_redirect(self, callback_uri=None, callback=None): + async def authenticate_redirect(self, callback_uri: str = None) -> None: """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. @@ -790,20 +720,24 @@ class TwitterMixin(OAuthMixin): Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned + awaitable object instead. """ http = self.get_auth_http_client() - fut = http.fetch(self._oauth_request_token_url(callback_uri=callback_uri)) - fut.add_done_callback(wrap(functools.partial( - self._on_request_token, self._OAUTH_AUTHENTICATE_URL, - None, callback))) + response = await http.fetch( + self._oauth_request_token_url(callback_uri=callback_uri) + ) + self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None, response) - @_auth_return_future - def twitter_request(self, path, callback=None, access_token=None, - post_args=None, **args): + async def twitter_request( + self, + path: str, + access_token: Dict[str, Any], + post_args: Dict[str, Any] = None, + **args: Any + ) -> Any: """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor`` The path should not include the format or API version number. @@ -840,12 +774,12 @@ class TwitterMixin(OAuthMixin): .. testoutput:: :hide: - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned + awaitable object instead. """ - if path.startswith('http:') or path.startswith('https:'): + if path.startswith("http:") or path.startswith("https:"): # Raw urls are useful for e.g. 
search which doesn't follow the # usual pattern: http://search.twitter.com/search.json url = path @@ -858,42 +792,38 @@ class TwitterMixin(OAuthMixin): all_args.update(post_args or {}) method = "POST" if post_args is not None else "GET" oauth = self._oauth_request_parameters( - url, access_token, all_args, method=method) + url, access_token, all_args, method=method + ) args.update(oauth) if args: - url += "?" + urllib_parse.urlencode(args) + url += "?" + urllib.parse.urlencode(args) http = self.get_auth_http_client() - http_callback = wrap(functools.partial(self._on_twitter_request, callback, url)) if post_args is not None: - fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) + response = await http.fetch( + url, method="POST", body=urllib.parse.urlencode(post_args) + ) else: - fut = http.fetch(url) - fut.add_done_callback(http_callback) + response = await http.fetch(url) + return escape.json_decode(response.body) - def _on_twitter_request(self, future, url, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError( - "Error response %s fetching %s" % (e, url))) - return - future_set_result_unless_cancelled(future, escape.json_decode(response.body)) - - def _oauth_consumer_token(self): - self.require_setting("twitter_consumer_key", "Twitter OAuth") - self.require_setting("twitter_consumer_secret", "Twitter OAuth") + def _oauth_consumer_token(self) -> Dict[str, Any]: + handler = cast(RequestHandler, self) + handler.require_setting("twitter_consumer_key", "Twitter OAuth") + handler.require_setting("twitter_consumer_secret", "Twitter OAuth") return dict( - key=self.settings["twitter_consumer_key"], - secret=self.settings["twitter_consumer_secret"]) + key=handler.settings["twitter_consumer_key"], + secret=handler.settings["twitter_consumer_secret"], + ) - @gen.coroutine - def _oauth_get_user_future(self, access_token): - user = yield self.twitter_request( - "/account/verify_credentials", - access_token=access_token) + async def _oauth_get_user_future( + self, access_token: Dict[str, Any] + ) -> Dict[str, Any]: + user = await self.twitter_request( + "/account/verify_credentials", access_token=access_token + ) if user: user["username"] = user["screen_name"] - raise gen.Return(user) + return user class GoogleOAuth2Mixin(OAuth2Mixin): @@ -910,18 +840,20 @@ class GoogleOAuth2Mixin(OAuth2Mixin): * In the OAuth section of the page, select Create New Client ID. * Set the Redirect URI to point to your auth handler * Copy the "Client secret" and "Client ID" to the application settings as - {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}} + ``{"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}`` .. versionadded:: 3.2 """ + _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth" _OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token" _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" _OAUTH_NO_CALLBACKS = False - _OAUTH_SETTINGS_KEY = 'google_oauth' + _OAUTH_SETTINGS_KEY = "google_oauth" - @_auth_return_future - def get_authenticated_user(self, redirect_uri, code, callback): + async def get_authenticated_user( + self, redirect_uri: str, code: str + ) -> Dict[str, Any]: """Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field @@ -959,48 +891,47 @@ class GoogleOAuth2Mixin(OAuth2Mixin): .. testoutput:: :hide: - .. deprecated:: 5.1 + .. 
versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned awaitable object instead. """ # noqa: E501 + handler = cast(RequestHandler, self) http = self.get_auth_http_client() - body = urllib_parse.urlencode({ - "redirect_uri": redirect_uri, - "code": code, - "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'], - "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'], - "grant_type": "authorization_code", - }) + body = urllib.parse.urlencode( + { + "redirect_uri": redirect_uri, + "code": code, + "client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"], + "client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"], + "grant_type": "authorization_code", + } + ) - fut = http.fetch(self._OAUTH_ACCESS_TOKEN_URL, - method="POST", - headers={'Content-Type': 'application/x-www-form-urlencoded'}, - body=body) - fut.add_done_callback(wrap(functools.partial(self._on_access_token, callback))) - - def _on_access_token(self, future, response_fut): - """Callback function for the exchange to the access token.""" - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError('Google auth error: %s' % str(e))) - return - - args = escape.json_decode(response.body) - future_set_result_unless_cancelled(future, args) + response = await http.fetch( + self._OAUTH_ACCESS_TOKEN_URL, + method="POST", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + body=body, + ) + return escape.json_decode(response.body) class FacebookGraphMixin(OAuth2Mixin): """Facebook authentication using the new Graph API and OAuth2.""" + _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?" _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?" _OAUTH_NO_CALLBACKS = False _FACEBOOK_BASE_URL = "https://graph.facebook.com" - @_auth_return_future - def get_authenticated_user(self, redirect_uri, client_id, client_secret, - code, callback, extra_fields=None): + async def get_authenticated_user( + self, + redirect_uri: str, + client_id: str, + client_secret: str, + code: str, + extra_fields: Dict[str, Any] = None, + ) -> Optional[Dict[str, Any]]: """Handles the login for the Facebook user, returning a user object. Example usage: @@ -1042,10 +973,9 @@ class FacebookGraphMixin(OAuth2Mixin): The ``session_expires`` field was updated to support changes made to the Facebook API in March 2017. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned awaitable object instead. 
""" http = self.get_auth_http_client() args = { @@ -1055,42 +985,35 @@ class FacebookGraphMixin(OAuth2Mixin): "client_secret": client_secret, } - fields = set(['id', 'name', 'first_name', 'last_name', - 'locale', 'picture', 'link']) + fields = set( + ["id", "name", "first_name", "last_name", "locale", "picture", "link"] + ) if extra_fields: fields.update(extra_fields) - fut = http.fetch(self._oauth_request_token_url(**args)) - fut.add_done_callback(wrap(functools.partial(self._on_access_token, redirect_uri, client_id, - client_secret, callback, fields))) - - @gen.coroutine - def _on_access_token(self, redirect_uri, client_id, client_secret, - future, fields, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError('Facebook auth error: %s' % str(e))) - return - + response = await http.fetch( + self._oauth_request_token_url(**args) # type: ignore + ) args = escape.json_decode(response.body) session = { "access_token": args.get("access_token"), - "expires_in": args.get("expires_in") + "expires_in": args.get("expires_in"), } + assert session["access_token"] is not None - user = yield self.facebook_request( + user = await self.facebook_request( path="/me", access_token=session["access_token"], - appsecret_proof=hmac.new(key=client_secret.encode('utf8'), - msg=session["access_token"].encode('utf8'), - digestmod=hashlib.sha256).hexdigest(), - fields=",".join(fields) + appsecret_proof=hmac.new( + key=client_secret.encode("utf8"), + msg=session["access_token"].encode("utf8"), + digestmod=hashlib.sha256, + ).hexdigest(), + fields=",".join(fields), ) if user is None: - future_set_result_unless_cancelled(future, None) - return + return None fieldmap = {} for field in fields: @@ -1100,13 +1023,21 @@ class FacebookGraphMixin(OAuth2Mixin): # older versions in which the server used url-encoding and # this code simply returned the string verbatim. # This should change in Tornado 5.0. - fieldmap.update({"access_token": session["access_token"], - "session_expires": str(session.get("expires_in"))}) - future_set_result_unless_cancelled(future, fieldmap) + fieldmap.update( + { + "access_token": session["access_token"], + "session_expires": str(session.get("expires_in")), + } + ) + return fieldmap - @_auth_return_future - def facebook_request(self, path, callback, access_token=None, - post_args=None, **args): + async def facebook_request( + self, + path: str, + access_token: str = None, + post_args: Dict[str, Any] = None, + **args: Any + ) -> Any: """Fetches the given relative API path, e.g., "/btaylor/picture" If the request is a POST, ``post_args`` should be provided. Query @@ -1153,35 +1084,39 @@ class FacebookGraphMixin(OAuth2Mixin): .. versionchanged:: 3.1 Added the ability to override ``self._FACEBOOK_BASE_URL``. - .. deprecated:: 5.1 + .. versionchanged:: 6.0 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. + The ``callback`` argument was removed. Use the returned awaitable object instead. """ url = self._FACEBOOK_BASE_URL + path - # Thanks to the _auth_return_future decorator, our "callback" - # argument is a Future, which we cannot pass as a callback to - # oauth2_request. Instead, have oauth2_request return a - # future and chain them together. 
- oauth_future = self.oauth2_request(url, access_token=access_token, - post_args=post_args, **args) - chain_future(oauth_future, callback) + return await self.oauth2_request( + url, access_token=access_token, post_args=post_args, **args + ) -def _oauth_signature(consumer_token, method, url, parameters={}, token=None): +def _oauth_signature( + consumer_token: Dict[str, Any], + method: str, + url: str, + parameters: Dict[str, Any] = {}, + token: Dict[str, Any] = None, +) -> bytes: """Calculates the HMAC-SHA1 OAuth signature for the given request. See http://oauth.net/core/1.0/#signing_process """ - parts = urlparse.urlparse(url) + parts = urllib.parse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) - base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) - for k, v in sorted(parameters.items()))) + base_elems.append( + "&".join( + "%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()) + ) + ) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [escape.utf8(consumer_token["secret"])] @@ -1192,42 +1127,53 @@ def _oauth_signature(consumer_token, method, url, parameters={}, token=None): return binascii.b2a_base64(hash.digest())[:-1] -def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None): +def _oauth10a_signature( + consumer_token: Dict[str, Any], + method: str, + url: str, + parameters: Dict[str, Any] = {}, + token: Dict[str, Any] = None, +) -> bytes: """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. See http://oauth.net/core/1.0a/#signing_process """ - parts = urlparse.urlparse(url) + parts = urllib.parse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) - base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) - for k, v in sorted(parameters.items()))) + base_elems.append( + "&".join( + "%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()) + ) + ) base_string = "&".join(_oauth_escape(e) for e in base_elems) - key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))] - key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else "")) + key_elems = [escape.utf8(urllib.parse.quote(consumer_token["secret"], safe="~"))] + key_elems.append( + escape.utf8(urllib.parse.quote(token["secret"], safe="~") if token else "") + ) key = b"&".join(key_elems) hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1] -def _oauth_escape(val): +def _oauth_escape(val: Union[str, bytes]) -> str: if isinstance(val, unicode_type): val = val.encode("utf-8") - return urllib_parse.quote(val, safe="~") + return urllib.parse.quote(val, safe="~") -def _oauth_parse_response(body): +def _oauth_parse_response(body: bytes) -> Dict[str, Any]: # I can't find an officially-defined encoding for oauth responses and # have never seen anyone use non-ascii. Leave the response in a byte # string for python 2, and use utf8 on python 3. 
- body = escape.native_str(body) - p = urlparse.parse_qs(body, keep_blank_values=False) + body_str = escape.native_str(body) + p = urllib.parse.parse_qs(body_str, keep_blank_values=False) token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) # Add the extra parameters the Provider included to the token diff --git a/server/www/packages/packages-linux/x64/tornado/autoreload.py b/server/www/packages/packages-linux/x64/tornado/autoreload.py index 7d69474..1c47aae 100644 --- a/server/www/packages/packages-linux/x64/tornado/autoreload.py +++ b/server/www/packages/packages-linux/x64/tornado/autoreload.py @@ -33,9 +33,8 @@ This combination is encouraged as the wrapper catches syntax errors and other import-time failures, while debug mode catches changes once the server has started. -This module depends on `.IOLoop`, so it will not work in WSGI applications -and Google App Engine. It also will not work correctly when `.HTTPServer`'s -multi-process mode is used. +This module will not work correctly when `.HTTPServer`'s multi-process +mode is used. Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) because it re-executes Python using ``sys.executable`` and ``sys.argv``. @@ -44,8 +43,6 @@ incorrectly. """ -from __future__ import absolute_import, division, print_function - import os import sys @@ -96,23 +93,29 @@ from tornado.util import exec_in try: import signal except ImportError: - signal = None + signal = None # type: ignore + +import typing +from typing import Callable, Dict + +if typing.TYPE_CHECKING: + from typing import List, Optional, Union # noqa: F401 # os.execv is broken on Windows and can't properly parse command line # arguments and executable name if they contain whitespaces. subprocess # fixes that behavior. -_has_execv = sys.platform != 'win32' +_has_execv = sys.platform != "win32" _watched_files = set() _reload_hooks = [] _reload_attempted = False _io_loops = weakref.WeakKeyDictionary() # type: ignore _autoreload_is_main = False -_original_argv = None +_original_argv = None # type: Optional[List[str]] _original_spec = None -def start(check_time=500): +def start(check_time: int = 500) -> None: """Begins watching source files for changes. .. versionchanged:: 5.0 @@ -124,13 +127,13 @@ def start(check_time=500): _io_loops[io_loop] = True if len(_io_loops) > 1: gen_log.warning("tornado.autoreload started more than once in the same process") - modify_times = {} + modify_times = {} # type: Dict[str, float] callback = functools.partial(_reload_on_update, modify_times) scheduler = ioloop.PeriodicCallback(callback, check_time) scheduler.start() -def wait(): +def wait() -> None: """Wait for a watched file to change, then restart the process. Intended to be used at the end of scripts like unit test runners, @@ -142,7 +145,7 @@ def wait(): io_loop.start() -def watch(filename): +def watch(filename: str) -> None: """Add a file to the watch list. All imported modules are watched by default. @@ -150,7 +153,7 @@ def watch(filename): _watched_files.add(filename) -def add_reload_hook(fn): +def add_reload_hook(fn: Callable[[], None]) -> None: """Add a function to be called before reloading the process. Note that for open file and socket handles it is generally @@ -161,7 +164,7 @@ def add_reload_hook(fn): _reload_hooks.append(fn) -def _reload_on_update(modify_times): +def _reload_on_update(modify_times: Dict[str, float]) -> None: if _reload_attempted: # We already tried to reload and it didn't work, so don't try again. 
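The module-level autoreload API annotated above is unchanged in behavior; for reference, a typical usage sketch (the extra watched path is hypothetical):

    import tornado.autoreload

    tornado.autoreload.start()                 # poll all imported modules for changes
    tornado.autoreload.watch("conf/app.yaml")  # additionally watch a non-module file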
return @@ -187,7 +190,7 @@ def _reload_on_update(modify_times): _check_file(modify_times, path) -def _check_file(modify_times, path): +def _check_file(modify_times: Dict[str, float], path: str) -> None: try: modified = os.stat(path).st_mtime except Exception: @@ -200,7 +203,7 @@ def _check_file(modify_times, path): _reload() -def _reload(): +def _reload() -> None: global _reload_attempted _reload_attempted = True for fn in _reload_hooks: @@ -218,19 +221,20 @@ def _reload(): # sys.path[0] is an empty string and add the current directory to # $PYTHONPATH. if _autoreload_is_main: + assert _original_argv is not None spec = _original_spec argv = _original_argv else: - spec = getattr(sys.modules['__main__'], '__spec__', None) + spec = getattr(sys.modules["__main__"], "__spec__", None) argv = sys.argv if spec: - argv = ['-m', spec.name] + argv[1:] + argv = ["-m", spec.name] + argv[1:] else: - path_prefix = '.' + os.pathsep - if (sys.path[0] == '' and - not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): - os.environ["PYTHONPATH"] = (path_prefix + - os.environ.get("PYTHONPATH", "")) + path_prefix = "." + os.pathsep + if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith( + path_prefix + ): + os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "") if not _has_execv: subprocess.Popen([sys.executable] + argv) os._exit(0) @@ -249,7 +253,9 @@ def _reload(): # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. - os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv) + os.spawnv( # type: ignore + os.P_NOWAIT, sys.executable, [sys.executable] + argv + ) # At this point the IOLoop has been closed and finally # blocks will experience errors if we allow the stack to # unwind, so just exit uncleanly. @@ -263,7 +269,7 @@ Usage: """ -def main(): +def main() -> None: """Command-line wrapper to re-run a script whenever its source changes. Scripts may be specified by filename or module name:: @@ -280,12 +286,13 @@ def main(): # The main module can be tricky; set the variables both in our globals # (which may be __main__) and the real importable version. import tornado.autoreload + global _autoreload_is_main global _original_argv, _original_spec tornado.autoreload._autoreload_is_main = _autoreload_is_main = True original_argv = sys.argv tornado.autoreload._original_argv = _original_argv = original_argv - original_spec = getattr(sys.modules['__main__'], '__spec__', None) + original_spec = getattr(sys.modules["__main__"], "__spec__", None) tornado.autoreload._original_spec = _original_spec = original_spec sys.argv = sys.argv[:] if len(sys.argv) >= 3 and sys.argv[1] == "-m": @@ -303,6 +310,7 @@ def main(): try: if mode == "module": import runpy + runpy.run_module(module, run_name="__main__", alter_sys=True) elif mode == "script": with open(script) as f: @@ -340,12 +348,12 @@ def main(): # restore sys.argv so subsequent executions will include autoreload sys.argv = original_argv - if mode == 'module': + if mode == "module": # runpy did a fake import of the module as __main__, but now it's # no longer in sys.modules. Figure out where it is and watch it. 
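Since ``_reload()`` runs the registered hooks immediately before re-executing the process, cleanup that must happen prior to restart belongs in a hook. A sketch; ``db`` stands in for any hypothetical resource:

    import tornado.autoreload

    def close_db():
        # Close handles that should not leak across the exec boundary
        # (as noted above, open files and sockets survive exec unless
        # closed or marked FD_CLOEXEC).
        db.close()

    tornado.autoreload.add_reload_hook(close_db)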
loader = pkgutil.get_loader(module) if loader is not None: - watch(loader.get_filename()) + watch(loader.get_filename()) # type: ignore wait() diff --git a/server/www/packages/packages-linux/x64/tornado/concurrent.py b/server/www/packages/packages-linux/x64/tornado/concurrent.py index f7e6bcc..3a49940 100644 --- a/server/www/packages/packages-linux/x64/tornado/concurrent.py +++ b/server/www/packages/packages-linux/x64/tornado/concurrent.py @@ -14,389 +14,64 @@ # under the License. """Utilities for working with ``Future`` objects. -``Futures`` are a pattern for concurrent programming introduced in -Python 3.2 in the `concurrent.futures` package, and also adopted (in a -slightly different form) in Python 3.4's `asyncio` package. This -package defines a ``Future`` class that is an alias for `asyncio.Future` -when available, and a compatible implementation for older versions of -Python. It also includes some utility functions for interacting with -``Future`` objects. +Tornado previously provided its own ``Future`` class, but now uses +`asyncio.Future`. This module contains utility functions for working +with `asyncio.Future` in a way that is backwards-compatible with +Tornado's old ``Future`` implementation. -While this package is an important part of Tornado's internal +While this module is an important part of Tornado's internal implementation, applications rarely need to interact with it directly. -""" -from __future__ import absolute_import, division, print_function +""" + +import asyncio +from concurrent import futures import functools -import platform -import textwrap -import traceback import sys -import warnings +import types from tornado.log import app_log -from tornado.stack_context import ExceptionStackContext, wrap -from tornado.util import raise_exc_info, ArgReplacer, is_finalizing -try: - from concurrent import futures -except ImportError: - futures = None +import typing +from typing import Any, Callable, Optional, Tuple, Union -try: - import asyncio -except ImportError: - asyncio = None - -try: - import typing -except ImportError: - typing = None - - -# Can the garbage collector handle cycles that include __del__ methods? -# This is true in cpython beginning with version 3.4 (PEP 442). -_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and - sys.version_info >= (3, 4)) +_T = typing.TypeVar("_T") class ReturnValueIgnoredError(Exception): + # No longer used; was previously used by @return_future pass -# This class and associated code in the future object is derived -# from the Trollius project, a backport of asyncio to Python 2.x - 3.x + +Future = asyncio.Future + +FUTURES = (futures.Future, Future) -class _TracebackLogger(object): - """Helper to log a traceback upon destruction if not cleared. - - This solves a nasty problem with Futures and Tasks that have an - exception set: if nobody asks for the exception, the exception is - never logged. This violates the Zen of Python: 'Errors should - never pass silently. Unless explicitly silenced.' - - However, we don't want to log the exception as soon as - set_exception() is called: if the calling code is written - properly, it will get the exception and handle it properly. But - we *do* want to log it if result() or exception() was never called - -- otherwise developers waste a lot of time wondering why their - buggy code fails silently. 
- - An earlier attempt added a __del__() method to the Future class - itself, but this backfired because the presence of __del__() - prevents garbage collection from breaking cycles. A way out of - this catch-22 is to avoid having a __del__() method on the Future - class itself, but instead to have a reference to a helper object - with a __del__() method that logs the traceback, where we ensure - that the helper object doesn't participate in cycles, and only the - Future has a reference to it. - - The helper object is added when set_exception() is called. When - the Future is collected, and the helper is present, the helper - object is also collected, and its __del__() method will log the - traceback. When the Future's result() or exception() method is - called (and a helper object is present), it removes the the helper - object, after calling its clear() method to prevent it from - logging. - - One downside is that we do a fair amount of work to extract the - traceback from the exception, even when it is never logged. It - would seem cheaper to just store the exception object, but that - references the traceback, which references stack frames, which may - reference the Future, which references the _TracebackLogger, and - then the _TracebackLogger would be included in a cycle, which is - what we're trying to avoid! As an optimization, we don't - immediately format the exception; we only do the work when - activate() is called, which call is delayed until after all the - Future's callbacks have run. Since usually a Future has at least - one callback (typically set by 'yield From') and usually that - callback extracts the callback, thereby removing the need to - format the exception. - - PS. I don't claim credit for this solution. I first heard of it - in a discussion about closing files when they are collected. - """ - - __slots__ = ('exc_info', 'formatted_tb') - - def __init__(self, exc_info): - self.exc_info = exc_info - self.formatted_tb = None - - def activate(self): - exc_info = self.exc_info - if exc_info is not None: - self.exc_info = None - self.formatted_tb = traceback.format_exception(*exc_info) - - def clear(self): - self.exc_info = None - self.formatted_tb = None - - def __del__(self, is_finalizing=is_finalizing): - if not is_finalizing() and self.formatted_tb: - app_log.error('Future exception was never retrieved: %s', - ''.join(self.formatted_tb).rstrip()) - - -class Future(object): - """Placeholder for an asynchronous result. - - A ``Future`` encapsulates the result of an asynchronous - operation. In synchronous applications ``Futures`` are used - to wait for the result from a thread or process pool; in - Tornado they are normally used with `.IOLoop.add_future` or by - yielding them in a `.gen.coroutine`. - - `tornado.concurrent.Future` is an alias for `asyncio.Future` when - that package is available (Python 3.4+). Unlike - `concurrent.futures.Future`, the ``Futures`` used by Tornado and - `asyncio` are not thread-safe (and therefore faster for use with - single-threaded event loops). - - In addition to ``exception`` and ``set_exception``, Tornado's - ``Future`` implementation supports storing an ``exc_info`` triple - to support better tracebacks on Python 2. To set an ``exc_info`` - triple, use `future_set_exc_info`, and to retrieve one, call - `result()` (which will raise it). - - .. versionchanged:: 4.0 - `tornado.concurrent.Future` is always a thread-unsafe ``Future`` - with support for the ``exc_info`` methods. 
Previously it would - be an alias for the thread-safe `concurrent.futures.Future` - if that package was available and fall back to the thread-unsafe - implementation if it was not. - - .. versionchanged:: 4.1 - If a `.Future` contains an error but that error is never observed - (by calling ``result()``, ``exception()``, or ``exc_info()``), - a stack trace will be logged when the `.Future` is garbage collected. - This normally indicates an error in the application, but in cases - where it results in undesired logging it may be necessary to - suppress the logging by ensuring that the exception is observed: - ``f.add_done_callback(lambda f: f.exception())``. - - .. versionchanged:: 5.0 - - This class was previoiusly available under the name - ``TracebackFuture``. This name, which was deprecated since - version 4.0, has been removed. When `asyncio` is available - ``tornado.concurrent.Future`` is now an alias for - `asyncio.Future`. Like `asyncio.Future`, callbacks are now - always scheduled on the `.IOLoop` and are never run - synchronously. - - """ - def __init__(self): - self._done = False - self._result = None - self._exc_info = None - - self._log_traceback = False # Used for Python >= 3.4 - self._tb_logger = None # Used for Python <= 3.3 - - self._callbacks = [] - - # Implement the Python 3.5 Awaitable protocol if possible - # (we can't use return and yield together until py33). - if sys.version_info >= (3, 3): - exec(textwrap.dedent(""" - def __await__(self): - return (yield self) - """)) - else: - # Py2-compatible version for use with cython. - def __await__(self): - result = yield self - # StopIteration doesn't take args before py33, - # but Cython recognizes the args tuple. - e = StopIteration() - e.args = (result,) - raise e - - def cancel(self): - """Cancel the operation, if possible. - - Tornado ``Futures`` do not support cancellation, so this method always - returns False. - """ - return False - - def cancelled(self): - """Returns True if the operation has been cancelled. - - Tornado ``Futures`` do not support cancellation, so this method - always returns False. - """ - return False - - def running(self): - """Returns True if this operation is currently running.""" - return not self._done - - def done(self): - """Returns True if the future has finished running.""" - return self._done - - def _clear_tb_log(self): - self._log_traceback = False - if self._tb_logger is not None: - self._tb_logger.clear() - self._tb_logger = None - - def result(self, timeout=None): - """If the operation succeeded, return its result. If it failed, - re-raise its exception. - - This method takes a ``timeout`` argument for compatibility with - `concurrent.futures.Future` but it is an error to call it - before the `Future` is done, so the ``timeout`` is never used. - """ - self._clear_tb_log() - if self._result is not None: - return self._result - if self._exc_info is not None: - try: - raise_exc_info(self._exc_info) - finally: - self = None - self._check_done() - return self._result - - def exception(self, timeout=None): - """If the operation raised an exception, return the `Exception` - object. Otherwise returns None. - - This method takes a ``timeout`` argument for compatibility with - `concurrent.futures.Future` but it is an error to call it - before the `Future` is done, so the ``timeout`` is never used. 
- """ - self._clear_tb_log() - if self._exc_info is not None: - return self._exc_info[1] - else: - self._check_done() - return None - - def add_done_callback(self, fn): - """Attaches the given callback to the `Future`. - - It will be invoked with the `Future` as its argument when the Future - has finished running and its result is available. In Tornado - consider using `.IOLoop.add_future` instead of calling - `add_done_callback` directly. - """ - if self._done: - from tornado.ioloop import IOLoop - IOLoop.current().add_callback(fn, self) - else: - self._callbacks.append(fn) - - def set_result(self, result): - """Sets the result of a ``Future``. - - It is undefined to call any of the ``set`` methods more than once - on the same object. - """ - self._result = result - self._set_done() - - def set_exception(self, exception): - """Sets the exception of a ``Future.``""" - self.set_exc_info( - (exception.__class__, - exception, - getattr(exception, '__traceback__', None))) - - def exc_info(self): - """Returns a tuple in the same format as `sys.exc_info` or None. - - .. versionadded:: 4.0 - """ - self._clear_tb_log() - return self._exc_info - - def set_exc_info(self, exc_info): - """Sets the exception information of a ``Future.`` - - Preserves tracebacks on Python 2. - - .. versionadded:: 4.0 - """ - self._exc_info = exc_info - self._log_traceback = True - if not _GC_CYCLE_FINALIZERS: - self._tb_logger = _TracebackLogger(exc_info) - - try: - self._set_done() - finally: - # Activate the logger after all callbacks have had a - # chance to call result() or exception(). - if self._log_traceback and self._tb_logger is not None: - self._tb_logger.activate() - self._exc_info = exc_info - - def _check_done(self): - if not self._done: - raise Exception("DummyFuture does not support blocking for results") - - def _set_done(self): - self._done = True - if self._callbacks: - from tornado.ioloop import IOLoop - loop = IOLoop.current() - for cb in self._callbacks: - loop.add_callback(cb, self) - self._callbacks = None - - # On Python 3.3 or older, objects with a destructor part of a reference - # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to - # the PEP 442. 
- if _GC_CYCLE_FINALIZERS: - def __del__(self, is_finalizing=is_finalizing): - if is_finalizing() or not self._log_traceback: - # set_exception() was not called, or result() or exception() - # has consumed the exception - return - - tb = traceback.format_exception(*self._exc_info) - - app_log.error('Future %r exception was never retrieved: %s', - self, ''.join(tb).rstrip()) - - -if asyncio is not None: - Future = asyncio.Future # noqa - -if futures is None: - FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]] -else: - FUTURES = (futures.Future, Future) - - -def is_future(x): +def is_future(x: Any) -> bool: return isinstance(x, FUTURES) -class DummyExecutor(object): - def submit(self, fn, *args, **kwargs): - future = Future() +class DummyExecutor(futures.Executor): + def submit( + self, fn: Callable[..., _T], *args: Any, **kwargs: Any + ) -> "futures.Future[_T]": + future = futures.Future() # type: futures.Future[_T] try: future_set_result_unless_cancelled(future, fn(*args, **kwargs)) except Exception: future_set_exc_info(future, sys.exc_info()) return future - def shutdown(self, wait=True): + def shutdown(self, wait: bool = True) -> None: pass dummy_executor = DummyExecutor() -def run_on_executor(*args, **kwargs): +def run_on_executor(*args: Any, **kwargs: Any) -> Callable: """Decorator to run a synchronous method asynchronously on an executor. The decorated method may be called with a ``callback`` keyword @@ -432,24 +107,25 @@ def run_on_executor(*args, **kwargs): The ``callback`` argument is deprecated and will be removed in 6.0. The decorator itself is discouraged in new code but will not be removed in 6.0. + + .. versionchanged:: 6.0 + + The ``callback`` argument was removed. """ - def run_on_executor_decorator(fn): + # Fully type-checking decorators is tricky, and this one is + # discouraged anyway so it doesn't have all the generic magic. + def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]: executor = kwargs.get("executor", "executor") @functools.wraps(fn) - def wrapper(self, *args, **kwargs): - callback = kwargs.pop("callback", None) - async_future = Future() + def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future: + async_future = Future() # type: Future conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs) chain_future(conc_future, async_future) - if callback: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - from tornado.ioloop import IOLoop - IOLoop.current().add_future( - async_future, lambda future: callback(future.result())) return async_future + return wrapper + if args and kwargs: raise ValueError("cannot combine positional and keyword args") if len(args) == 1: @@ -462,129 +138,7 @@ def run_on_executor(*args, **kwargs): _NO_RESULT = object() -def return_future(f): - """Decorator to make a function that returns via callback return a - `Future`. - - This decorator was provided to ease the transition from - callback-oriented code to coroutines. It is not recommended for - new code. - - The wrapped function should take a ``callback`` keyword argument - and invoke it with one argument when it has finished. To signal failure, - the function can simply raise an exception (which will be - captured by the `.StackContext` and passed along to the ``Future``). - - From the caller's perspective, the callback argument is optional. - If one is given, it will be invoked when the function is complete - with ``Future.result()`` as an argument. 
If the function fails, the - callback will not be run and an exception will be raised into the - surrounding `.StackContext`. - - If no callback is given, the caller should use the ``Future`` to - wait for the function to complete (perhaps by yielding it in a - coroutine, or passing it to `.IOLoop.add_future`). - - Usage: - - .. testcode:: - - @return_future - def future_func(arg1, arg2, callback): - # Do stuff (possibly asynchronous) - callback(result) - - async def caller(): - await future_func(arg1, arg2) - - .. - - Note that ``@return_future`` and ``@gen.engine`` can be applied to the - same function, provided ``@return_future`` appears first. However, - consider using ``@gen.coroutine`` instead of this combination. - - .. versionchanged:: 5.1 - - Now raises a `.DeprecationWarning` if a callback argument is passed to - the decorated function and deprecation warnings are enabled. - - .. deprecated:: 5.1 - - This decorator will be removed in Tornado 6.0. New code should - use coroutines directly instead of wrapping callback-based code - with this decorator. Interactions with non-Tornado - callback-based code should be managed explicitly to avoid - relying on the `.ExceptionStackContext` built into this - decorator. - """ - warnings.warn("@return_future is deprecated, use coroutines instead", - DeprecationWarning) - return _non_deprecated_return_future(f, warn=True) - - -def _non_deprecated_return_future(f, warn=False): - # Allow auth.py to use this decorator without triggering - # deprecation warnings. This will go away once auth.py has removed - # its legacy interfaces in 6.0. - replacer = ArgReplacer(f, 'callback') - - @functools.wraps(f) - def wrapper(*args, **kwargs): - future = Future() - callback, args, kwargs = replacer.replace( - lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value), - args, kwargs) - - def handle_error(typ, value, tb): - future_set_exc_info(future, (typ, value, tb)) - return True - exc_info = None - esc = ExceptionStackContext(handle_error, delay_warning=True) - with esc: - if not warn: - # HACK: In non-deprecated mode (only used in auth.py), - # suppress the warning entirely. Since this is added - # in a 5.1 patch release and already removed in 6.0 - # I'm prioritizing a minimial change instead of a - # clean solution. - esc.delay_warning = False - try: - result = f(*args, **kwargs) - if result is not None: - raise ReturnValueIgnoredError( - "@return_future should not be used with functions " - "that return values") - except: - exc_info = sys.exc_info() - raise - if exc_info is not None: - # If the initial synchronous part of f() raised an exception, - # go ahead and raise it to the caller directly without waiting - # for them to inspect the Future. - future.result() - - # If the caller passed in a callback, schedule it to be called - # when the future resolves. It is important that this happens - # just before we return the future, or else we risk confusing - # stack contexts with multiple exceptions (one here with the - # immediate exception, and again when the future resolves and - # the callback triggers its exception by calling future.result()). 
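The deleted docstring above already names the replacement: write the function as a native coroutine and await it rather than wrapping a callback. A sketch mirroring its usage example (``do_stuff`` is hypothetical):

    async def future_func(arg1, arg2):
        # Compute and return the result instead of invoking a callback;
        # failures propagate as ordinary exceptions to the awaiter.
        return do_stuff(arg1, arg2)

    async def caller():
        result = await future_func(arg1, arg2)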
- if callback is not None: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - - def run_callback(future): - result = future.result() - if result is _NO_RESULT: - callback() - else: - callback(future.result()) - future_add_done_callback(future, wrap(run_callback)) - return future - return wrapper - - -def chain_future(a, b): +def chain_future(a: "Future[_T]", b: "Future[_T]") -> None: """Chain two futures together so that when one completes, so does the other. The result (success or failure) of ``a`` will be copied to ``b``, unless @@ -596,29 +150,33 @@ def chain_future(a, b): `concurrent.futures.Future`. """ - def copy(future): + + def copy(future: "Future[_T]") -> None: assert future is a if b.done(): return - if (hasattr(a, 'exc_info') and - a.exc_info() is not None): - future_set_exc_info(b, a.exc_info()) + if hasattr(a, "exc_info") and a.exc_info() is not None: # type: ignore + future_set_exc_info(b, a.exc_info()) # type: ignore elif a.exception() is not None: b.set_exception(a.exception()) else: b.set_result(a.result()) + if isinstance(a, Future): future_add_done_callback(a, copy) else: # concurrent.futures.Future from tornado.ioloop import IOLoop + IOLoop.current().add_future(a, copy) -def future_set_result_unless_cancelled(future, value): +def future_set_result_unless_cancelled( + future: "Union[futures.Future[_T], Future[_T]]", value: _T +) -> None: """Set the given ``value`` as the `Future`'s result, if not cancelled. - Avoids asyncio.InvalidStateError when calling set_result() on + Avoids ``asyncio.InvalidStateError`` when calling ``set_result()`` on a cancelled `asyncio.Future`. .. versionadded:: 5.0 @@ -627,23 +185,69 @@ def future_set_result_unless_cancelled(future, value): future.set_result(value) -def future_set_exc_info(future, exc_info): +def future_set_exception_unless_cancelled( + future: "Union[futures.Future[_T], Future[_T]]", exc: BaseException +) -> None: + """Set the given ``exc`` as the `Future`'s exception. + + If the Future is already canceled, logs the exception instead. If + this logging is not desired, the caller should explicitly check + the state of the Future and call ``Future.set_exception`` instead of + this wrapper. + + Avoids ``asyncio.InvalidStateError`` when calling ``set_exception()`` on + a cancelled `asyncio.Future`. + + .. versionadded:: 6.0 + + """ + if not future.cancelled(): + future.set_exception(exc) + else: + app_log.error("Exception after Future was cancelled", exc_info=exc) + + +def future_set_exc_info( + future: "Union[futures.Future[_T], Future[_T]]", + exc_info: Tuple[ + Optional[type], Optional[BaseException], Optional[types.TracebackType] + ], +) -> None: """Set the given ``exc_info`` as the `Future`'s exception. - Understands both `asyncio.Future` and Tornado's extensions to - enable better tracebacks on Python 2. + Understands both `asyncio.Future` and the extensions in older + versions of Tornado to enable better tracebacks on Python 2. .. versionadded:: 5.0 + + .. versionchanged:: 6.0 + + If the future is already cancelled, this function is a no-op. 
+ (previously ``asyncio.InvalidStateError`` would be raised) + """ - if hasattr(future, 'set_exc_info'): - # Tornado's Future - future.set_exc_info(exc_info) - else: - # asyncio.Future - future.set_exception(exc_info[1]) + if exc_info[1] is None: + raise Exception("future_set_exc_info called with no exception") + future_set_exception_unless_cancelled(future, exc_info[1]) -def future_add_done_callback(future, callback): +@typing.overload +def future_add_done_callback( + future: "futures.Future[_T]", callback: Callable[["futures.Future[_T]"], None] +) -> None: + pass + + +@typing.overload # noqa: F811 +def future_add_done_callback( + future: "Future[_T]", callback: Callable[["Future[_T]"], None] +) -> None: + pass + + +def future_add_done_callback( # noqa: F811 + future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None] +) -> None: """Arrange to call ``callback`` when ``future`` is complete. ``callback`` is invoked with one argument, the ``future``. diff --git a/server/www/packages/packages-linux/x64/tornado/curl_httpclient.py b/server/www/packages/packages-linux/x64/tornado/curl_httpclient.py index 7f5cb10..4119585 100644 --- a/server/www/packages/packages-linux/x64/tornado/curl_httpclient.py +++ b/server/www/packages/packages-linux/x64/tornado/curl_httpclient.py @@ -15,8 +15,6 @@ """Non-blocking HTTP client implementation using pycurl.""" -from __future__ import absolute_import, division, print_function - import collections import functools import logging @@ -27,32 +25,49 @@ from io import BytesIO from tornado import httputil from tornado import ioloop -from tornado import stack_context from tornado.escape import utf8, native_str -from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main +from tornado.httpclient import ( + HTTPRequest, + HTTPResponse, + HTTPError, + AsyncHTTPClient, + main, +) +from tornado.log import app_log -curl_log = logging.getLogger('tornado.curl_httpclient') +from typing import Dict, Any, Callable, Union +import typing + +if typing.TYPE_CHECKING: + from typing import Deque, Tuple, Optional # noqa: F401 + +curl_log = logging.getLogger("tornado.curl_httpclient") class CurlAsyncHTTPClient(AsyncHTTPClient): - def initialize(self, max_clients=10, defaults=None): + def initialize( # type: ignore + self, max_clients: int = 10, defaults: Dict[str, Any] = None + ) -> None: super(CurlAsyncHTTPClient, self).initialize(defaults=defaults) self._multi = pycurl.CurlMulti() self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) self._curls = [self._curl_create() for i in range(max_clients)] self._free_list = self._curls[:] - self._requests = collections.deque() - self._fds = {} - self._timeout = None + self._requests = ( + collections.deque() + ) # type: Deque[Tuple[HTTPRequest, Callable[[HTTPResponse], None], float]] + self._fds = {} # type: Dict[int, int] + self._timeout = None # type: Optional[object] # libcurl has bugs that sometimes cause it to not report all # relevant file descriptors and timeouts to TIMERFUNCTION/ # SOCKETFUNCTION. Mitigate the effects of such bugs by # forcing a periodic scan of all active requests. 
self._force_timeout_callback = ioloop.PeriodicCallback( - self._handle_force_timeout, 1000) + self._handle_force_timeout, 1000 + ) self._force_timeout_callback.start() # Work around a bug in libcurl 7.29.0: Some fields in the curl @@ -64,7 +79,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): self._multi.add_handle(dummy_curl_handle) self._multi.remove_handle(dummy_curl_handle) - def close(self): + def close(self) -> None: self._force_timeout_callback.stop() if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) @@ -76,15 +91,17 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): # Set below properties to None to reduce the reference count of current # instance, because those properties hold some methods of current # instance that will case circular reference. - self._force_timeout_callback = None + self._force_timeout_callback = None # type: ignore self._multi = None - def fetch_impl(self, request, callback): + def fetch_impl( + self, request: HTTPRequest, callback: Callable[[HTTPResponse], None] + ) -> None: self._requests.append((request, callback, self.io_loop.time())) self._process_queue() self._set_timeout(0) - def _handle_socket(self, event, fd, multi, data): + def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None: """Called by libcurl when it wants to change the file descriptors it cares about. """ @@ -92,7 +109,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): pycurl.POLL_NONE: ioloop.IOLoop.NONE, pycurl.POLL_IN: ioloop.IOLoop.READ, pycurl.POLL_OUT: ioloop.IOLoop.WRITE, - pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE + pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE, } if event == pycurl.POLL_REMOVE: if fd in self._fds: @@ -110,18 +127,18 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): # instead of update. if fd in self._fds: self.io_loop.remove_handler(fd) - self.io_loop.add_handler(fd, self._handle_events, - ioloop_event) + self.io_loop.add_handler(fd, self._handle_events, ioloop_event) self._fds[fd] = ioloop_event - def _set_timeout(self, msecs): + def _set_timeout(self, msecs: int) -> None: """Called by libcurl to schedule a timeout.""" if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = self.io_loop.add_timeout( - self.io_loop.time() + msecs / 1000.0, self._handle_timeout) + self.io_loop.time() + msecs / 1000.0, self._handle_timeout + ) - def _handle_events(self, fd, events): + def _handle_events(self, fd: int, events: int) -> None: """Called by IOLoop when there is activity on one of our file descriptors. """ @@ -139,19 +156,17 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): break self._finish_pending_requests() - def _handle_timeout(self): + def _handle_timeout(self) -> None: """Called by IOLoop when the requested timeout has passed.""" - with stack_context.NullContext(): - self._timeout = None - while True: - try: - ret, num_handles = self._multi.socket_action( - pycurl.SOCKET_TIMEOUT, 0) - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() + self._timeout = None + while True: + try: + ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0) + except pycurl.error as e: + ret = e.args[0] + if ret != pycurl.E_CALL_MULTI_PERFORM: + break + self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. 
However, @@ -170,21 +185,20 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): if new_timeout >= 0: self._set_timeout(new_timeout) - def _handle_force_timeout(self): + def _handle_force_timeout(self) -> None: """Called by IOLoop periodically to ask libcurl to process any events it may have forgotten about. """ - with stack_context.NullContext(): - while True: - try: - ret, num_handles = self._multi.socket_all() - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() + while True: + try: + ret, num_handles = self._multi.socket_all() + except pycurl.error as e: + ret = e.args[0] + if ret != pycurl.E_CALL_MULTI_PERFORM: + break + self._finish_pending_requests() - def _finish_pending_requests(self): + def _finish_pending_requests(self) -> None: """Process any requests that were completed by the last call to multi.socket_action. """ @@ -198,55 +212,55 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): break self._process_queue() - def _process_queue(self): - with stack_context.NullContext(): - while True: - started = 0 - while self._free_list and self._requests: - started += 1 - curl = self._free_list.pop() - (request, callback, queue_start_time) = self._requests.popleft() - curl.info = { - "headers": httputil.HTTPHeaders(), - "buffer": BytesIO(), - "request": request, - "callback": callback, - "queue_start_time": queue_start_time, - "curl_start_time": time.time(), - "curl_start_ioloop_time": self.io_loop.current().time(), - } - try: - self._curl_setup_request( - curl, request, curl.info["buffer"], - curl.info["headers"]) - except Exception as e: - # If there was an error in setup, pass it on - # to the callback. Note that allowing the - # error to escape here will appear to work - # most of the time since we are still in the - # caller's original stack frame, but when - # _process_queue() is called from - # _finish_pending_requests the exceptions have - # nowhere to go. - self._free_list.append(curl) - callback(HTTPResponse( - request=request, - code=599, - error=e)) - else: - self._multi.add_handle(curl) + def _process_queue(self) -> None: + while True: + started = 0 + while self._free_list and self._requests: + started += 1 + curl = self._free_list.pop() + (request, callback, queue_start_time) = self._requests.popleft() + curl.info = { + "headers": httputil.HTTPHeaders(), + "buffer": BytesIO(), + "request": request, + "callback": callback, + "queue_start_time": queue_start_time, + "curl_start_time": time.time(), + "curl_start_ioloop_time": self.io_loop.current().time(), + } + try: + self._curl_setup_request( + curl, request, curl.info["buffer"], curl.info["headers"] + ) + except Exception as e: + # If there was an error in setup, pass it on + # to the callback. Note that allowing the + # error to escape here will appear to work + # most of the time since we are still in the + # caller's original stack frame, but when + # _process_queue() is called from + # _finish_pending_requests the exceptions have + # nowhere to go. 
+ self._free_list.append(curl) + callback(HTTPResponse(request=request, code=599, error=e)) + else: + self._multi.add_handle(curl) - if not started: - break + if not started: + break - def _finish(self, curl, curl_error=None, curl_message=None): + def _finish( + self, curl: pycurl.Curl, curl_error: int = None, curl_message: str = None + ) -> None: info = curl.info curl.info = None self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info["buffer"] if curl_error: - error = CurlError(curl_error, curl_message) + assert curl_message is not None + error = CurlError(curl_error, curl_message) # type: Optional[CurlError] + assert error is not None code = error.code effective_url = None buffer.close() @@ -269,30 +283,45 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): redirect=curl.getinfo(pycurl.REDIRECT_TIME), ) try: - info["callback"](HTTPResponse( - request=info["request"], code=code, headers=info["headers"], - buffer=buffer, effective_url=effective_url, error=error, - reason=info['headers'].get("X-Http-Reason", None), - request_time=self.io_loop.time() - info["curl_start_ioloop_time"], - start_time=info["curl_start_time"], - time_info=time_info)) + info["callback"]( + HTTPResponse( + request=info["request"], + code=code, + headers=info["headers"], + buffer=buffer, + effective_url=effective_url, + error=error, + reason=info["headers"].get("X-Http-Reason", None), + request_time=self.io_loop.time() - info["curl_start_ioloop_time"], + start_time=info["curl_start_time"], + time_info=time_info, + ) + ) except Exception: self.handle_callback_exception(info["callback"]) - def handle_callback_exception(self, callback): - self.io_loop.handle_callback_exception(callback) + def handle_callback_exception(self, callback: Any) -> None: + app_log.error("Exception in callback %r", callback, exc_info=True) - def _curl_create(self): + def _curl_create(self) -> pycurl.Curl: curl = pycurl.Curl() if curl_log.isEnabledFor(logging.DEBUG): curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug) - if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12) + if hasattr( + pycurl, "PROTOCOLS" + ): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12) curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) return curl - def _curl_setup_request(self, curl, request, buffer, headers): + def _curl_setup_request( + self, + curl: pycurl.Curl, + request: HTTPRequest, + buffer: BytesIO, + headers: httputil.HTTPHeaders, + ) -> None: curl.setopt(pycurl.URL, native_str(request.url)) # libcurl's magic "Expect: 100-continue" behavior causes delays @@ -310,22 +339,35 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): if "Pragma" not in request.headers: request.headers["Pragma"] = "" - curl.setopt(pycurl.HTTPHEADER, - ["%s: %s" % (native_str(k), native_str(v)) - for k, v in request.headers.get_all()]) + curl.setopt( + pycurl.HTTPHEADER, + [ + "%s: %s" % (native_str(k), native_str(v)) + for k, v in request.headers.get_all() + ], + ) - curl.setopt(pycurl.HEADERFUNCTION, - functools.partial(self._curl_header_callback, - headers, request.header_callback)) + curl.setopt( + pycurl.HEADERFUNCTION, + functools.partial( + self._curl_header_callback, headers, request.header_callback + ), + ) if request.streaming_callback: - def write_function(chunk): - self.io_loop.add_callback(request.streaming_callback, chunk) + + def write_function(b: Union[bytes, bytearray]) -> int: + assert 
request.streaming_callback is not None + self.io_loop.add_callback(request.streaming_callback, b) + return len(b) + else: write_function = buffer.write curl.setopt(pycurl.WRITEFUNCTION, write_function) curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) curl.setopt(pycurl.MAXREDIRS, request.max_redirects) + assert request.connect_timeout is not None curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) + assert request.request_timeout is not None curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) if request.user_agent: curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) @@ -341,20 +383,22 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): curl.setopt(pycurl.PROXY, request.proxy_host) curl.setopt(pycurl.PROXYPORT, request.proxy_port) if request.proxy_username: - credentials = httputil.encode_username_password(request.proxy_username, - request.proxy_password) + assert request.proxy_password is not None + credentials = httputil.encode_username_password( + request.proxy_username, request.proxy_password + ) curl.setopt(pycurl.PROXYUSERPWD, credentials) - if (request.proxy_auth_mode is None or - request.proxy_auth_mode == "basic"): + if request.proxy_auth_mode is None or request.proxy_auth_mode == "basic": curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC) elif request.proxy_auth_mode == "digest": curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST) else: raise ValueError( - "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode) + "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode + ) else: - curl.setopt(pycurl.PROXY, '') + curl.setopt(pycurl.PROXY, "") curl.unsetopt(pycurl.PROXYUSERPWD) if request.validate_cert: curl.setopt(pycurl.SSL_VERIFYPEER, 1) @@ -397,7 +441,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): elif request.allow_nonstandard_methods or request.method in custom_methods: curl.setopt(pycurl.CUSTOMREQUEST, request.method) else: - raise KeyError('unknown method ' + request.method) + raise KeyError("unknown method " + request.method) body_expected = request.method in ("POST", "PATCH", "PUT") body_present = request.body is not None @@ -405,12 +449,14 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): # Some HTTP methods nearly always have bodies while others # almost never do. Fail in this case unless the user has # opted out of sanity checks with allow_nonstandard_methods. - if ((body_expected and not body_present) or - (body_present and not body_expected)): + if (body_expected and not body_present) or ( + body_present and not body_expected + ): raise ValueError( - 'Body must %sbe None for method %s (unless ' - 'allow_nonstandard_methods is true)' % - ('not ' if body_expected else '', request.method)) + "Body must %sbe None for method %s (unless " + "allow_nonstandard_methods is true)" + % ("not " if body_expected else "", request.method) + ) if body_expected or body_present: if request.method == "GET": @@ -419,21 +465,23 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): # unless we use CUSTOMREQUEST). While the spec doesn't # forbid clients from sending a body, it arguably # disallows the server from doing anything with them. 
- raise ValueError('Body must be None for GET request') - request_buffer = BytesIO(utf8(request.body or '')) + raise ValueError("Body must be None for GET request") + request_buffer = BytesIO(utf8(request.body or "")) - def ioctl(cmd): + def ioctl(cmd: int) -> None: if cmd == curl.IOCMD_RESTARTREAD: request_buffer.seek(0) + curl.setopt(pycurl.READFUNCTION, request_buffer.read) curl.setopt(pycurl.IOCTLFUNCTION, ioctl) if request.method == "POST": - curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or '')) + curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or "")) else: curl.setopt(pycurl.UPLOAD, True) - curl.setopt(pycurl.INFILESIZE, len(request.body or '')) + curl.setopt(pycurl.INFILESIZE, len(request.body or "")) if request.auth_username is not None: + assert request.auth_password is not None if request.auth_mode is None or request.auth_mode == "basic": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) elif request.auth_mode == "digest": @@ -441,11 +489,16 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): else: raise ValueError("Unsupported auth_mode %s" % request.auth_mode) - userpwd = httputil.encode_username_password(request.auth_username, - request.auth_password) + userpwd = httputil.encode_username_password( + request.auth_username, request.auth_password + ) curl.setopt(pycurl.USERPWD, userpwd) - curl_log.debug("%s %s (username: %r)", request.method, request.url, - request.auth_username) + curl_log.debug( + "%s %s (username: %r)", + request.method, + request.url, + request.auth_username, + ) else: curl.unsetopt(pycurl.USERPWD) curl_log.debug("%s %s", request.method, request.url) @@ -459,7 +512,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): if request.ssl_options is not None: raise ValueError("ssl_options not supported in curl_httpclient") - if threading.activeCount() > 1: + if threading.active_count() > 1: # libcurl/pycurl is not thread-safe by default. When multiple threads # are used, signals should be disabled. This has the side effect # of disabling DNS timeouts in some environments (when libcurl is @@ -472,8 +525,13 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): if request.prepare_curl_callback is not None: request.prepare_curl_callback(curl) - def _curl_header_callback(self, headers, header_callback, header_line): - header_line = native_str(header_line.decode('latin1')) + def _curl_header_callback( + self, + headers: httputil.HTTPHeaders, + header_callback: Callable[[str], None], + header_line_bytes: bytes, + ) -> None: + header_line = native_str(header_line_bytes.decode("latin1")) if header_callback is not None: self.io_loop.add_callback(header_callback, header_line) # header_line as returned by curl includes the end-of-line characters. 
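The curl_httpclient hunks above replace the ``stack_context`` and callback plumbing with the Tornado 6 coroutine style. A minimal sketch of driving the patched client; the URL and ``max_clients`` value are illustrative only, the API calls themselves are standard Tornado:

    from tornado import httpclient, ioloop

    # Select the libcurl-backed client patched above; pycurl must be installed.
    httpclient.AsyncHTTPClient.configure(
        "tornado.curl_httpclient.CurlAsyncHTTPClient", max_clients=10
    )

    async def fetch_example() -> None:
        client = httpclient.AsyncHTTPClient()
        # In Tornado 6, fetch() returns a Future; the callback argument is gone.
        response = await client.fetch("http://example.com/")
        print(response.code, response.request_time)

    ioloop.IOLoop.current().run_sync(fetch_example)
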
@@ -490,21 +548,21 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
             return
         headers.parse_line(header_line)
 
-    def _curl_debug(self, debug_type, debug_msg):
-        debug_types = ('I', '<', '>', '<', '>')
+    def _curl_debug(self, debug_type: int, debug_msg: str) -> None:
+        debug_types = ("I", "<", ">", "<", ">")
         if debug_type == 0:
             debug_msg = native_str(debug_msg)
-            curl_log.debug('%s', debug_msg.strip())
+            curl_log.debug("%s", debug_msg.strip())
         elif debug_type in (1, 2):
             debug_msg = native_str(debug_msg)
             for line in debug_msg.splitlines():
-                curl_log.debug('%s %s', debug_types[debug_type], line)
+                curl_log.debug("%s %s", debug_types[debug_type], line)
         elif debug_type == 4:
-            curl_log.debug('%s %r', debug_types[debug_type], debug_msg)
+            curl_log.debug("%s %r", debug_types[debug_type], debug_msg)
 
 
 class CurlError(HTTPError):
-    def __init__(self, errno, message):
+    def __init__(self, errno: int, message: str) -> None:
         HTTPError.__init__(self, 599, message)
         self.errno = errno
diff --git a/server/www/packages/packages-linux/x64/tornado/escape.py b/server/www/packages/packages-linux/x64/tornado/escape.py
index a79ece6..b0ec332 100644
--- a/server/www/packages/packages-linux/x64/tornado/escape.py
+++ b/server/www/packages/packages-linux/x64/tornado/escape.py
@@ -19,35 +19,28 @@ Also includes a few other miscellaneous string manipulation functions that
 have crept in over time.
 """
 
-from __future__ import absolute_import, division, print_function
-
+import html.entities
 import json
 import re
+import urllib.parse
 
-from tornado.util import PY3, unicode_type, basestring_type
+from tornado.util import unicode_type
 
-if PY3:
-    from urllib.parse import parse_qs as _parse_qs
-    import html.entities as htmlentitydefs
-    import urllib.parse as urllib_parse
-    unichr = chr
-else:
-    from urlparse import parse_qs as _parse_qs
-    import htmlentitydefs
-    import urllib as urllib_parse
-
-try:
-    import typing  # noqa
-except ImportError:
-    pass
+import typing
+from typing import Union, Any, Optional, Dict, List, Callable
 
 
-_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
-_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
-                      '\'': '&#39;'}
+_XHTML_ESCAPE_RE = re.compile("[&<>\"']")
+_XHTML_ESCAPE_DICT = {
+    "&": "&amp;",
+    "<": "&lt;",
+    ">": "&gt;",
+    '"': "&quot;",
+    "'": "&#39;",
+}
 
 
-def xhtml_escape(value):
+def xhtml_escape(value: Union[str, bytes]) -> str:
     """Escapes a string so it is valid within HTML or XML.
 
     Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
@@ -58,11 +51,12 @@ def xhtml_escape(value):
        Added the single quote to the list of escaped characters.
     """
-    return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
-                                to_basestring(value))
+    return _XHTML_ESCAPE_RE.sub(
+        lambda match: _XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value)
+    )
 
 
-def xhtml_unescape(value):
+def xhtml_unescape(value: Union[str, bytes]) -> str:
     """Un-escapes an XML-escaped string."""
     return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
 
 
@@ -70,7 +64,7 @@ def xhtml_unescape(value):
 # The fact that json_encode wraps json.dumps is an implementation detail.
 # Please see https://github.com/tornadoweb/tornado/pull/706
 # before sending a pull request that adds **kwargs to this function.
-def json_encode(value):
+def json_encode(value: Any) -> str:
     """JSON-encodes the given Python object."""
     # JSON permits but does not require forward slashes to be escaped.
     # This is useful when json data is emitted in a <script> tag
     # in HTML, as it prevents </script> tags from prematurely
     # terminating the javascript.  Some json libraries do this escaping
     # by default, although python's standard library does not, so we do
     # it here.
     # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
     return json.dumps(value).replace("</", "<\\/")
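For reference, the behavior these escape hunks must preserve can be checked interactively; the assertions below use only the public ``tornado.escape`` API, nothing added by this diff:

    from tornado import escape

    # The five characters matched by _XHTML_ESCAPE_RE map to HTML entities.
    assert escape.xhtml_escape('<a href="x">&\'') == "&lt;a href=&quot;x&quot;&gt;&amp;&#39;"

    # json_encode escapes "</" so output can be embedded in a <script> block.
    assert escape.json_encode("</script>") == '"<\\/script>"'

    # xhtml_unescape reverses entity references, named or numeric.
    assert escape.xhtml_unescape("&lt;b&gt;&#39;") == "<b>'"
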
diff --git a/server/www/packages/packages-linux/x64/tornado/web.py b/server/www/packages/packages-linux/x64/tornado/web.py
-        return ''.join('<script src="' + escape.xhtml_escape(p) +
-                       '" type="text/javascript"></script>'
-                       for p in paths)
+        return "".join(
+            '<script src="'
+            + escape.xhtml_escape(p)
+            + '" type="text/javascript"></script>'
+            for p in paths
+        )
 
-    def render_embed_js(self, js_embed):
+    def render_embed_js(self, js_embed: Iterable[bytes]) -> bytes:
         """Default method used to render the final embedded js for the
         rendered webpage.
 
         Override this method in a sub-classed controller to change the output.
         """
-        return b'<script type="text/javascript">\n//<![CDATA[\n' + \
-            b'\n'.join(js_embed) + \
-            b'\n//]]>\n</script>'
+        return (
+            b'<script type="text/javascript">\n//<![CDATA[\n'
+            + b"\n".join(js_embed)
+            + b"\n//]]>\n</script>"
+        )
 
-    def render_linked_css(self, css_files):
+    def render_linked_css(self, css_files: Iterable[str]) -> str:
         """Default method used to render the final css links for the
         rendered webpage.
 
         Override this method in a sub-classed controller to change the output.
         """
         paths = []
-        unique_paths = set()
+        unique_paths = set()  # type: Set[str]
 
         for path in css_files:
             if not is_absolute(path):
@@ -868,20 +964,21 @@ class RequestHandler(object):
                 paths.append(path)
                 unique_paths.add(path)
 
-        return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
-                       'type="text/css" rel="stylesheet"/>'
-                       for p in paths)
+        return "".join(
+            '<link href="' + escape.xhtml_escape(p) + '" '
+            'type="text/css" rel="stylesheet"/>'
+            for p in paths
+        )
 
-    def render_embed_css(self, css_embed):
+    def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes:
         """Default method used to render the final embedded css for the
         rendered webpage.
 
         Override this method in a sub-classed controller to change the output.
         """
-        return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
-            b'\n</style>'
+        return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>"
 
-    def render_string(self, template_name, **kwargs):
+    def render_string(self, template_name: str, **kwargs: Any) -> bytes:
         """Generate the given template with the given arguments.
 
         We return the generated byte string (in utf8). To generate and
@@ -894,6 +991,7 @@ class RequestHandler(object):
             web_file = frame.f_code.co_filename
             while frame.f_code.co_filename == web_file:
                 frame = frame.f_back
+            assert frame.f_code.co_filename is not None
             template_path = os.path.dirname(frame.f_code.co_filename)
         with RequestHandler._template_loader_lock:
             if template_path not in RequestHandler._template_loaders:
@@ -906,7 +1004,7 @@ class RequestHandler(object):
             namespace.update(kwargs)
             return t.generate(**namespace)
 
-    def get_template_namespace(self):
+    def get_template_namespace(self) -> Dict[str, Any]:
         """Returns a dictionary to be used as the default template namespace.
 
         May be overridden by subclasses to add or modify values.
@@ -924,12 +1022,12 @@ class RequestHandler(object):
             pgettext=self.locale.pgettext,
             static_url=self.static_url,
             xsrf_form_html=self.xsrf_form_html,
-            reverse_url=self.reverse_url
+            reverse_url=self.reverse_url,
         )
         namespace.update(self.ui)
         return namespace
 
-    def create_template_loader(self, template_path):
+    def create_template_loader(self, template_path: str) -> template.BaseLoader:
         """Returns a new template loader for the given path.
 
         May be overridden by subclasses.  By default returns a
@@ -950,7 +1048,7 @@ class RequestHandler(object):
                 kwargs["whitespace"] = settings["template_whitespace"]
         return template.Loader(template_path, **kwargs)
 
-    def flush(self, include_footers=False, callback=None):
+    def flush(self, include_footers: bool = False) -> "Future[None]":
         """Flushes the current output buffer to the network.
 
         The ``callback`` argument, if given, can be used for flow control:
@@ -962,23 +1060,23 @@ class RequestHandler(object):
         .. versionchanged:: 4.0
            Now returns a `.Future` if no callback is given.
 
-        .. deprecated:: 5.1
+        .. versionchanged:: 6.0
 
-           The ``callback`` argument is deprecated and will be removed in
-           Tornado 6.0.
+           The ``callback`` argument was removed.
""" + assert self.request.connection is not None chunk = b"".join(self._write_buffer) self._write_buffer = [] if not self._headers_written: self._headers_written = True for transform in self._transforms: - self._status_code, self._headers, chunk = \ - transform.transform_first_chunk( - self._status_code, self._headers, - chunk, include_footers) + assert chunk is not None + self._status_code, self._headers, chunk = transform.transform_first_chunk( + self._status_code, self._headers, chunk, include_footers + ) # Ignore the chunk and only write the headers for HEAD requests if self.request.method == "HEAD": - chunk = None + chunk = b"" # Finalize the cookie headers (which have been stored in a side # object so an outgoing cookie could be overwritten before it @@ -987,23 +1085,22 @@ class RequestHandler(object): for cookie in self._new_cookie.values(): self.add_header("Set-Cookie", cookie.OutputString(None)) - start_line = httputil.ResponseStartLine('', - self._status_code, - self._reason) + start_line = httputil.ResponseStartLine("", self._status_code, self._reason) return self.request.connection.write_headers( - start_line, self._headers, chunk, callback=callback) + start_line, self._headers, chunk + ) else: for transform in self._transforms: chunk = transform.transform_chunk(chunk, include_footers) # Ignore the chunk and only write the headers for HEAD requests if self.request.method != "HEAD": - return self.request.connection.write(chunk, callback=callback) + return self.request.connection.write(chunk) else: - future = Future() + future = Future() # type: Future[None] future.set_result(None) return future - def finish(self, chunk=None): + def finish(self, chunk: Union[str, bytes, dict] = None) -> "Future[None]": """Finishes this response, ending the HTTP request. Passing a ``chunk`` to ``finish()`` is equivalent to passing that @@ -1027,27 +1124,32 @@ class RequestHandler(object): # Automatically support ETags and add the Content-Length header if # we have not flushed any content yet. 
if not self._headers_written: - if (self._status_code == 200 and - self.request.method in ("GET", "HEAD") and - "Etag" not in self._headers): + if ( + self._status_code == 200 + and self.request.method in ("GET", "HEAD") + and "Etag" not in self._headers + ): self.set_etag_header() if self.check_etag_header(): self._write_buffer = [] self.set_status(304) - if (self._status_code in (204, 304) or - (self._status_code >= 100 and self._status_code < 200)): - assert not self._write_buffer, "Cannot send body with %s" % self._status_code + if self._status_code in (204, 304) or ( + self._status_code >= 100 and self._status_code < 200 + ): + assert not self._write_buffer, ( + "Cannot send body with %s" % self._status_code + ) self._clear_headers_for_304() elif "Content-Length" not in self._headers: content_length = sum(len(part) for part in self._write_buffer) self.set_header("Content-Length", content_length) - if hasattr(self.request, "connection"): - # Now that the request is finished, clear the callback we - # set on the HTTPConnection (which would otherwise prevent the - # garbage collection of the RequestHandler when there - # are keepalive connections) - self.request.connection.set_close_callback(None) + assert self.request.connection is not None + # Now that the request is finished, clear the callback we + # set on the HTTPConnection (which would otherwise prevent the + # garbage collection of the RequestHandler when there + # are keepalive connections) + self.request.connection.set_close_callback(None) # type: ignore future = self.flush(include_footers=True) self.request.connection.finish() @@ -1057,7 +1159,7 @@ class RequestHandler(object): self._break_cycles() return future - def detach(self): + def detach(self) -> iostream.IOStream: """Take control of the underlying stream. Returns the underlying `.IOStream` object and stops all @@ -1069,14 +1171,15 @@ class RequestHandler(object): .. versionadded:: 5.1 """ self._finished = True - return self.request.connection.detach() + # TODO: add detach to HTTPConnection? + return self.request.connection.detach() # type: ignore - def _break_cycles(self): + def _break_cycles(self) -> None: # Break up a reference cycle between this handler and the # _ui_module closures to allow for faster GC on CPython. - self.ui = None + self.ui = None # type: ignore - def send_error(self, status_code=500, **kwargs): + def send_error(self, status_code: int = 500, **kwargs: Any) -> None: """Sends the given HTTP error code to the browser. If `flush()` has already been called, it is not possible to send @@ -1097,14 +1200,13 @@ class RequestHandler(object): try: self.finish() except Exception: - gen_log.error("Failed to flush partial response", - exc_info=True) + gen_log.error("Failed to flush partial response", exc_info=True) return self.clear() - reason = kwargs.get('reason') - if 'exc_info' in kwargs: - exception = kwargs['exc_info'][1] + reason = kwargs.get("reason") + if "exc_info" in kwargs: + exception = kwargs["exc_info"][1] if isinstance(exception, HTTPError) and exception.reason: reason = exception.reason self.set_status(status_code, reason=reason) @@ -1115,7 +1217,7 @@ class RequestHandler(object): if not self._finished: self.finish() - def write_error(self, status_code, **kwargs): + def write_error(self, status_code: int, **kwargs: Any) -> None: """Override to implement custom error pages. 
        ``write_error`` may call `write`, `render`, `set_header`, etc
@@ -1129,19 +1231,19 @@ class RequestHandler(object):
         """
         if self.settings.get("serve_traceback") and "exc_info" in kwargs:
             # in debug mode, try to send a traceback
-            self.set_header('Content-Type', 'text/plain')
+            self.set_header("Content-Type", "text/plain")
             for line in traceback.format_exception(*kwargs["exc_info"]):
                 self.write(line)
             self.finish()
         else:
-            self.finish("<html><title>%(code)d: %(message)s</title>"
-                        "<body>%(code)d: %(message)s</body></html>" % {
-                            "code": status_code,
-                            "message": self._reason,
-                        })
+            self.finish(
+                "<html><title>%(code)d: %(message)s</title>"
+                "<body>%(code)d: %(message)s</body></html>"
+                % {"code": status_code, "message": self._reason}
+            )
 
     @property
-    def locale(self):
+    def locale(self) -> tornado.locale.Locale:
         """The locale for the current session.
 
         Determined by either `get_user_locale`, which you can override to
@@ -1153,17 +1255,19 @@ class RequestHandler(object):
           Added a property setter.
         """
         if not hasattr(self, "_locale"):
-            self._locale = self.get_user_locale()
-            if not self._locale:
+            loc = self.get_user_locale()
+            if loc is not None:
+                self._locale = loc
+            else:
                 self._locale = self.get_browser_locale()
             assert self._locale
         return self._locale
 
     @locale.setter
-    def locale(self, value):
+    def locale(self, value: tornado.locale.Locale) -> None:
         self._locale = value
 
-    def get_user_locale(self):
+    def get_user_locale(self) -> Optional[tornado.locale.Locale]:
         """Override to determine the locale from the authenticated user.
 
         If None is returned, we fall back to `get_browser_locale()`.
@@ -1173,7 +1277,7 @@ class RequestHandler(object):
         """
         return None
 
-    def get_browser_locale(self, default="en_US"):
+    def get_browser_locale(self, default: str = "en_US") -> tornado.locale.Locale:
         """Determines the user's locale from ``Accept-Language`` header.
 
         See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
@@ -1198,7 +1302,7 @@ class RequestHandler(object):
         return locale.get(default)
 
     @property
-    def current_user(self):
+    def current_user(self) -> Any:
         """The authenticated user for this request.
 
         This is set in one of two ways:
@@ -1234,17 +1338,17 @@ class RequestHandler(object):
         return self._current_user
 
     @current_user.setter
-    def current_user(self, value):
+    def current_user(self, value: Any) -> None:
         self._current_user = value
 
-    def get_current_user(self):
+    def get_current_user(self) -> Any:
         """Override to determine the current user from, e.g., a cookie.
 
         This method may not be a coroutine.
         """
         return None
 
-    def get_login_url(self):
+    def get_login_url(self) -> str:
         """Override to customize the login URL based on the request.
 
         By default, we use the ``login_url`` application setting.
@@ -1252,7 +1356,7 @@ class RequestHandler(object):
         self.require_setting("login_url", "@tornado.web.authenticated")
         return self.application.settings["login_url"]
 
-    def get_template_path(self):
+    def get_template_path(self) -> Optional[str]:
         """Override to customize template path for each handler.
 
         By default, we use the ``template_path`` application setting.
@@ -1261,7 +1365,7 @@ class RequestHandler(object):
         return self.application.settings.get("template_path")
 
     @property
-    def xsrf_token(self):
+    def xsrf_token(self) -> bytes:
         """The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie @@ -1301,22 +1405,23 @@ class RequestHandler(object): self._xsrf_token = binascii.b2a_hex(token) elif output_version == 2: mask = os.urandom(4) - self._xsrf_token = b"|".join([ - b"2", - binascii.b2a_hex(mask), - binascii.b2a_hex(_websocket_mask(mask, token)), - utf8(str(int(timestamp)))]) + self._xsrf_token = b"|".join( + [ + b"2", + binascii.b2a_hex(mask), + binascii.b2a_hex(_websocket_mask(mask, token)), + utf8(str(int(timestamp))), + ] + ) else: - raise ValueError("unknown xsrf cookie version %d", - output_version) + raise ValueError("unknown xsrf cookie version %d", output_version) if version is None: - expires_days = 30 if self.current_user else None - self.set_cookie("_xsrf", self._xsrf_token, - expires_days=expires_days, - **cookie_kwargs) + if self.current_user and "expires_days" not in cookie_kwargs: + cookie_kwargs["expires_days"] = 30 + self.set_cookie("_xsrf", self._xsrf_token, **cookie_kwargs) return self._xsrf_token - def _get_raw_xsrf_token(self): + def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]: """Read or generate the xsrf token in its raw form. The raw_xsrf_token is a tuple containing: @@ -1327,7 +1432,7 @@ class RequestHandler(object): * timestamp: the time this token was generated (will not be accurate for version 1 cookies) """ - if not hasattr(self, '_raw_xsrf_token'): + if not hasattr(self, "_raw_xsrf_token"): cookie = self.get_cookie("_xsrf") if cookie: version, token, timestamp = self._decode_xsrf_token(cookie) @@ -1337,10 +1442,14 @@ class RequestHandler(object): version = None token = os.urandom(16) timestamp = time.time() + assert token is not None + assert timestamp is not None self._raw_xsrf_token = (version, token, timestamp) return self._raw_xsrf_token - def _decode_xsrf_token(self, cookie): + def _decode_xsrf_token( + self, cookie: str + ) -> Tuple[Optional[int], Optional[bytes], Optional[float]]: """Convert a cookie string into a the tuple form returned by _get_raw_xsrf_token. """ @@ -1351,12 +1460,11 @@ class RequestHandler(object): if m: version = int(m.group(1)) if version == 2: - _, mask, masked_token, timestamp = cookie.split("|") + _, mask_str, masked_token, timestamp_str = cookie.split("|") - mask = binascii.a2b_hex(utf8(mask)) - token = _websocket_mask( - mask, binascii.a2b_hex(utf8(masked_token))) - timestamp = int(timestamp) + mask = binascii.a2b_hex(utf8(mask_str)) + token = _websocket_mask(mask, binascii.a2b_hex(utf8(masked_token))) + timestamp = int(timestamp_str) return version, token, timestamp else: # Treat unknown versions as not present instead of failing. @@ -1372,11 +1480,10 @@ class RequestHandler(object): return (version, token, timestamp) except Exception: # Catch exceptions and return nothing instead of failing. - gen_log.debug("Uncaught exception in _decode_xsrf_token", - exc_info=True) + gen_log.debug("Uncaught exception in _decode_xsrf_token", exc_info=True) return None, None, None - def check_xsrf_cookie(self): + def check_xsrf_cookie(self) -> None: """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument. To prevent cross-site request forgery, we set an ``_xsrf`` @@ -1390,30 +1497,31 @@ class RequestHandler(object): See http://en.wikipedia.org/wiki/Cross-site_request_forgery - Prior to release 1.1.1, this check was ignored if the HTTP header - ``X-Requested-With: XMLHTTPRequest`` was present. This exception - has been shown to be insecure and has been removed. 
For more
-        information please see
-        http://www.djangoproject.com/weblog/2011/feb/08/security/
-        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
-
         .. versionchanged:: 3.2.2
            Added support for cookie version 2.  Both versions 1 and 2 are
            supported.
         """
+        # Prior to release 1.1.1, this check was ignored if the HTTP header
+        # ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
+        # has been shown to be insecure and has been removed.  For more
+        # information please see
+        # http://www.djangoproject.com/weblog/2011/feb/08/security/
+        # http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
+        token = (
+            self.get_argument("_xsrf", None)
+            or self.request.headers.get("X-Xsrftoken")
+            or self.request.headers.get("X-Csrftoken")
+        )
-        token = (self.get_argument("_xsrf", None) or
-                 self.request.headers.get("X-Xsrftoken") or
-                 self.request.headers.get("X-Csrftoken"))
         if not token:
             raise HTTPError(403, "'_xsrf' argument missing from POST")
         _, token, _ = self._decode_xsrf_token(token)
         _, expected_token, _ = self._get_raw_xsrf_token()
         if not token:
             raise HTTPError(403, "'_xsrf' argument has invalid format")
-        if not _time_independent_equals(utf8(token), utf8(expected_token)):
+        if not hmac.compare_digest(utf8(token), utf8(expected_token)):
             raise HTTPError(403, "XSRF cookie does not match POST argument")
 
-    def xsrf_form_html(self):
+    def xsrf_form_html(self) -> str:
         """An HTML ``<input/>`` element to be included with all POST forms.
 
         It defines the ``_xsrf`` input value, which we check on all POST
@@ -1426,10 +1534,13 @@ class RequestHandler(object):
 
         See `check_xsrf_cookie()` above for more information.
         """
-        return '<input type="hidden" name="_xsrf" value="' + \
-            escape.xhtml_escape(self.xsrf_token) + '"/>'
+        return (
+            '<input type="hidden" name="_xsrf" value="'
+            + escape.xhtml_escape(self.xsrf_token)
+            + '"/>'
+        )
 
-    def static_url(self, path, include_host=None, **kwargs):
+    def static_url(self, path: str, include_host: bool = None, **kwargs: Any) -> str:
         """Returns a static URL for the given relative static file path.
 
         This method requires you set the ``static_path`` setting in your
@@ -1451,8 +1562,9 @@ class RequestHandler(object):
         """
         self.require_setting("static_path", "static_url")
-        get_url = self.settings.get("static_handler_class",
-                                    StaticFileHandler).make_static_url
+        get_url = self.settings.get(
+            "static_handler_class", StaticFileHandler
+        ).make_static_url
 
         if include_host is None:
             include_host = getattr(self, "include_host", False)
@@ -1464,17 +1576,19 @@ class RequestHandler(object):
 
         return base + get_url(self.settings, path, **kwargs)
 
-    def require_setting(self, name, feature="this feature"):
+    def require_setting(self, name: str, feature: str = "this feature") -> None:
         """Raises an exception if the given app setting is not defined."""
         if not self.application.settings.get(name):
-            raise Exception("You must define the '%s' setting in your "
-                            "application to use %s" % (name, feature))
+            raise Exception(
+                "You must define the '%s' setting in your "
+                "application to use %s" % (name, feature)
+            )
 
-    def reverse_url(self, name, *args):
+    def reverse_url(self, name: str, *args: Any) -> str:
         """Alias for `Application.reverse_url`."""
         return self.application.reverse_url(name, *args)
 
-    def compute_etag(self):
+    def compute_etag(self) -> Optional[str]:
         """Computes the etag header to be used for this request.
 
         By default uses a hash of the content written so far.
@@ -1487,7 +1601,7 @@ class RequestHandler(object):
             hasher.update(part)
         return '"%s"' % hasher.hexdigest()
 
-    def set_etag_header(self):
+    def set_etag_header(self) -> None:
         """Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``. @@ -1498,7 +1612,7 @@ class RequestHandler(object): if etag is not None: self.set_header("Etag", etag) - def check_etag_header(self): + def check_etag_header(self) -> bool: """Checks the ``Etag`` header against requests's ``If-None-Match``. Returns ``True`` if the request's Etag matches and a 304 should be @@ -1519,19 +1633,18 @@ class RequestHandler(object): # Find all weak and strong etag values from If-None-Match header # because RFC 7232 allows multiple etag values in a single header. etags = re.findall( - br'\*|(?:W/)?"[^"]*"', - utf8(self.request.headers.get("If-None-Match", "")) + br'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", "")) ) if not computed_etag or not etags: return False match = False - if etags[0] == b'*': + if etags[0] == b"*": match = True else: # Use a weak comparison when comparing entity-tags. - def val(x): - return x[2:] if x.startswith(b'W/') else x + def val(x: bytes) -> bytes: + return x[2:] if x.startswith(b"W/") else x for etag in etags: if val(etag) == val(computed_etag): @@ -1539,36 +1652,30 @@ class RequestHandler(object): break return match - def _stack_context_handle_exception(self, type, value, traceback): - try: - # For historical reasons _handle_request_exception only takes - # the exception value instead of the full triple, - # so re-raise the exception to ensure that it's in - # sys.exc_info() - raise_exc_info((type, value, traceback)) - except Exception: - self._handle_request_exception(value) - return True - - @gen.coroutine - def _execute(self, transforms, *args, **kwargs): + async def _execute( + self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes + ) -> None: """Executes this request with the given output transforms.""" self._transforms = transforms try: if self.request.method not in self.SUPPORTED_METHODS: raise HTTPError(405) self.path_args = [self.decode_argument(arg) for arg in args] - self.path_kwargs = dict((k, self.decode_argument(v, name=k)) - for (k, v) in kwargs.items()) + self.path_kwargs = dict( + (k, self.decode_argument(v, name=k)) for (k, v) in kwargs.items() + ) # If XSRF cookies are turned on, reject form submissions without # the proper cookie - if self.request.method not in ("GET", "HEAD", "OPTIONS") and \ - self.application.settings.get("xsrf_cookies"): + if self.request.method not in ( + "GET", + "HEAD", + "OPTIONS", + ) and self.application.settings.get("xsrf_cookies"): self.check_xsrf_cookie() result = self.prepare() if result is not None: - result = yield result + result = await result if self._prepared_future is not None: # Tell the Application we've finished with prepare() # and are ready for the body to arrive. @@ -1582,14 +1689,14 @@ class RequestHandler(object): # result; the data has been passed to self.data_received # instead. 
try: - yield self.request.body + await self.request._body_future except iostream.StreamClosedError: return method = getattr(self, self.request.method.lower()) result = method(*self.path_args, **self.path_kwargs) if result is not None: - result = yield result + result = await result if self._auto_finish and not self._finished: self.finish() except Exception as e: @@ -1600,21 +1707,22 @@ class RequestHandler(object): finally: # Unset result to avoid circular references result = None - if (self._prepared_future is not None and - not self._prepared_future.done()): + if self._prepared_future is not None and not self._prepared_future.done(): # In case we failed before setting _prepared_future, do it # now (to unblock the HTTP server). Note that this is not # in a finally block to avoid GC issues prior to Python 3.4. self._prepared_future.set_result(None) - def data_received(self, chunk): + def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]: """Implement this method to handle streamed request data. Requires the `.stream_request_body` decorator. + + May be a coroutine for flow control. """ raise NotImplementedError() - def _log(self): + def _log(self) -> None: """Logs the current request. Sort of deprecated since this functionality was moved to the @@ -1623,11 +1731,14 @@ class RequestHandler(object): """ self.application.log_request(self) - def _request_summary(self): - return "%s %s (%s)" % (self.request.method, self.request.uri, - self.request.remote_ip) + def _request_summary(self) -> str: + return "%s %s (%s)" % ( + self.request.method, + self.request.uri, + self.request.remote_ip, + ) - def _handle_request_exception(self, e): + def _handle_request_exception(self, e: BaseException) -> None: if isinstance(e, Finish): # Not an error; just finish the request without logging. if not self._finished: @@ -1649,7 +1760,12 @@ class RequestHandler(object): else: self.send_error(500, exc_info=sys.exc_info()) - def log_exception(self, typ, value, tb): + def log_exception( + self, + typ: "Optional[Type[BaseException]]", + value: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: """Override to customize logging of uncaught exceptions. 
        By default logs instances of `HTTPError` as warnings without
@@ -1662,123 +1778,50 @@ class RequestHandler(object):
         if isinstance(value, HTTPError):
             if value.log_message:
                 format = "%d %s: " + value.log_message
-                args = ([value.status_code, self._request_summary()] +
-                        list(value.args))
+                args = [value.status_code, self._request_summary()] + list(value.args)
                 gen_log.warning(format, *args)
         else:
-            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
-                          self.request, exc_info=(typ, value, tb))
+            app_log.error(  # type: ignore
+                "Uncaught exception %s\n%r",
+                self._request_summary(),
+                self.request,
+                exc_info=(typ, value, tb),
+            )
 
-    def _ui_module(self, name, module):
-        def render(*args, **kwargs):
+    def _ui_module(self, name: str, module: Type["UIModule"]) -> Callable[..., str]:
+        def render(*args, **kwargs) -> str:  # type: ignore
             if not hasattr(self, "_active_modules"):
-                self._active_modules = {}
+                self._active_modules = {}  # type: Dict[str, UIModule]
             if name not in self._active_modules:
                 self._active_modules[name] = module(self)
             rendered = self._active_modules[name].render(*args, **kwargs)
             return rendered
+
         return render
 
-    def _ui_method(self, method):
+    def _ui_method(self, method: Callable[..., str]) -> Callable[..., str]:
         return lambda *args, **kwargs: method(self, *args, **kwargs)
 
-    def _clear_headers_for_304(self):
+    def _clear_headers_for_304(self) -> None:
         # 304 responses should not contain entity headers (defined in
         # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
         # not explicitly allowed by
         # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
-        headers = ["Allow", "Content-Encoding", "Content-Language",
-                   "Content-Length", "Content-MD5", "Content-Range",
-                   "Content-Type", "Last-Modified"]
+        headers = [
+            "Allow",
+            "Content-Encoding",
+            "Content-Language",
+            "Content-Length",
+            "Content-MD5",
+            "Content-Range",
+            "Content-Type",
+            "Last-Modified",
+        ]
         for h in headers:
             self.clear_header(h)
 
 
-def asynchronous(method):
-    """Wrap request handler methods with this if they are asynchronous.
-
-    This decorator is for callback-style asynchronous methods; for
-    coroutines, use the ``@gen.coroutine`` decorator without
-    ``@asynchronous``. (It is legal for legacy reasons to use the two
-    decorators together provided ``@asynchronous`` is first, but
-    ``@asynchronous`` will be ignored in this case)
-
-    This decorator should only be applied to the :ref:`HTTP verb
-    methods <verbs>`; its behavior is undefined for any other method.
-    This decorator does not *make* a method asynchronous; it tells
-    the framework that the method *is* asynchronous.  For this decorator
-    to be useful the method must (at least sometimes) do something
-    asynchronous.
-
-    If this decorator is given, the response is not finished when the
-    method returns.  It is up to the request handler to call
-    `self.finish() <RequestHandler.finish>` to finish the HTTP
-    request.  Without this decorator, the request is automatically
-    finished when the ``get()`` or ``post()`` method returns.  Example:
-
-    .. testcode::
-
-       class MyRequestHandler(RequestHandler):
-           @asynchronous
-           def get(self):
-              http = httpclient.AsyncHTTPClient()
-              http.fetch("http://friendfeed.com/", self._on_download)
-
-           def _on_download(self, response):
-              self.write("Downloaded!")
-              self.finish()
-
-    .. testoutput::
-       :hide:
-
-    .. versionchanged:: 3.1
-       The ability to use ``@gen.coroutine`` without ``@asynchronous``.
-
-    ..
versionchanged:: 4.3 Returning anything but ``None`` or a - yieldable object from a method decorated with ``@asynchronous`` - is an error. Such return values were previously ignored silently. - - .. deprecated:: 5.1 - - This decorator is deprecated and will be removed in Tornado 6.0. - Use coroutines instead. - """ - warnings.warn("@asynchronous is deprecated, use coroutines instead", - DeprecationWarning) - # Delay the IOLoop import because it's not available on app engine. - from tornado.ioloop import IOLoop - - @functools.wraps(method) - def wrapper(self, *args, **kwargs): - self._auto_finish = False - with stack_context.ExceptionStackContext( - self._stack_context_handle_exception, delay_warning=True): - result = method(self, *args, **kwargs) - if result is not None: - result = gen.convert_yielded(result) - - # If @asynchronous is used with @gen.coroutine, (but - # not @gen.engine), we can automatically finish the - # request when the future resolves. Additionally, - # the Future will swallow any exceptions so we need - # to throw them back out to the stack context to finish - # the request. - def future_complete(f): - f.result() - if not self._finished: - self.finish() - IOLoop.current().add_future(result, future_complete) - # Once we have done this, hide the Future from our - # caller (i.e. RequestHandler._when_complete), which - # would otherwise set up its own callback and - # exception handler (resulting in exceptions being - # logged twice). - return None - return result - return wrapper - - -def stream_request_body(cls): +def stream_request_body(cls: Type[RequestHandler]) -> Type[RequestHandler]: """Apply to `RequestHandler` subclasses to enable streaming body support. This decorator implies the following changes: @@ -1805,21 +1848,26 @@ def stream_request_body(cls): return cls -def _has_stream_request_body(cls): +def _has_stream_request_body(cls: Type[RequestHandler]) -> bool: if not issubclass(cls, RequestHandler): raise TypeError("expected subclass of RequestHandler, got %r", cls) - return getattr(cls, '_stream_request_body', False) + return cls._stream_request_body -def removeslash(method): +def removeslash( + method: Callable[..., Optional[Awaitable[None]]] +) -> Callable[..., Optional[Awaitable[None]]]: """Use this decorator to remove trailing slashes from the request path. For example, a request to ``/foo/`` would redirect to ``/foo`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/*'`` in conjunction with using the decorator. """ + @functools.wraps(method) - def wrapper(self, *args, **kwargs): + def wrapper( # type: ignore + self: RequestHandler, *args, **kwargs + ) -> Optional[Awaitable[None]]: if self.request.path.endswith("/"): if self.request.method in ("GET", "HEAD"): uri = self.request.path.rstrip("/") @@ -1827,31 +1875,38 @@ def removeslash(method): if self.request.query: uri += "?" + self.request.query self.redirect(uri, permanent=True) - return + return None else: raise HTTPError(404) return method(self, *args, **kwargs) + return wrapper -def addslash(method): +def addslash( + method: Callable[..., Optional[Awaitable[None]]] +) -> Callable[..., Optional[Awaitable[None]]]: """Use this decorator to add a missing trailing slash to the request path. For example, a request to ``/foo`` would redirect to ``/foo/`` with this decorator. Your request handler mapping should use a regular expression like ``r'/foo/?'`` in conjunction with using the decorator. 
""" + @functools.wraps(method) - def wrapper(self, *args, **kwargs): + def wrapper( # type: ignore + self: RequestHandler, *args, **kwargs + ) -> Optional[Awaitable[None]]: if not self.request.path.endswith("/"): if self.request.method in ("GET", "HEAD"): uri = self.request.path + "/" if self.request.query: uri += "?" + self.request.query self.redirect(uri, permanent=True) - return + return None raise HTTPError(404) return method(self, *args, **kwargs) + return wrapper @@ -1866,28 +1921,36 @@ class _ApplicationRouter(ReversibleRuleRouter): `_ApplicationRouter` instance. """ - def __init__(self, application, rules=None): + def __init__(self, application: "Application", rules: _RuleList = None) -> None: assert isinstance(application, Application) self.application = application super(_ApplicationRouter, self).__init__(rules) - def process_rule(self, rule): + def process_rule(self, rule: Rule) -> Rule: rule = super(_ApplicationRouter, self).process_rule(rule) if isinstance(rule.target, (list, tuple)): - rule.target = _ApplicationRouter(self.application, rule.target) + rule.target = _ApplicationRouter( # type: ignore + self.application, rule.target + ) return rule - def get_target_delegate(self, target, request, **target_params): + def get_target_delegate( + self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any + ) -> Optional[httputil.HTTPMessageDelegate]: if isclass(target) and issubclass(target, RequestHandler): - return self.application.get_handler_delegate(request, target, **target_params) + return self.application.get_handler_delegate( + request, target, **target_params + ) - return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params) + return super(_ApplicationRouter, self).get_target_delegate( + target, request, **target_params + ) class Application(ReversibleRouter): - """A collection of request handlers that make up a web application. + r"""A collection of request handlers that make up a web application. Instances of this class are callable and can be passed directly to HTTPServer to serve the application:: @@ -1951,7 +2014,7 @@ class Application(ReversibleRouter): Applications that do not use TLS may be vulnerable to :ref:`DNS rebinding ` attacks. This attack is especially - relevant to applications that only listen on ``127.0.0.1` or + relevant to applications that only listen on ``127.0.0.1`` or other private networks. Appropriate host patterns must be used (instead of the default of ``r'.*'``) to prevent this risk. The ``default_host`` argument must not be used in applications that @@ -1969,54 +2032,64 @@ class Application(ReversibleRouter): Integration with the new `tornado.routing` module. 
""" - def __init__(self, handlers=None, default_host=None, transforms=None, - **settings): + + def __init__( + self, + handlers: _RuleList = None, + default_host: str = None, + transforms: List[Type["OutputTransform"]] = None, + **settings: Any + ) -> None: if transforms is None: - self.transforms = [] + self.transforms = [] # type: List[Type[OutputTransform]] if settings.get("compress_response") or settings.get("gzip"): self.transforms.append(GZipContentEncoding) else: self.transforms = transforms self.default_host = default_host self.settings = settings - self.ui_modules = {'linkify': _linkify, - 'xsrf_form_html': _xsrf_form_html, - 'Template': TemplateModule, - } - self.ui_methods = {} + self.ui_modules = { + "linkify": _linkify, + "xsrf_form_html": _xsrf_form_html, + "Template": TemplateModule, + } + self.ui_methods = {} # type: Dict[str, Callable[..., str]] self._load_ui_modules(settings.get("ui_modules", {})) self._load_ui_methods(settings.get("ui_methods", {})) if self.settings.get("static_path"): path = self.settings["static_path"] handlers = list(handlers or []) - static_url_prefix = settings.get("static_url_prefix", - "/static/") - static_handler_class = settings.get("static_handler_class", - StaticFileHandler) + static_url_prefix = settings.get("static_url_prefix", "/static/") + static_handler_class = settings.get( + "static_handler_class", StaticFileHandler + ) static_handler_args = settings.get("static_handler_args", {}) - static_handler_args['path'] = path - for pattern in [re.escape(static_url_prefix) + r"(.*)", - r"/(favicon\.ico)", r"/(robots\.txt)"]: - handlers.insert(0, (pattern, static_handler_class, - static_handler_args)) + static_handler_args["path"] = path + for pattern in [ + re.escape(static_url_prefix) + r"(.*)", + r"/(favicon\.ico)", + r"/(robots\.txt)", + ]: + handlers.insert(0, (pattern, static_handler_class, static_handler_args)) - if self.settings.get('debug'): - self.settings.setdefault('autoreload', True) - self.settings.setdefault('compiled_template_cache', False) - self.settings.setdefault('static_hash_cache', False) - self.settings.setdefault('serve_traceback', True) + if self.settings.get("debug"): + self.settings.setdefault("autoreload", True) + self.settings.setdefault("compiled_template_cache", False) + self.settings.setdefault("static_hash_cache", False) + self.settings.setdefault("serve_traceback", True) self.wildcard_router = _ApplicationRouter(self, handlers) - self.default_router = _ApplicationRouter(self, [ - Rule(AnyMatches(), self.wildcard_router) - ]) + self.default_router = _ApplicationRouter( + self, [Rule(AnyMatches(), self.wildcard_router)] + ) # Automatically reload modified modules - if self.settings.get('autoreload'): + if self.settings.get("autoreload"): from tornado import autoreload + autoreload.start() - def listen(self, port, address="", **kwargs): + def listen(self, port: int, address: str = "", **kwargs: Any) -> HTTPServer: """Starts an HTTP server for this application on the given port. This is a convenience alias for creating an `.HTTPServer` @@ -2035,14 +2108,11 @@ class Application(ReversibleRouter): .. versionchanged:: 4.3 Now returns the `.HTTPServer` object. 
""" - # import is here rather than top level because HTTPServer - # is not importable on appengine - from tornado.httpserver import HTTPServer server = HTTPServer(self, **kwargs) server.listen(port, address) return server - def add_handlers(self, host_pattern, host_handlers): + def add_handlers(self, host_pattern: str, host_handlers: _RuleList) -> None: """Appends the given handlers to our handler list. Host patterns are processed sequentially in the order they were @@ -2054,31 +2124,31 @@ class Application(ReversibleRouter): self.default_router.rules.insert(-1, rule) if self.default_host is not None: - self.wildcard_router.add_rules([( - DefaultHostMatches(self, host_matcher.host_pattern), - host_handlers - )]) + self.wildcard_router.add_rules( + [(DefaultHostMatches(self, host_matcher.host_pattern), host_handlers)] + ) - def add_transform(self, transform_class): + def add_transform(self, transform_class: Type["OutputTransform"]) -> None: self.transforms.append(transform_class) - def _load_ui_methods(self, methods): + def _load_ui_methods(self, methods: Any) -> None: if isinstance(methods, types.ModuleType): - self._load_ui_methods(dict((n, getattr(methods, n)) - for n in dir(methods))) + self._load_ui_methods(dict((n, getattr(methods, n)) for n in dir(methods))) elif isinstance(methods, list): for m in methods: self._load_ui_methods(m) else: for name, fn in methods.items(): - if not name.startswith("_") and hasattr(fn, "__call__") \ - and name[0].lower() == name[0]: + if ( + not name.startswith("_") + and hasattr(fn, "__call__") + and name[0].lower() == name[0] + ): self.ui_methods[name] = fn - def _load_ui_modules(self, modules): + def _load_ui_modules(self, modules: Any) -> None: if isinstance(modules, types.ModuleType): - self._load_ui_modules(dict((n, getattr(modules, n)) - for n in dir(modules))) + self._load_ui_modules(dict((n, getattr(modules, n)) for n in dir(modules))) elif isinstance(modules, list): for m in modules: self._load_ui_modules(m) @@ -2091,27 +2161,37 @@ class Application(ReversibleRouter): except TypeError: pass - def __call__(self, request): + def __call__( + self, request: httputil.HTTPServerRequest + ) -> Optional[Awaitable[None]]: # Legacy HTTPServer interface dispatcher = self.find_handler(request) return dispatcher.execute() - def find_handler(self, request, **kwargs): + def find_handler( + self, request: httputil.HTTPServerRequest, **kwargs: Any + ) -> "_HandlerDelegate": route = self.default_router.find_handler(request) if route is not None: - return route + return cast("_HandlerDelegate", route) - if self.settings.get('default_handler_class'): + if self.settings.get("default_handler_class"): return self.get_handler_delegate( request, - self.settings['default_handler_class'], - self.settings.get('default_handler_args', {})) + self.settings["default_handler_class"], + self.settings.get("default_handler_args", {}), + ) - return self.get_handler_delegate( - request, ErrorHandler, {'status_code': 404}) + return self.get_handler_delegate(request, ErrorHandler, {"status_code": 404}) - def get_handler_delegate(self, request, target_class, target_kwargs=None, - path_args=None, path_kwargs=None): + def get_handler_delegate( + self, + request: httputil.HTTPServerRequest, + target_class: Type[RequestHandler], + target_kwargs: Dict[str, Any] = None, + path_args: List[bytes] = None, + path_kwargs: Dict[str, bytes] = None, + ) -> "_HandlerDelegate": """Returns `~.httputil.HTTPMessageDelegate` that can serve a request for application and `RequestHandler` subclass. 
@@ -2123,9 +2203,10 @@ class Application(ReversibleRouter): :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method. """ return _HandlerDelegate( - self, request, target_class, target_kwargs, path_args, path_kwargs) + self, request, target_class, target_kwargs, path_args, path_kwargs + ) - def reverse_url(self, name, *args): + def reverse_url(self, name: str, *args: Any) -> str: """Returns a URL path for handler named ``name`` The handler must be added to the application as a named `URLSpec`. @@ -2140,7 +2221,7 @@ class Application(ReversibleRouter): raise KeyError("%s not found in named urls" % name) - def log_request(self, handler): + def log_request(self, handler: RequestHandler) -> None: """Writes a completed HTTP request to the logs. By default writes to the python root logger. To change @@ -2158,13 +2239,24 @@ class Application(ReversibleRouter): else: log_method = access_log.error request_time = 1000.0 * handler.request.request_time() - log_method("%d %s %.2fms", handler.get_status(), - handler._request_summary(), request_time) + log_method( + "%d %s %.2fms", + handler.get_status(), + handler._request_summary(), + request_time, + ) class _HandlerDelegate(httputil.HTTPMessageDelegate): - def __init__(self, application, request, handler_class, handler_kwargs, - path_args, path_kwargs): + def __init__( + self, + application: Application, + request: httputil.HTTPServerRequest, + handler_class: Type[RequestHandler], + handler_kwargs: Optional[Dict[str, Any]], + path_args: Optional[List[bytes]], + path_kwargs: Optional[Dict[str, bytes]], + ) -> None: self.application = application self.connection = request.connection self.request = request @@ -2172,35 +2264,41 @@ class _HandlerDelegate(httputil.HTTPMessageDelegate): self.handler_kwargs = handler_kwargs or {} self.path_args = path_args or [] self.path_kwargs = path_kwargs or {} - self.chunks = [] + self.chunks = [] # type: List[bytes] self.stream_request_body = _has_stream_request_body(self.handler_class) - def headers_received(self, start_line, headers): + def headers_received( + self, + start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], + headers: httputil.HTTPHeaders, + ) -> Optional[Awaitable[None]]: if self.stream_request_body: - self.request.body = Future() + self.request._body_future = Future() return self.execute() + return None - def data_received(self, data): + def data_received(self, data: bytes) -> Optional[Awaitable[None]]: if self.stream_request_body: return self.handler.data_received(data) else: self.chunks.append(data) + return None - def finish(self): + def finish(self) -> None: if self.stream_request_body: - future_set_result_unless_cancelled(self.request.body, None) + future_set_result_unless_cancelled(self.request._body_future, None) else: - self.request.body = b''.join(self.chunks) + self.request.body = b"".join(self.chunks) self.request._parse_body() self.execute() - def on_connection_close(self): + def on_connection_close(self) -> None: if self.stream_request_body: self.handler.on_connection_close() else: - self.chunks = None + self.chunks = None # type: ignore - def execute(self): + def execute(self) -> Optional[Awaitable[None]]: # If template cache is disabled (usually in the debug mode), # re-compile templates and reload static files on every # request so you don't need to restart to see changes @@ -2208,11 +2306,12 @@ class _HandlerDelegate(httputil.HTTPMessageDelegate): with RequestHandler._template_loader_lock: for loader in RequestHandler._template_loaders.values(): 
loader.reset() - if not self.application.settings.get('static_hash_cache', True): + if not self.application.settings.get("static_hash_cache", True): StaticFileHandler.reset() - self.handler = self.handler_class(self.application, self.request, - **self.handler_kwargs) + self.handler = self.handler_class( + self.application, self.request, **self.handler_kwargs + ) transforms = [t(self.request) for t in self.application.transforms] if self.stream_request_body: @@ -2224,8 +2323,10 @@ class _HandlerDelegate(httputil.HTTPMessageDelegate): # except handler, and we cannot easily access the IOLoop here to # call add_future (because of the requirement to remain compatible # with WSGI) - self.handler._execute(transforms, *self.path_args, - **self.path_kwargs) + fut = gen.convert_yielded( + self.handler._execute(transforms, *self.path_args, **self.path_kwargs) + ) + fut.add_done_callback(lambda f: f.result()) # If we are streaming the request body, then execute() is finished # when the handler has prepared to receive the body. If not, # it doesn't matter when execute() finishes (so we return None) @@ -2254,18 +2355,22 @@ class HTTPError(Exception): determined automatically from ``status_code``, but can be used to use a non-standard numeric code. """ - def __init__(self, status_code=500, log_message=None, *args, **kwargs): + + def __init__( + self, status_code: int = 500, log_message: str = None, *args: Any, **kwargs: Any + ) -> None: self.status_code = status_code self.log_message = log_message self.args = args - self.reason = kwargs.get('reason', None) + self.reason = kwargs.get("reason", None) if log_message and not args: - self.log_message = log_message.replace('%', '%%') + self.log_message = log_message.replace("%", "%%") - def __str__(self): + def __str__(self) -> str: message = "HTTP %d: %s" % ( self.status_code, - self.reason or httputil.responses.get(self.status_code, 'Unknown')) + self.reason or httputil.responses.get(self.status_code, "Unknown"), + ) if self.log_message: return message + " (" + (self.log_message % self.args) + ")" else: @@ -2296,6 +2401,7 @@ class Finish(Exception): Arguments passed to ``Finish()`` will be passed on to `RequestHandler.finish`. """ + pass @@ -2307,21 +2413,24 @@ class MissingArgumentError(HTTPError): .. versionadded:: 3.1 """ - def __init__(self, arg_name): + + def __init__(self, arg_name: str) -> None: super(MissingArgumentError, self).__init__( - 400, 'Missing argument %s' % arg_name) + 400, "Missing argument %s" % arg_name + ) self.arg_name = arg_name class ErrorHandler(RequestHandler): """Generates an error response with ``status_code`` for all requests.""" - def initialize(self, status_code): + + def initialize(self, status_code: int) -> None: self.set_status(status_code) - def prepare(self): + def prepare(self) -> None: raise HTTPError(self._status_code) - def check_xsrf_cookie(self): + def check_xsrf_cookie(self) -> None: # POSTs to an ErrorHandler don't actually have side effects, # so we don't need to check the xsrf token. This allows POSTs # to the wrong url to return a 404 instead of 403. @@ -2360,15 +2469,19 @@ class RedirectHandler(RequestHandler): If any query arguments are present, they will be copied to the destination URL. 
""" - def initialize(self, url, permanent=True): + + def initialize(self, url: str, permanent: bool = True) -> None: self._url = url self._permanent = permanent - def get(self, *args): + def get(self, *args: Any) -> None: to_url = self._url.format(*args) if self.request.query_arguments: + # TODO: figure out typing for the next line. to_url = httputil.url_concat( - to_url, list(httputil.qs_to_qsl(self.request.query_arguments))) + to_url, + list(httputil.qs_to_qsl(self.request.query_arguments)), # type: ignore + ) self.redirect(to_url, permanent=self._permanent) @@ -2438,31 +2551,30 @@ class StaticFileHandler(RequestHandler): .. versionchanged:: 3.1 Many of the methods for subclasses were added in Tornado 3.1. """ + CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years - _static_hashes = {} # type: typing.Dict + _static_hashes = {} # type: Dict[str, Optional[str]] _lock = threading.Lock() # protects _static_hashes - def initialize(self, path, default_filename=None): + def initialize(self, path: str, default_filename: str = None) -> None: self.root = path self.default_filename = default_filename @classmethod - def reset(cls): + def reset(cls) -> None: with cls._lock: cls._static_hashes = {} - def head(self, path): + def head(self, path: str) -> Awaitable[None]: return self.get(path, include_body=False) - @gen.coroutine - def get(self, path, include_body=True): + async def get(self, path: str, include_body: bool = True) -> None: # Set up our path instance variables. self.path = self.parse_url_path(path) del path # make sure we don't refer to path instead of self.path again absolute_path = self.get_absolute_path(self.root, self.path) - self.absolute_path = self.validate_absolute_path( - self.root, absolute_path) + self.absolute_path = self.validate_absolute_path(self.root, absolute_path) if self.absolute_path is None: return @@ -2483,16 +2595,24 @@ class StaticFileHandler(RequestHandler): size = self.get_content_size() if request_range: start, end = request_range - if (start is not None and start >= size) or end == 0: - # As per RFC 2616 14.35.1, a range is not satisfiable only: if - # the first requested byte is equal to or greater than the - # content, or when a suffix with length 0 is specified - self.set_status(416) # Range Not Satisfiable - self.set_header("Content-Type", "text/plain") - self.set_header("Content-Range", "bytes */%s" % (size, )) - return if start is not None and start < 0: start += size + if start < 0: + start = 0 + if ( + start is not None + and (start >= size or (end is not None and start >= end)) + ) or end == 0: + # As per RFC 2616 14.35.1, a range is not satisfiable only: if + # the first requested byte is equal to or greater than the + # content, or when a suffix with length 0 is specified. + # https://tools.ietf.org/html/rfc7233#section-2.1 + # A byte-range-spec is invalid if the last-byte-pos value is present + # and less than the first-byte-pos. + self.set_status(416) # Range Not Satisfiable + self.set_header("Content-Type", "text/plain") + self.set_header("Content-Range", "bytes */%s" % (size,)) + return if end is not None and end > size: # Clients sometimes blindly use a large range to limit their # download size; cap the endpoint at the actual file size. @@ -2503,8 +2623,9 @@ class StaticFileHandler(RequestHandler): # ``Range: bytes=0-``. 
if size != (end or size) - (start or 0): self.set_status(206) # Partial Content - self.set_header("Content-Range", - httputil._get_content_range(start, end, size)) + self.set_header( + "Content-Range", httputil._get_content_range(start, end, size) + ) else: start = end = None @@ -2525,13 +2646,13 @@ class StaticFileHandler(RequestHandler): for chunk in content: try: self.write(chunk) - yield self.flush() + await self.flush() except iostream.StreamClosedError: return else: assert self.request.method == "HEAD" - def compute_etag(self): + def compute_etag(self) -> Optional[str]: """Sets the ``Etag`` header based on static url version. This allows efficient ``If-None-Match`` checks against cached @@ -2540,12 +2661,13 @@ class StaticFileHandler(RequestHandler): .. versionadded:: 3.1 """ + assert self.absolute_path is not None version_hash = self._get_cached_version(self.absolute_path) if not version_hash: return None - return '"%s"' % (version_hash, ) + return '"%s"' % (version_hash,) - def set_headers(self): + def set_headers(self) -> None: """Sets the content and caching headers on the response. .. versionadded:: 3.1 @@ -2560,22 +2682,23 @@ class StaticFileHandler(RequestHandler): if content_type: self.set_header("Content-Type", content_type) - cache_time = self.get_cache_time(self.path, self.modified, - content_type) + cache_time = self.get_cache_time(self.path, self.modified, content_type) if cache_time > 0: - self.set_header("Expires", datetime.datetime.utcnow() + - datetime.timedelta(seconds=cache_time)) + self.set_header( + "Expires", + datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time), + ) self.set_header("Cache-Control", "max-age=" + str(cache_time)) self.set_extra_headers(self.path) - def should_return_304(self): + def should_return_304(self) -> bool: """Returns True if the headers indicate that we should return 304. .. versionadded:: 3.1 """ # If client sent If-None-Match, use it, ignore If-Modified-Since - if self.request.headers.get('If-None-Match'): + if self.request.headers.get("If-None-Match"): return self.check_etag_header() # Check the If-Modified-Since, and don't send the result if the @@ -2585,13 +2708,14 @@ class StaticFileHandler(RequestHandler): date_tuple = email.utils.parsedate(ims_value) if date_tuple is not None: if_since = datetime.datetime(*date_tuple[:6]) + assert self.modified is not None if if_since >= self.modified: return True return False @classmethod - def get_absolute_path(cls, root, path): + def get_absolute_path(cls, root: str, path: str) -> str: """Returns the absolute location of ``path`` relative to ``root``. ``root`` is the path configured for this `StaticFileHandler` @@ -2607,7 +2731,7 @@ class StaticFileHandler(RequestHandler): abspath = os.path.abspath(os.path.join(root, path)) return abspath - def validate_absolute_path(self, root, absolute_path): + def validate_absolute_path(self, root: str, absolute_path: str) -> Optional[str]: """Validate and return the absolute path. ``root`` is the configured path for the `StaticFileHandler`, @@ -2642,16 +2766,14 @@ class StaticFileHandler(RequestHandler): # The trailing slash also needs to be temporarily added back # the requested path so a request to root/ will match. 
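        # Worked example: with root "/srv/static/", a traversal such as
        # absolute_path "/srv/static_private/f" is rejected because
        # "/srv/static_private/f/" does not start with "/srv/static/",
        # while a request for the root itself resolves to "/srv/static"
        # and passes only thanks to the separator appended below.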
if not (absolute_path + os.path.sep).startswith(root): - raise HTTPError(403, "%s is not in root static directory", - self.path) - if (os.path.isdir(absolute_path) and - self.default_filename is not None): + raise HTTPError(403, "%s is not in root static directory", self.path) + if os.path.isdir(absolute_path) and self.default_filename is not None: # need to look at the request.path here for when path is empty # but there is some prefix to the path that was already # trimmed by the routing if not self.request.path.endswith("/"): self.redirect(self.request.path + "/", permanent=True) - return + return None absolute_path = os.path.join(absolute_path, self.default_filename) if not os.path.exists(absolute_path): raise HTTPError(404) @@ -2660,7 +2782,9 @@ class StaticFileHandler(RequestHandler): return absolute_path @classmethod - def get_content(cls, abspath, start=None, end=None): + def get_content( + cls, abspath: str, start: int = None, end: int = None + ) -> Generator[bytes, None, None]: """Retrieve the content of the requested resource which is located at the given absolute path. @@ -2679,7 +2803,7 @@ class StaticFileHandler(RequestHandler): if start is not None: file.seek(start) if end is not None: - remaining = end - (start or 0) + remaining = end - (start or 0) # type: Optional[int] else: remaining = None while True: @@ -2697,7 +2821,7 @@ class StaticFileHandler(RequestHandler): return @classmethod - def get_content_version(cls, abspath): + def get_content_version(cls, abspath: str) -> str: """Returns a version string for the resource at the given path. This class method may be overridden by subclasses. The @@ -2714,12 +2838,13 @@ class StaticFileHandler(RequestHandler): hasher.update(chunk) return hasher.hexdigest() - def _stat(self): - if not hasattr(self, '_stat_result'): + def _stat(self) -> os.stat_result: + assert self.absolute_path is not None + if not hasattr(self, "_stat_result"): self._stat_result = os.stat(self.absolute_path) return self._stat_result - def get_content_size(self): + def get_content_size(self) -> int: """Retrieve the total size of the resource at the given path. This method may be overridden by subclasses. @@ -2731,9 +2856,9 @@ class StaticFileHandler(RequestHandler): partial results are requested. """ stat_result = self._stat() - return stat_result[stat.ST_SIZE] + return stat_result.st_size - def get_modified_time(self): + def get_modified_time(self) -> Optional[datetime.datetime]: """Returns the time that ``self.absolute_path`` was last modified. May be overridden in subclasses. Should return a `~datetime.datetime` @@ -2742,15 +2867,23 @@ class StaticFileHandler(RequestHandler): .. versionadded:: 3.1 """ stat_result = self._stat() - modified = datetime.datetime.utcfromtimestamp( - stat_result[stat.ST_MTIME]) + # NOTE: Historically, this used stat_result[stat.ST_MTIME], + # which truncates the fractional portion of the timestamp. It + # was changed from that form to stat_result.st_mtime to + # satisfy mypy (which disallows the bracket operator), but the + # latter form returns a float instead of an int. For + # consistency with the past (and because we have a unit test + # that relies on this), we truncate the float here, although + # I'm not sure that's the right thing to do. + modified = datetime.datetime.utcfromtimestamp(int(stat_result.st_mtime)) return modified - def get_content_type(self): + def get_content_type(self) -> str: """Returns the ``Content-Type`` header to be used for this request. .. 
versionadded:: 3.1 """ + assert self.absolute_path is not None mime_type, encoding = mimetypes.guess_type(self.absolute_path) # per RFC 6713, use the appropriate type for a gzip compressed file if encoding == "gzip": @@ -2766,11 +2899,13 @@ class StaticFileHandler(RequestHandler): else: return "application/octet-stream" - def set_extra_headers(self, path): + def set_extra_headers(self, path: str) -> None: """For subclass to add extra headers to the response""" pass - def get_cache_time(self, path, modified, mime_type): + def get_cache_time( + self, path: str, modified: Optional[datetime.datetime], mime_type: str + ) -> int: """Override to customize cache control behavior. Return a positive number of seconds to make the result @@ -2784,7 +2919,9 @@ class StaticFileHandler(RequestHandler): return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0 @classmethod - def make_static_url(cls, settings, path, include_version=True): + def make_static_url( + cls, settings: Dict[str, Any], path: str, include_version: bool = True + ) -> str: """Constructs a versioned url for the given path. This method may be overridden in subclasses (but note that it @@ -2803,7 +2940,7 @@ class StaticFileHandler(RequestHandler): file corresponding to the given ``path``. """ - url = settings.get('static_url_prefix', '/static/') + path + url = settings.get("static_url_prefix", "/static/") + path if not include_version: return url @@ -2811,9 +2948,9 @@ class StaticFileHandler(RequestHandler): if not version_hash: return url - return '%s?v=%s' % (url, version_hash) + return "%s?v=%s" % (url, version_hash) - def parse_url_path(self, url_path): + def parse_url_path(self, url_path: str) -> str: """Converts a static URL path into a filesystem path. ``url_path`` is the path component of the URL with @@ -2827,7 +2964,7 @@ class StaticFileHandler(RequestHandler): return url_path @classmethod - def get_version(cls, settings, path): + def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]: """Generate the version string to be used in static URLs. ``settings`` is the `Application.settings` dictionary and ``path`` @@ -2840,11 +2977,11 @@ class StaticFileHandler(RequestHandler): `get_content_version` is now preferred as it allows the base class to handle caching of the result. """ - abs_path = cls.get_absolute_path(settings['static_path'], path) + abs_path = cls.get_absolute_path(settings["static_path"], path) return cls._get_cached_version(abs_path) @classmethod - def _get_cached_version(cls, abs_path): + def _get_cached_version(cls, abs_path: str) -> Optional[str]: with cls._lock: hashes = cls._static_hashes if abs_path not in hashes: @@ -2875,10 +3012,13 @@ class FallbackHandler(RequestHandler): (r".*", FallbackHandler, dict(fallback=wsgi_app), ]) """ - def initialize(self, fallback): + + def initialize( + self, fallback: Callable[[httputil.HTTPServerRequest], None] + ) -> None: self.fallback = fallback - def prepare(self): + def prepare(self) -> None: self.fallback(self.request) self._finished = True self.on_finish() @@ -2891,14 +3031,20 @@ class OutputTransform(object): or interact with them directly; the framework chooses which transforms (if any) to apply. 
""" - def __init__(self, request): + + def __init__(self, request: httputil.HTTPServerRequest) -> None: pass - def transform_first_chunk(self, status_code, headers, chunk, finishing): - # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501 + def transform_first_chunk( + self, + status_code: int, + headers: httputil.HTTPHeaders, + chunk: bytes, + finishing: bool, + ) -> Tuple[int, httputil.HTTPHeaders, bytes]: return status_code, headers, chunk - def transform_chunk(self, chunk, finishing): + def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes: return chunk @@ -2912,12 +3058,20 @@ class GZipContentEncoding(OutputTransform): of just a whitelist. (the whitelist is still used for certain non-text mime types). """ + # Whitelist of compressible mime types (in addition to any types # beginning with "text/"). - CONTENT_TYPES = set(["application/javascript", "application/x-javascript", - "application/xml", "application/atom+xml", - "application/json", "application/xhtml+xml", - "image/svg+xml"]) + CONTENT_TYPES = set( + [ + "application/javascript", + "application/x-javascript", + "application/xml", + "application/atom+xml", + "application/json", + "application/xhtml+xml", + "image/svg+xml", + ] + ) # Python's GzipFile defaults to level 9, while most other gzip # tools (including gzip itself) default to 6, which is probably a # better CPU/size tradeoff. @@ -2929,29 +3083,37 @@ class GZipContentEncoding(OutputTransform): # regardless of size. MIN_LENGTH = 1024 - def __init__(self, request): + def __init__(self, request: httputil.HTTPServerRequest) -> None: self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "") - def _compressible_type(self, ctype): - return ctype.startswith('text/') or ctype in self.CONTENT_TYPES + def _compressible_type(self, ctype: str) -> bool: + return ctype.startswith("text/") or ctype in self.CONTENT_TYPES - def transform_first_chunk(self, status_code, headers, chunk, finishing): - # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501 + def transform_first_chunk( + self, + status_code: int, + headers: httputil.HTTPHeaders, + chunk: bytes, + finishing: bool, + ) -> Tuple[int, httputil.HTTPHeaders, bytes]: # TODO: can/should this type be inherited from the superclass? - if 'Vary' in headers: - headers['Vary'] += ', Accept-Encoding' + if "Vary" in headers: + headers["Vary"] += ", Accept-Encoding" else: - headers['Vary'] = 'Accept-Encoding' + headers["Vary"] = "Accept-Encoding" if self._gzipping: ctype = _unicode(headers.get("Content-Type", "")).split(";")[0] - self._gzipping = self._compressible_type(ctype) and \ - (not finishing or len(chunk) >= self.MIN_LENGTH) and \ - ("Content-Encoding" not in headers) + self._gzipping = ( + self._compressible_type(ctype) + and (not finishing or len(chunk) >= self.MIN_LENGTH) + and ("Content-Encoding" not in headers) + ) if self._gzipping: headers["Content-Encoding"] = "gzip" self._gzip_value = BytesIO() - self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value, - compresslevel=self.GZIP_LEVEL) + self._gzip_file = gzip.GzipFile( + mode="w", fileobj=self._gzip_value, compresslevel=self.GZIP_LEVEL + ) chunk = self.transform_chunk(chunk, finishing) if "Content-Length" in headers: # The original content length is no longer correct. 
@@ -2964,7 +3126,7 @@ class GZipContentEncoding(OutputTransform):
                 del headers["Content-Length"]
         return status_code, headers, chunk
 
-    def transform_chunk(self, chunk, finishing):
+    def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes:
         if self._gzipping:
             self._gzip_file.write(chunk)
             if finishing:
@@ -2977,7 +3139,9 @@ class GZipContentEncoding(OutputTransform):
         return chunk
 
 
-def authenticated(method):
+def authenticated(
+    method: Callable[..., Optional[Awaitable[None]]]
+) -> Callable[..., Optional[Awaitable[None]]]:
     """Decorate methods with this to require that the user be logged in.
 
     If the user is not logged in, they will be redirected to the configured
@@ -2988,22 +3152,27 @@ def authenticated(method):
     will add a `next` parameter so the login page knows where to send
     you once you're logged in.
     """
+
     @functools.wraps(method)
-    def wrapper(self, *args, **kwargs):
+    def wrapper(  # type: ignore
+        self: RequestHandler, *args, **kwargs
+    ) -> Optional[Awaitable[None]]:
         if not self.current_user:
             if self.request.method in ("GET", "HEAD"):
                 url = self.get_login_url()
                 if "?" not in url:
-                    if urlparse.urlsplit(url).scheme:
+                    if urllib.parse.urlsplit(url).scheme:
                         # if login url is absolute, make next absolute too
                         next_url = self.request.full_url()
                     else:
+                        assert self.request.uri is not None
                         next_url = self.request.uri
                     url += "?" + urlencode(dict(next=next_url))
                 self.redirect(url)
-                return
+                return None
             raise HTTPError(403)
         return method(self, *args, **kwargs)
+
     return wrapper
 
 
@@ -3016,26 +3185,27 @@ class UIModule(object):
 
     Subclasses of UIModule must override the `render` method.
     """
-    def __init__(self, handler):
+
+    def __init__(self, handler: RequestHandler) -> None:
         self.handler = handler
         self.request = handler.request
         self.ui = handler.ui
         self.locale = handler.locale
 
     @property
-    def current_user(self):
+    def current_user(self) -> Any:
         return self.handler.current_user
 
-    def render(self, *args, **kwargs):
+    def render(self, *args: Any, **kwargs: Any) -> str:
         """Override in subclasses to return this module's output."""
         raise NotImplementedError()
 
-    def embedded_javascript(self):
+    def embedded_javascript(self) -> Optional[str]:
         """Override to return a JavaScript string
         to be embedded in the page."""
         return None
 
-    def javascript_files(self):
+    def javascript_files(self) -> Optional[Iterable[str]]:
         """Override to return a list of JavaScript files needed by this module.
 
         If the return values are relative paths, they will be passed to
@@ -3043,12 +3213,12 @@ class UIModule(object):
         """
         return None
 
-    def embedded_css(self):
+    def embedded_css(self) -> Optional[str]:
         """Override to return a CSS string
         that will be embedded in the page."""
         return None
 
-    def css_files(self):
+    def css_files(self) -> Optional[Iterable[str]]:
         """Override to returns a list of CSS files required by this module.
 
         If the return values are relative paths, they will be passed to
@@ -3056,30 +3226,30 @@ class UIModule(object):
         """
         return None
 
-    def html_head(self):
+    def html_head(self) -> Optional[str]:
         """Override to return an HTML string that will be put in the <head/>
         element.
         """
         return None
 
-    def html_body(self):
+    def html_body(self) -> Optional[str]:
         """Override to return an HTML string that will be put at the end of the
         <body/> element.
""" return None - def render_string(self, path, **kwargs): + def render_string(self, path: str, **kwargs: Any) -> bytes: """Renders a template and returns it as a string.""" return self.handler.render_string(path, **kwargs) class _linkify(UIModule): - def render(self, text, **kwargs): + def render(self, text: str, **kwargs: Any) -> str: # type: ignore return escape.linkify(text, **kwargs) class _xsrf_form_html(UIModule): - def render(self): + def render(self) -> str: # type: ignore return self.handler.xsrf_form_html() @@ -3098,32 +3268,35 @@ class TemplateModule(UIModule): per instantiation of the template, so they must not depend on any arguments to the template. """ - def __init__(self, handler): + + def __init__(self, handler: RequestHandler) -> None: super(TemplateModule, self).__init__(handler) # keep resources in both a list and a dict to preserve order - self._resource_list = [] - self._resource_dict = {} + self._resource_list = [] # type: List[Dict[str, Any]] + self._resource_dict = {} # type: Dict[str, Dict[str, Any]] - def render(self, path, **kwargs): - def set_resources(**kwargs): + def render(self, path: str, **kwargs: Any) -> bytes: # type: ignore + def set_resources(**kwargs) -> str: # type: ignore if path not in self._resource_dict: self._resource_list.append(kwargs) self._resource_dict[path] = kwargs else: if self._resource_dict[path] != kwargs: - raise ValueError("set_resources called with different " - "resources for the same template") + raise ValueError( + "set_resources called with different " + "resources for the same template" + ) return "" - return self.render_string(path, set_resources=set_resources, - **kwargs) - def _get_resources(self, key): + return self.render_string(path, set_resources=set_resources, **kwargs) + + def _get_resources(self, key: str) -> Iterable[str]: return (r[key] for r in self._resource_list if key in r) - def embedded_javascript(self): + def embedded_javascript(self) -> str: return "\n".join(self._get_resources("embedded_javascript")) - def javascript_files(self): + def javascript_files(self) -> Iterable[str]: result = [] for f in self._get_resources("javascript_files"): if isinstance(f, (unicode_type, bytes)): @@ -3132,10 +3305,10 @@ class TemplateModule(UIModule): result.extend(f) return result - def embedded_css(self): + def embedded_css(self) -> str: return "\n".join(self._get_resources("embedded_css")) - def css_files(self): + def css_files(self) -> Iterable[str]: result = [] for f in self._get_resources("css_files"): if isinstance(f, (unicode_type, bytes)): @@ -3144,47 +3317,40 @@ class TemplateModule(UIModule): result.extend(f) return result - def html_head(self): + def html_head(self) -> str: return "".join(self._get_resources("html_head")) - def html_body(self): + def html_body(self) -> str: return "".join(self._get_resources("html_body")) class _UIModuleNamespace(object): """Lazy namespace which creates UIModule proxies bound to a handler.""" - def __init__(self, handler, ui_modules): + + def __init__( + self, handler: RequestHandler, ui_modules: Dict[str, Type[UIModule]] + ) -> None: self.handler = handler self.ui_modules = ui_modules - def __getitem__(self, key): + def __getitem__(self, key: str) -> Callable[..., str]: return self.handler._ui_module(key, self.ui_modules[key]) - def __getattr__(self, key): + def __getattr__(self, key: str) -> Callable[..., str]: try: return self[key] except KeyError as e: raise AttributeError(str(e)) -if hasattr(hmac, 'compare_digest'): # python 3.3 - _time_independent_equals = 
hmac.compare_digest -else: - def _time_independent_equals(a, b): - if len(a) != len(b): - return False - result = 0 - if isinstance(a[0], int): # python3 byte strings - for x, y in zip(a, b): - result |= x ^ y - else: # python2 - for x, y in zip(a, b): - result |= ord(x) ^ ord(y) - return result == 0 - - -def create_signed_value(secret, name, value, version=None, clock=None, - key_version=None): +def create_signed_value( + secret: _CookieSecretTypes, + name: str, + value: Union[str, bytes], + version: int = None, + clock: Callable[[], float] = None, + key_version: int = None, +) -> bytes: if version is None: version = DEFAULT_SIGNED_VALUE_VERSION if clock is None: @@ -3193,6 +3359,7 @@ def create_signed_value(secret, name, value, version=None, clock=None, timestamp = utf8(str(int(clock()))) value = base64.b64encode(utf8(value)) if version == 1: + assert not isinstance(secret, dict) signature = _create_signature_v1(secret, name, value, timestamp) value = b"|".join([value, timestamp, signature]) return value @@ -3211,19 +3378,25 @@ def create_signed_value(secret, name, value, version=None, clock=None, # - name (not encoded; assumed to be ~alphanumeric) # - value (base64-encoded) # - signature (hex-encoded; no length prefix) - def format_field(s): + def format_field(s: Union[str, bytes]) -> bytes: return utf8("%d:" % len(s)) + utf8(s) - to_sign = b"|".join([ - b"2", - format_field(str(key_version or 0)), - format_field(timestamp), - format_field(name), - format_field(value), - b'']) + + to_sign = b"|".join( + [ + b"2", + format_field(str(key_version or 0)), + format_field(timestamp), + format_field(name), + format_field(value), + b"", + ] + ) if isinstance(secret, dict): - assert key_version is not None, 'Key version must be set when sign key dict is used' - assert version >= 2, 'Version must be at least 2 for key version support' + assert ( + key_version is not None + ), "Key version must be set when sign key dict is used" + assert version >= 2, "Version must be at least 2 for key version support" secret = secret[key_version] signature = _create_signature_v2(secret, to_sign) @@ -3237,7 +3410,7 @@ def create_signed_value(secret, name, value, version=None, clock=None, _signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$") -def _get_version(value): +def _get_version(value: bytes) -> int: # Figures out what version value is. Version 1 did not include an # explicit version field and started with arbitrary base64 data, # which makes this tricky. 
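The version-2 byte layout assembled by ``create_signed_value`` above is easier to audit with a concrete value. A sketch that rebuilds it by hand, with the HMAC call inlined from ``_create_signature_v2`` (the secret and cookie name are made up):

```python
import base64
import hashlib
import hmac
import time

def field(s: bytes) -> bytes:
    return b"%d:%s" % (len(s), s)  # length-prefixed field

payload = b"|".join([
    b"2",                                   # format version
    field(b"0"),                            # key version (0: plain secret)
    field(str(int(time.time())).encode()),  # timestamp
    field(b"session"),                      # cookie name
    field(base64.b64encode(b"hello")),      # b64-encoded value
    b"",                                    # leaves the trailing "|"
])
sig = hmac.new(b"my-secret", payload, hashlib.sha256).hexdigest()
signed = payload + sig.encode()  # what create_signed_value() returns for v2
print(signed)
```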
@@ -3260,8 +3433,14 @@ def _get_version(value): return version -def decode_signed_value(secret, name, value, max_age_days=31, - clock=None, min_version=None): +def decode_signed_value( + secret: _CookieSecretTypes, + name: str, + value: Union[None, str, bytes], + max_age_days: int = 31, + clock: Callable[[], float] = None, + min_version: int = None, +) -> Optional[bytes]: if clock is None: clock = time.time if min_version is None: @@ -3277,21 +3456,26 @@ def decode_signed_value(secret, name, value, max_age_days=31, if version < min_version: return None if version == 1: - return _decode_signed_value_v1(secret, name, value, - max_age_days, clock) + assert not isinstance(secret, dict) + return _decode_signed_value_v1(secret, name, value, max_age_days, clock) elif version == 2: - return _decode_signed_value_v2(secret, name, value, - max_age_days, clock) + return _decode_signed_value_v2(secret, name, value, max_age_days, clock) else: return None -def _decode_signed_value_v1(secret, name, value, max_age_days, clock): +def _decode_signed_value_v1( + secret: Union[str, bytes], + name: str, + value: bytes, + max_age_days: int, + clock: Callable[[], float], +) -> Optional[bytes]: parts = utf8(value).split(b"|") if len(parts) != 3: return None signature = _create_signature_v1(secret, name, parts[0], parts[1]) - if not _time_independent_equals(parts[2], signature): + if not hmac.compare_digest(parts[2], signature): gen_log.warning("Invalid cookie signature %r", value) return None timestamp = int(parts[1]) @@ -3304,8 +3488,7 @@ def _decode_signed_value_v1(secret, name, value, max_age_days, clock): # digits from the payload to the timestamp without altering the # signature. For backwards compatibility, sanity-check timestamp # here instead of modifying _cookie_signature. - gen_log.warning("Cookie timestamp in future; possible tampering %r", - value) + gen_log.warning("Cookie timestamp in future; possible tampering %r", value) return None if parts[1].startswith(b"0"): gen_log.warning("Tampered cookie %r", value) @@ -3316,16 +3499,16 @@ def _decode_signed_value_v1(secret, name, value, max_age_days, clock): return None -def _decode_fields_v2(value): - def _consume_field(s): - length, _, rest = s.partition(b':') +def _decode_fields_v2(value: bytes) -> Tuple[int, bytes, bytes, bytes, bytes]: + def _consume_field(s: bytes) -> Tuple[bytes, bytes]: + length, _, rest = s.partition(b":") n = int(length) field_value = rest[:n] # In python 3, indexing bytes returns small integers; we must # use a slice to get a byte string as in python 2. 
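        # Worked example: _consume_field(b"3:foo|rest") returns
        # (b"foo", b"rest") -- the length prefix is consumed, the "|"
        # separator is verified below, and the remainder is handed back
        # for the next field.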
- if rest[n:n + 1] != b'|': + if rest[n : n + 1] != b"|": raise ValueError("malformed v2 signed value field") - rest = rest[n + 1:] + rest = rest[n + 1 :] return field_value, rest rest = value[2:] # remove version number @@ -3336,12 +3519,20 @@ def _decode_fields_v2(value): return int(key_version), timestamp, name_field, value_field, passed_sig -def _decode_signed_value_v2(secret, name, value, max_age_days, clock): +def _decode_signed_value_v2( + secret: _CookieSecretTypes, + name: str, + value: bytes, + max_age_days: int, + clock: Callable[[], float], +) -> Optional[bytes]: try: - key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value) + key_version, timestamp_bytes, name_field, value_field, passed_sig = _decode_fields_v2( + value + ) except ValueError: return None - signed_string = value[:-len(passed_sig)] + signed_string = value[: -len(passed_sig)] if isinstance(secret, dict): try: @@ -3350,11 +3541,11 @@ def _decode_signed_value_v2(secret, name, value, max_age_days, clock): return None expected_sig = _create_signature_v2(secret, signed_string) - if not _time_independent_equals(passed_sig, expected_sig): + if not hmac.compare_digest(passed_sig, expected_sig): return None if name_field != utf8(name): return None - timestamp = int(timestamp) + timestamp = int(timestamp_bytes) if timestamp < clock() - max_age_days * 86400: # The signature has expired. return None @@ -3364,7 +3555,7 @@ def _decode_signed_value_v2(secret, name, value, max_age_days, clock): return None -def get_signature_key_version(value): +def get_signature_key_version(value: Union[str, bytes]) -> Optional[int]: value = utf8(value) version = _get_version(value) if version < 2: @@ -3377,18 +3568,18 @@ def get_signature_key_version(value): return key_version -def _create_signature_v1(secret, *parts): +def _create_signature_v1(secret: Union[str, bytes], *parts: Union[str, bytes]) -> bytes: hash = hmac.new(utf8(secret), digestmod=hashlib.sha1) for part in parts: hash.update(utf8(part)) return utf8(hash.hexdigest()) -def _create_signature_v2(secret, s): +def _create_signature_v2(secret: Union[str, bytes], s: bytes) -> bytes: hash = hmac.new(utf8(secret), digestmod=hashlib.sha256) hash.update(utf8(s)) return utf8(hash.hexdigest()) -def is_absolute(path): +def is_absolute(path: str) -> bool: return any(path.startswith(x) for x in ["/", "http:", "https:"]) diff --git a/server/www/packages/packages-linux/x64/tornado/websocket.py b/server/www/packages/packages-linux/x64/tornado/websocket.py index 0b994fc..d991fee 100644 --- a/server/www/packages/packages-linux/x64/tornado/websocket.py +++ b/server/www/packages/packages-linux/x64/tornado/websocket.py @@ -16,8 +16,8 @@ the protocol (known as "draft 76") and are not compatible with this module. Removed support for the draft 76 protocol version. 
""" -from __future__ import absolute_import, division, print_function - +import abc +import asyncio import base64 import hashlib import os @@ -25,24 +25,79 @@ import sys import struct import tornado.escape import tornado.web +from urllib.parse import urlparse import zlib from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado.escape import utf8, native_str, to_unicode from tornado import gen, httpclient, httputil from tornado.ioloop import IOLoop, PeriodicCallback -from tornado.iostream import StreamClosedError -from tornado.log import gen_log +from tornado.iostream import StreamClosedError, IOStream +from tornado.log import gen_log, app_log from tornado import simple_httpclient from tornado.queues import Queue from tornado.tcpclient import TCPClient -from tornado.util import _websocket_mask, PY3 +from tornado.util import _websocket_mask + +from typing import ( + TYPE_CHECKING, + cast, + Any, + Optional, + Dict, + Union, + List, + Awaitable, + Callable, + Tuple, + Type, +) +from types import TracebackType + +if TYPE_CHECKING: + from typing_extensions import Protocol + + # The zlib compressor types aren't actually exposed anywhere + # publicly, so declare protocols for the portions we use. + class _Compressor(Protocol): + def compress(self, data: bytes) -> bytes: + pass + + def flush(self, mode: int) -> bytes: + pass + + class _Decompressor(Protocol): + unconsumed_tail = b"" # type: bytes + + def decompress(self, data: bytes, max_length: int) -> bytes: + pass + + class _WebSocketDelegate(Protocol): + # The common base interface implemented by WebSocketHandler on + # the server side and WebSocketClientConnection on the client + # side. + def on_ws_connection_close( + self, close_code: int = None, close_reason: str = None + ) -> None: + pass + + def on_message(self, message: Union[str, bytes]) -> Optional["Awaitable[None]"]: + pass + + def on_ping(self, data: bytes) -> None: + pass + + def on_pong(self, data: bytes) -> None: + pass + + def log_exception( + self, + typ: Optional[Type[BaseException]], + value: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + pass -if PY3: - from urllib.parse import urlparse # py2 - xrange = range -else: - from urlparse import urlparse # py3 _default_max_message_size = 10 * 1024 * 1024 @@ -56,6 +111,7 @@ class WebSocketClosedError(WebSocketError): .. versionadded:: 3.2 """ + pass @@ -63,6 +119,20 @@ class _DecompressTooLargeError(Exception): pass +class _WebSocketParams(object): + def __init__( + self, + ping_interval: float = None, + ping_timeout: float = None, + max_message_size: int = _default_max_message_size, + compression_options: Dict[str, Any] = None, + ) -> None: + self.ping_interval = ping_interval + self.ping_timeout = ping_timeout + self.max_message_size = max_message_size + self.compression_options = compression_options + + class WebSocketHandler(tornado.web.RequestHandler): """Subclass this class to create a basic WebSocket handler. @@ -144,22 +214,28 @@ class WebSocketHandler(tornado.web.RequestHandler): Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and ``websocket_max_message_size``. 
""" - def __init__(self, application, request, **kwargs): + + def __init__( + self, + application: tornado.web.Application, + request: httputil.HTTPServerRequest, + **kwargs: Any + ) -> None: super(WebSocketHandler, self).__init__(application, request, **kwargs) - self.ws_connection = None - self.close_code = None - self.close_reason = None - self.stream = None + self.ws_connection = None # type: Optional[WebSocketProtocol] + self.close_code = None # type: Optional[int] + self.close_reason = None # type: Optional[str] + self.stream = None # type: Optional[IOStream] self._on_close_called = False - def get(self, *args, **kwargs): + async def get(self, *args: Any, **kwargs: Any) -> None: self.open_args = args self.open_kwargs = kwargs # Upgrade header should be present and should be equal to WebSocket - if self.request.headers.get("Upgrade", "").lower() != 'websocket': + if self.request.headers.get("Upgrade", "").lower() != "websocket": self.set_status(400) - log_msg = "Can \"Upgrade\" only to \"WebSocket\"." + log_msg = 'Can "Upgrade" only to "WebSocket".' self.finish(log_msg) gen_log.debug(log_msg) return @@ -168,11 +244,12 @@ class WebSocketHandler(tornado.web.RequestHandler): # Some proxy servers/load balancers # might mess with it. headers = self.request.headers - connection = map(lambda s: s.strip().lower(), - headers.get("Connection", "").split(",")) - if 'upgrade' not in connection: + connection = map( + lambda s: s.strip().lower(), headers.get("Connection", "").split(",") + ) + if "upgrade" not in connection: self.set_status(400) - log_msg = "\"Connection\" must be \"Upgrade\"." + log_msg = '"Connection" must be "Upgrade".' self.finish(log_msg) gen_log.debug(log_msg) return @@ -198,32 +275,31 @@ class WebSocketHandler(tornado.web.RequestHandler): self.ws_connection = self.get_websocket_protocol() if self.ws_connection: - self.ws_connection.accept_connection() + await self.ws_connection.accept_connection(self) else: self.set_status(426, "Upgrade Required") self.set_header("Sec-WebSocket-Version", "7, 8, 13") - self.finish() stream = None @property - def ping_interval(self): + def ping_interval(self) -> Optional[float]: """The interval for websocket keep-alive pings. Set websocket_ping_interval = 0 to disable pings. """ - return self.settings.get('websocket_ping_interval', None) + return self.settings.get("websocket_ping_interval", None) @property - def ping_timeout(self): + def ping_timeout(self) -> Optional[float]: """If no ping is received in this many seconds, close the websocket connection (VPNs, etc. can fail to cleanly close ws connections). Default is max of 3 pings or 30 seconds. """ - return self.settings.get('websocket_ping_timeout', None) + return self.settings.get("websocket_ping_timeout", None) @property - def max_message_size(self): + def max_message_size(self) -> int: """Maximum allowed message size. If the remote peer sends a message larger than this, the connection @@ -231,9 +307,13 @@ class WebSocketHandler(tornado.web.RequestHandler): Default is 10MiB. """ - return self.settings.get('websocket_max_message_size', _default_max_message_size) + return self.settings.get( + "websocket_max_message_size", _default_max_message_size + ) - def write_message(self, message, binary=False): + def write_message( + self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False + ) -> "Future[None]": """Sends the given message to the client of this Web Socket. 
The message may be either a string or a dict (which will be @@ -255,13 +335,13 @@ class WebSocketHandler(tornado.web.RequestHandler): Consistently raises `WebSocketClosedError`. Previously could sometimes raise `.StreamClosedError`. """ - if self.ws_connection is None: + if self.ws_connection is None or self.ws_connection.is_closing(): raise WebSocketClosedError() if isinstance(message, dict): message = tornado.escape.json_encode(message) return self.ws_connection.write_message(message, binary=binary) - def select_subprotocol(self, subprotocols): + def select_subprotocol(self, subprotocols: List[str]) -> Optional[str]: """Override to implement subprotocol negotiation. ``subprotocols`` is a list of strings identifying the @@ -287,14 +367,15 @@ class WebSocketHandler(tornado.web.RequestHandler): return None @property - def selected_subprotocol(self): + def selected_subprotocol(self) -> Optional[str]: """The subprotocol returned by `select_subprotocol`. .. versionadded:: 5.1 """ + assert self.ws_connection is not None return self.ws_connection.selected_subprotocol - def get_compression_options(self): + def get_compression_options(self) -> Optional[Dict[str, Any]]: """Override to return compression options for the connection. If this method returns None (the default), compression will @@ -318,7 +399,7 @@ class WebSocketHandler(tornado.web.RequestHandler): # TODO: Add wbits option. return None - def open(self, *args, **kwargs): + def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]: """Invoked when a new WebSocket is opened. The arguments to `open` are extracted from the `tornado.web.URLSpec` @@ -334,7 +415,7 @@ class WebSocketHandler(tornado.web.RequestHandler): """ pass - def on_message(self, message): + def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]: """Handle incoming messages on the WebSocket This method must be overridden. @@ -345,7 +426,7 @@ class WebSocketHandler(tornado.web.RequestHandler): """ raise NotImplementedError - def ping(self, data=b''): + def ping(self, data: Union[str, bytes] = b"") -> None: """Send ping frame to the remote end. The data argument allows a small amount of data (up to 125 @@ -362,19 +443,19 @@ class WebSocketHandler(tornado.web.RequestHandler): """ data = utf8(data) - if self.ws_connection is None: + if self.ws_connection is None or self.ws_connection.is_closing(): raise WebSocketClosedError() self.ws_connection.write_ping(data) - def on_pong(self, data): + def on_pong(self, data: bytes) -> None: """Invoked when the response to a ping frame is received.""" pass - def on_ping(self, data): + def on_ping(self, data: bytes) -> None: """Invoked when the a ping frame is received.""" pass - def on_close(self): + def on_close(self) -> None: """Invoked when the WebSocket is closed. If the connection was closed cleanly and a status code or reason @@ -387,7 +468,7 @@ class WebSocketHandler(tornado.web.RequestHandler): """ pass - def close(self, code=None, reason=None): + def close(self, code: int = None, reason: str = None) -> None: """Closes this Web Socket. Once the close handshake is successful the socket will be closed. @@ -407,7 +488,7 @@ class WebSocketHandler(tornado.web.RequestHandler): self.ws_connection.close(code, reason) self.ws_connection = None - def check_origin(self, origin): + def check_origin(self, origin: str) -> bool: """Override to enable support for allowing alternate origins. 
The ``origin`` argument is the value of the ``Origin`` HTTP @@ -417,9 +498,9 @@ class WebSocketHandler(tornado.web.RequestHandler): implement WebSockets support this header, and non-browser clients do not have the same cross-site security concerns). - Should return True to accept the request or False to reject it. - By default, rejects all requests with an origin on a host other - than this one. + Should return ``True`` to accept the request or ``False`` to + reject it. By default, rejects all requests with an origin on + a host other than this one. This is a security protection against cross site scripting attacks on browsers, since WebSockets are allowed to bypass the usual same-origin @@ -439,7 +520,7 @@ class WebSocketHandler(tornado.web.RequestHandler): for more. To accept all cross-origin traffic (which was the default prior to - Tornado 4.0), simply override this method to always return true:: + Tornado 4.0), simply override this method to always return ``True``:: def check_origin(self, origin): return True @@ -463,7 +544,7 @@ class WebSocketHandler(tornado.web.RequestHandler): # Check to see that origin matches host directly, including ports return origin == host - def set_nodelay(self, value): + def set_nodelay(self, value: bool) -> None: """Set the no-delay flag for this stream. By default, small messages may be delayed and/or combined to minimize @@ -477,9 +558,10 @@ class WebSocketHandler(tornado.web.RequestHandler): .. versionadded:: 3.1 """ - self.stream.set_nodelay(value) + assert self.ws_connection is not None + self.ws_connection.set_nodelay(value) - def on_connection_close(self): + def on_connection_close(self) -> None: if self.ws_connection: self.ws_connection.on_connection_close() self.ws_connection = None @@ -488,7 +570,14 @@ class WebSocketHandler(tornado.web.RequestHandler): self.on_close() self._break_cycles() - def _break_cycles(self): + def on_ws_connection_close( + self, close_code: int = None, close_reason: str = None + ) -> None: + self.close_code = close_code + self.close_reason = close_reason + self.on_connection_close() + + def _break_cycles(self) -> None: # WebSocketHandlers call finish() early, but we don't want to # break up reference cycles (which makes it impossible to call # self.render_string) until after we've really closed the @@ -497,7 +586,7 @@ class WebSocketHandler(tornado.web.RequestHandler): if self.get_status() != 101 or self._on_close_called: super(WebSocketHandler, self)._break_cycles() - def send_error(self, *args, **kwargs): + def send_error(self, *args: Any, **kwargs: Any) -> None: if self.stream is None: super(WebSocketHandler, self).send_error(*args, **kwargs) else: @@ -507,36 +596,50 @@ class WebSocketHandler(tornado.web.RequestHandler): # we can close the connection more gracefully. 
self.stream.close() - def get_websocket_protocol(self): + def get_websocket_protocol(self) -> Optional["WebSocketProtocol"]: websocket_version = self.request.headers.get("Sec-WebSocket-Version") if websocket_version in ("7", "8", "13"): - return WebSocketProtocol13( - self, compression_options=self.get_compression_options()) + params = _WebSocketParams( + ping_interval=self.ping_interval, + ping_timeout=self.ping_timeout, + max_message_size=self.max_message_size, + compression_options=self.get_compression_options(), + ) + return WebSocketProtocol13(self, False, params) + return None - def _attach_stream(self): - self.stream = self.detach() - self.stream.set_close_callback(self.on_connection_close) + def _detach_stream(self) -> IOStream: # disable non-WS methods - for method in ["write", "redirect", "set_header", "set_cookie", - "set_status", "flush", "finish"]: + for method in [ + "write", + "redirect", + "set_header", + "set_cookie", + "set_status", + "flush", + "finish", + ]: setattr(self, method, _raise_not_supported_for_websockets) + return self.detach() -def _raise_not_supported_for_websockets(*args, **kwargs): +def _raise_not_supported_for_websockets(*args: Any, **kwargs: Any) -> None: raise RuntimeError("Method not supported for Web Sockets") -class WebSocketProtocol(object): +class WebSocketProtocol(abc.ABC): """Base class for WebSocket protocol versions. """ - def __init__(self, handler): + + def __init__(self, handler: "_WebSocketDelegate") -> None: self.handler = handler - self.request = handler.request - self.stream = handler.stream + self.stream = None # type: Optional[IOStream] self.client_terminated = False self.server_terminated = False - def _run_callback(self, callback, *args, **kwargs): + def _run_callback( + self, callback: Callable, *args: Any, **kwargs: Any + ) -> "Optional[Future[Any]]": """Runs the given callback with exception handling. If the callback is a coroutine, returns its Future. 
On error, aborts the @@ -547,80 +650,156 @@ class WebSocketProtocol(object): except Exception: self.handler.log_exception(*sys.exc_info()) self._abort() + return None else: if result is not None: result = gen.convert_yielded(result) + assert self.stream is not None self.stream.io_loop.add_future(result, lambda f: f.result()) return result - def on_connection_close(self): + def on_connection_close(self) -> None: self._abort() - def _abort(self): + def _abort(self) -> None: """Instantly aborts the WebSocket connection by closing the socket""" self.client_terminated = True self.server_terminated = True - self.stream.close() # forcibly tear down the connection + if self.stream is not None: + self.stream.close() # forcibly tear down the connection self.close() # let the subclass cleanup + @abc.abstractmethod + def close(self, code: int = None, reason: str = None) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def is_closing(self) -> bool: + raise NotImplementedError() + + @abc.abstractmethod + async def accept_connection(self, handler: WebSocketHandler) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def write_message( + self, message: Union[str, bytes], binary: bool = False + ) -> "Future[None]": + raise NotImplementedError() + + @property + @abc.abstractmethod + def selected_subprotocol(self) -> Optional[str]: + raise NotImplementedError() + + @abc.abstractmethod + def write_ping(self, data: bytes) -> None: + raise NotImplementedError() + + # The entry points below are used by WebSocketClientConnection, + # which was introduced after we only supported a single version of + # WebSocketProtocol. The WebSocketProtocol/WebSocketProtocol13 + # boundary is currently pretty ad-hoc. + @abc.abstractmethod + def _process_server_headers( + self, key: Union[str, bytes], headers: httputil.HTTPHeaders + ) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def start_pinging(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + async def _receive_frame_loop(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def set_nodelay(self, x: bool) -> None: + raise NotImplementedError() + class _PerMessageDeflateCompressor(object): - def __init__(self, persistent, max_wbits, compression_options=None): + def __init__( + self, + persistent: bool, + max_wbits: Optional[int], + compression_options: Dict[str, Any] = None, + ) -> None: if max_wbits is None: max_wbits = zlib.MAX_WBITS # There is no symbolic constant for the minimum wbits value. 
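        # (The permessage-deflate window parameters defined by RFC 7692
        # run from 8 to 15, i.e. 256-byte to 32 KiB windows, which is
        # what this range check enforces.)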
if not (8 <= max_wbits <= zlib.MAX_WBITS): - raise ValueError("Invalid max_wbits value %r; allowed range 8-%d", - max_wbits, zlib.MAX_WBITS) + raise ValueError( + "Invalid max_wbits value %r; allowed range 8-%d", + max_wbits, + zlib.MAX_WBITS, + ) self._max_wbits = max_wbits - if compression_options is None or 'compression_level' not in compression_options: + if ( + compression_options is None + or "compression_level" not in compression_options + ): self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL else: - self._compression_level = compression_options['compression_level'] + self._compression_level = compression_options["compression_level"] - if compression_options is None or 'mem_level' not in compression_options: + if compression_options is None or "mem_level" not in compression_options: self._mem_level = 8 else: - self._mem_level = compression_options['mem_level'] + self._mem_level = compression_options["mem_level"] if persistent: - self._compressor = self._create_compressor() + self._compressor = self._create_compressor() # type: Optional[_Compressor] else: self._compressor = None - def _create_compressor(self): - return zlib.compressobj(self._compression_level, - zlib.DEFLATED, -self._max_wbits, self._mem_level) + def _create_compressor(self) -> "_Compressor": + return zlib.compressobj( + self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level + ) - def compress(self, data): + def compress(self, data: bytes) -> bytes: compressor = self._compressor or self._create_compressor() - data = (compressor.compress(data) + - compressor.flush(zlib.Z_SYNC_FLUSH)) - assert data.endswith(b'\x00\x00\xff\xff') + data = compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH) + assert data.endswith(b"\x00\x00\xff\xff") return data[:-4] class _PerMessageDeflateDecompressor(object): - def __init__(self, persistent, max_wbits, max_message_size, compression_options=None): + def __init__( + self, + persistent: bool, + max_wbits: Optional[int], + max_message_size: int, + compression_options: Dict[str, Any] = None, + ) -> None: self._max_message_size = max_message_size if max_wbits is None: max_wbits = zlib.MAX_WBITS if not (8 <= max_wbits <= zlib.MAX_WBITS): - raise ValueError("Invalid max_wbits value %r; allowed range 8-%d", - max_wbits, zlib.MAX_WBITS) + raise ValueError( + "Invalid max_wbits value %r; allowed range 8-%d", + max_wbits, + zlib.MAX_WBITS, + ) self._max_wbits = max_wbits if persistent: - self._decompressor = self._create_decompressor() + self._decompressor = ( + self._create_decompressor() + ) # type: Optional[_Decompressor] else: self._decompressor = None - def _create_decompressor(self): + def _create_decompressor(self) -> "_Decompressor": return zlib.decompressobj(-self._max_wbits) - def decompress(self, data): + def decompress(self, data: bytes) -> bytes: decompressor = self._decompressor or self._create_decompressor() - result = decompressor.decompress(data + b'\x00\x00\xff\xff', self._max_message_size) + result = decompressor.decompress( + data + b"\x00\x00\xff\xff", self._max_message_size + ) if decompressor.unconsumed_tail: raise _DecompressTooLargeError() return result @@ -632,30 +811,38 @@ class WebSocketProtocol13(WebSocketProtocol): This class supports versions 7 and 8 of the protocol in addition to the final version 13. """ + # Bit masks for the first byte of a frame. 
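A quick sketch of how the bit masks defined just below carve up the first header byte of a frame (semantics per RFC 6455; the sample byte is made up for illustration):

```python
FIN, RSV1, OPCODE_MASK = 0x80, 0x40, 0x0F

first_byte = 0xC1                     # 1100 0001
is_final = bool(first_byte & FIN)     # True: last fragment of the message
compressed = bool(first_byte & RSV1)  # True: permessage-deflate applied
opcode = first_byte & OPCODE_MASK     # 0x1: text frame

assert (is_final, compressed, opcode) == (True, True, 0x1)
```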
FIN = 0x80 RSV1 = 0x40 RSV2 = 0x20 RSV3 = 0x10 RSV_MASK = RSV1 | RSV2 | RSV3 - OPCODE_MASK = 0x0f + OPCODE_MASK = 0x0F - def __init__(self, handler, mask_outgoing=False, - compression_options=None): + stream = None # type: IOStream + + def __init__( + self, + handler: "_WebSocketDelegate", + mask_outgoing: bool, + params: _WebSocketParams, + ) -> None: WebSocketProtocol.__init__(self, handler) self.mask_outgoing = mask_outgoing + self.params = params self._final_frame = False self._frame_opcode = None self._masked_frame = None - self._frame_mask = None + self._frame_mask = None # type: Optional[bytes] self._frame_length = None - self._fragmented_message_buffer = None + self._fragmented_message_buffer = None # type: Optional[bytes] self._fragmented_message_opcode = None - self._waiting = None - self._compression_options = compression_options - self._decompressor = None - self._compressor = None - self._frame_compressed = None + self._waiting = None # type: object + self._compression_options = params.compression_options + self._decompressor = None # type: Optional[_PerMessageDeflateDecompressor] + self._compressor = None # type: Optional[_PerMessageDeflateCompressor] + self._frame_compressed = None # type: Optional[bool] # The total uncompressed size of all messages received or sent. # Unicode messages are encoded to utf8. # Only for testing; subject to change. @@ -665,40 +852,53 @@ class WebSocketProtocol13(WebSocketProtocol): # the effect of compression, frame overhead, and control frames. self._wire_bytes_in = 0 self._wire_bytes_out = 0 - self.ping_callback = None - self.last_ping = 0 - self.last_pong = 0 + self.ping_callback = None # type: Optional[PeriodicCallback] + self.last_ping = 0.0 + self.last_pong = 0.0 + self.close_code = None # type: Optional[int] + self.close_reason = None # type: Optional[str] - def accept_connection(self): + # Use a property for this to satisfy the abc. + @property + def selected_subprotocol(self) -> Optional[str]: + return self._selected_subprotocol + + @selected_subprotocol.setter + def selected_subprotocol(self, value: Optional[str]) -> None: + self._selected_subprotocol = value + + async def accept_connection(self, handler: WebSocketHandler) -> None: try: - self._handle_websocket_headers() + self._handle_websocket_headers(handler) except ValueError: - self.handler.set_status(400) + handler.set_status(400) log_msg = "Missing/Invalid WebSocket headers" - self.handler.finish(log_msg) + handler.finish(log_msg) gen_log.debug(log_msg) return try: - self._accept_connection() + await self._accept_connection(handler) + except asyncio.CancelledError: + self._abort() + return except ValueError: - gen_log.debug("Malformed WebSocket request received", - exc_info=True) + gen_log.debug("Malformed WebSocket request received", exc_info=True) self._abort() return - def _handle_websocket_headers(self): + def _handle_websocket_headers(self, handler: WebSocketHandler) -> None: """Verifies all invariant- and required headers If a header is missing or have an incorrect value ValueError will be raised """ fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version") - if not all(map(lambda f: self.request.headers.get(f), fields)): + if not all(map(lambda f: handler.request.headers.get(f), fields)): raise ValueError("Missing/Invalid WebSocket headers") @staticmethod - def compute_accept_value(key): + def compute_accept_value(key: Union[str, bytes]) -> str: """Computes the value for the Sec-WebSocket-Accept header, given the value for Sec-WebSocket-Key. 
""" @@ -707,114 +907,143 @@ class WebSocketProtocol13(WebSocketProtocol): sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value return native_str(base64.b64encode(sha1.digest())) - def _challenge_response(self): + def _challenge_response(self, handler: WebSocketHandler) -> str: return WebSocketProtocol13.compute_accept_value( - self.request.headers.get("Sec-Websocket-Key")) + cast(str, handler.request.headers.get("Sec-Websocket-Key")) + ) - @gen.coroutine - def _accept_connection(self): - subprotocol_header = self.request.headers.get("Sec-WebSocket-Protocol") + async def _accept_connection(self, handler: WebSocketHandler) -> None: + subprotocol_header = handler.request.headers.get("Sec-WebSocket-Protocol") if subprotocol_header: - subprotocols = [s.strip() for s in subprotocol_header.split(',')] + subprotocols = [s.strip() for s in subprotocol_header.split(",")] else: subprotocols = [] - self.selected_subprotocol = self.handler.select_subprotocol(subprotocols) + self.selected_subprotocol = handler.select_subprotocol(subprotocols) if self.selected_subprotocol: assert self.selected_subprotocol in subprotocols - self.handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol) + handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol) - extensions = self._parse_extensions_header(self.request.headers) + extensions = self._parse_extensions_header(handler.request.headers) for ext in extensions: - if (ext[0] == 'permessage-deflate' and - self._compression_options is not None): + if ext[0] == "permessage-deflate" and self._compression_options is not None: # TODO: negotiate parameters if compression_options # specifies limits. - self._create_compressors('server', ext[1], self._compression_options) - if ('client_max_window_bits' in ext[1] and - ext[1]['client_max_window_bits'] is None): + self._create_compressors("server", ext[1], self._compression_options) + if ( + "client_max_window_bits" in ext[1] + and ext[1]["client_max_window_bits"] is None + ): # Don't echo an offered client_max_window_bits # parameter with no value. 
- del ext[1]['client_max_window_bits'] - self.handler.set_header("Sec-WebSocket-Extensions", - httputil._encode_header( - 'permessage-deflate', ext[1])) + del ext[1]["client_max_window_bits"] + handler.set_header( + "Sec-WebSocket-Extensions", + httputil._encode_header("permessage-deflate", ext[1]), + ) break - self.handler.clear_header("Content-Type") - self.handler.set_status(101) - self.handler.set_header("Upgrade", "websocket") - self.handler.set_header("Connection", "Upgrade") - self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response()) - self.handler.finish() + handler.clear_header("Content-Type") + handler.set_status(101) + handler.set_header("Upgrade", "websocket") + handler.set_header("Connection", "Upgrade") + handler.set_header("Sec-WebSocket-Accept", self._challenge_response(handler)) + handler.finish() - self.handler._attach_stream() - self.stream = self.handler.stream + self.stream = handler._detach_stream() self.start_pinging() - open_result = self._run_callback(self.handler.open, *self.handler.open_args, - **self.handler.open_kwargs) - if open_result is not None: - yield open_result - yield self._receive_frame_loop() + try: + open_result = handler.open(*handler.open_args, **handler.open_kwargs) + if open_result is not None: + await open_result + except Exception: + handler.log_exception(*sys.exc_info()) + self._abort() + return - def _parse_extensions_header(self, headers): - extensions = headers.get("Sec-WebSocket-Extensions", '') + await self._receive_frame_loop() + + def _parse_extensions_header( + self, headers: httputil.HTTPHeaders + ) -> List[Tuple[str, Dict[str, str]]]: + extensions = headers.get("Sec-WebSocket-Extensions", "") if extensions: - return [httputil._parse_header(e.strip()) - for e in extensions.split(',')] + return [httputil._parse_header(e.strip()) for e in extensions.split(",")] return [] - def _process_server_headers(self, key, headers): + def _process_server_headers( + self, key: Union[str, bytes], headers: httputil.HTTPHeaders + ) -> None: """Process the headers sent by the server to this client connection. 'key' is the websocket handshake challenge/response key. """ - assert headers['Upgrade'].lower() == 'websocket' - assert headers['Connection'].lower() == 'upgrade' + assert headers["Upgrade"].lower() == "websocket" + assert headers["Connection"].lower() == "upgrade" accept = self.compute_accept_value(key) - assert headers['Sec-Websocket-Accept'] == accept + assert headers["Sec-Websocket-Accept"] == accept extensions = self._parse_extensions_header(headers) for ext in extensions: - if (ext[0] == 'permessage-deflate' and - self._compression_options is not None): - self._create_compressors('client', ext[1]) + if ext[0] == "permessage-deflate" and self._compression_options is not None: + self._create_compressors("client", ext[1]) else: raise ValueError("unsupported extension %r", ext) - self.selected_subprotocol = headers.get('Sec-WebSocket-Protocol', None) + self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None) - def _get_compressor_options(self, side, agreed_parameters, compression_options=None): + def _get_compressor_options( + self, + side: str, + agreed_parameters: Dict[str, Any], + compression_options: Dict[str, Any] = None, + ) -> Dict[str, Any]: """Converts a websocket agreed_parameters set to keyword arguments for our compressor objects. 
""" options = dict( - persistent=(side + '_no_context_takeover') not in agreed_parameters) - wbits_header = agreed_parameters.get(side + '_max_window_bits', None) + persistent=(side + "_no_context_takeover") not in agreed_parameters + ) # type: Dict[str, Any] + wbits_header = agreed_parameters.get(side + "_max_window_bits", None) if wbits_header is None: - options['max_wbits'] = zlib.MAX_WBITS + options["max_wbits"] = zlib.MAX_WBITS else: - options['max_wbits'] = int(wbits_header) - options['compression_options'] = compression_options + options["max_wbits"] = int(wbits_header) + options["compression_options"] = compression_options return options - def _create_compressors(self, side, agreed_parameters, compression_options=None): + def _create_compressors( + self, + side: str, + agreed_parameters: Dict[str, Any], + compression_options: Dict[str, Any] = None, + ) -> None: # TODO: handle invalid parameters gracefully - allowed_keys = set(['server_no_context_takeover', - 'client_no_context_takeover', - 'server_max_window_bits', - 'client_max_window_bits']) + allowed_keys = set( + [ + "server_no_context_takeover", + "client_no_context_takeover", + "server_max_window_bits", + "client_max_window_bits", + ] + ) for key in agreed_parameters: if key not in allowed_keys: raise ValueError("unsupported compression parameter %r" % key) - other_side = 'client' if (side == 'server') else 'server' + other_side = "client" if (side == "server") else "server" self._compressor = _PerMessageDeflateCompressor( - **self._get_compressor_options(side, agreed_parameters, compression_options)) + **self._get_compressor_options(side, agreed_parameters, compression_options) + ) self._decompressor = _PerMessageDeflateDecompressor( - max_message_size=self.handler.max_message_size, - **self._get_compressor_options(other_side, agreed_parameters, compression_options)) + max_message_size=self.params.max_message_size, + **self._get_compressor_options( + other_side, agreed_parameters, compression_options + ) + ) - def _write_frame(self, fin, opcode, data, flags=0): + def _write_frame( + self, fin: bool, opcode: int, data: bytes, flags: int = 0 + ) -> "Future[None]": data_len = len(data) if opcode & 0x8: # All control frames MUST have a payload length of 125 @@ -845,7 +1074,9 @@ class WebSocketProtocol13(WebSocketProtocol): self._wire_bytes_out += len(frame) return self.stream.write(frame) - def write_message(self, message, binary=False): + def write_message( + self, message: Union[str, bytes], binary: bool = False + ) -> "Future[None]": """Sends the given message to the client of this Web Socket.""" if binary: opcode = 0x2 @@ -868,35 +1099,35 @@ class WebSocketProtocol13(WebSocketProtocol): except StreamClosedError: raise WebSocketClosedError() - @gen.coroutine - def wrapper(): + async def wrapper() -> None: try: - yield fut + await fut except StreamClosedError: raise WebSocketClosedError() - return wrapper() - def write_ping(self, data): + return asyncio.ensure_future(wrapper()) + + def write_ping(self, data: bytes) -> None: """Send ping frame.""" assert isinstance(data, bytes) self._write_frame(True, 0x9, data) - @gen.coroutine - def _receive_frame_loop(self): + async def _receive_frame_loop(self) -> None: try: while not self.client_terminated: - yield self._receive_frame() + await self._receive_frame() except StreamClosedError: self._abort() + self.handler.on_ws_connection_close(self.close_code, self.close_reason) - def _read_bytes(self, n): + async def _read_bytes(self, n: int) -> bytes: + data = await 
self.stream.read_bytes(n) self._wire_bytes_in += n - return self.stream.read_bytes(n) + return data - @gen.coroutine - def _receive_frame(self): + async def _receive_frame(self) -> None: # Read the frame header. - data = yield self._read_bytes(2) + data = await self._read_bytes(2) header, mask_payloadlen = struct.unpack("BB", data) is_final_frame = header & self.FIN reserved_bits = header & self.RSV_MASK @@ -913,7 +1144,7 @@ class WebSocketProtocol13(WebSocketProtocol): self._abort() return is_masked = bool(mask_payloadlen & 0x80) - payloadlen = mask_payloadlen & 0x7f + payloadlen = mask_payloadlen & 0x7F # Parse and validate the length. if opcode_is_control and payloadlen >= 126: @@ -923,24 +1154,25 @@ class WebSocketProtocol13(WebSocketProtocol): if payloadlen < 126: self._frame_length = payloadlen elif payloadlen == 126: - data = yield self._read_bytes(2) + data = await self._read_bytes(2) payloadlen = struct.unpack("!H", data)[0] elif payloadlen == 127: - data = yield self._read_bytes(8) + data = await self._read_bytes(8) payloadlen = struct.unpack("!Q", data)[0] new_len = payloadlen if self._fragmented_message_buffer is not None: new_len += len(self._fragmented_message_buffer) - if new_len > self.handler.max_message_size: + if new_len > self.params.max_message_size: self.close(1009, "message too big") self._abort() return # Read the payload, unmasking if necessary. if is_masked: - self._frame_mask = yield self._read_bytes(4) - data = yield self._read_bytes(payloadlen) + self._frame_mask = await self._read_bytes(4) + data = await self._read_bytes(payloadlen) if is_masked: + assert self._frame_mask is not None data = _websocket_mask(self._frame_mask, data) # Decide what to do with this frame. @@ -974,20 +1206,21 @@ class WebSocketProtocol13(WebSocketProtocol): if is_final_frame: handled_future = self._handle_message(opcode, data) if handled_future is not None: - yield handled_future + await handled_future - def _handle_message(self, opcode, data): + def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]": """Execute on_message, returning its Future if it is a coroutine.""" if self.client_terminated: - return + return None if self._frame_compressed: + assert self._decompressor is not None try: data = self._decompressor.decompress(data) except _DecompressTooLargeError: self.close(1009, "message too big after decompression") self._abort() - return + return None if opcode == 0x1: # UTF-8 data @@ -996,7 +1229,7 @@ class WebSocketProtocol13(WebSocketProtocol): decoded = data.decode("utf-8") except UnicodeDecodeError: self._abort() - return + return None return self._run_callback(self.handler.on_message, decoded) elif opcode == 0x2: # Binary data @@ -1006,11 +1239,11 @@ class WebSocketProtocol13(WebSocketProtocol): # Close self.client_terminated = True if len(data) >= 2: - self.handler.close_code = struct.unpack('>H', data[:2])[0] + self.close_code = struct.unpack(">H", data[:2])[0] if len(data) > 2: - self.handler.close_reason = to_unicode(data[2:]) + self.close_reason = to_unicode(data[2:]) # Echo the received close code, if any (RFC 6455 section 5.5.1). 
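_receive_frame() above decodes the RFC 6455 length scheme: a 7-bit length, or the escape values 126/127 followed by a 16- or 64-bit big-endian extension. The sending side of the same scheme, sketched independently of _write_frame() (encode_payload_length is an illustrative name, not part of the patch):

import struct

def encode_payload_length(n: int, mask_bit: int = 0) -> bytes:
    # RFC 6455 section 5.2: 7-bit length, or 126/127 plus an extended
    # 16- or 64-bit big-endian length; mask_bit is 0x80 for client frames.
    if n < 126:
        return struct.pack("!B", mask_bit | n)
    elif n < 0x10000:
        return struct.pack("!BH", mask_bit | 126, n)
    else:
        return struct.pack("!BQ", mask_bit | 127, n)

assert encode_payload_length(125) == b"\x7d"
assert encode_payload_length(300) == b"\x7e\x01\x2c"
assert encode_payload_length(70000) == b"\x7f" + (70000).to_bytes(8, "big")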
- self.close(self.handler.close_code) + self.close(self.close_code) elif opcode == 0x9: # Ping try: @@ -1024,17 +1257,18 @@ class WebSocketProtocol13(WebSocketProtocol): return self._run_callback(self.handler.on_pong, data) else: self._abort() + return None - def close(self, code=None, reason=None): + def close(self, code: int = None, reason: str = None) -> None: """Closes the WebSocket connection.""" if not self.server_terminated: if not self.stream.closed(): if code is None and reason is not None: code = 1000 # "normal closure" status code if code is None: - close_data = b'' + close_data = b"" else: - close_data = struct.pack('>H', code) + close_data = struct.pack(">H", code) if reason is not None: close_data += utf8(reason) try: @@ -1051,36 +1285,49 @@ class WebSocketProtocol13(WebSocketProtocol): # Give the client a few seconds to complete a clean shutdown, # otherwise just close the connection. self._waiting = self.stream.io_loop.add_timeout( - self.stream.io_loop.time() + 5, self._abort) + self.stream.io_loop.time() + 5, self._abort + ) + + def is_closing(self) -> bool: + """Return ``True`` if this connection is closing. + + The connection is considered closing if either side has + initiated its closing handshake or if the stream has been + shut down uncleanly. + """ + return self.stream.closed() or self.client_terminated or self.server_terminated @property - def ping_interval(self): - interval = self.handler.ping_interval + def ping_interval(self) -> Optional[float]: + interval = self.params.ping_interval if interval is not None: return interval return 0 @property - def ping_timeout(self): - timeout = self.handler.ping_timeout + def ping_timeout(self) -> Optional[float]: + timeout = self.params.ping_timeout if timeout is not None: return timeout + assert self.ping_interval is not None return max(3 * self.ping_interval, 30) - def start_pinging(self): + def start_pinging(self) -> None: """Start sending periodic pings to keep the connection alive""" + assert self.ping_interval is not None if self.ping_interval > 0: self.last_ping = self.last_pong = IOLoop.current().time() self.ping_callback = PeriodicCallback( - self.periodic_ping, self.ping_interval * 1000) + self.periodic_ping, self.ping_interval * 1000 + ) self.ping_callback.start() - def periodic_ping(self): + def periodic_ping(self) -> None: """Send a ping to keep the websocket alive Called periodically if the websocket_ping_interval is set and non-zero. """ - if self.stream.closed() and self.ping_callback is not None: + if self.is_closing() and self.ping_callback is not None: self.ping_callback.stop() return @@ -1090,14 +1337,21 @@ class WebSocketProtocol13(WebSocketProtocol): now = IOLoop.current().time() since_last_pong = now - self.last_pong since_last_ping = now - self.last_ping - if (since_last_ping < 2 * self.ping_interval and - since_last_pong > self.ping_timeout): + assert self.ping_interval is not None + assert self.ping_timeout is not None + if ( + since_last_ping < 2 * self.ping_interval + and since_last_pong > self.ping_timeout + ): self.close() return - self.write_ping(b'') + self.write_ping(b"") self.last_ping = now + def set_nodelay(self, x: bool) -> None: + self.stream.set_nodelay(x) + class WebSocketClientConnection(simple_httpclient._HTTPConnection): """WebSocket client connection. @@ -1105,46 +1359,68 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): This class should not be instantiated directly; use the `websocket_connect` function instead. 
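The close handling above (the opcode 0x8 branch and close()) uses a tiny payload format: an optional 2-byte big-endian status code followed by a UTF-8 reason. A self-contained sketch of both directions (helper names are assumptions):

import struct

def encode_close_payload(code=None, reason=None):
    # Mirrors close() above: optional 2-byte big-endian code, then UTF-8 reason.
    if code is None:
        return b""
    payload = struct.pack(">H", code)
    if reason is not None:
        payload += reason.encode("utf-8")
    return payload

def decode_close_payload(data):
    # Mirrors the opcode 0x8 branch of _handle_message() above.
    code = reason = None
    if len(data) >= 2:
        code = struct.unpack(">H", data[:2])[0]
    if len(data) > 2:
        reason = data[2:].decode("utf-8")
    return code, reason

assert encode_close_payload(1000, "bye") == b"\x03\xe8bye"
assert decode_close_payload(b"\x03\xe8bye") == (1000, "bye")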
""" - def __init__(self, request, on_message_callback=None, - compression_options=None, ping_interval=None, ping_timeout=None, - max_message_size=None, subprotocols=[]): - self.compression_options = compression_options - self.connect_future = Future() - self.protocol = None - self.read_queue = Queue(1) + + protocol = None # type: WebSocketProtocol + + def __init__( + self, + request: httpclient.HTTPRequest, + on_message_callback: Callable[[Union[None, str, bytes]], None] = None, + compression_options: Dict[str, Any] = None, + ping_interval: float = None, + ping_timeout: float = None, + max_message_size: int = _default_max_message_size, + subprotocols: Optional[List[str]] = [], + ) -> None: + self.connect_future = Future() # type: Future[WebSocketClientConnection] + self.read_queue = Queue(1) # type: Queue[Union[None, str, bytes]] self.key = base64.b64encode(os.urandom(16)) self._on_message_callback = on_message_callback - self.close_code = self.close_reason = None - self.ping_interval = ping_interval - self.ping_timeout = ping_timeout - self.max_message_size = max_message_size + self.close_code = None # type: Optional[int] + self.close_reason = None # type: Optional[str] + self.params = _WebSocketParams( + ping_interval=ping_interval, + ping_timeout=ping_timeout, + max_message_size=max_message_size, + compression_options=compression_options, + ) - scheme, sep, rest = request.url.partition(':') - scheme = {'ws': 'http', 'wss': 'https'}[scheme] + scheme, sep, rest = request.url.partition(":") + scheme = {"ws": "http", "wss": "https"}[scheme] request.url = scheme + sep + rest - request.headers.update({ - 'Upgrade': 'websocket', - 'Connection': 'Upgrade', - 'Sec-WebSocket-Key': self.key, - 'Sec-WebSocket-Version': '13', - }) + request.headers.update( + { + "Upgrade": "websocket", + "Connection": "Upgrade", + "Sec-WebSocket-Key": self.key, + "Sec-WebSocket-Version": "13", + } + ) if subprotocols is not None: - request.headers['Sec-WebSocket-Protocol'] = ','.join(subprotocols) - if self.compression_options is not None: + request.headers["Sec-WebSocket-Protocol"] = ",".join(subprotocols) + if compression_options is not None: # Always offer to let the server set our max_wbits (and even though # we don't offer it, we will accept a client_no_context_takeover # from the server). # TODO: set server parameters for deflate extension # if requested in self.compression_options. - request.headers['Sec-WebSocket-Extensions'] = ( - 'permessage-deflate; client_max_window_bits') + request.headers[ + "Sec-WebSocket-Extensions" + ] = "permessage-deflate; client_max_window_bits" self.tcp_client = TCPClient() super(WebSocketClientConnection, self).__init__( - None, request, lambda: None, self._on_http_response, - 104857600, self.tcp_client, 65536, 104857600) + None, + request, + lambda: None, + self._on_http_response, + 104857600, + self.tcp_client, + 65536, + 104857600, + ) - def close(self, code=None, reason=None): + def close(self, code: int = None, reason: str = None) -> None: """Closes the websocket connection. 
``code`` and ``reason`` are documented under @@ -1158,49 +1434,66 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): """ if self.protocol is not None: self.protocol.close(code, reason) - self.protocol = None + self.protocol = None # type: ignore - def on_connection_close(self): + def on_connection_close(self) -> None: if not self.connect_future.done(): self.connect_future.set_exception(StreamClosedError()) - self.on_message(None) + self._on_message(None) self.tcp_client.close() super(WebSocketClientConnection, self).on_connection_close() - def _on_http_response(self, response): + def on_ws_connection_close( + self, close_code: int = None, close_reason: str = None + ) -> None: + self.close_code = close_code + self.close_reason = close_reason + self.on_connection_close() + + def _on_http_response(self, response: httpclient.HTTPResponse) -> None: if not self.connect_future.done(): if response.error: self.connect_future.set_exception(response.error) else: - self.connect_future.set_exception(WebSocketError( - "Non-websocket response")) + self.connect_future.set_exception( + WebSocketError("Non-websocket response") + ) - def headers_received(self, start_line, headers): + async def headers_received( + self, + start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], + headers: httputil.HTTPHeaders, + ) -> None: + assert isinstance(start_line, httputil.ResponseStartLine) if start_line.code != 101: - return super(WebSocketClientConnection, self).headers_received( - start_line, headers) - - self.headers = headers - self.protocol = self.get_websocket_protocol() - self.protocol._process_server_headers(self.key, self.headers) - self.protocol.start_pinging() - IOLoop.current().add_callback(self.protocol._receive_frame_loop) + await super(WebSocketClientConnection, self).headers_received( + start_line, headers + ) + return if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None - self.stream = self.connection.detach() - self.stream.set_close_callback(self.on_connection_close) + self.headers = headers + self.protocol = self.get_websocket_protocol() + self.protocol._process_server_headers(self.key, self.headers) + self.protocol.stream = self.connection.detach() + + IOLoop.current().add_callback(self.protocol._receive_frame_loop) + self.protocol.start_pinging() + # Once we've taken over the connection, clear the final callback # we set on the http request. This deactivates the error handling # in simple_httpclient that would otherwise interfere with our # ability to see exceptions. - self.final_callback = None + self.final_callback = None # type: ignore future_set_result_unless_cancelled(self.connect_future, self) - def write_message(self, message, binary=False): + def write_message( + self, message: Union[str, bytes], binary: bool = False + ) -> "Future[None]": """Sends a message to the WebSocket server. If the stream is closed, raises `WebSocketClosedError`. @@ -1212,7 +1505,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): """ return self.protocol.write_message(message, binary=binary) - def read_message(self, callback=None): + def read_message( + self, callback: Callable[["Future[Union[None, str, bytes]]"], None] = None + ) -> Awaitable[Union[None, str, bytes]]: """Reads a message from the WebSocket server. If on_message_callback was specified at WebSocket @@ -1224,18 +1519,24 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): ready. 
""" - future = self.read_queue.get() + awaitable = self.read_queue.get() if callback is not None: - self.io_loop.add_future(future, callback) - return future + self.io_loop.add_future(asyncio.ensure_future(awaitable), callback) + return awaitable - def on_message(self, message): + def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]: + return self._on_message(message) + + def _on_message( + self, message: Union[None, str, bytes] + ) -> Optional[Awaitable[None]]: if self._on_message_callback: self._on_message_callback(message) + return None else: return self.read_queue.put(message) - def ping(self, data=b''): + def ping(self, data: bytes = b"") -> None: """Send ping frame to the remote end. The data argument allows a small amount of data (up to 125 @@ -1254,29 +1555,45 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): raise WebSocketClosedError() self.protocol.write_ping(data) - def on_pong(self, data): + def on_pong(self, data: bytes) -> None: pass - def on_ping(self, data): + def on_ping(self, data: bytes) -> None: pass - def get_websocket_protocol(self): - return WebSocketProtocol13(self, mask_outgoing=True, - compression_options=self.compression_options) + def get_websocket_protocol(self) -> WebSocketProtocol: + return WebSocketProtocol13(self, mask_outgoing=True, params=self.params) @property - def selected_subprotocol(self): + def selected_subprotocol(self) -> Optional[str]: """The subprotocol selected by the server. .. versionadded:: 5.1 """ return self.protocol.selected_subprotocol + def log_exception( + self, + typ: "Optional[Type[BaseException]]", + value: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + assert typ is not None + assert value is not None + app_log.error("Uncaught exception %s", value, exc_info=(typ, value, tb)) -def websocket_connect(url, callback=None, connect_timeout=None, - on_message_callback=None, compression_options=None, - ping_interval=None, ping_timeout=None, - max_message_size=_default_max_message_size, subprotocols=None): + +def websocket_connect( + url: Union[str, httpclient.HTTPRequest], + callback: Callable[["Future[WebSocketClientConnection]"], None] = None, + connect_timeout: float = None, + on_message_callback: Callable[[Union[None, str, bytes]], None] = None, + compression_options: Dict[str, Any] = None, + ping_interval: float = None, + ping_timeout: float = None, + max_message_size: int = _default_max_message_size, + subprotocols: List[str] = None, +) -> "Awaitable[WebSocketClientConnection]": """Client-side websocket support. 
Takes a url and returns a Future whose result is a @@ -1328,15 +1645,19 @@ def websocket_connect(url, callback=None, connect_timeout=None, request.headers = httputil.HTTPHeaders(request.headers) else: request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout) - request = httpclient._RequestProxy( - request, httpclient.HTTPRequest._DEFAULTS) - conn = WebSocketClientConnection(request, - on_message_callback=on_message_callback, - compression_options=compression_options, - ping_interval=ping_interval, - ping_timeout=ping_timeout, - max_message_size=max_message_size, - subprotocols=subprotocols) + request = cast( + httpclient.HTTPRequest, + httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS), + ) + conn = WebSocketClientConnection( + request, + on_message_callback=on_message_callback, + compression_options=compression_options, + ping_interval=ping_interval, + ping_timeout=ping_timeout, + max_message_size=max_message_size, + subprotocols=subprotocols, + ) if callback is not None: IOLoop.current().add_future(conn.connect_future, callback) return conn.connect_future diff --git a/server/www/packages/packages-linux/x64/tornado/wsgi.py b/server/www/packages/packages-linux/x64/tornado/wsgi.py index e1230da..77124aa 100644 --- a/server/www/packages/packages-linux/x64/tornado/wsgi.py +++ b/server/www/packages/packages-linux/x64/tornado/wsgi.py @@ -16,235 +16,41 @@ """WSGI support for the Tornado web framework. WSGI is the Python standard for web servers, and allows for interoperability -between Tornado and other Python web frameworks and servers. This module -provides WSGI support in two ways: +between Tornado and other Python web frameworks and servers. + +This module provides WSGI support via the `WSGIContainer` class, which +makes it possible to run applications using other WSGI frameworks on +the Tornado HTTP server. The reverse is not supported; the Tornado +`.Application` and `.RequestHandler` classes are designed for use with +the Tornado `.HTTPServer` and cannot be used in a generic WSGI +container. -* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application - interface. This is useful for running a Tornado app on another - HTTP server, such as Google App Engine. See the `WSGIAdapter` class - documentation for limitations that apply. -* `WSGIContainer` lets you run other WSGI applications and frameworks on the - Tornado HTTP server. For example, with this class you can mix Django - and Tornado handlers in a single server. """ -from __future__ import absolute_import, division, print_function - import sys from io import BytesIO import tornado -import warnings -from tornado.concurrent import Future from tornado import escape from tornado import httputil from tornado.log import access_log -from tornado import web -from tornado.escape import native_str -from tornado.util import unicode_type, PY3 +from typing import List, Tuple, Optional, Callable, Any, Dict, Text +from types import TracebackType +import typing + +if typing.TYPE_CHECKING: + from typing import Type # noqa: F401 + from wsgiref.types import WSGIApplication as WSGIAppType # noqa: F401 -if PY3: - import urllib.parse as urllib_parse # py3 -else: - import urllib as urllib_parse # PEP 3333 specifies that WSGI on python 3 generally deals with byte strings # that are smuggled inside objects of type unicode (via the latin1 encoding). -# These functions are like those in the tornado.escape module, but defined -# here to minimize the temptation to use them in non-wsgi contexts. 
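The PEP 3333 byte-smuggling convention referenced in the comment above is lossless because latin-1 maps the 256 byte values one-to-one onto code points; a quick illustration:

raw_path = "/café".encode("utf-8")             # b"/caf\xc3\xa9" on the wire
wsgi_path = raw_path.decode("latin1")          # what PEP 3333 puts in environ["PATH_INFO"]
assert wsgi_path.encode("latin1") == raw_path  # round-trips exactly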
-if str is unicode_type: - def to_wsgi_str(s): - assert isinstance(s, bytes) - return s.decode('latin1') - - def from_wsgi_str(s): - assert isinstance(s, str) - return s.encode('latin1') -else: - def to_wsgi_str(s): - assert isinstance(s, bytes) - return s - - def from_wsgi_str(s): - assert isinstance(s, str) - return s - - -class WSGIApplication(web.Application): - """A WSGI equivalent of `tornado.web.Application`. - - .. deprecated:: 4.0 - - Use a regular `.Application` and wrap it in `WSGIAdapter` instead. - This class will be removed in Tornado 6.0. - """ - def __call__(self, environ, start_response): - return WSGIAdapter(self)(environ, start_response) - - -# WSGI has no facilities for flow control, so just return an already-done -# Future when the interface requires it. -def _dummy_future(): - f = Future() - f.set_result(None) - return f - - -class _WSGIConnection(httputil.HTTPConnection): - def __init__(self, method, start_response, context): - self.method = method - self.start_response = start_response - self.context = context - self._write_buffer = [] - self._finished = False - self._expected_content_remaining = None - self._error = None - - def set_close_callback(self, callback): - # WSGI has no facility for detecting a closed connection mid-request, - # so we can simply ignore the callback. - pass - - def write_headers(self, start_line, headers, chunk=None, callback=None): - if self.method == 'HEAD': - self._expected_content_remaining = 0 - elif 'Content-Length' in headers: - self._expected_content_remaining = int(headers['Content-Length']) - else: - self._expected_content_remaining = None - self.start_response( - '%s %s' % (start_line.code, start_line.reason), - [(native_str(k), native_str(v)) for (k, v) in headers.get_all()]) - if chunk is not None: - self.write(chunk, callback) - elif callback is not None: - callback() - return _dummy_future() - - def write(self, chunk, callback=None): - if self._expected_content_remaining is not None: - self._expected_content_remaining -= len(chunk) - if self._expected_content_remaining < 0: - self._error = httputil.HTTPOutputError( - "Tried to write more data than Content-Length") - raise self._error - self._write_buffer.append(chunk) - if callback is not None: - callback() - return _dummy_future() - - def finish(self): - if (self._expected_content_remaining is not None and - self._expected_content_remaining != 0): - self._error = httputil.HTTPOutputError( - "Tried to write %d bytes less than Content-Length" % - self._expected_content_remaining) - raise self._error - self._finished = True - - -class _WSGIRequestContext(object): - def __init__(self, remote_ip, protocol): - self.remote_ip = remote_ip - self.protocol = protocol - - def __str__(self): - return self.remote_ip - - -class WSGIAdapter(object): - """Converts a `tornado.web.Application` instance into a WSGI application. - - Example usage:: - - import tornado.web - import tornado.wsgi - import wsgiref.simple_server - - class MainHandler(tornado.web.RequestHandler): - def get(self): - self.write("Hello, world") - - if __name__ == "__main__": - application = tornado.web.Application([ - (r"/", MainHandler), - ]) - wsgi_app = tornado.wsgi.WSGIAdapter(application) - server = wsgiref.simple_server.make_server('', 8888, wsgi_app) - server.serve_forever() - - See the `appengine demo - `_ - for an example of using this module to run a Tornado app on Google - App Engine. - - In WSGI mode asynchronous methods are not supported. 
This means - that it is not possible to use `.AsyncHTTPClient`, or the - `tornado.auth` or `tornado.websocket` modules. - - In multithreaded WSGI servers on Python 3, it may be necessary to - permit `asyncio` to create event loops on any thread. Run the - following at startup (typically import time for WSGI - applications):: - - import asyncio - from tornado.platform.asyncio import AnyThreadEventLoopPolicy - asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) - - .. versionadded:: 4.0 - - .. deprecated:: 5.1 - - This class is deprecated and will be removed in Tornado 6.0. - Use Tornado's `.HTTPServer` instead of a WSGI container. - """ - def __init__(self, application): - warnings.warn("WSGIAdapter is deprecated, use Tornado's HTTPServer instead", - DeprecationWarning) - if isinstance(application, WSGIApplication): - self.application = lambda request: web.Application.__call__( - application, request) - else: - self.application = application - - def __call__(self, environ, start_response): - method = environ["REQUEST_METHOD"] - uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", ""))) - uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", ""))) - if environ.get("QUERY_STRING"): - uri += "?" + environ["QUERY_STRING"] - headers = httputil.HTTPHeaders() - if environ.get("CONTENT_TYPE"): - headers["Content-Type"] = environ["CONTENT_TYPE"] - if environ.get("CONTENT_LENGTH"): - headers["Content-Length"] = environ["CONTENT_LENGTH"] - for key in environ: - if key.startswith("HTTP_"): - headers[key[5:].replace("_", "-")] = environ[key] - if headers.get("Content-Length"): - body = environ["wsgi.input"].read( - int(headers["Content-Length"])) - else: - body = b"" - protocol = environ["wsgi.url_scheme"] - remote_ip = environ.get("REMOTE_ADDR", "") - if environ.get("HTTP_HOST"): - host = environ["HTTP_HOST"] - else: - host = environ["SERVER_NAME"] - connection = _WSGIConnection(method, start_response, - _WSGIRequestContext(remote_ip, protocol)) - request = httputil.HTTPServerRequest( - method, uri, "HTTP/1.1", headers=headers, body=body, - host=host, connection=connection) - request._parse_body() - self.application(request) - if connection._error: - raise connection._error - if not connection._finished: - raise Exception("request did not finish synchronously") - return connection._write_buffer +# This function is like those in the tornado.escape module, but defined +# here to minimize the temptation to use it in non-wsgi contexts. +def to_wsgi_str(s: bytes) -> str: + assert isinstance(s, bytes) + return s.decode("latin1") class WSGIContainer(object): @@ -281,31 +87,44 @@ class WSGIContainer(object): Tornado and WSGI apps in the same server. See https://github.com/bdarnell/django-tornado-demo for a complete example. 
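A minimal usage sketch for the container described above; the port and the trivial WSGI app are placeholders:

import tornado.httpserver
import tornado.ioloop
import tornado.wsgi

def simple_app(environ, start_response):
    # The smallest possible WSGI application.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"Hello from a WSGI app on Tornado\n"]

server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(simple_app))
# server.listen(8888)
# tornado.ioloop.IOLoop.current().start()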
""" - def __init__(self, wsgi_application): + + def __init__(self, wsgi_application: "WSGIAppType") -> None: self.wsgi_application = wsgi_application - def __call__(self, request): - data = {} - response = [] + def __call__(self, request: httputil.HTTPServerRequest) -> None: + data = {} # type: Dict[str, Any] + response = [] # type: List[bytes] - def start_response(status, response_headers, exc_info=None): + def start_response( + status: str, + headers: List[Tuple[str, str]], + exc_info: Optional[ + Tuple[ + "Optional[Type[BaseException]]", + Optional[BaseException], + Optional[TracebackType], + ] + ] = None, + ) -> Callable[[bytes], Any]: data["status"] = status - data["headers"] = response_headers + data["headers"] = headers return response.append + app_response = self.wsgi_application( - WSGIContainer.environ(request), start_response) + WSGIContainer.environ(request), start_response + ) try: response.extend(app_response) body = b"".join(response) finally: if hasattr(app_response, "close"): - app_response.close() + app_response.close() # type: ignore if not data: raise Exception("WSGI app did not call start_response") - status_code, reason = data["status"].split(' ', 1) - status_code = int(status_code) - headers = data["headers"] + status_code_str, reason = data["status"].split(" ", 1) + status_code = int(status_code_str) + headers = data["headers"] # type: List[Tuple[str, str]] header_set = set(k.lower() for (k, v) in headers) body = escape.utf8(body) if status_code != 304: @@ -320,12 +139,13 @@ class WSGIContainer(object): header_obj = httputil.HTTPHeaders() for key, value in headers: header_obj.add(key, value) + assert request.connection is not None request.connection.write_headers(start_line, header_obj, chunk=body) request.connection.finish() self._log(status_code, request) @staticmethod - def environ(request): + def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]: """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment. 
""" hostport = request.host.split(":") @@ -338,8 +158,9 @@ class WSGIContainer(object): environ = { "REQUEST_METHOD": request.method, "SCRIPT_NAME": "", - "PATH_INFO": to_wsgi_str(escape.url_unescape( - request.path, encoding=None, plus=False)), + "PATH_INFO": to_wsgi_str( + escape.url_unescape(request.path, encoding=None, plus=False) + ), "QUERY_STRING": request.query, "REMOTE_ADDR": request.remote_ip, "SERVER_NAME": host, @@ -361,7 +182,7 @@ class WSGIContainer(object): environ["HTTP_" + key.replace("-", "_").upper()] = value return environ - def _log(self, status_code, request): + def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None: if status_code < 400: log_method = access_log.info elif status_code < 500: @@ -369,8 +190,9 @@ class WSGIContainer(object): else: log_method = access_log.error request_time = 1000.0 * request.request_time() - summary = request.method + " " + request.uri + " (" + \ - request.remote_ip + ")" + assert request.method is not None + assert request.uri is not None + summary = request.method + " " + request.uri + " (" + request.remote_ip + ")" log_method("%d %s %.2fms", status_code, summary, request_time) diff --git a/server/www/teleport/static/js/audit/record-list.js b/server/www/teleport/static/js/audit/record-list.js index c39e60a..1a82f64 100644 --- a/server/www/teleport/static/js/audit/record-list.js +++ b/server/www/teleport/static/js/audit/record-list.js @@ -157,7 +157,7 @@ $app.on_table_host_cell_created = function (tbl, row_id, col_key, cell_obj) { cell_obj.find('[data-action]').click(function () { var row_data = tbl.get_row(row_id); - console.log('---', row_data); + // console.log('---', row_data); var action = $(this).attr('data-action'); if (action === 'replay') { @@ -363,83 +363,26 @@ $app.on_table_host_render_created = function (render) { }; $app.do_replay_rdp = function (record_id, user_username, acc_username, host_ip, time_begin) { + if(!$app.options.core_running) { + $tp.notify_error(tp_error_msg(TPE_NO_CORE_SERVER), '无法播放。'); + return; + } + + if(!$assist.check()) + return; + $assist.do_rdp_replay( - { - rid: record_id - // , web: $tp.web_server // + '/audit/get_rdp_record/' + record_id // 'http://' + ip + ':' + port + '/log/replay/rdp/' + record_id; - // , sid: Cookies.get('_sid') - , user: user_username - , acc: acc_username - , host: host_ip - , start: time_begin//tp_format_datetime(time_begin, 'yyyyMMdd-HHmmss') - } + record_id , function () { // func_success } , function (code, message) { - if (code === TPE_NO_ASSIST) + if (code === TPE_NO_ASSIST) { + $assist.errcode = TPE_NO_ASSIST; $assist.alert_assist_not_found(); + } else $tp.notify_error('播放RDP操作录像失败:' + tp_error_msg(code, message)); } ); }; - - -// $app.on_table_host_header_created = function (header) { -// $('#' + header._table_ctrl.dom_id + ' a[data-reset-filter]').click(function () { -// CALLBACK_STACK.create() -// .add(header._table_ctrl.load_data) -// .add(header._table_ctrl.reset_filters) -// .exec(); -// }); -// -// // 表格内嵌过滤器的事件绑定在这时进行(也可以延期到整个表格创建完成时进行) -// header._table_ctrl.get_filter_ctrl('search').on_created(); -// }; - -// $app.get_selected_record = function (tbl) { -// var records = []; -// var _objs = $('#' + $app.table_record.dom_id + ' tbody tr td input[data-check-box]'); -// $.each(_objs, function (i, _obj) { -// if ($(_obj).is(':checked')) { -// var _row_data = tbl.get_row(_obj); -// records.push(_row_data.id); -// } -// }); -// return records; -// }; - -// $app.on_btn_remove_record_click = function () { -// var records = 
$app.get_selected_record($app.table_record); -// if (records.length === 0) { -// $tp.notify_error('请选择要删除的会话记录!'); -// return; -// } -// -// var _fn_sure = function (cb_stack, cb_args) { -// $tp.ajax_post_json('/user/remove-user', {users: users}, -// function (ret) { -// if (ret.code === TPE_OK) { -// cb_stack.add($app.check_host_all_selected); -// cb_stack.add($app.table_record.load_data); -// $tp.notify_success('删除用户账号操作成功!'); -// } else { -// $tp.notify_error('删除用户账号操作失败:' + tp_error_msg(ret.code, ret.message)); -// } -// -// cb_stack.exec(); -// }, -// function () { -// $tp.notify_error('网络故障,删除用户账号操作失败!'); -// cb_stack.exec(); -// } -// ); -// }; -// -// var cb_stack = CALLBACK_STACK.create(); -// $tp.dlg_confirm(cb_stack, { -// msg: '

-// 注意:删除操作不可恢复!!
-// 删除用户账号将同时将其从所在用户组中移除,并且删除所有分配给此用户的授权!
-// 如果您希望禁止某个用户登录本系统,可对其进行“禁用”操作!
-// 您确定要移除所有选定的 ' + user_list.length + '个 用户账号吗?
', -// fn_yes: _fn_sure -// }); -// }; diff --git a/server/www/teleport/static/js/audit/replay-ssh.js b/server/www/teleport/static/js/audit/replay-ssh.js index 0e82d68..a7f5104 100644 --- a/server/www/teleport/static/js/audit/replay-ssh.js +++ b/server/www/teleport/static/js/audit/replay-ssh.js @@ -56,7 +56,7 @@ $app.on_init = function (cb_stack) { $app.record_hdr.height = 24; console.log('header', $app.record_hdr); - $('#recorder-info').html(tp_format_datetime($app.record_hdr.start) + ': ' + $app.record_hdr.user_name + '@' + $app.record_hdr.client_ip + ' 访问 ' + $app.record_hdr.account + '@' + $app.record_hdr.conn_ip + ':' + $app.record_hdr.conn_port); + $('#recorder-info').html(tp_format_datetime($app.record_hdr.start) + ',用户' + $app.record_hdr.user_name + '(来自' + $app.record_hdr.client_ip + ') 访问远程主机 ' + $app.record_hdr.account + '@' + $app.record_hdr.conn_ip + ':' + $app.record_hdr.conn_port); $app.req_record_data(record_id, 0); @@ -163,12 +163,13 @@ $app.on_init = function (cb_stack) { $app.req_record_data = function (record_id, offset) { $tp.ajax_post_json('/audit/get-record-data', {protocol: TP_PROTOCOL_TYPE_SSH, id: record_id, offset: offset}, function (ret) { - if (ret.code === TPE_OK) { - // console.log('data', ret.data); + if (ret.code === TPE_OK || ret.code === TPE_NO_MORE_DATA) { + console.log('data', ret.data); $app.record_data = $app.record_data.concat(ret.data.data_list); $app.record_data_offset += ret.data.data_size; - if ($app.record_data.length < $app.record_hdr.pkg_count) { + //if ($app.record_data.length < $app.record_hdr.pkg_count) { + if(ret.code === TPE_OK) { $app.req_record_data(record_id, $app.record_data_offset); } } else { @@ -252,7 +253,8 @@ $app.do_play = function() { $app.player_console_term.write(tp_base64_decode(play_data.d)); } - if (($app.played_pkg_count + 1) === $app.record_hdr.pkg_count) { + //if (($app.played_pkg_count + 1) === $app.record_hdr.pkg_count) { + if (($app.played_pkg_count + 1) === $app.record_data.length) { $app.dom.progress.val(100); $app.dom.status.text('播放完成'); $app.dom.time.text(parseInt($app.record_hdr.time_used / 1000) + '秒'); @@ -287,7 +289,8 @@ $app.do_play = function() { $app.dom.time.text(temp + '/' + parseInt($app.record_hdr.time_used / 1000) + '秒'); // if all packages played - if ($app.played_pkg_count >= $app.record_hdr.pkg_count) { + // if ($app.played_pkg_count >= $app.record_hdr.pkg_count) { + if ($app.played_pkg_count >= $app.record_data.length) { $app.dom.progress.val(100); $app.dom.status.text('播放完成'); $app.dom.time.text(parseInt($app.record_hdr.time_used / 1000) + '秒'); diff --git a/server/www/teleport/static/js/tp-assist.js b/server/www/teleport/static/js/tp-assist.js index 65fb055..2569ec2 100644 --- a/server/www/teleport/static/js/tp-assist.js +++ b/server/www/teleport/static/js/tp-assist.js @@ -66,16 +66,29 @@ $assist.init = function (cb_stack) { cb_stack.exec(); }; +$assist.check = function() { + if (!$assist.running) { + $assist.errcode = TPE_NO_ASSIST; + $assist.alert_assist_not_found(); + return false; + } else if (!$assist._version_compare()) { + $assist.errcode = TPE_OLD_ASSIST; + $assist.alert_assist_not_found(); + return false; + } + return true; +}; + + $assist.alert_assist_not_found = function () { - console.log($assist.errcode); if($assist.errcode === TPE_NO_ASSIST) { $assist.dom.msg_box_title.html('未检测到TELEPORT助手'); $assist.dom.msg_box_info.html('需要TELEPORT助手来辅助远程连接,请确认本机运行了TELEPORT助手!'); - $assist.dom.msg_box_desc.html('如果您尚未运行TELEPORT助手,请 下载最新版TELEPORT助手安装包 并安装。一旦运行了TELEPORT助手,即可重新进行远程连接。'); + 
$assist.dom.msg_box_desc.html('如果您尚未运行TELEPORT助手,请 下载最新版TELEPORT助手安装包 并安装。一旦运行了TELEPORT助手,即可刷新页面,重新进行远程连接。'); } else if($assist.errcode === TPE_OLD_ASSIST) { $assist.dom.msg_box_title.html('TELEPORT助手需要升级'); $assist.dom.msg_box_info.html('检测到TELEPORT助手版本 v'+ $assist.version +',但需要最低版本 v'+ $assist.ver_require+'。'); - $assist.dom.msg_box_desc.html('请 下载最新版TELEPORT助手安装包 并安装。一旦升级了TELEPORT助手,即可重新进行远程连接。'); + $assist.dom.msg_box_desc.html('请 下载最新版TELEPORT助手安装包 并安装。一旦升级了TELEPORT助手,即可刷新页面,重新进行远程连接。'); } $('#dialog-need-assist').modal(); @@ -134,13 +147,8 @@ $assist._make_message_box = function () { }; $assist.do_teleport = function (args, func_success, func_error) { - if(!$assist.running) { - $assist.errcode = TPE_NO_ASSIST; - func_error(TPE_NO_ASSIST, ''); - return; - } else if(!$assist._version_compare()) { - $assist.errcode = TPE_OLD_ASSIST; - func_error(TPE_NO_ASSIST, ''); + if(!$app.options.url_proto){ + if(!$assist.check()) return; } @@ -226,17 +234,11 @@ $assist.do_teleport = function (args, func_success, func_error) { }); }; -$assist.do_rdp_replay = function (args, func_success, func_error) { - // ================================================== - // args is dict with fields shown below: - // rid: (int) - record-id in database. - // user: (string) - who did the RDP connection. - // acc: (string) - account to login to remote RDP server. - // host: (string) - IP of the remote RDP server. - // start: (string) - when start the RDP connection, should be a UTC timestamp. - // ================================================== +$assist.do_rdp_replay = function (rid, func_success, func_error) { + // rid: (int) - record-id in database. - // now fix the args. + // now make the args. + var args = {rid: rid}; args.web = $tp.web_server; // (string) - teleport server base address, like "http://127.0.0.1:7190", without end-slash. args.sid = Cookies.get('_sid'); // (string) - current login user's session-id. args.start = tp_format_datetime(args.start, 'yyyyMMdd-HHmmss'); // (string) - convert UTC timestamp to local human-readable string. 
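The replay changes above switch the SSH-record reader from counting packages to polling until the server signals TPE_NO_MORE_DATA (added to tp-const.js just below). A schematic of that paging contract in Python; fetch_chunk is a hypothetical stand-in for the POST to /audit/get-record-data, and TPE_OK's value is assumed from the rest of tp-const.js:

TPE_OK = 0             # assumed; defined outside this diff
TPE_NO_MORE_DATA = 10  # final chunk: still carries data, but stop polling

def read_all_record_data(fetch_chunk):
    # fetch_chunk(offset) -> (code, data_list, data_size); not a real client API.
    data, offset = [], 0
    while True:
        code, chunk, size = fetch_chunk(offset)
        if code not in (TPE_OK, TPE_NO_MORE_DATA):
            raise RuntimeError("fetch failed with error %d" % code)
        data.extend(chunk)
        offset += size
        if code == TPE_NO_MORE_DATA:
            return data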
@@ -264,34 +266,3 @@
         }
     });
 };
-
-/*
-
-var version_compare = function () {
-    var cur_version = parseInt(g_current_version.split(".")[2]);
-    var req_version = parseInt(g_req_version.split(".")[2]);
-    return cur_version >= req_version;
-};
-
-var start_rdp_replay = function (args, func_success, func_error) {
-    var args_ = encodeURIComponent(JSON.stringify(args));
-    $.ajax({
-        type: 'GET',
-        timeout: 6000,
-        url: $assist.api_url + '/rdp_play/' + args_,
-        jsonp: 'callback',
-        dataType: 'json',
-        success: function (ret) {
-            if (ret.code === TPE_OK) {
-                error_process(ret, func_success, func_error);
-            } else {
-                func_error(ret.code, '查看录像失败!');
-            }
-            console.log('ret', ret);
-        },
-        error: function () {
-            func_error(TPE_NETWORK, '与助手的网络通讯失败!');
-        }
-    });
-};
-*/
diff --git a/server/www/teleport/static/js/tp-const.js b/server/www/teleport/static/js/tp-const.js
index d0e90c0..6bf9e10 100755
--- a/server/www/teleport/static/js/tp-const.js
+++ b/server/www/teleport/static/js/tp-const.js
@@ -195,6 +195,8 @@ var TPE_PRIVILEGE = 3;
 var TPE_NOT_IMPLEMENT = 7; // 尚未实现
 var TPE_EXISTS = 8;
 var TPE_NOT_EXISTS = 9;
+var TPE_NO_MORE_DATA = 10; // 没有更多的数据了(不一定是错误)
+var TPE_INCOMPATIBLE_VERSION = 11; // 版本不兼容
 
 // 100~299是通用错误值
diff --git a/server/www/teleport/static/js/user/user-list.js b/server/www/teleport/static/js/user/user-list.js
index cd8d6ad..2208c9e 100755
--- a/server/www/teleport/static/js/user/user-list.js
+++ b/server/www/teleport/static/js/user/user-list.js
@@ -744,6 +744,8 @@ $app.create_dlg_edit_user = function () {
     dlg.field_mobile = '';
     dlg.field_qq = '';
     dlg.field_wechat = '';
+    dlg.field_valid_from = '';
+    dlg.field_valid_to = '';
     dlg.field_desc = '';
 
     dlg.dom = {
@@ -756,6 +758,8 @@
         , edit_mobile: $('#edit-user-mobile')
         , edit_qq: $('#edit-user-qq')
         , edit_wechat: $('#edit-user-wechat')
+        , edit_valid_from: $('#edit-user-valid-from')
+        , edit_valid_to: $('#edit-user-valid-to')
         , edit_desc: $('#edit-user-desc')
         , msg: $('#edit-user-message')
         , btn_save: $('#btn-edit-user-save')
@@ -778,6 +782,8 @@
         _ret.push('
' + role.name + '
  • '); }); _ret.push(''); + dlg.dom.edit_valid_from.datetimepicker({format: "yyyy-mm-dd h:ii", autoclose: 1, todayHighlight: 1}); + dlg.dom.edit_valid_to.datetimepicker({format: "yyyy-mm-dd h:ii", autoclose: 1, todayHighlight: 1}); dlg.dom.select_role.after($(_ret.join(''))); dlg.dom.selected_role = $('#' + dlg.dom_id + ' span[data-selected-role]'); @@ -869,7 +875,7 @@ $app.create_dlg_edit_user = function () { var role_name = '选择角色'; dlg.field_role = -1; dlg.field_auth_type = 0; - + // dlg.dom.btn_auth_use_sys_config.removeClass('tp-selected'); // dlg.dom.btn_auth_username_password.removeClass('tp-selected'); // dlg.dom.btn_auth_username_password_captcha.removeClass('tp-selected'); @@ -887,6 +893,8 @@ $app.create_dlg_edit_user = function () { dlg.dom.edit_qq.val(''); dlg.dom.edit_wechat.val(''); dlg.dom.edit_desc.val(''); + dlg.dom.edit_valid_from.find('input').val(''); + dlg.dom.edit_valid_to.find('input').val(''); } else { dlg.field_id = user.id; dlg.field_auth_type = user.auth_type; @@ -905,6 +913,16 @@ $app.create_dlg_edit_user = function () { dlg.dom.edit_qq.val(user.qq); dlg.dom.edit_wechat.val(user.wechat); dlg.dom.edit_desc.val(user.desc); + if (user.valid_from == 0 ) { + dlg.dom.edit_valid_from.find('input').val(''); + }else{ + dlg.dom.edit_valid_from.find('input').val(tp_format_datetime(tp_utc2local(user.valid_from), 'yyyy-MM-dd HH:mm')); + } + if (user.valid_to == 0 ) { + dlg.dom.edit_valid_to.find('input').val(''); + }else{ + dlg.dom.edit_valid_to.find('input').val(tp_format_datetime(tp_utc2local(user.valid_to), 'yyyy-MM-dd HH:mm')); + } } dlg.dom.selected_role.text(role_name); @@ -943,6 +961,8 @@ $app.create_dlg_edit_user = function () { dlg.field_mobile = dlg.dom.edit_mobile.val(); dlg.field_qq = dlg.dom.edit_qq.val(); dlg.field_wechat = dlg.dom.edit_wechat.val(); + dlg.field_valid_from = dlg.dom.edit_valid_from.find('input').val(); + dlg.field_valid_to = dlg.dom.edit_valid_to.find('input').val(); dlg.field_desc = dlg.dom.edit_desc.val(); if (dlg.field_role === -1) { @@ -1002,6 +1022,8 @@ $app.create_dlg_edit_user = function () { , mobile: dlg.field_mobile , qq: dlg.field_qq , wechat: dlg.field_wechat + , valid_from: dlg.field_valid_from + , valid_to: dlg.field_valid_to , desc: dlg.field_desc }, function (ret) { diff --git a/server/www/teleport/view/user/user-list.mako b/server/www/teleport/view/user/user-list.mako index 8a5a02b..40ab30f 100644 --- a/server/www/teleport/view/user/user-list.mako +++ b/server/www/teleport/view/user/user-list.mako @@ -8,6 +8,7 @@ <%block name="extend_js_file"> + <%block name="embed_js"> @@ -230,6 +231,25 @@ + +
+ <!-- valid-from / valid-to date-picker form groups (#edit-user-valid-from, #edit-user-valid-to); markup lost in extraction -->
    diff --git a/server/www/teleport/webroot/app/app_ver.py b/server/www/teleport/webroot/app/app_ver.py index 1c328c3..ada39de 100644 --- a/server/www/teleport/webroot/app/app_ver.py +++ b/server/www/teleport/webroot/app/app_ver.py @@ -1,3 +1,3 @@ # -*- coding: utf8 -*- -TP_SERVER_VER = "3.3.1" -TP_ASSIST_REQUIRE_VER = "3.3.1" +TP_SERVER_VER = "3.5.1" +TP_ASSIST_REQUIRE_VER = "3.5.1" diff --git a/server/www/teleport/webroot/app/const.py b/server/www/teleport/webroot/app/const.py index 492d26f..3c343a7 100755 --- a/server/www/teleport/webroot/app/const.py +++ b/server/www/teleport/webroot/app/const.py @@ -183,6 +183,8 @@ TPE_PRIVILEGE = 3 TPE_NOT_IMPLEMENT = 7 TPE_EXISTS = 8 TPE_NOT_EXISTS = 9 +TPE_NO_MORE_DATA = 10 +TPE_INCOMPATIBLE_VERSION = 11 TPE_FAILED = 100 TPE_NETWORK = 101 diff --git a/server/www/teleport/webroot/app/controller/audit.py b/server/www/teleport/webroot/app/controller/audit.py index b49466a..529b03a 100644 --- a/server/www/teleport/webroot/app/controller/audit.py +++ b/server/www/teleport/webroot/app/controller/audit.py @@ -411,12 +411,15 @@ class RecordHandler(TPBaseHandler): return if not tp_cfg().core.detected: + core_running = False total_size = 0 free_size = 0 else: + core_running = True total_size, free_size = get_free_space_bytes(tp_cfg().core.replay_path) param = { + 'core_running': core_running, 'total_size': total_size, 'free_size': free_size, } @@ -659,25 +662,12 @@ class DoGetFileHandler(TPBaseHandler): require_privilege = TP_PRIVILEGE_OPS_AUZ | TP_PRIVILEGE_AUDIT_AUZ | TP_PRIVILEGE_AUDIT - # sid = self.get_argument('sid', None) - # if sid is None: - # self.set_status(403) - # return self.write('need login first.') - # - # self._s_id = sid - # _user = self.get_session('user') - # if _user is None: - # self.set_status(403) - # return self.write('need login first.') - # self._user = _user - - # when test, disable auth. 
- # if not self._user['_is_login']: - # self.set_status(401) # 401=未授权, 要求身份验证 - # return self.write('need login first.') - # if (self._user['privilege'] & require_privilege) == 0: - # self.set_status(403) # 403=禁止 - # return self.write('you have no such privilege.') + if not self._user['_is_login']: + self.set_status(401) # 401=未授权, 要求身份验证 + return self.write('need login first.') + if (self._user['privilege'] & require_privilege) == 0: + self.set_status(403) # 403=禁止 + return self.write('you have no such privilege.') act = self.get_argument('act', None) _type = self.get_argument('type', None) diff --git a/server/www/teleport/webroot/app/controller/user.py b/server/www/teleport/webroot/app/controller/user.py index 6200873..d232529 100755 --- a/server/www/teleport/webroot/app/controller/user.py +++ b/server/www/teleport/webroot/app/controller/user.py @@ -11,7 +11,7 @@ from app.base.configs import tp_cfg from app.base.controller import TPBaseHandler, TPBaseJsonHandler from app.base.logger import * from app.base.session import tp_session -from app.base.utils import tp_check_strong_password, tp_gen_password +from app.base.utils import tp_check_strong_password, tp_gen_password, tp_timestamp_from_str from app.logic.auth.oath import tp_oath_verify_code from app.const import * from app.logic.auth.oath import tp_oath_generate_secret, tp_oath_generate_qrcode @@ -588,13 +588,22 @@ class DoUpdateUserHandler(TPBaseJsonHandler): args['mobile'] = args['mobile'].strip() args['qq'] = args['qq'].strip() args['wechat'] = args['wechat'].strip() + + if args['valid_from'] == '': + args['valid_from'] = 0 + else: + args['valid_from'] = tp_timestamp_from_str(args['valid_from'].strip(), '%Y-%m-%d %H:%M') + if args['valid_to'] == '': + args['valid_to'] = 0 + else: + args['valid_to'] = tp_timestamp_from_str(args['valid_to'].strip(), '%Y-%m-%d %H:%M') args['desc'] = args['desc'].strip() except: return self.write_json(TPE_PARAM) if len(args['username']) == 0: return self.write_json(TPE_PARAM) - + if args['id'] == -1: args['password'] = tp_gen_password(8) err, _ = user.create_user(self, args) diff --git a/server/www/teleport/webroot/app/model/account.py b/server/www/teleport/webroot/app/model/account.py index 9e2240b..0004953 100644 --- a/server/www/teleport/webroot/app/model/account.py +++ b/server/www/teleport/webroot/app/model/account.py @@ -166,6 +166,7 @@ def get_accounts(sql_filter, sql_order, sql_limit, sql_restrict, sql_exclude): s = SQL(db) # s.select_from('acc', ['id', 'host_id', 'host_ip', 'router_ip', 'router_port', 'username', 'protocol_type', 'auth_type', 'state'], alt_name='a') s.select_from('acc', ['id', 'host_id', 'username', 'protocol_type', 'auth_type', 'state', 'username_prompt', 'password_prompt'], alt_name='a') + s.left_join('host', ['name', 'desc'], join_on='h.id=a.host_id', alt_name='h', out_map={'name': 'host_name'}) str_where = '' _where = list() @@ -189,7 +190,7 @@ def get_accounts(sql_filter, sql_order, sql_limit, sql_restrict, sql_exclude): if len(sql_filter) > 0: for k in sql_filter: if k == 'search': - _where.append('(a.username LIKE "%{filter}%" OR a.host_ip LIKE "%{filter}%" OR a.router_ip LIKE "%{filter}%")'.format(filter=sql_filter[k])) + _where.append('(a.username LIKE "%{filter}%" OR a.host_ip LIKE "%{filter}%" OR a.router_ip LIKE "%{filter}%" OR h.name LIKE "%{filter}%" OR h.desc LIKE "%{filter}%")'.format(filter=sql_filter[k])) # _where.append('(a.username LIKE "%{filter}%")'.format(filter=sql_filter[k])) if len(_where) > 0: diff --git 
a/server/www/teleport/webroot/app/model/record.py b/server/www/teleport/webroot/app/model/record.py index 87a6fa0..29d4b07 100644 --- a/server/www/teleport/webroot/app/model/record.py +++ b/server/www/teleport/webroot/app/model/record.py @@ -149,30 +149,23 @@ def read_record_head(protocol_type, record_id): data = file.read() offset = 0 - magic, = struct.unpack_from('I', data, offset) # magic must be 1381126228, 'TPPR' - offset += 4 - ver, = struct.unpack_from('H', data, offset) - offset += 2 - pkg_count, = struct.unpack_from('I', data, offset) - offset += 4 - time_used, = struct.unpack_from('I', data, offset) - offset += 4 + # 读取 `TPPR` 标记(1380995156) 和录像文件版本、录像类型 + magic, ver, = struct.unpack_from('=IH', data, offset) + offset += 6 + if magic != 1380995156: + return None, TPE_DATA + if ver != 4: # 从v3.5.0开始录像文件版本为版本4 + return None, TPE_INCOMPATIBLE_VERSION - protocol_type, = struct.unpack_from('H', data, offset) - offset += 2 - protocol_sub_type, = struct.unpack_from('H', data, offset) - offset += 2 - time_start, = struct.unpack_from('Q', data, offset) - offset += 8 - width, = struct.unpack_from('H', data, offset) - offset += 2 - height, = struct.unpack_from('H', data, offset) - offset += 2 + rec_type, time_used, dat_file_count, = struct.unpack_from('=HII', data, offset) + offset += 10 - # file_count, = struct.unpack_from('H', data, offset) - # offset += 2 - # total_size, = struct.unpack_from('I', data, offset) - # offset += 4 + # TS_RECORD_HEADER_INFO 共计64字节,前面有用的数据读取后,跳过后面补齐用的字节,从第64字节 + # 开始解析 TS_RECORD_HEADER_BASIC + offset = 64 + + protocol_type, protocol_sub_type, time_start, width, height = struct.unpack_from('=HHQHH', data, offset) + offset += 16 user_name, = struct.unpack_from('64s', data, offset) user_name = _remove_padding_space(user_name).decode() @@ -202,7 +195,7 @@ def read_record_head(protocol_type, record_id): header = dict() header['start'] = time_start - header['pkg_count'] = pkg_count + # header['pkg_count'] = pkg_count header['time_used'] = time_used header['width'] = width header['height'] = height @@ -212,6 +205,7 @@ def read_record_head(protocol_type, record_id): header['conn_ip'] = conn_ip header['conn_port'] = conn_port header['client_ip'] = client_ip + log.d('header:', header, '\n') return header, TPE_OK @@ -305,10 +299,11 @@ def read_ssh_record_data(record_id, offset): data_list = list() data_size = 0 file = None + err = TPE_OK try: file_size = os.path.getsize(file_data) if offset >= file_size: - return None, 0, TPE_FAILED + return None, 0, TPE_NO_MORE_DATA file = open(file_data, 'rb') if offset > 0: @@ -356,6 +351,7 @@ def read_ssh_record_data(record_id, offset): data_list.append(temp) if offset + data_size == file_size: + err = TPE_NO_MORE_DATA break except Exception: @@ -365,7 +361,7 @@ def read_ssh_record_data(record_id, offset): if file is not None: file.close() - return data_list, data_size, TPE_OK + return data_list, data_size, err def read_telnet_record_data(record_id, offset): diff --git a/server/www/teleport/webroot/app/model/user.py b/server/www/teleport/webroot/app/model/user.py index 3581c85..726d366 100755 --- a/server/www/teleport/webroot/app/model/user.py +++ b/server/www/teleport/webroot/app/model/user.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +import time,datetime from app.base.configs import tp_cfg from app.base.db import get_db, SQL from app.base.logger import log @@ -38,7 +38,7 @@ def get_by_username(username): s.select_from('user', ['id', 'type', 'auth_type', 'username', 'surname', 'ldap_dn', 'password', 'oath_secret', 'role_id', 
diff --git a/server/www/teleport/webroot/app/model/user.py b/server/www/teleport/webroot/app/model/user.py
index 3581c85..726d366 100755
--- a/server/www/teleport/webroot/app/model/user.py
+++ b/server/www/teleport/webroot/app/model/user.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-
+import time, datetime
 from app.base.configs import tp_cfg
 from app.base.db import get_db, SQL
 from app.base.logger import log
@@ -38,7 +38,7 @@ def get_by_username(username):
     s.select_from('user', ['id', 'type', 'auth_type', 'username', 'surname', 'ldap_dn', 'password', 'oath_secret', 'role_id',
                            'state', 'fail_count', 'lock_time', 'email', 'create_time', 'last_login', 'last_ip', 'last_chpass',
-                           'mobile', 'qq', 'wechat', 'desc'], alt_name='u')
+                           'mobile', 'qq', 'wechat', 'valid_from', 'valid_to', 'desc'], alt_name='u')
     s.left_join('role', ['name', 'privilege'], join_on='r.id=u.role_id', alt_name='r', out_map={'name': 'role'})
     s.where('u.username="{}"'.format(username))
     err = s.query()
@@ -57,7 +57,9 @@ def get_by_username(username):
 def login(handler, username, password=None, oath_code=None, check_bind_oath=False):
     sys_cfg = tp_cfg().sys
     msg = ''
-
+    current_unix_time = int(time.mktime(datetime.datetime.now().timetuple()))
+    # log.e('current:', current_unix_time, 'validfrom:', user_info['valid_from'])
+
     err, user_info = get_by_username(username)
     if err != TPE_OK:
         return err, None, msg
@@ -88,6 +90,10 @@ def login(handler, username, password=None, oath_code=None, check_bind_oath=Fals
             msg = '登录失败,用户状态异常'
             syslog.sys_log(user_info, handler.request.remote_ip, TPE_FAILED, msg)
             return TPE_FAILED, None, msg
+        elif current_unix_time < user_info['valid_from'] or (current_unix_time > user_info['valid_to'] and user_info['valid_to'] != 0):
+            msg = '登录失败,用户已过期'  # login failed: account expired or not yet valid
+            syslog.sys_log(user_info, handler.request.remote_ip, TPE_FAILED, msg)
+            return TPE_FAILED, None, msg
 
         err_msg = ''
         if password is not None:
@@ -172,7 +178,7 @@
 def get_users(sql_filter, sql_order, sql_limit, sql_restrict, sql_exclude):
     dbtp = get_db().table_prefix
     s = SQL(get_db())
-    s.select_from('user', ['id', 'type', 'auth_type', 'username', 'surname', 'role_id', 'state', 'email', 'last_login'],
+    s.select_from('user', ['id', 'type', 'auth_type', 'username', 'surname', 'role_id', 'state', 'email', 'last_login', 'valid_from', 'valid_to'],
                   alt_name='u')
     s.left_join('role', ['name', 'privilege'], join_on='r.id=u.role_id', alt_name='r', out_map={'name': 'role'})
 
@@ -353,14 +359,15 @@ def create_user(handler, user):
 
     sql = 'INSERT INTO `{}user` (' \
           '`role_id`, `username`, `surname`, `type`, `ldap_dn`, `auth_type`, `password`, `state`, ' \
-          '`email`, `creator_id`, `create_time`, `last_login`, `last_chpass`, `desc`' \
+          '`email`, `creator_id`, `create_time`, `last_login`, `last_chpass`, `valid_from`, `valid_to`, `desc`' \
           ') VALUES (' \
           '{role}, "{username}", "{surname}", {user_type}, "{ldap_dn}", {auth_type}, "{password}", {state}, ' \
-          '"{email}", {creator_id}, {create_time}, {last_login}, {last_chpass}, "{desc}");' \
+          '"{email}", {creator_id}, {create_time}, {last_login}, {last_chpass}, {valid_from}, ' \
+          '{valid_to}, "{desc}");' \
          ''.format(db.table_prefix, role=user['role'], username=user['username'], surname=user['surname'],
                    user_type=user['type'], ldap_dn=user['ldap_dn'], auth_type=user['auth_type'],
                    password=_password, state=TP_STATE_NORMAL, email=user['email'], creator_id=operator['id'], create_time=_time_now,
-                   last_login=0, last_chpass=_time_now, desc=user['desc'])
+                   last_login=0, last_chpass=_time_now, valid_from=user['valid_from'], valid_to=user['valid_to'], desc=user['desc'])
     db_ret = db.exec(sql)
     if not db_ret:
         return TPE_DATABASE, 0
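The validity window enforced in `login()` above reduces to a small predicate: a user may log in only when `valid_from <= now <= valid_to`, where 0 on either side means that bound is disabled. A condensed sketch of that rule (not the project's actual code path):

```python
import time

def is_within_validity(valid_from, valid_to, now=None):
    """Both bounds are Unix timestamps; 0 disables the bound on that side."""
    now = int(time.time()) if now is None else int(now)
    if valid_from != 0 and now < valid_from:
        return False  # account not valid yet
    if valid_to != 0 and now > valid_to:
        return False  # account expired
    return True
```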
@@ -400,12 +407,12 @@ def update_user(handler, args):
     sql = 'UPDATE `{}user` SET ' \
           '`username`="{username}", `surname`="{surname}", `auth_type`={auth_type}, ' \
           '`role_id`={role}, `email`="{email}", `mobile`="{mobile}", `qq`="{qq}", ' \
-          '`wechat`="{wechat}", `desc`="{desc}" WHERE `id`={user_id};' \
+          '`wechat`="{wechat}", `valid_from`={valid_from}, `valid_to`={valid_to}, ' \
+          '`desc`="{desc}" WHERE `id`={user_id};' \
           ''.format(db.table_prefix,
                     username=args['username'], surname=args['surname'], auth_type=args['auth_type'], role=args['role'],
-                    email=args['email'],
-                    mobile=args['mobile'], qq=args['qq'], wechat=args['wechat'], desc=args['desc'],
-                    user_id=args['id']
+                    email=args['email'], mobile=args['mobile'], qq=args['qq'], wechat=args['wechat'],
+                    valid_from=args['valid_from'], valid_to=args['valid_to'], desc=args['desc'], user_id=args['id']
                     )
     db_ret = db.exec(sql)
     if not db_ret:
diff --git a/version.in b/version.in
index 8405318..4de5209 100644
--- a/version.in
+++ b/version.in
@@ -10,8 +10,8 @@ Minor: minor version number. If two assemblies share the same name and major version but differ in minor version,
 Revision: revision number. Assemblies with the same major and minor version but a different revision should be fully interchangeable.
        This applies to fixing bugs or security vulnerabilities in a previously released assembly.
 
-TP_SERVER 3.3.1          # version of the packaged server as a whole
-TP_TPCORE 3.3.0          # version of the core service tp_core
+TP_SERVER 3.5.1          # version of the packaged server as a whole
+TP_TPCORE 3.5.0          # version of the core service tp_core
 TP_TPWEB 3.1.0           # version of the web service tp_web (normally unchanged unless Python is upgraded)
-TP_ASSIST 3.3.1          # version of the assist tool
-TP_ASSIST_REQUIRE 3.3.1  # minimum assist version supported
+TP_ASSIST 3.5.1          # version of the assist tool
+TP_ASSIST_REQUIRE 3.5.1  # minimum assist version supported