From ca7d6ad5171bce807cf9b64d76508222fe5d6c5b Mon Sep 17 00:00:00 2001 From: Apex Liu Date: Tue, 12 Dec 2017 05:23:32 +0800 Subject: [PATCH] =?UTF-8?q?=E5=8A=A0=E5=85=A5psutil=E8=8E=B7=E5=8F=96?= =?UTF-8?q?=E7=B3=BB=E7=BB=9F=E8=B4=9F=E8=BD=BD=E4=BF=A1=E6=81=AF=EF=BC=8C?= =?UTF-8?q?=E5=8A=A0=E5=85=A5websocket=E6=94=AF=E6=8C=81=EF=BC=8C=E5=9C=A8?= =?UTF-8?q?dashboard=E9=A1=B5=E9=9D=A2=E6=98=BE=E7=A4=BA=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=B4=9F=E8=BD=BD=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../packages-windows/x86/psutil/__init__.py | 2356 +++++++++++++++++ .../packages-windows/x86/psutil/_common.py | 575 ++++ .../packages-windows/x86/psutil/_compat.py | 249 ++ .../x86/psutil/_exceptions.py | 94 + .../packages-windows/x86/psutil/_psaix.py | 573 ++++ .../packages-windows/x86/psutil/_psbsd.py | 873 ++++++ .../packages-windows/x86/psutil/_pslinux.py | 2002 ++++++++++++++ .../packages-windows/x86/psutil/_psosx.py | 572 ++++ .../packages-windows/x86/psutil/_psposix.py | 182 ++ .../packages-windows/x86/psutil/_pssunos.py | 725 +++++ .../x86/psutil/_psutil_windows.pyd | Bin 0 -> 48640 bytes .../packages-windows/x86/psutil/_pswindows.py | 993 +++++++ .../x86/psutil/tests/__init__.py | 1198 +++++++++ .../x86/psutil/tests/__main__.py | 96 + .../x86/psutil/tests/test_aix.py | 121 + .../x86/psutil/tests/test_bsd.py | 489 ++++ .../x86/psutil/tests/test_connections.py | 525 ++++ .../x86/psutil/tests/test_contracts.py | 651 +++++ .../x86/psutil/tests/test_linux.py | 1911 +++++++++++++ .../x86/psutil/tests/test_memory_leaks.py | 599 +++++ .../x86/psutil/tests/test_misc.py | 1039 ++++++++ .../x86/psutil/tests/test_osx.py | 303 +++ .../x86/psutil/tests/test_posix.py | 419 +++ .../x86/psutil/tests/test_process.py | 1548 +++++++++++ .../x86/psutil/tests/test_sunos.py | 45 + .../x86/psutil/tests/test_system.py | 862 ++++++ .../x86/psutil/tests/test_unicode.py | 367 +++ .../x86/psutil/tests/test_windows.py | 838 ++++++ server/www/teleport/app_bootstrap.py | 4 +- .../teleport/static/js/dashboard/dashboard.js | 161 +- server/www/teleport/view/dashboard/index.mako | 5 +- server/www/teleport/webroot/app/app_env.py | 2 +- .../www/teleport/webroot/app/base/configs.py | 4 +- .../teleport/webroot/app/base/controller.py | 8 +- .../teleport/webroot/app/base/core_server.py | 4 +- server/www/teleport/webroot/app/base/db.py | 6 +- server/www/teleport/webroot/app/base/mail.py | 6 +- .../www/teleport/webroot/app/base/session.py | 16 +- .../www/teleport/webroot/app/base/status.py | 120 + .../www/teleport/webroot/app/base/webapp.py | 33 +- .../webroot/app/controller/__init__.py | 9 +- .../webroot/app/controller/account.py | 2 +- .../teleport/webroot/app/controller/audit.py | 12 +- .../teleport/webroot/app/controller/auth.py | 8 +- .../teleport/webroot/app/controller/host.py | 4 +- .../webroot/app/controller/maintenance.py | 4 +- .../teleport/webroot/app/controller/ops.py | 12 +- .../teleport/webroot/app/controller/rpc.py | 10 +- .../teleport/webroot/app/controller/system.py | 48 +- .../teleport/webroot/app/controller/user.py | 24 +- .../www/teleport/webroot/app/controller/ws.py | 121 + .../webroot/app/logic/auth/captcha.py | 6 +- .../www/teleport/webroot/app/model/record.py | 18 +- server/www/teleport/webroot/app/model/user.py | 6 +- 54 files changed, 20741 insertions(+), 117 deletions(-) create mode 100644 server/www/packages/packages-windows/x86/psutil/__init__.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_common.py create mode 100644 
server/www/packages/packages-windows/x86/psutil/_compat.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_exceptions.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_psaix.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_psbsd.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_pslinux.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_psosx.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_psposix.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_pssunos.py create mode 100644 server/www/packages/packages-windows/x86/psutil/_psutil_windows.pyd create mode 100644 server/www/packages/packages-windows/x86/psutil/_pswindows.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/__init__.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/__main__.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_aix.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_bsd.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_connections.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_contracts.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_linux.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_memory_leaks.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_misc.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_osx.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_posix.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_process.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_sunos.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_system.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_unicode.py create mode 100644 server/www/packages/packages-windows/x86/psutil/tests/test_windows.py create mode 100644 server/www/teleport/webroot/app/base/status.py create mode 100644 server/www/teleport/webroot/app/controller/ws.py diff --git a/server/www/packages/packages-windows/x86/psutil/__init__.py b/server/www/packages/packages-windows/x86/psutil/__init__.py new file mode 100644 index 0000000..d6a66de --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/__init__.py @@ -0,0 +1,2356 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""psutil is a cross-platform library for retrieving information on +running processes and system utilization (CPU, memory, disks, network, +sensors) in Python. Supported platforms: + + - Linux + - Windows + - OSX + - FreeBSD + - OpenBSD + - NetBSD + - Sun Solaris + - AIX + +Works with Python versions from 2.6 to 3.X. +""" + +from __future__ import division + +import collections +import contextlib +import datetime +import errno +import functools +import os +import signal +import subprocess +import sys +import time +import traceback +try: + import pwd +except ImportError: + pwd = None + +from . 
import _common +from ._common import deprecated_method +from ._common import memoize +from ._common import memoize_when_activated +from ._common import wrap_numbers as _wrap_numbers +from ._compat import callable +from ._compat import long +from ._compat import PY3 as _PY3 + +from ._common import STATUS_DEAD +from ._common import STATUS_DISK_SLEEP +from ._common import STATUS_IDLE # bsd +from ._common import STATUS_LOCKED +from ._common import STATUS_RUNNING +from ._common import STATUS_SLEEPING +from ._common import STATUS_STOPPED +from ._common import STATUS_TRACING_STOP +from ._common import STATUS_WAITING # bsd +from ._common import STATUS_WAKING +from ._common import STATUS_ZOMBIE + +from ._common import CONN_CLOSE +from ._common import CONN_CLOSE_WAIT +from ._common import CONN_CLOSING +from ._common import CONN_ESTABLISHED +from ._common import CONN_FIN_WAIT1 +from ._common import CONN_FIN_WAIT2 +from ._common import CONN_LAST_ACK +from ._common import CONN_LISTEN +from ._common import CONN_NONE +from ._common import CONN_SYN_RECV +from ._common import CONN_SYN_SENT +from ._common import CONN_TIME_WAIT +from ._common import NIC_DUPLEX_FULL +from ._common import NIC_DUPLEX_HALF +from ._common import NIC_DUPLEX_UNKNOWN + +from ._common import AIX +from ._common import BSD +from ._common import FREEBSD # NOQA +from ._common import LINUX +from ._common import NETBSD # NOQA +from ._common import OPENBSD # NOQA +from ._common import OSX +from ._common import POSIX # NOQA +from ._common import SUNOS +from ._common import WINDOWS + +from ._exceptions import AccessDenied +from ._exceptions import Error +from ._exceptions import NoSuchProcess +from ._exceptions import TimeoutExpired +from ._exceptions import ZombieProcess + +if LINUX: + # This is public API and it will be retrieved from _pslinux.py + # via sys.modules. + PROCFS_PATH = "/proc" + + from . import _pslinux as _psplatform + + from ._pslinux import IOPRIO_CLASS_BE # NOQA + from ._pslinux import IOPRIO_CLASS_IDLE # NOQA + from ._pslinux import IOPRIO_CLASS_NONE # NOQA + from ._pslinux import IOPRIO_CLASS_RT # NOQA + # Linux >= 2.6.36 + if _psplatform.HAS_PRLIMIT: + from ._psutil_linux import RLIM_INFINITY # NOQA + from ._psutil_linux import RLIMIT_AS # NOQA + from ._psutil_linux import RLIMIT_CORE # NOQA + from ._psutil_linux import RLIMIT_CPU # NOQA + from ._psutil_linux import RLIMIT_DATA # NOQA + from ._psutil_linux import RLIMIT_FSIZE # NOQA + from ._psutil_linux import RLIMIT_LOCKS # NOQA + from ._psutil_linux import RLIMIT_MEMLOCK # NOQA + from ._psutil_linux import RLIMIT_NOFILE # NOQA + from ._psutil_linux import RLIMIT_NPROC # NOQA + from ._psutil_linux import RLIMIT_RSS # NOQA + from ._psutil_linux import RLIMIT_STACK # NOQA + # Kinda ugly but considerably faster than using hasattr() and + # setattr() against the module object (we are at import time: + # speed matters). + from . import _psutil_linux + try: + RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE + except AttributeError: + pass + try: + RLIMIT_NICE = _psutil_linux.RLIMIT_NICE + except AttributeError: + pass + try: + RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO + except AttributeError: + pass + try: + RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME + except AttributeError: + pass + try: + RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING + except AttributeError: + pass + +elif WINDOWS: + from . 
import _pswindows as _psplatform + from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA + from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA + from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA + from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA + from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA + from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA + from ._pswindows import CONN_DELETE_TCB # NOQA + +elif OSX: + from . import _psosx as _psplatform + +elif BSD: + from . import _psbsd as _psplatform + +elif SUNOS: + from . import _pssunos as _psplatform + from ._pssunos import CONN_BOUND # NOQA + from ._pssunos import CONN_IDLE # NOQA + + # This is public writable API which is read from _pslinux.py and + # _pssunos.py via sys.modules. + PROCFS_PATH = "/proc" + +elif AIX: + from . import _psaix as _psplatform + + # This is public API and it will be retrieved from _pslinux.py + # via sys.modules. + PROCFS_PATH = "/proc" + +else: # pragma: no cover + raise NotImplementedError('platform %s is not supported' % sys.platform) + + +__all__ = [ + # exceptions + "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied", + "TimeoutExpired", + + # constants + "version_info", "__version__", + + "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP", + "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD", + "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED", + + "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", + "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", + "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE", + + "AF_LINK", + + "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN", + + "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED", + + "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "OSX", "POSIX", "SUNOS", + "WINDOWS", "AIX", + + # classes + "Process", "Popen", + + # functions + "pid_exists", "pids", "process_iter", "wait_procs", # proc + "virtual_memory", "swap_memory", # memory + "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu + "cpu_stats", # "cpu_freq", + "net_io_counters", "net_connections", "net_if_addrs", # network + "net_if_stats", + "disk_io_counters", "disk_partitions", "disk_usage", # disk + # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors + "users", "boot_time", # others +] +__all__.extend(_psplatform.__extra__all__) +__author__ = "Giampaolo Rodola'" +__version__ = "5.4.2" +version_info = tuple([int(num) for num in __version__.split('.')]) +AF_LINK = _psplatform.AF_LINK +POWER_TIME_UNLIMITED = _common.POWER_TIME_UNLIMITED +POWER_TIME_UNKNOWN = _common.POWER_TIME_UNKNOWN +_TOTAL_PHYMEM = None +_timer = getattr(time, 'monotonic', time.time) + + +# Sanity check in case the user messed up with psutil installation +# or did something weird with sys.path. In this case we might end +# up importing a python module using a C extension module which +# was compiled for a different version of psutil. +# We want to prevent that by failing sooner rather than later. 
+# See: https://github.com/giampaolo/psutil/issues/564 +if (int(__version__.replace('.', '')) != + getattr(_psplatform.cext, 'version', None)): + msg = "version conflict: %r C extension module was built for another " \ + "version of psutil" % getattr(_psplatform.cext, "__file__") + if hasattr(_psplatform.cext, 'version'): + msg += " (%s instead of %s)" % ( + '.'.join([x for x in str(_psplatform.cext.version)]), __version__) + else: + msg += " (different than %s)" % __version__ + msg += "; you may try to 'pip uninstall psutil', manually remove %s" % ( + getattr(_psplatform.cext, "__file__", + "the existing psutil install directory")) + msg += " or clean the virtual env somehow, then reinstall" + raise ImportError(msg) + + +# ===================================================================== +# --- Utils +# ===================================================================== + + +if hasattr(_psplatform, 'ppid_map'): + # Faster version (Windows and Linux). + _ppid_map = _psplatform.ppid_map +else: + def _ppid_map(): + """Return a {pid: ppid, ...} dict for all running processes in + one shot. Used to speed up Process.children(). + """ + ret = {} + for pid in pids(): + try: + proc = _psplatform.Process(pid) + ppid = proc.ppid() + except (NoSuchProcess, AccessDenied): + # Note: AccessDenied is unlikely to happen. + pass + else: + ret[pid] = ppid + return ret + + +def _assert_pid_not_reused(fun): + """Decorator which raises NoSuchProcess in case a process is no + longer running or its PID has been reused. + """ + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + if not self.is_running(): + raise NoSuchProcess(self.pid, self._name) + return fun(self, *args, **kwargs) + return wrapper + + +def _pprint_secs(secs): + """Format seconds in a human readable form.""" + now = time.time() + secs_ago = int(now - secs) + if secs_ago < 60 * 60 * 24: + fmt = "%H:%M:%S" + else: + fmt = "%Y-%m-%d %H:%M:%S" + return datetime.datetime.fromtimestamp(secs).strftime(fmt) + + +# ===================================================================== +# --- Process class +# ===================================================================== + + +class Process(object): + """Represents an OS process with the given PID. + If PID is omitted current process PID (os.getpid()) is used. + Raise NoSuchProcess if PID does not exist. + + Note that most of the methods of this class do not make sure + the PID of the process being queried has been reused over time. + That means you might end up retrieving an information referring + to another process in case the original one this instance + refers to is gone in the meantime. 
+ + The only exceptions for which process identity is pre-emptively + checked and guaranteed are: + + - parent() + - children() + - nice() (set) + - ionice() (set) + - rlimit() (set) + - cpu_affinity (set) + - suspend() + - resume() + - send_signal() + - terminate() + - kill() + + To prevent this problem for all other methods you can: + - use is_running() before querying the process + - if you're continuously iterating over a set of Process + instances use process_iter() which pre-emptively checks + process identity for every yielded instance + """ + + def __init__(self, pid=None): + self._init(pid) + + def _init(self, pid, _ignore_nsp=False): + if pid is None: + pid = os.getpid() + else: + if not _PY3 and not isinstance(pid, (int, long)): + raise TypeError('pid must be an integer (got %r)' % pid) + if pid < 0: + raise ValueError('pid must be a positive integer (got %s)' + % pid) + self._pid = pid + self._name = None + self._exe = None + self._create_time = None + self._gone = False + self._hash = None + self._oneshot_inctx = False + # used for caching on Windows only (on POSIX ppid may change) + self._ppid = None + # platform-specific modules define an _psplatform.Process + # implementation class + self._proc = _psplatform.Process(pid) + self._last_sys_cpu_times = None + self._last_proc_cpu_times = None + # cache creation time for later use in is_running() method + try: + self.create_time() + except AccessDenied: + # We should never get here as AFAIK we're able to get + # process creation time on all platforms even as a + # limited user. + pass + except ZombieProcess: + # Zombies can still be queried by this class (although + # not always) and pids() return them so just go on. + pass + except NoSuchProcess: + if not _ignore_nsp: + msg = 'no process found with pid %s' % pid + raise NoSuchProcess(pid, None, msg) + else: + self._gone = True + # This pair is supposed to indentify a Process instance + # univocally over time (the PID alone is not enough as + # it might refer to a process whose PID has been reused). + # This will be used later in __eq__() and is_running(). + self._ident = (self.pid, self._create_time) + + def __str__(self): + try: + info = collections.OrderedDict() + except AttributeError: + info = {} # Python 2.6 + info["pid"] = self.pid + try: + info["name"] = self.name() + if self._create_time: + info['started'] = _pprint_secs(self._create_time) + except ZombieProcess: + info["status"] = "zombie" + except NoSuchProcess: + info["status"] = "terminated" + except AccessDenied: + pass + return "%s.%s(%s)" % ( + self.__class__.__module__, + self.__class__.__name__, + ", ".join(["%s=%r" % (k, v) for k, v in info.items()])) + + __repr__ = __str__ + + def __eq__(self, other): + # Test for equality with another Process object based + # on PID and creation time. + if not isinstance(other, Process): + return NotImplemented + return self._ident == other._ident + + def __ne__(self, other): + return not self == other + + def __hash__(self): + if self._hash is None: + self._hash = hash(self._ident) + return self._hash + + @property + def pid(self): + """The process PID.""" + return self._pid + + # --- utility methods + + @contextlib.contextmanager + def oneshot(self): + """Utility context manager which considerably speeds up the + retrieval of multiple process information at the same time. + + Internally different process info (e.g. name, ppid, uids, + gids, ...) may be fetched by using the same routine, but + only one information is returned and the others are discarded. 
+ When using this context manager the internal routine is + executed once (in the example below on name()) and the + other info are cached. + + The cache is cleared when exiting the context manager block. + The advice is to use this every time you retrieve more than + one information about the process. If you're lucky, you'll + get a hell of a speedup. + + >>> import psutil + >>> p = psutil.Process() + >>> with p.oneshot(): + ... p.name() # collect multiple info + ... p.cpu_times() # return cached value + ... p.cpu_percent() # return cached value + ... p.create_time() # return cached value + ... + >>> + """ + if self._oneshot_inctx: + # NOOP: this covers the use case where the user enters the + # context twice. Since as_dict() internally uses oneshot() + # I expect that the code below will be a pretty common + # "mistake" that the user will make, so let's guard + # against that: + # + # >>> with p.oneshot(): + # ... p.as_dict() + # ... + yield + else: + self._oneshot_inctx = True + try: + # cached in case cpu_percent() is used + self.cpu_times.cache_activate() + # cached in case memory_percent() is used + self.memory_info.cache_activate() + # cached in case parent() is used + self.ppid.cache_activate() + # cached in case username() is used + if POSIX: + self.uids.cache_activate() + # specific implementation cache + self._proc.oneshot_enter() + yield + finally: + self.cpu_times.cache_deactivate() + self.memory_info.cache_deactivate() + self.ppid.cache_deactivate() + if POSIX: + self.uids.cache_deactivate() + self._proc.oneshot_exit() + self._oneshot_inctx = False + + def as_dict(self, attrs=None, ad_value=None): + """Utility method returning process information as a + hashable dictionary. + If *attrs* is specified it must be a list of strings + reflecting available Process class' attribute names + (e.g. ['cpu_times', 'name']) else all public (read + only) attributes are assumed. + *ad_value* is the value which gets assigned in case + AccessDenied or ZombieProcess exception is raised when + retrieving that particular process information. + """ + valid_names = _as_dict_attrnames + if attrs is not None: + if not isinstance(attrs, (list, tuple, set, frozenset)): + raise TypeError("invalid attrs type %s" % type(attrs)) + attrs = set(attrs) + invalid_names = attrs - valid_names + if invalid_names: + raise ValueError("invalid attr name%s %s" % ( + "s" if len(invalid_names) > 1 else "", + ", ".join(map(repr, invalid_names)))) + + retdict = dict() + ls = attrs or valid_names + with self.oneshot(): + for name in ls: + try: + if name == 'pid': + ret = self.pid + else: + meth = getattr(self, name) + ret = meth() + except (AccessDenied, ZombieProcess): + ret = ad_value + except NotImplementedError: + # in case of not implemented functionality (may happen + # on old or exotic systems) we want to crash only if + # the user explicitly asked for that particular attr + if attrs: + raise + continue + retdict[name] = ret + return retdict + + def parent(self): + """Return the parent process as a Process object pre-emptively + checking whether PID has been reused. + If no parent is known return None. + """ + ppid = self.ppid() + if ppid is not None: + ctime = self.create_time() + try: + parent = Process(ppid) + if parent.create_time() <= ctime: + return parent + # ...else ppid has been reused by another process + except NoSuchProcess: + pass + + def is_running(self): + """Return whether this process is running. + It also checks if PID has been reused by another process in + which case return False. 
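A small sketch of the PID-reuse-safe check described just above; the PID is a hypothetical value read from elsewhere (e.g. a pidfile), and the pattern simply combines Process() with is_running() as the docstrings recommend:

import psutil

pid = 4242                          # hypothetical PID read from a pidfile
try:
    p = psutil.Process(pid)
except psutil.NoSuchProcess:
    p = None
if p is not None and p.is_running():
    # same identity (PID + creation time), so the PID was not reused
    print(p.name(), "is still alive")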
+ """ + if self._gone: + return False + try: + # Checking if PID is alive is not enough as the PID might + # have been reused by another process: we also want to + # verify process identity. + # Process identity / uniqueness over time is guaranteed by + # (PID + creation time) and that is verified in __eq__. + return self == Process(self.pid) + except ZombieProcess: + # We should never get here as it's already handled in + # Process.__init__; here just for extra safety. + return True + except NoSuchProcess: + self._gone = True + return False + + # --- actual API + + @memoize_when_activated + def ppid(self): + """The process parent PID. + On Windows the return value is cached after first call. + """ + # On POSIX we don't want to cache the ppid as it may unexpectedly + # change to 1 (init) in case this process turns into a zombie: + # https://github.com/giampaolo/psutil/issues/321 + # http://stackoverflow.com/questions/356722/ + + # XXX should we check creation time here rather than in + # Process.parent()? + if POSIX: + return self._proc.ppid() + else: # pragma: no cover + self._ppid = self._ppid or self._proc.ppid() + return self._ppid + + def name(self): + """The process name. The return value is cached after first call.""" + # Process name is only cached on Windows as on POSIX it may + # change, see: + # https://github.com/giampaolo/psutil/issues/692 + if WINDOWS and self._name is not None: + return self._name + name = self._proc.name() + if POSIX and len(name) >= 15: + # On UNIX the name gets truncated to the first 15 characters. + # If it matches the first part of the cmdline we return that + # one instead because it's usually more explicative. + # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon". + try: + cmdline = self.cmdline() + except AccessDenied: + pass + else: + if cmdline: + extended_name = os.path.basename(cmdline[0]) + if extended_name.startswith(name): + name = extended_name + self._name = name + self._proc._name = name + return name + + def exe(self): + """The process executable as an absolute path. + May also be an empty string. + The return value is cached after first call. + """ + def guess_it(fallback): + # try to guess exe from cmdline[0] in absence of a native + # exe representation + cmdline = self.cmdline() + if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'): + exe = cmdline[0] # the possible exe + # Attempt to guess only in case of an absolute path. + # It is not safe otherwise as the process might have + # changed cwd. + if (os.path.isabs(exe) and + os.path.isfile(exe) and + os.access(exe, os.X_OK)): + return exe + if isinstance(fallback, AccessDenied): + raise fallback + return fallback + + if self._exe is None: + try: + exe = self._proc.exe() + except AccessDenied as err: + return guess_it(fallback=err) + else: + if not exe: + # underlying implementation can legitimately return an + # empty string; if that's the case we don't want to + # raise AD while guessing from the cmdline + try: + exe = guess_it(fallback=exe) + except AccessDenied: + pass + self._exe = exe + return self._exe + + def cmdline(self): + """The command line this process has been called with.""" + return self._proc.cmdline() + + def status(self): + """The process current status as a STATUS_* constant.""" + try: + return self._proc.status() + except ZombieProcess: + return STATUS_ZOMBIE + + def username(self): + """The name of the user that owns the process. + On UNIX this is calculated by using *real* process uid. 
+ """ + if POSIX: + if pwd is None: + # might happen if python was installed from sources + raise ImportError( + "requires pwd module shipped with standard python") + real_uid = self.uids().real + try: + return pwd.getpwuid(real_uid).pw_name + except KeyError: + # the uid can't be resolved by the system + return str(real_uid) + else: + return self._proc.username() + + def create_time(self): + """The process creation time as a floating point number + expressed in seconds since the epoch, in UTC. + The return value is cached after first call. + """ + if self._create_time is None: + self._create_time = self._proc.create_time() + return self._create_time + + def cwd(self): + """Process current working directory as an absolute path.""" + return self._proc.cwd() + + def nice(self, value=None): + """Get or set process niceness (priority).""" + if value is None: + return self._proc.nice_get() + else: + if not self.is_running(): + raise NoSuchProcess(self.pid, self._name) + self._proc.nice_set(value) + + if POSIX: + + @memoize_when_activated + def uids(self): + """Return process UIDs as a (real, effective, saved) + namedtuple. + """ + return self._proc.uids() + + def gids(self): + """Return process GIDs as a (real, effective, saved) + namedtuple. + """ + return self._proc.gids() + + def terminal(self): + """The terminal associated with this process, if any, + else None. + """ + return self._proc.terminal() + + def num_fds(self): + """Return the number of file descriptors opened by this + process (POSIX only). + """ + return self._proc.num_fds() + + # Linux, BSD, AIX and Windows only + if hasattr(_psplatform.Process, "io_counters"): + + def io_counters(self): + """Return process I/O statistics as a + (read_count, write_count, read_bytes, write_bytes) + namedtuple. + Those are the number of read/write calls performed and the + amount of bytes read and written by the process. + """ + return self._proc.io_counters() + + # Linux and Windows >= Vista only + if hasattr(_psplatform.Process, "ionice_get"): + + def ionice(self, ioclass=None, value=None): + """Get or set process I/O niceness (priority). + + On Linux *ioclass* is one of the IOPRIO_CLASS_* constants. + *value* is a number which goes from 0 to 7. The higher the + value, the lower the I/O priority of the process. + + On Windows only *ioclass* is used and it can be set to 2 + (normal), 1 (low) or 0 (very low). + + Available on Linux and Windows > Vista only. + """ + if ioclass is None: + if value is not None: + raise ValueError("'ioclass' argument must be specified") + return self._proc.ionice_get() + else: + return self._proc.ionice_set(ioclass, value) + + # Linux only + if hasattr(_psplatform.Process, "rlimit"): + + def rlimit(self, resource, limits=None): + """Get or set process resource limits as a (soft, hard) + tuple. + + *resource* is one of the RLIMIT_* constants. + *limits* is supposed to be a (soft, hard) tuple. + + See "man prlimit" for further info. + Available on Linux only. + """ + if limits is None: + return self._proc.rlimit(resource) + else: + return self._proc.rlimit(resource, limits) + + # Windows, Linux and FreeBSD only + if hasattr(_psplatform.Process, "cpu_affinity_get"): + + def cpu_affinity(self, cpus=None): + """Get or set process CPU affinity. + If specified, *cpus* must be a list of CPUs for which you + want to set the affinity (e.g. [0, 1]). + If an empty list is passed, all egible CPUs are assumed + (and set). + (Windows, Linux and BSD only). 
+ """ + # Automatically remove duplicates both on get and + # set (for get it's not really necessary, it's + # just for extra safety). + if cpus is None: + return list(set(self._proc.cpu_affinity_get())) + else: + if not cpus: + if hasattr(self._proc, "_get_eligible_cpus"): + cpus = self._proc._get_eligible_cpus() + else: + cpus = tuple(range(len(cpu_times(percpu=True)))) + self._proc.cpu_affinity_set(list(set(cpus))) + + # Linux, FreeBSD, SunOS + if hasattr(_psplatform.Process, "cpu_num"): + + def cpu_num(self): + """Return what CPU this process is currently running on. + The returned number should be <= psutil.cpu_count() + and <= len(psutil.cpu_percent(percpu=True)). + It may be used in conjunction with + psutil.cpu_percent(percpu=True) to observe the system + workload distributed across CPUs. + """ + return self._proc.cpu_num() + + # Linux, OSX and Windows only + if hasattr(_psplatform.Process, "environ"): + + def environ(self): + """The environment variables of the process as a dict. Note: this + might not reflect changes made after the process started. """ + return self._proc.environ() + + if WINDOWS: + + def num_handles(self): + """Return the number of handles opened by this process + (Windows only). + """ + return self._proc.num_handles() + + def num_ctx_switches(self): + """Return the number of voluntary and involuntary context + switches performed by this process. + """ + return self._proc.num_ctx_switches() + + def num_threads(self): + """Return the number of threads used by this process.""" + return self._proc.num_threads() + + if hasattr(_psplatform.Process, "threads"): + + def threads(self): + """Return threads opened by process as a list of + (id, user_time, system_time) namedtuples representing + thread id and thread CPU times (user/system). + On OpenBSD this method requires root access. + """ + return self._proc.threads() + + @_assert_pid_not_reused + def children(self, recursive=False): + """Return the children of this process as a list of Process + instances, pre-emptively checking whether PID has been reused. + If *recursive* is True return all the parent descendants. + + Example (A == this process): + + A ─┐ + │ + ├─ B (child) ─┐ + │ └─ X (grandchild) ─┐ + │ └─ Y (great grandchild) + ├─ C (child) + └─ D (child) + + >>> import psutil + >>> p = psutil.Process() + >>> p.children() + B, C, D + >>> p.children(recursive=True) + B, X, Y, C, D + + Note that in the example above if process X disappears + process Y won't be listed as the reference to process A + is lost. + """ + ppid_map = _ppid_map() + ret = [] + if not recursive: + for pid, ppid in ppid_map.items(): + if ppid == self.pid: + try: + child = Process(pid) + # if child happens to be older than its parent + # (self) it means child's PID has been reused + if self.create_time() <= child.create_time(): + ret.append(child) + except (NoSuchProcess, ZombieProcess): + pass + else: + # Construct a {pid: [child pids]} dict + reverse_ppid_map = collections.defaultdict(list) + for pid, ppid in ppid_map.items(): + reverse_ppid_map[ppid].append(pid) + # Recursively traverse that dict, starting from self.pid, + # such that we only call Process() on actual children + seen = set() + stack = [self.pid] + while stack: + pid = stack.pop() + if pid in seen: + # Since pids can be reused while the ppid_map is + # constructed, there may be rare instances where + # there's a cycle in the recorded process "tree". 
+ continue + seen.add(pid) + for child_pid in reverse_ppid_map[pid]: + try: + child = Process(child_pid) + # if child happens to be older than its parent + # (self) it means child's PID has been reused + intime = self.create_time() <= child.create_time() + if intime: + ret.append(child) + stack.append(child_pid) + except (NoSuchProcess, ZombieProcess): + pass + return ret + + def cpu_percent(self, interval=None): + """Return a float representing the current process CPU + utilization as a percentage. + + When *interval* is 0.0 or None (default) compares process times + to system CPU times elapsed since last call, returning + immediately (non-blocking). That means that the first time + this is called it will return a meaningful 0.0 value. + + When *interval* is > 0.0 compares process times to system CPU + times elapsed before and after the interval (blocking). + + In this case is recommended for accuracy that this function + be called with at least 0.1 seconds between calls. + + A value > 100.0 can be returned in case of processes running + multiple threads on different CPU cores. + + The returned value is explicitly NOT split evenly between + all available logical CPUs. This means that a busy loop process + running on a system with 2 logical CPUs will be reported as + having 100% CPU utilization instead of 50%. + + Examples: + + >>> import psutil + >>> p = psutil.Process(os.getpid()) + >>> # blocking + >>> p.cpu_percent(interval=1) + 2.0 + >>> # non-blocking (percentage since last call) + >>> p.cpu_percent(interval=None) + 2.9 + >>> + """ + blocking = interval is not None and interval > 0.0 + if interval is not None and interval < 0: + raise ValueError("interval is not positive (got %r)" % interval) + num_cpus = cpu_count() or 1 + + def timer(): + return _timer() * num_cpus + + if blocking: + st1 = timer() + pt1 = self._proc.cpu_times() + time.sleep(interval) + st2 = timer() + pt2 = self._proc.cpu_times() + else: + st1 = self._last_sys_cpu_times + pt1 = self._last_proc_cpu_times + st2 = timer() + pt2 = self._proc.cpu_times() + if st1 is None or pt1 is None: + self._last_sys_cpu_times = st2 + self._last_proc_cpu_times = pt2 + return 0.0 + + delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system) + delta_time = st2 - st1 + # reset values for next call in case of interval == None + self._last_sys_cpu_times = st2 + self._last_proc_cpu_times = pt2 + + try: + # This is the utilization split evenly between all CPUs. + # E.g. a busy loop process on a 2-CPU-cores system at this + # point is reported as 50% instead of 100%. + overall_cpus_percent = ((delta_proc / delta_time) * 100) + except ZeroDivisionError: + # interval was too low + return 0.0 + else: + # Note 1: + # in order to emulate "top" we multiply the value for the num + # of CPU cores. This way the busy process will be reported as + # having 100% (or more) usage. + # + # Note 2: + # taskmgr.exe on Windows differs in that it will show 50% + # instead. + # + # Note 3: + # a percentage > 100 is legitimate as it can result from a + # process with multiple threads running on different CPU + # cores (top does the same), see: + # http://stackoverflow.com/questions/1032357 + # https://github.com/giampaolo/psutil/issues/474 + single_cpu_percent = overall_cpus_percent * num_cpus + return round(single_cpu_percent, 1) + + @memoize_when_activated + def cpu_times(self): + """Return a (user, system, children_user, children_system) + namedtuple representing the accumulated process time, in + seconds. + This is similar to os.times() but per-process. 
+ On OSX and Windows children_user and children_system are + always set to 0. + """ + return self._proc.cpu_times() + + @memoize_when_activated + def memory_info(self): + """Return a namedtuple with variable fields depending on the + platform, representing memory information about the process. + + The "portable" fields available on all plaforms are `rss` and `vms`. + + All numbers are expressed in bytes. + """ + return self._proc.memory_info() + + @deprecated_method(replacement="memory_info") + def memory_info_ex(self): + return self.memory_info() + + def memory_full_info(self): + """This method returns the same information as memory_info(), + plus, on some platform (Linux, OSX, Windows), also provides + additional metrics (USS, PSS and swap). + The additional metrics provide a better representation of actual + process memory usage. + + Namely USS is the memory which is unique to a process and which + would be freed if the process was terminated right now. + + It does so by passing through the whole process address. + As such it usually requires higher user privileges than + memory_info() and is considerably slower. + """ + return self._proc.memory_full_info() + + def memory_percent(self, memtype="rss"): + """Compare process memory to total physical system memory and + calculate process memory utilization as a percentage. + *memtype* argument is a string that dictates what type of + process memory you want to compare against (defaults to "rss"). + The list of available strings can be obtained like this: + + >>> psutil.Process().memory_info()._fields + ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss') + """ + valid_types = list(_psplatform.pfullmem._fields) + if memtype not in valid_types: + raise ValueError("invalid memtype %r; valid types are %r" % ( + memtype, tuple(valid_types))) + fun = self.memory_info if memtype in _psplatform.pmem._fields else \ + self.memory_full_info + metrics = fun() + value = getattr(metrics, memtype) + + # use cached value if available + total_phymem = _TOTAL_PHYMEM or virtual_memory().total + if not total_phymem > 0: + # we should never get here + raise ValueError( + "can't calculate process memory percent because " + "total physical system memory is not positive (%r)" + % total_phymem) + return (value / float(total_phymem)) * 100 + + if hasattr(_psplatform.Process, "memory_maps"): + # Available everywhere except OpenBSD and NetBSD. + def memory_maps(self, grouped=True): + """Return process' mapped memory regions as a list of namedtuples + whose fields are variable depending on the platform. + + If *grouped* is True the mapped regions with the same 'path' + are grouped together and the different memory fields are summed. + + If *grouped* is False every mapped region is shown as a single + entity and the namedtuple will also include the mapped region's + address space ('addr') and permission set ('perms'). + """ + it = self._proc.memory_maps() + if grouped: + d = {} + for tupl in it: + path = tupl[2] + nums = tupl[3:] + try: + d[path] = map(lambda x, y: x + y, d[path], nums) + except KeyError: + d[path] = nums + nt = _psplatform.pmmap_grouped + return [nt(path, *d[path]) for path in d] # NOQA + else: + nt = _psplatform.pmmap_ext + return [nt(*x) for x in it] + + def open_files(self): + """Return files opened by process as a list of + (path, fd) namedtuples including the absolute file name + and file descriptor number. 
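A short usage sketch of open_files() as described here; the temporary file only guarantees there is at least one entry for the current process to report:

import tempfile
import psutil

p = psutil.Process()
with tempfile.NamedTemporaryFile() as f:      # keep a file open while querying
    for of in p.open_files():
        print(of.path, of.fd)                 # fd is -1 where not available (e.g. Windows)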
+ """ + return self._proc.open_files() + + def connections(self, kind='inet'): + """Return socket connections opened by process as a list of + (fd, family, type, laddr, raddr, status) namedtuples. + The *kind* parameter filters for connections that match the + following criteria: + + +------------+----------------------------------------------------+ + | Kind Value | Connections using | + +------------+----------------------------------------------------+ + | inet | IPv4 and IPv6 | + | inet4 | IPv4 | + | inet6 | IPv6 | + | tcp | TCP | + | tcp4 | TCP over IPv4 | + | tcp6 | TCP over IPv6 | + | udp | UDP | + | udp4 | UDP over IPv4 | + | udp6 | UDP over IPv6 | + | unix | UNIX socket (both UDP and TCP protocols) | + | all | the sum of all the possible families and protocols | + +------------+----------------------------------------------------+ + """ + return self._proc.connections(kind) + + # --- signals + + if POSIX: + def _send_signal(self, sig): + assert not self.pid < 0, self.pid + if self.pid == 0: + # see "man 2 kill" + raise ValueError( + "preventing sending signal to process with PID 0 as it " + "would affect every process in the process group of the " + "calling process (os.getpid()) instead of PID 0") + try: + os.kill(self.pid, sig) + except OSError as err: + if err.errno == errno.ESRCH: + if OPENBSD and pid_exists(self.pid): + # We do this because os.kill() lies in case of + # zombie processes. + raise ZombieProcess(self.pid, self._name, self._ppid) + else: + self._gone = True + raise NoSuchProcess(self.pid, self._name) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + raise + + @_assert_pid_not_reused + def send_signal(self, sig): + """Send a signal *sig* to process pre-emptively checking + whether PID has been reused (see signal module constants) . + On Windows only SIGTERM is valid and is treated as an alias + for kill(). + """ + if POSIX: + self._send_signal(sig) + else: # pragma: no cover + if sig == signal.SIGTERM: + self._proc.kill() + # py >= 2.7 + elif sig in (getattr(signal, "CTRL_C_EVENT", object()), + getattr(signal, "CTRL_BREAK_EVENT", object())): + self._proc.send_signal(sig) + else: + raise ValueError( + "only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT signals " + "are supported on Windows") + + @_assert_pid_not_reused + def suspend(self): + """Suspend process execution with SIGSTOP pre-emptively checking + whether PID has been reused. + On Windows this has the effect ot suspending all process threads. + """ + if POSIX: + self._send_signal(signal.SIGSTOP) + else: # pragma: no cover + self._proc.suspend() + + @_assert_pid_not_reused + def resume(self): + """Resume process execution with SIGCONT pre-emptively checking + whether PID has been reused. + On Windows this has the effect of resuming all process threads. + """ + if POSIX: + self._send_signal(signal.SIGCONT) + else: # pragma: no cover + self._proc.resume() + + @_assert_pid_not_reused + def terminate(self): + """Terminate the process with SIGTERM pre-emptively checking + whether PID has been reused. + On Windows this is an alias for kill(). + """ + if POSIX: + self._send_signal(signal.SIGTERM) + else: # pragma: no cover + self._proc.kill() + + @_assert_pid_not_reused + def kill(self): + """Kill the current process with SIGKILL pre-emptively checking + whether PID has been reused. 
+ """ + if POSIX: + self._send_signal(signal.SIGKILL) + else: # pragma: no cover + self._proc.kill() + + def wait(self, timeout=None): + """Wait for process to terminate and, if process is a children + of os.getpid(), also return its exit code, else None. + + If the process is already terminated immediately return None + instead of raising NoSuchProcess. + + If *timeout* (in seconds) is specified and process is still + alive raise TimeoutExpired. + + To wait for multiple Process(es) use psutil.wait_procs(). + """ + if timeout is not None and not timeout >= 0: + raise ValueError("timeout must be a positive integer") + return self._proc.wait(timeout) + + +# ===================================================================== +# --- Popen class +# ===================================================================== + + +class Popen(Process): + """A more convenient interface to stdlib subprocess.Popen class. + It starts a sub process and deals with it exactly as when using + subprocess.Popen class but in addition also provides all the + properties and methods of psutil.Process class as a unified + interface: + + >>> import psutil + >>> from subprocess import PIPE + >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE) + >>> p.name() + 'python' + >>> p.uids() + user(real=1000, effective=1000, saved=1000) + >>> p.username() + 'giampaolo' + >>> p.communicate() + ('hi\n', None) + >>> p.terminate() + >>> p.wait(timeout=2) + 0 + >>> + + For method names common to both classes such as kill(), terminate() + and wait(), psutil.Process implementation takes precedence. + + Unlike subprocess.Popen this class pre-emptively checks whether PID + has been reused on send_signal(), terminate() and kill() so that + you don't accidentally terminate another process, fixing + http://bugs.python.org/issue6973. + + For a complete documentation refer to: + http://docs.python.org/library/subprocess.html + """ + + def __init__(self, *args, **kwargs): + # Explicitly avoid to raise NoSuchProcess in case the process + # spawned by subprocess.Popen terminates too quickly, see: + # https://github.com/giampaolo/psutil/issues/193 + self.__subproc = subprocess.Popen(*args, **kwargs) + self._init(self.__subproc.pid, _ignore_nsp=True) + + def __dir__(self): + return sorted(set(dir(Popen) + dir(subprocess.Popen))) + + def __enter__(self): + if hasattr(self.__subproc, '__enter__'): + self.__subproc.__enter__() + return self + + def __exit__(self, *args, **kwargs): + if hasattr(self.__subproc, '__exit__'): + return self.__subproc.__exit__(*args, **kwargs) + else: + if self.stdout: + self.stdout.close() + if self.stderr: + self.stderr.close() + try: + # Flushing a BufferedWriter may raise an error. + if self.stdin: + self.stdin.close() + finally: + # Wait for the process to terminate, to avoid zombies. + self.wait() + + def __getattribute__(self, name): + try: + return object.__getattribute__(self, name) + except AttributeError: + try: + return object.__getattribute__(self.__subproc, name) + except AttributeError: + raise AttributeError("%s instance has no attribute '%s'" + % (self.__class__.__name__, name)) + + def wait(self, timeout=None): + if self.__subproc.returncode is not None: + return self.__subproc.returncode + ret = super(Popen, self).wait(timeout) + self.__subproc.returncode = ret + return ret + + +# The valid attr names which can be processed by Process.as_dict(). 
+_as_dict_attrnames = set( + [x for x in dir(Process) if not x.startswith('_') and x not in + ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait', + 'is_running', 'as_dict', 'parent', 'children', 'rlimit', + 'memory_info_ex', 'oneshot']]) + + +# ===================================================================== +# --- system processes related functions +# ===================================================================== + + +def pids(): + """Return a list of current running PIDs.""" + return _psplatform.pids() + + +def pid_exists(pid): + """Return True if given PID exists in the current process list. + This is faster than doing "pid in psutil.pids()" and + should be preferred. + """ + if pid < 0: + return False + elif pid == 0 and POSIX: + # On POSIX we use os.kill() to determine PID existence. + # According to "man 2 kill" PID 0 has a special meaning + # though: it refers to <> and that is not we want + # to do here. + return pid in pids() + else: + return _psplatform.pid_exists(pid) + + +_pmap = {} + + +def process_iter(attrs=None, ad_value=None): + """Return a generator yielding a Process instance for all + running processes. + + Every new Process instance is only created once and then cached + into an internal table which is updated every time this is used. + + Cached Process instances are checked for identity so that you're + safe in case a PID has been reused by another process, in which + case the cached instance is updated. + + The sorting order in which processes are yielded is based on + their PIDs. + + *attrs* and *ad_value* have the same meaning as in + Process.as_dict(). If *attrs* is specified as_dict() is called + and the resulting dict is stored as a 'info' attribute attached + to returned Process instance. + If *attrs* is an empty list it will retrieve all process info + (slow). + """ + def add(pid): + proc = Process(pid) + if attrs is not None: + proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value) + _pmap[proc.pid] = proc + return proc + + def remove(pid): + _pmap.pop(pid, None) + + a = set(pids()) + b = set(_pmap.keys()) + new_pids = a - b + gone_pids = b - a + + for pid in gone_pids: + remove(pid) + for pid, proc in sorted(list(_pmap.items()) + + list(dict.fromkeys(new_pids).items())): + try: + if proc is None: # new process + yield add(pid) + else: + # use is_running() to check whether PID has been reused by + # another process in which case yield a new Process instance + if proc.is_running(): + if attrs is not None: + proc.info = proc.as_dict( + attrs=attrs, ad_value=ad_value) + yield proc + else: + yield add(pid) + except NoSuchProcess: + remove(pid) + except AccessDenied: + # Process creation time can't be determined hence there's + # no way to tell whether the pid of the cached process + # has been reused. Just return the cached version. + if proc is None and pid in _pmap: + try: + yield _pmap[pid] + except KeyError: + # If we get here it is likely that 2 threads were + # using process_iter(). + pass + else: + raise + + +def wait_procs(procs, timeout=None, callback=None): + """Convenience function which waits for a list of processes to + terminate. + + Return a (gone, alive) tuple indicating which processes + are gone and which ones are still alive. + + The gone ones will have a new *returncode* attribute indicating + process exit status (may be None). + + *callback* is a function which gets called every time a process + terminates (a Process instance is passed as callback argument). 
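The process_iter() function documented above carries no inline example of the *attrs*/*info* mechanics, so here is a minimal sketch; the attribute list is illustrative:

import psutil

# Yield processes with a pre-populated 'info' dict instead of calling each
# method individually; AccessDenied results are replaced by ad_value (None here).
for proc in psutil.process_iter(attrs=['pid', 'name', 'username'], ad_value=None):
    info = proc.info                          # dict produced by as_dict()
    print(info['pid'], info['name'], info['username'])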
+ + Function will return as soon as all processes terminate or when + *timeout* occurs. + Differently from Process.wait() it will not raise TimeoutExpired if + *timeout* occurs. + + Typical use case is: + + - send SIGTERM to a list of processes + - give them some time to terminate + - send SIGKILL to those ones which are still alive + + Example: + + >>> def on_terminate(proc): + ... print("process {} terminated".format(proc)) + ... + >>> for p in procs: + ... p.terminate() + ... + >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate) + >>> for p in alive: + ... p.kill() + """ + def check_gone(proc, timeout): + try: + returncode = proc.wait(timeout=timeout) + except TimeoutExpired: + pass + else: + if returncode is not None or not proc.is_running(): + proc.returncode = returncode + gone.add(proc) + if callback is not None: + callback(proc) + + if timeout is not None and not timeout >= 0: + msg = "timeout must be a positive integer, got %s" % timeout + raise ValueError(msg) + gone = set() + alive = set(procs) + if callback is not None and not callable(callback): + raise TypeError("callback %r is not a callable" % callable) + if timeout is not None: + deadline = _timer() + timeout + + while alive: + if timeout is not None and timeout <= 0: + break + for proc in alive: + # Make sure that every complete iteration (all processes) + # will last max 1 sec. + # We do this because we don't want to wait too long on a + # single process: in case it terminates too late other + # processes may disappear in the meantime and their PID + # reused. + max_timeout = 1.0 / len(alive) + if timeout is not None: + timeout = min((deadline - _timer()), max_timeout) + if timeout <= 0: + break + check_gone(proc, timeout) + else: + check_gone(proc, max_timeout) + alive = alive - gone + + if alive: + # Last attempt over processes survived so far. + # timeout == 0 won't make this function wait any further. + for proc in alive: + check_gone(proc, 0) + alive = alive - gone + + return (list(gone), list(alive)) + + +# ===================================================================== +# --- CPU related functions +# ===================================================================== + + +def cpu_count(logical=True): + """Return the number of logical CPUs in the system (same as + os.cpu_count() in Python 3.4). + + If *logical* is False return the number of physical cores only + (e.g. hyper thread CPUs are excluded). + + Return None if undetermined. + + The return value is cached after first call. + If desired cache can be cleared like this: + + >>> psutil.cpu_count.cache_clear() + """ + if logical: + ret = _psplatform.cpu_count_logical() + else: + ret = _psplatform.cpu_count_physical() + if ret is not None and ret < 1: + ret = None + return ret + + +def cpu_times(percpu=False): + """Return system-wide CPU times as a namedtuple. + Every CPU time represents the seconds the CPU has spent in the + given mode. The namedtuple's fields availability varies depending on the + platform: + + - user + - system + - idle + - nice (UNIX) + - iowait (Linux) + - irq (Linux, FreeBSD) + - softirq (Linux) + - steal (Linux >= 2.6.11) + - guest (Linux >= 2.6.24) + - guest_nice (Linux >= 3.2.0) + + When *percpu* is True return a list of namedtuples for each CPU. + First element of the list refers to first CPU, second element + to second CPU and so on. + The order of the list is consistent across calls. 
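A quick sketch of cpu_count() and cpu_times() as documented above; which fields the returned namedtuple carries depends on the platform:

import psutil

print(psutil.cpu_count())                     # logical CPUs
print(psutil.cpu_count(logical=False))        # physical cores, may be None
total = psutil.cpu_times()                    # system-wide, aggregated
per_cpu = psutil.cpu_times(percpu=True)       # one namedtuple per logical CPU
print(total.user, total.system, total.idle)
print(len(per_cpu))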
+ """ + if not percpu: + return _psplatform.cpu_times() + else: + return _psplatform.per_cpu_times() + + +try: + _last_cpu_times = cpu_times() +except Exception: + # Don't want to crash at import time. + _last_cpu_times = None + traceback.print_exc() + +try: + _last_per_cpu_times = cpu_times(percpu=True) +except Exception: + # Don't want to crash at import time. + _last_per_cpu_times = None + traceback.print_exc() + + +def _cpu_tot_time(times): + """Given a cpu_time() ntuple calculates the total CPU time + (including idle time). + """ + tot = sum(times) + if LINUX: + # On Linux guest times are already accounted in "user" or + # "nice" times, so we subtract them from total. + # Htop does the same. References: + # https://github.com/giampaolo/psutil/pull/940 + # http://unix.stackexchange.com/questions/178045 + # https://github.com/torvalds/linux/blob/ + # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/ + # cputime.c#L158 + tot -= getattr(times, "guest", 0) # Linux 2.6.24+ + tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+ + return tot + + +def _cpu_busy_time(times): + """Given a cpu_time() ntuple calculates the busy CPU time. + We do so by subtracting all idle CPU times. + """ + busy = _cpu_tot_time(times) + busy -= times.idle + # Linux: "iowait" is time during which the CPU does not do anything + # (waits for IO to complete). On Linux IO wait is *not* accounted + # in "idle" time so we subtract it. Htop does the same. + # References: + # https://github.com/torvalds/linux/blob/ + # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244 + busy -= getattr(times, "iowait", 0) + return busy + + +def cpu_percent(interval=None, percpu=False): + """Return a float representing the current system-wide CPU + utilization as a percentage. + + When *interval* is > 0.0 compares system CPU times elapsed before + and after the interval (blocking). + + When *interval* is 0.0 or None compares system CPU times elapsed + since last call or module import, returning immediately (non + blocking). That means the first time this is called it will + return a meaningless 0.0 value which you should ignore. + In this case is recommended for accuracy that this function be + called with at least 0.1 seconds between calls. + + When *percpu* is True returns a list of floats representing the + utilization as a percentage for each CPU. + First element of the list refers to first CPU, second element + to second CPU and so on. + The order of the list is consistent across calls. 
+ + Examples: + + >>> # blocking, system-wide + >>> psutil.cpu_percent(interval=1) + 2.0 + >>> + >>> # blocking, per-cpu + >>> psutil.cpu_percent(interval=1, percpu=True) + [2.0, 1.0] + >>> + >>> # non-blocking (percentage since last call) + >>> psutil.cpu_percent(interval=None) + 2.9 + >>> + """ + global _last_cpu_times + global _last_per_cpu_times + blocking = interval is not None and interval > 0.0 + if interval is not None and interval < 0: + raise ValueError("interval is not positive (got %r)" % interval) + + def calculate(t1, t2): + t1_all = _cpu_tot_time(t1) + t1_busy = _cpu_busy_time(t1) + + t2_all = _cpu_tot_time(t2) + t2_busy = _cpu_busy_time(t2) + + # this usually indicates a float precision issue + if t2_busy <= t1_busy: + return 0.0 + + busy_delta = t2_busy - t1_busy + all_delta = t2_all - t1_all + try: + busy_perc = (busy_delta / all_delta) * 100 + except ZeroDivisionError: + return 0.0 + else: + return round(busy_perc, 1) + + # system-wide usage + if not percpu: + if blocking: + t1 = cpu_times() + time.sleep(interval) + else: + t1 = _last_cpu_times + if t1 is None: + # Something bad happened at import time. We'll + # get a meaningful result on the next call. See: + # https://github.com/giampaolo/psutil/pull/715 + t1 = cpu_times() + _last_cpu_times = cpu_times() + return calculate(t1, _last_cpu_times) + # per-cpu usage + else: + ret = [] + if blocking: + tot1 = cpu_times(percpu=True) + time.sleep(interval) + else: + tot1 = _last_per_cpu_times + if tot1 is None: + # Something bad happened at import time. We'll + # get a meaningful result on the next call. See: + # https://github.com/giampaolo/psutil/pull/715 + tot1 = cpu_times(percpu=True) + _last_per_cpu_times = cpu_times(percpu=True) + for t1, t2 in zip(tot1, _last_per_cpu_times): + ret.append(calculate(t1, t2)) + return ret + + +# Use separate global vars for cpu_times_percent() so that it's +# independent from cpu_percent() and they can both be used within +# the same program. +_last_cpu_times_2 = _last_cpu_times +_last_per_cpu_times_2 = _last_per_cpu_times + + +def cpu_times_percent(interval=None, percpu=False): + """Same as cpu_percent() but provides utilization percentages + for each specific CPU time as is returned by cpu_times(). + For instance, on Linux we'll get: + + >>> cpu_times_percent() + cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0, + irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0) + >>> + + *interval* and *percpu* arguments have the same meaning as in + cpu_percent(). + """ + global _last_cpu_times_2 + global _last_per_cpu_times_2 + blocking = interval is not None and interval > 0.0 + if interval is not None and interval < 0: + raise ValueError("interval is not positive (got %r)" % interval) + + def calculate(t1, t2): + nums = [] + all_delta = _cpu_tot_time(t2) - _cpu_tot_time(t1) + for field in t1._fields: + field_delta = getattr(t2, field) - getattr(t1, field) + try: + field_perc = (100 * field_delta) / all_delta + except ZeroDivisionError: + field_perc = 0.0 + field_perc = round(field_perc, 1) + # CPU times are always supposed to increase over time + # or at least remain the same and that's because time + # cannot go backwards. + # Surprisingly sometimes this might not be the case (at + # least on Windows and Linux), see: + # https://github.com/giampaolo/psutil/issues/392 + # https://github.com/giampaolo/psutil/issues/645 + # I really don't know what to do about that except + # forcing the value to 0 or 100. 
+ if field_perc > 100.0: + field_perc = 100.0 + # `<=` because `-0.0 == 0.0` evaluates to True + elif field_perc <= 0.0: + field_perc = 0.0 + nums.append(field_perc) + return _psplatform.scputimes(*nums) + + # system-wide usage + if not percpu: + if blocking: + t1 = cpu_times() + time.sleep(interval) + else: + t1 = _last_cpu_times_2 + if t1 is None: + # Something bad happened at import time. We'll + # get a meaningful result on the next call. See: + # https://github.com/giampaolo/psutil/pull/715 + t1 = cpu_times() + _last_cpu_times_2 = cpu_times() + return calculate(t1, _last_cpu_times_2) + # per-cpu usage + else: + ret = [] + if blocking: + tot1 = cpu_times(percpu=True) + time.sleep(interval) + else: + tot1 = _last_per_cpu_times_2 + if tot1 is None: + # Something bad happened at import time. We'll + # get a meaningful result on the next call. See: + # https://github.com/giampaolo/psutil/pull/715 + tot1 = cpu_times(percpu=True) + _last_per_cpu_times_2 = cpu_times(percpu=True) + for t1, t2 in zip(tot1, _last_per_cpu_times_2): + ret.append(calculate(t1, t2)) + return ret + + +def cpu_stats(): + """Return CPU statistics.""" + return _psplatform.cpu_stats() + + +if hasattr(_psplatform, "cpu_freq"): + + def cpu_freq(percpu=False): + """Return CPU frequency as a nameduple including current, + min and max frequency expressed in Mhz. + + If *percpu* is True and the system supports per-cpu frequency + retrieval (Linux only) a list of frequencies is returned for + each CPU. If not a list with one element is returned. + """ + ret = _psplatform.cpu_freq() + if percpu: + return ret + else: + num_cpus = float(len(ret)) + if num_cpus == 0: + return None + elif num_cpus == 1: + return ret[0] + else: + currs, mins, maxs = 0.0, 0.0, 0.0 + for cpu in ret: + currs += cpu.current + mins += cpu.min + maxs += cpu.max + current = currs / num_cpus + min_ = mins / num_cpus + max_ = maxs / num_cpus + return _common.scpufreq(current, min_, max_) + + __all__.append("cpu_freq") + + +# ===================================================================== +# --- system memory related functions +# ===================================================================== + + +def virtual_memory(): + """Return statistics about system memory usage as a namedtuple + including the following fields, expressed in bytes: + + - total: + total physical memory available. + + - available: + the memory that can be given instantly to processes without the + system going into swap. + This is calculated by summing different memory values depending + on the platform and it is supposed to be used to monitor actual + memory usage in a cross platform fashion. + + - percent: + the percentage usage calculated as (total - available) / total * 100 + + - used: + memory used, calculated differently depending on the platform and + designed for informational purposes only: + OSX: active + inactive + wired + BSD: active + wired + cached + LINUX: total - free + + - free: + memory not being used at all (zeroed) that is readily available; + note that this doesn't reflect the actual memory available + (use 'available' instead) + + Platform-specific fields: + + - active (UNIX): + memory currently in use or very recently used, and so it is in RAM. + + - inactive (UNIX): + memory that is marked as not used. + + - buffers (BSD, Linux): + cache for things like file system metadata. + + - cached (BSD, OSX): + cache for various things. + + - wired (OSX, BSD): + memory that is marked to always stay in RAM. It is never moved to disk. 
+ + - shared (BSD): + memory that may be simultaneously accessed by multiple processes. + + The sum of 'used' and 'available' does not necessarily equal total. + On Windows 'available' and 'free' are the same. + """ + global _TOTAL_PHYMEM + ret = _psplatform.virtual_memory() + # cached for later use in Process.memory_percent() + _TOTAL_PHYMEM = ret.total + return ret + + +def swap_memory(): + """Return system swap memory statistics as a namedtuple including + the following fields: + + - total: total swap memory in bytes + - used: used swap memory in bytes + - free: free swap memory in bytes + - percent: the percentage usage + - sin: no. of bytes the system has swapped in from disk (cumulative) + - sout: no. of bytes the system has swapped out from disk (cumulative) + + 'sin' and 'sout' on Windows are meaningless and always set to 0. + """ + return _psplatform.swap_memory() + + +# ===================================================================== +# --- disks/paritions related functions +# ===================================================================== + + +def disk_usage(path): + """Return disk usage statistics about the given *path* as a + namedtuple including total, used and free space expressed in bytes + plus the percentage usage. + """ + return _psplatform.disk_usage(path) + + +def disk_partitions(all=False): + """Return mounted partitions as a list of + (device, mountpoint, fstype, opts) namedtuple. + 'opts' field is a raw string separated by commas indicating mount + options which may vary depending on the platform. + + If *all* parameter is False return physical devices only and ignore + all others. + """ + return _psplatform.disk_partitions(all) + + +def disk_io_counters(perdisk=False, nowrap=True): + """Return system disk I/O statistics as a namedtuple including + the following fields: + + - read_count: number of reads + - write_count: number of writes + - read_bytes: number of bytes read + - write_bytes: number of bytes written + - read_time: time spent reading from disk (in ms) + - write_time: time spent writing to disk (in ms) + + Platform specific: + + - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms) + - read_merged_count (Linux): number of merged reads + - write_merged_count (Linux): number of merged writes + + If *perdisk* is True return the same information for every + physical disk installed on the system as a dictionary + with partition names as the keys and the namedtuple + described above as the values. + + If *nowrap* is True it detects and adjust the numbers which overflow + and wrap (restart from 0) and add "old value" to "new value" so that + the returned numbers will always be increasing or remain the same, + but never decrease. + "disk_io_counters.cache_clear()" can be used to invalidate the + cache. + + On recent Windows versions 'diskperf -y' command may need to be + executed first otherwise this function won't find any disk. 
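+
+    A hypothetical example of the returned namedtuple (the numbers below
+    are illustrative only):
+
+    >>> psutil.disk_io_counters()
+    sdiskio(read_count=8141, write_count=2431, read_bytes=290203,
+            write_bytes=537676, read_time=5868, write_time=94922)
+    >>>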
+ """ + rawdict = _psplatform.disk_io_counters() + if not rawdict: + return {} if perdisk else None + if nowrap: + rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters') + nt = getattr(_psplatform, "sdiskio", _common.sdiskio) + if perdisk: + for disk, fields in rawdict.items(): + rawdict[disk] = nt(*fields) + return rawdict + else: + return nt(*[sum(x) for x in zip(*rawdict.values())]) + + +disk_io_counters.cache_clear = functools.partial( + _wrap_numbers.cache_clear, 'psutil.disk_io_counters') +disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache" + + +# ===================================================================== +# --- network related functions +# ===================================================================== + + +def net_io_counters(pernic=False, nowrap=True): + """Return network I/O statistics as a namedtuple including + the following fields: + + - bytes_sent: number of bytes sent + - bytes_recv: number of bytes received + - packets_sent: number of packets sent + - packets_recv: number of packets received + - errin: total number of errors while receiving + - errout: total number of errors while sending + - dropin: total number of incoming packets which were dropped + - dropout: total number of outgoing packets which were dropped + (always 0 on OSX and BSD) + + If *pernic* is True return the same information for every + network interface installed on the system as a dictionary + with network interface names as the keys and the namedtuple + described above as the values. + + If *nowrap* is True it detects and adjust the numbers which overflow + and wrap (restart from 0) and add "old value" to "new value" so that + the returned numbers will always be increasing or remain the same, + but never decrease. + "disk_io_counters.cache_clear()" can be used to invalidate the + cache. + """ + rawdict = _psplatform.net_io_counters() + if not rawdict: + return {} if pernic else None + if nowrap: + rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters') + if pernic: + for nic, fields in rawdict.items(): + rawdict[nic] = _common.snetio(*fields) + return rawdict + else: + return _common.snetio(*[sum(x) for x in zip(*rawdict.values())]) + + +net_io_counters.cache_clear = functools.partial( + _wrap_numbers.cache_clear, 'psutil.net_io_counters') +net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache" + + +def net_connections(kind='inet'): + """Return system-wide socket connections as a list of + (fd, family, type, laddr, raddr, status, pid) namedtuples. + In case of limited privileges 'fd' and 'pid' may be set to -1 + and None respectively. + The *kind* parameter filters for connections that fit the + following criteria: + + +------------+----------------------------------------------------+ + | Kind Value | Connections using | + +------------+----------------------------------------------------+ + | inet | IPv4 and IPv6 | + | inet4 | IPv4 | + | inet6 | IPv6 | + | tcp | TCP | + | tcp4 | TCP over IPv4 | + | tcp6 | TCP over IPv6 | + | udp | UDP | + | udp4 | UDP over IPv4 | + | udp6 | UDP over IPv6 | + | unix | UNIX socket (both UDP and TCP protocols) | + | all | the sum of all the possible families and protocols | + +------------+----------------------------------------------------+ + + On OSX this function requires root privileges. 
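+
+    A hypothetical example (output abridged; addresses and PIDs are
+    illustrative only):
+
+    >>> psutil.net_connections(kind='tcp')
+    [sconn(fd=115, family=<AddressFamily.AF_INET: 2>,
+           type=<SocketKind.SOCK_STREAM: 1>,
+           laddr=addr(ip='10.0.0.1', port=48776),
+           raddr=addr(ip='93.186.135.91', port=80),
+           status='ESTABLISHED', pid=1254),
+     ...]
+    >>>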
+ """ + return _psplatform.net_connections(kind) + + +def net_if_addrs(): + """Return the addresses associated to each NIC (network interface + card) installed on the system as a dictionary whose keys are the + NIC names and value is a list of namedtuples for each address + assigned to the NIC. Each namedtuple includes 5 fields: + + - family: can be either socket.AF_INET, socket.AF_INET6 or + psutil.AF_LINK, which refers to a MAC address. + - address: is the primary address and it is always set. + - netmask: and 'broadcast' and 'ptp' may be None. + - ptp: stands for "point to point" and references the + destination address on a point to point interface + (typically a VPN). + - broadcast: and *ptp* are mutually exclusive. + + Note: you can have more than one address of the same family + associated with each interface. + """ + has_enums = sys.version_info >= (3, 4) + if has_enums: + import socket + rawlist = _psplatform.net_if_addrs() + rawlist.sort(key=lambda x: x[1]) # sort by family + ret = collections.defaultdict(list) + for name, fam, addr, mask, broadcast, ptp in rawlist: + if has_enums: + try: + fam = socket.AddressFamily(fam) + except ValueError: + if WINDOWS and fam == -1: + fam = _psplatform.AF_LINK + elif (hasattr(_psplatform, "AF_LINK") and + _psplatform.AF_LINK == fam): + # Linux defines AF_LINK as an alias for AF_PACKET. + # We re-set the family here so that repr(family) + # will show AF_LINK rather than AF_PACKET + fam = _psplatform.AF_LINK + if fam == _psplatform.AF_LINK: + # The underlying C function may return an incomplete MAC + # address in which case we fill it with null bytes, see: + # https://github.com/giampaolo/psutil/issues/786 + separator = ":" if POSIX else "-" + while addr.count(separator) < 5: + addr += "%s00" % separator + ret[name].append(_common.snic(fam, addr, mask, broadcast, ptp)) + return dict(ret) + + +def net_if_stats(): + """Return information about each NIC (network interface card) + installed on the system as a dictionary whose keys are the + NIC names and value is a namedtuple with the following fields: + + - isup: whether the interface is up (bool) + - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or + NIC_DUPLEX_UNKNOWN + - speed: the NIC speed expressed in mega bits (MB); if it can't + be determined (e.g. 'localhost') it will be set to 0. + - mtu: the maximum transmission unit expressed in bytes. + """ + return _psplatform.net_if_stats() + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +# Linux +if hasattr(_psplatform, "sensors_temperatures"): + + def sensors_temperatures(fahrenheit=False): + """Return hardware temperatures. Each entry is a namedtuple + representing a certain hardware sensor (it may be a CPU, an + hard disk or something else, depending on the OS and its + configuration). + All temperatures are expressed in celsius unless *fahrenheit* + is set to True. 
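+
+        A hypothetical example (Linux; sensor names, labels and values
+        are illustrative only):
+
+        >>> psutil.sensors_temperatures()
+        {'coretemp': [shwtemp(label='Core 0', current=45.0,
+                              high=100.0, critical=100.0)]}
+        >>>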
+ """ + def convert(n): + if n is not None: + return (float(n) * 9 / 5) + 32 if fahrenheit else n + + ret = collections.defaultdict(list) + rawdict = _psplatform.sensors_temperatures() + + for name, values in rawdict.items(): + while values: + label, current, high, critical = values.pop(0) + current = convert(current) + high = convert(high) + critical = convert(critical) + + if high and not critical: + critical = high + elif critical and not high: + high = critical + + ret[name].append( + _common.shwtemp(label, current, high, critical)) + + return dict(ret) + + __all__.append("sensors_temperatures") + + +# Linux +if hasattr(_psplatform, "sensors_fans"): + + def sensors_fans(): + """Return fans speed. Each entry is a namedtuple + representing a certain hardware sensor. + All speed are expressed in RPM (rounds per minute). + """ + return _psplatform.sensors_fans() + + __all__.append("sensors_fans") + + +# Linux, Windows, FreeBSD, OSX +if hasattr(_psplatform, "sensors_battery"): + + def sensors_battery(): + """Return battery information. If no battery is installed + returns None. + + - percent: battery power left as a percentage. + - secsleft: a rough approximation of how many seconds are left + before the battery runs out of power. May be + POWER_TIME_UNLIMITED or POWER_TIME_UNLIMITED. + - power_plugged: True if the AC power cable is connected. + """ + return _psplatform.sensors_battery() + + __all__.append("sensors_battery") + + +# ===================================================================== +# --- other system related functions +# ===================================================================== + + +def boot_time(): + """Return the system boot time expressed in seconds since the epoch.""" + # Note: we are not caching this because it is subject to + # system clock updates. + return _psplatform.boot_time() + + +def users(): + """Return users currently connected on the system as a list of + namedtuples including the following fields. + + - user: the name of the user + - terminal: the tty or pseudo-tty associated with the user, if any. + - host: the host name associated with the entry, if any. + - started: the creation time as a floating point number expressed in + seconds since the epoch. + """ + return _psplatform.users() + + +def set_procfs_path(path): + """Set an alternative path for /proc filesystem on Linux, Solaris + and AIX. This superseds PROCFS_PATH variable which is deprecated. + """ + _psplatform.PROCFS_PATH = path + + +# ===================================================================== +# --- Windows services +# ===================================================================== + + +if WINDOWS: + + def win_service_iter(): + """Return a generator yielding a WindowsService instance for all + Windows services installed. + """ + return _psplatform.win_service_iter() + + def win_service_get(name): + """Get a Windows service by *name*. + Raise NoSuchProcess if no service with such name exists. + """ + return _psplatform.win_service_get(name) + + +# ===================================================================== + + +def test(): # pragma: no cover + """List info of all currently running processes emulating ps aux + output. 
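+
+    A rough sketch of what gets printed (the row below is illustrative
+    only):
+
+        USER         PID %MEM     VSZ     RSS TTY          START    TIME COMMAND
+        root           1  0.4   33616    4264 ?            Jan01    0:04 init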
+ """ + today_day = datetime.date.today() + templ = "%-10s %5s %4s %7s %7s %-13s %5s %7s %s" + attrs = ['pid', 'memory_percent', 'name', 'cpu_times', 'create_time', + 'memory_info'] + if POSIX: + attrs.append('uids') + attrs.append('terminal') + print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "TTY", "START", "TIME", + "COMMAND")) + for p in process_iter(attrs=attrs, ad_value=''): + if p.info['create_time']: + ctime = datetime.datetime.fromtimestamp(p.info['create_time']) + if ctime.date() == today_day: + ctime = ctime.strftime("%H:%M") + else: + ctime = ctime.strftime("%b%d") + else: + ctime = '' + cputime = time.strftime("%M:%S", + time.localtime(sum(p.info['cpu_times']))) + try: + user = p.username() + except Error: + user = '' + if WINDOWS and '\\' in user: + user = user.split('\\')[1] + vms = p.info['memory_info'] and \ + int(p.info['memory_info'].vms / 1024) or '?' + rss = p.info['memory_info'] and \ + int(p.info['memory_info'].rss / 1024) or '?' + memp = p.info['memory_percent'] and \ + round(p.info['memory_percent'], 1) or '?' + print(templ % ( + user[:10], + p.info['pid'], + memp, + vms, + rss, + p.info.get('terminal', '') or '?', + ctime, + cputime, + p.info['name'].strip() or '?')) + + +del memoize, memoize_when_activated, division, deprecated_method +if sys.version_info[0] < 3: + del num, x + +if __name__ == "__main__": + test() diff --git a/server/www/packages/packages-windows/x86/psutil/_common.py b/server/www/packages/packages-windows/x86/psutil/_common.py new file mode 100644 index 0000000..870971e --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_common.py @@ -0,0 +1,575 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Common objects shared by __init__.py and _ps*.py modules.""" + +# Note: this module is imported by setup.py so it should not import +# psutil or third-party modules. 
+ +from __future__ import division + +import contextlib +import errno +import functools +import os +import socket +import stat +import sys +import threading +import warnings +from collections import defaultdict +from collections import namedtuple +from socket import AF_INET +from socket import SOCK_DGRAM +from socket import SOCK_STREAM +try: + from socket import AF_INET6 +except ImportError: + AF_INET6 = None +try: + from socket import AF_UNIX +except ImportError: + AF_UNIX = None + +if sys.version_info >= (3, 4): + import enum +else: + enum = None + +# can't take it from _common.py as this script is imported by setup.py +PY3 = sys.version_info[0] == 3 + +__all__ = [ + # constants + 'FREEBSD', 'BSD', 'LINUX', 'NETBSD', 'OPENBSD', 'OSX', 'POSIX', 'SUNOS', + 'WINDOWS', + 'ENCODING', 'ENCODING_ERRS', 'AF_INET6', + # connection constants + 'CONN_CLOSE', 'CONN_CLOSE_WAIT', 'CONN_CLOSING', 'CONN_ESTABLISHED', + 'CONN_FIN_WAIT1', 'CONN_FIN_WAIT2', 'CONN_LAST_ACK', 'CONN_LISTEN', + 'CONN_NONE', 'CONN_SYN_RECV', 'CONN_SYN_SENT', 'CONN_TIME_WAIT', + # net constants + 'NIC_DUPLEX_FULL', 'NIC_DUPLEX_HALF', 'NIC_DUPLEX_UNKNOWN', + # process status constants + 'STATUS_DEAD', 'STATUS_DISK_SLEEP', 'STATUS_IDLE', 'STATUS_LOCKED', + 'STATUS_RUNNING', 'STATUS_SLEEPING', 'STATUS_STOPPED', 'STATUS_SUSPENDED', + 'STATUS_TRACING_STOP', 'STATUS_WAITING', 'STATUS_WAKE_KILL', + 'STATUS_WAKING', 'STATUS_ZOMBIE', + # named tuples + 'pconn', 'pcputimes', 'pctxsw', 'pgids', 'pio', 'pionice', 'popenfile', + 'pthread', 'puids', 'sconn', 'scpustats', 'sdiskio', 'sdiskpart', + 'sdiskusage', 'snetio', 'snic', 'snicstats', 'sswap', 'suser', + # utility functions + 'conn_tmap', 'deprecated_method', 'isfile_strict', 'memoize', + 'parse_environ_block', 'path_exists_strict', 'usage_percent', + 'supports_ipv6', 'sockfam_to_enum', 'socktype_to_enum', "wrap_numbers", +] + + +# =================================================================== +# --- OS constants +# =================================================================== + + +POSIX = os.name == "posix" +WINDOWS = os.name == "nt" +LINUX = sys.platform.startswith("linux") +OSX = sys.platform.startswith("darwin") +FREEBSD = sys.platform.startswith("freebsd") +OPENBSD = sys.platform.startswith("openbsd") +NETBSD = sys.platform.startswith("netbsd") +BSD = FREEBSD or OPENBSD or NETBSD +SUNOS = sys.platform.startswith("sunos") or sys.platform.startswith("solaris") +AIX = sys.platform.startswith("aix") + + +# =================================================================== +# --- API constants +# =================================================================== + + +# Process.status() +STATUS_RUNNING = "running" +STATUS_SLEEPING = "sleeping" +STATUS_DISK_SLEEP = "disk-sleep" +STATUS_STOPPED = "stopped" +STATUS_TRACING_STOP = "tracing-stop" +STATUS_ZOMBIE = "zombie" +STATUS_DEAD = "dead" +STATUS_WAKE_KILL = "wake-kill" +STATUS_WAKING = "waking" +STATUS_IDLE = "idle" # FreeBSD, OSX +STATUS_LOCKED = "locked" # FreeBSD +STATUS_WAITING = "waiting" # FreeBSD +STATUS_SUSPENDED = "suspended" # NetBSD + +# Process.connections() and psutil.net_connections() +CONN_ESTABLISHED = "ESTABLISHED" +CONN_SYN_SENT = "SYN_SENT" +CONN_SYN_RECV = "SYN_RECV" +CONN_FIN_WAIT1 = "FIN_WAIT1" +CONN_FIN_WAIT2 = "FIN_WAIT2" +CONN_TIME_WAIT = "TIME_WAIT" +CONN_CLOSE = "CLOSE" +CONN_CLOSE_WAIT = "CLOSE_WAIT" +CONN_LAST_ACK = "LAST_ACK" +CONN_LISTEN = "LISTEN" +CONN_CLOSING = "CLOSING" +CONN_NONE = "NONE" + +# net_if_stats() +if enum is None: + NIC_DUPLEX_FULL = 2 + NIC_DUPLEX_HALF = 1 + 
NIC_DUPLEX_UNKNOWN = 0 +else: + class NicDuplex(enum.IntEnum): + NIC_DUPLEX_FULL = 2 + NIC_DUPLEX_HALF = 1 + NIC_DUPLEX_UNKNOWN = 0 + + globals().update(NicDuplex.__members__) + +# sensors_battery() +if enum is None: + POWER_TIME_UNKNOWN = -1 + POWER_TIME_UNLIMITED = -2 +else: + class BatteryTime(enum.IntEnum): + POWER_TIME_UNKNOWN = -1 + POWER_TIME_UNLIMITED = -2 + + globals().update(BatteryTime.__members__) + +# --- others + +ENCODING = sys.getfilesystemencoding() +if not PY3: + ENCODING_ERRS = "replace" +else: + try: + ENCODING_ERRS = sys.getfilesystemencodeerrors() # py 3.6 + except AttributeError: + ENCODING_ERRS = "surrogateescape" if POSIX else "replace" + + +# =================================================================== +# --- namedtuples +# =================================================================== + +# --- for system functions + +# psutil.swap_memory() +sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin', + 'sout']) +# psutil.disk_usage() +sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent']) +# psutil.disk_io_counters() +sdiskio = namedtuple('sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_time', 'write_time']) +# psutil.disk_partitions() +sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts']) +# psutil.net_io_counters() +snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv', + 'packets_sent', 'packets_recv', + 'errin', 'errout', + 'dropin', 'dropout']) +# psutil.users() +suser = namedtuple('suser', ['name', 'terminal', 'host', 'started', 'pid']) +# psutil.net_connections() +sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr', + 'status', 'pid']) +# psutil.net_if_addrs() +snic = namedtuple('snic', ['family', 'address', 'netmask', 'broadcast', 'ptp']) +# psutil.net_if_stats() +snicstats = namedtuple('snicstats', ['isup', 'duplex', 'speed', 'mtu']) +# psutil.cpu_stats() +scpustats = namedtuple( + 'scpustats', ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls']) +# psutil.cpu_freq() +scpufreq = namedtuple('scpufreq', ['current', 'min', 'max']) +# psutil.sensors_temperatures() +shwtemp = namedtuple( + 'shwtemp', ['label', 'current', 'high', 'critical']) +# psutil.sensors_battery() +sbattery = namedtuple('sbattery', ['percent', 'secsleft', 'power_plugged']) +# psutil.sensors_battery() +sfan = namedtuple('sfan', ['label', 'current']) + +# --- for Process methods + +# psutil.Process.cpu_times() +pcputimes = namedtuple('pcputimes', + ['user', 'system', 'children_user', 'children_system']) +# psutil.Process.open_files() +popenfile = namedtuple('popenfile', ['path', 'fd']) +# psutil.Process.threads() +pthread = namedtuple('pthread', ['id', 'user_time', 'system_time']) +# psutil.Process.uids() +puids = namedtuple('puids', ['real', 'effective', 'saved']) +# psutil.Process.gids() +pgids = namedtuple('pgids', ['real', 'effective', 'saved']) +# psutil.Process.io_counters() +pio = namedtuple('pio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes']) +# psutil.Process.ionice() +pionice = namedtuple('pionice', ['ioclass', 'value']) +# psutil.Process.ctx_switches() +pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary']) +# psutil.Process.connections() +pconn = namedtuple('pconn', ['fd', 'family', 'type', 'laddr', 'raddr', + 'status']) + +# psutil.connections() and psutil.Process.connections() +addr = namedtuple('addr', ['ip', 'port']) + + +# =================================================================== +# --- Process.connections() 
'kind' parameter mapping +# =================================================================== + + +conn_tmap = { + "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]), + "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]), + "tcp4": ([AF_INET], [SOCK_STREAM]), + "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]), + "udp4": ([AF_INET], [SOCK_DGRAM]), + "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]), + "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]), + "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]), +} + +if AF_INET6 is not None: + conn_tmap.update({ + "tcp6": ([AF_INET6], [SOCK_STREAM]), + "udp6": ([AF_INET6], [SOCK_DGRAM]), + }) + +if AF_UNIX is not None: + conn_tmap.update({ + "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]), + }) + +del AF_INET, AF_UNIX, SOCK_STREAM, SOCK_DGRAM + + +# =================================================================== +# --- utils +# =================================================================== + + +def usage_percent(used, total, _round=None): + """Calculate percentage usage of 'used' against 'total'.""" + try: + ret = (used / total) * 100 + except ZeroDivisionError: + ret = 0.0 if isinstance(used, float) or isinstance(total, float) else 0 + if _round is not None: + return round(ret, _round) + else: + return ret + + +def memoize(fun): + """A simple memoize decorator for functions supporting (hashable) + positional arguments. + It also provides a cache_clear() function for clearing the cache: + + >>> @memoize + ... def foo() + ... return 1 + ... + >>> foo() + 1 + >>> foo.cache_clear() + >>> + """ + @functools.wraps(fun) + def wrapper(*args, **kwargs): + key = (args, frozenset(sorted(kwargs.items()))) + try: + return cache[key] + except KeyError: + ret = cache[key] = fun(*args, **kwargs) + return ret + + def cache_clear(): + """Clear cache.""" + cache.clear() + + cache = {} + wrapper.cache_clear = cache_clear + return wrapper + + +def memoize_when_activated(fun): + """A memoize decorator which is disabled by default. It can be + activated and deactivated on request. + For efficiency reasons it can be used only against class methods + accepting no arguments. + + >>> class Foo: + ... @memoize + ... def foo() + ... print(1) + ... 
+ >>> f = Foo() + >>> # deactivated (default) + >>> foo() + 1 + >>> foo() + 1 + >>> + >>> # activated + >>> foo.cache_activate() + >>> foo() + 1 + >>> foo() + >>> foo() + >>> + """ + @functools.wraps(fun) + def wrapper(self): + if not wrapper.cache_activated: + return fun(self) + else: + try: + ret = cache[fun] + except KeyError: + ret = cache[fun] = fun(self) + return ret + + def cache_activate(): + """Activate cache.""" + wrapper.cache_activated = True + + def cache_deactivate(): + """Deactivate and clear cache.""" + wrapper.cache_activated = False + cache.clear() + + cache = {} + wrapper.cache_activated = False + wrapper.cache_activate = cache_activate + wrapper.cache_deactivate = cache_deactivate + return wrapper + + +def isfile_strict(path): + """Same as os.path.isfile() but does not swallow EACCES / EPERM + exceptions, see: + http://mail.python.org/pipermail/python-dev/2012-June/120787.html + """ + try: + st = os.stat(path) + except OSError as err: + if err.errno in (errno.EPERM, errno.EACCES): + raise + return False + else: + return stat.S_ISREG(st.st_mode) + + +def path_exists_strict(path): + """Same as os.path.exists() but does not swallow EACCES / EPERM + exceptions, see: + http://mail.python.org/pipermail/python-dev/2012-June/120787.html + """ + try: + os.stat(path) + except OSError as err: + if err.errno in (errno.EPERM, errno.EACCES): + raise + return False + else: + return True + + +@memoize +def supports_ipv6(): + """Return True if IPv6 is supported on this platform.""" + if not socket.has_ipv6 or AF_INET6 is None: + return False + try: + sock = socket.socket(AF_INET6, socket.SOCK_STREAM) + with contextlib.closing(sock): + sock.bind(("::1", 0)) + return True + except socket.error: + return False + + +def parse_environ_block(data): + """Parse a C environ block of environment variables into a dictionary.""" + # The block is usually raw data from the target process. It might contain + # trailing garbage and lines that do not look like assignments. + ret = {} + pos = 0 + + # localize global variable to speed up access. + WINDOWS_ = WINDOWS + while True: + next_pos = data.find("\0", pos) + # nul byte at the beginning or double nul byte means finish + if next_pos <= pos: + break + # there might not be an equals sign + equal_pos = data.find("=", pos, next_pos) + if equal_pos > pos: + key = data[pos:equal_pos] + value = data[equal_pos + 1:next_pos] + # Windows expects environment variables to be uppercase only + if WINDOWS_: + key = key.upper() + ret[key] = value + pos = next_pos + 1 + + return ret + + +def sockfam_to_enum(num): + """Convert a numeric socket family value to an IntEnum member. + If it's not a known member, return the numeric value itself. + """ + if enum is None: + return num + else: # pragma: no cover + try: + return socket.AddressFamily(num) + except (ValueError, AttributeError): + return num + + +def socktype_to_enum(num): + """Convert a numeric socket type value to an IntEnum member. + If it's not a known member, return the numeric value itself. + """ + if enum is None: + return num + else: # pragma: no cover + try: + return socket.AddressType(num) + except (ValueError, AttributeError): + return num + + +def deprecated_method(replacement): + """A decorator which can be used to mark a method as deprecated + 'replcement' is the method name which will be called instead. 
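+
+    A minimal illustrative sketch (the class and method names below are
+    hypothetical):
+
+    >>> class Foo(object):
+    ...     @deprecated_method(replacement="new")
+    ...     def old(self):
+    ...         pass
+    ...     def new(self):
+    ...         return 1
+    ...
+    >>> Foo().old()  # emits a FutureWarning, then calls new()
+    1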
+ """ + def outer(fun): + msg = "%s() is deprecated and will be removed; use %s() instead" % ( + fun.__name__, replacement) + if fun.__doc__ is None: + fun.__doc__ = msg + + @functools.wraps(fun) + def inner(self, *args, **kwargs): + warnings.warn(msg, category=FutureWarning, stacklevel=2) + return getattr(self, replacement)(*args, **kwargs) + return inner + return outer + + +class _WrapNumbers: + """Watches numbers so that they don't overflow and wrap + (reset to zero). + """ + + def __init__(self): + self.lock = threading.Lock() + self.cache = {} + self.reminders = {} + self.reminder_keys = {} + + def _add_dict(self, input_dict, name): + assert name not in self.cache + assert name not in self.reminders + assert name not in self.reminder_keys + self.cache[name] = input_dict + self.reminders[name] = defaultdict(int) + self.reminder_keys[name] = defaultdict(set) + + def _remove_dead_reminders(self, input_dict, name): + """In case the number of keys changed between calls (e.g. a + disk disappears) this removes the entry from self.reminders. + """ + old_dict = self.cache[name] + gone_keys = set(old_dict.keys()) - set(input_dict.keys()) + for gone_key in gone_keys: + for remkey in self.reminder_keys[name][gone_key]: + del self.reminders[name][remkey] + del self.reminder_keys[name][gone_key] + + def run(self, input_dict, name): + """Cache dict and sum numbers which overflow and wrap. + Return an updated copy of `input_dict` + """ + if name not in self.cache: + # This was the first call. + self._add_dict(input_dict, name) + return input_dict + + self._remove_dead_reminders(input_dict, name) + + old_dict = self.cache[name] + new_dict = {} + for key in input_dict.keys(): + input_tuple = input_dict[key] + try: + old_tuple = old_dict[key] + except KeyError: + # The input dict has a new key (e.g. a new disk or NIC) + # which didn't exist in the previous call. + new_dict[key] = input_tuple + continue + + bits = [] + for i in range(len(input_tuple)): + input_value = input_tuple[i] + old_value = old_tuple[i] + remkey = (key, i) + if input_value < old_value: + # it wrapped! + self.reminders[name][remkey] += old_value + self.reminder_keys[name][key].add(remkey) + bits.append(input_value + self.reminders[name][remkey]) + + new_dict[key] = tuple(bits) + + self.cache[name] = input_dict + return new_dict + + def cache_clear(self, name=None): + """Clear the internal cache, optionally only for function 'name'.""" + with self.lock: + if name is None: + self.cache.clear() + self.reminders.clear() + self.reminder_keys.clear() + else: + self.cache.pop(name, None) + self.reminders.pop(name, None) + self.reminder_keys.pop(name, None) + + def cache_info(self): + """Return internal cache dicts as a tuple of 3 elements.""" + with self.lock: + return (self.cache, self.reminders, self.reminder_keys) + + +def wrap_numbers(input_dict, name): + """Given an `input_dict` and a function `name`, adjust the numbers + which "wrap" (restart from zero) across different calls by adding + "old value" to "new value" and return an updated dict. + """ + with _wn.lock: + return _wn.run(input_dict, name) + + +_wn = _WrapNumbers() +wrap_numbers.cache_clear = _wn.cache_clear +wrap_numbers.cache_info = _wn.cache_info diff --git a/server/www/packages/packages-windows/x86/psutil/_compat.py b/server/www/packages/packages-windows/x86/psutil/_compat.py new file mode 100644 index 0000000..de91638 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_compat.py @@ -0,0 +1,249 @@ +# Copyright (c) 2009, Giampaolo Rodola'. 
All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Module which provides compatibility with older Python versions.""" + +import collections +import functools +import os +import sys + +__all__ = ["PY3", "long", "xrange", "unicode", "basestring", "u", "b", + "callable", "lru_cache", "which"] + +PY3 = sys.version_info[0] == 3 + +if PY3: + long = int + xrange = range + unicode = str + basestring = str + + def u(s): + return s + + def b(s): + return s.encode("latin-1") +else: + long = long + xrange = xrange + unicode = unicode + basestring = basestring + + def u(s): + return unicode(s, "unicode_escape") + + def b(s): + return s + + +# removed in 3.0, reintroduced in 3.2 +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +# --- stdlib additions + + +# py 3.2 functools.lru_cache +# Taken from: http://code.activestate.com/recipes/578078 +# Credit: Raymond Hettinger +try: + from functools import lru_cache +except ImportError: + try: + from threading import RLock + except ImportError: + from dummy_threading import RLock + + _CacheInfo = collections.namedtuple( + "CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + + class _HashedSeq(list): + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + + def _make_key(args, kwds, typed, + kwd_mark=(object(), ), + fasttypes=set((int, str, frozenset, type(None))), + sorted=sorted, tuple=tuple, type=type, len=len): + key = args + if kwds: + sorted_items = sorted(kwds.items()) + key += kwd_mark + for item in sorted_items: + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for k, v in sorted_items) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + + def lru_cache(maxsize=100, typed=False): + """Least-recently-used cache decorator, see: + http://docs.python.org/3/library/functools.html#functools.lru_cache + """ + def decorating_function(user_function): + cache = dict() + stats = [0, 0] + HITS, MISSES = 0, 1 + make_key = _make_key + cache_get = cache.get + _len = len + lock = RLock() + root = [] + root[:] = [root, root, None, None] + nonlocal_root = [root] + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 + if maxsize == 0: + def wrapper(*args, **kwds): + result = user_function(*args, **kwds) + stats[MISSES] += 1 + return result + elif maxsize is None: + def wrapper(*args, **kwds): + key = make_key(args, kwds, typed) + result = cache_get(key, root) + if result is not root: + stats[HITS] += 1 + return result + result = user_function(*args, **kwds) + cache[key] = result + stats[MISSES] += 1 + return result + else: + def wrapper(*args, **kwds): + if kwds or typed: + key = make_key(args, kwds, typed) + else: + key = args + lock.acquire() + try: + link = cache_get(key) + if link is not None: + root, = nonlocal_root + link_prev, link_next, key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + stats[HITS] += 1 + return result + finally: + lock.release() + result = user_function(*args, **kwds) + lock.acquire() + try: + root, = nonlocal_root + if key in cache: + pass + elif _len(cache) >= maxsize: + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + root = nonlocal_root[0] = 
oldroot[NEXT] + oldkey = root[KEY] + root[KEY] = root[RESULT] = None + del cache[oldkey] + cache[key] = oldroot + else: + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + stats[MISSES] += 1 + finally: + lock.release() + return result + + def cache_info(): + """Report cache statistics""" + lock.acquire() + try: + return _CacheInfo(stats[HITS], stats[MISSES], maxsize, + len(cache)) + finally: + lock.release() + + def cache_clear(): + """Clear the cache and cache statistics""" + lock.acquire() + try: + cache.clear() + root = nonlocal_root[0] + root[:] = [root, root, None, None] + stats[:] = [0, 0] + finally: + lock.release() + + wrapper.__wrapped__ = user_function + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return functools.update_wrapper(wrapper, user_function) + + return decorating_function + + +# python 3.3 +try: + from shutil import which +except ImportError: + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + """ + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) and + not os.path.isdir(fn)) + + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + if os.curdir not in path: + path.insert(0, os.curdir) + + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if normdir not in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None diff --git a/server/www/packages/packages-windows/x86/psutil/_exceptions.py b/server/www/packages/packages-windows/x86/psutil/_exceptions.py new file mode 100644 index 0000000..c08e6d8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_exceptions.py @@ -0,0 +1,94 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +class Error(Exception): + """Base exception class. All other psutil exceptions inherit + from this one. + """ + + def __init__(self, msg=""): + Exception.__init__(self, msg) + self.msg = msg + + def __repr__(self): + ret = "psutil.%s %s" % (self.__class__.__name__, self.msg) + return ret.strip() + + __str__ = __repr__ + + +class NoSuchProcess(Error): + """Exception raised when a process with a certain PID doesn't + or no longer exists. + """ + + def __init__(self, pid, name=None, msg=None): + Error.__init__(self, msg) + self.pid = pid + self.name = name + self.msg = msg + if msg is None: + if name: + details = "(pid=%s, name=%s)" % (self.pid, repr(self.name)) + else: + details = "(pid=%s)" % self.pid + self.msg = "process no longer exists " + details + + +class ZombieProcess(NoSuchProcess): + """Exception raised when querying a zombie process. 
This is + raised on OSX, BSD and Solaris only, and not always: depending + on the query the OS may be able to succeed anyway. + On Linux all zombie processes are querable (hence this is never + raised). Windows doesn't have zombie processes. + """ + + def __init__(self, pid, name=None, ppid=None, msg=None): + NoSuchProcess.__init__(self, msg) + self.pid = pid + self.ppid = ppid + self.name = name + self.msg = msg + if msg is None: + args = ["pid=%s" % pid] + if name: + args.append("name=%s" % repr(self.name)) + if ppid: + args.append("ppid=%s" % self.ppid) + details = "(%s)" % ", ".join(args) + self.msg = "process still exists but it's a zombie " + details + + +class AccessDenied(Error): + """Exception raised when permission to perform an action is denied.""" + + def __init__(self, pid=None, name=None, msg=None): + Error.__init__(self, msg) + self.pid = pid + self.name = name + self.msg = msg + if msg is None: + if (pid is not None) and (name is not None): + self.msg = "(pid=%s, name=%s)" % (pid, repr(name)) + elif (pid is not None): + self.msg = "(pid=%s)" % self.pid + else: + self.msg = "" + + +class TimeoutExpired(Error): + """Raised on Process.wait(timeout) if timeout expires and process + is still alive. + """ + + def __init__(self, seconds, pid=None, name=None): + Error.__init__(self, "timeout after %s seconds" % seconds) + self.seconds = seconds + self.pid = pid + self.name = name + if (pid is not None) and (name is not None): + self.msg += " (pid=%s, name=%s)" % (pid, repr(name)) + elif (pid is not None): + self.msg += " (pid=%s)" % self.pid diff --git a/server/www/packages/packages-windows/x86/psutil/_psaix.py b/server/www/packages/packages-windows/x86/psutil/_psaix.py new file mode 100644 index 0000000..9abc8d1 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_psaix.py @@ -0,0 +1,573 @@ +# Copyright (c) 2009, Giampaolo Rodola' +# Copyright (c) 2017, Arnon Yaari +# All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""AIX platform implementation.""" + +import errno +import glob +import os +import re +import subprocess +import sys +from collections import namedtuple +from socket import AF_INET + +from . import _common +from . import _psposix +from . import _psutil_aix as cext +from . import _psutil_posix as cext_posix +from ._common import AF_INET6 +from ._common import memoize_when_activated +from ._common import NIC_DUPLEX_FULL +from ._common import NIC_DUPLEX_HALF +from ._common import NIC_DUPLEX_UNKNOWN +from ._common import sockfam_to_enum +from ._common import socktype_to_enum +from ._common import usage_percent +from ._compat import PY3 +from ._exceptions import AccessDenied +from ._exceptions import NoSuchProcess +from ._exceptions import ZombieProcess + + +__extra__all__ = ["PROCFS_PATH"] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +HAS_THREADS = hasattr(cext, "proc_threads") + +PAGE_SIZE = os.sysconf('SC_PAGE_SIZE') +AF_LINK = cext_posix.AF_LINK + +PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SACTIVE: _common.STATUS_RUNNING, + cext.SSWAP: _common.STATUS_RUNNING, # TODO what status is this? 
+ cext.SSTOP: _common.STATUS_STOPPED, +} + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +proc_info_map = dict( + ppid=0, + rss=1, + vms=2, + create_time=3, + nice=4, + num_threads=5, + status=6, + ttynr=7) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms']) +# psutil.Process.memory_full_info() +pfullmem = pmem +# psutil.Process.cpu_times() +scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) +# psutil.virtual_memory() +svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked']) +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) + + +# ===================================================================== +# --- utils +# ===================================================================== + + +def get_procfs_path(): + """Return updated psutil.PROCFS_PATH constant.""" + return sys.modules['psutil'].PROCFS_PATH + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + total, avail, free, pinned, inuse = cext.virtual_mem() + percent = usage_percent((total - avail), total, _round=1) + return svmem(total, avail, percent, inuse, free) + + +def swap_memory(): + """Swap system memory as a (total, used, free, sin, sout) tuple.""" + total, free, sin, sout = cext.swap_mem() + used = total - free + percent = usage_percent(used, total, _round=1) + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system-wide CPU times as a named tuple""" + ret = cext.per_cpu_times() + return scputimes(*[sum(x) for x in zip(*ret)]) + + +def per_cpu_times(): + """Return system per-CPU times as a list of named tuples""" + ret = cext.per_cpu_times() + return [scputimes(*x) for x in ret] + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except ValueError: + # mimic os.cpu_count() behavior + return None + + +def cpu_count_physical(): + cmd = "lsdev -Cc processor" + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = [x.decode(sys.stdout.encoding) + for x in (stdout, stderr)] + if p.returncode != 0: + raise RuntimeError("%r command error\n%s" % (cmd, stderr)) + processors = stdout.strip().splitlines() + return len(processors) or None + + +def cpu_stats(): + 
"""Return various CPU stats as a named tuple.""" + ctx_switches, interrupts, soft_interrupts, syscalls = cext.cpu_stats() + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls) + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_io_counters = cext.disk_io_counters +disk_usage = _psposix.disk_usage + + +def disk_partitions(all=False): + """Return system disk partitions.""" + # TODO - the filtering logic should be better checked so that + # it tries to reflect 'df' as much as possible + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + # Differently from, say, Linux, we don't have a list of + # common fs types so the best we can do, AFAIK, is to + # filter by filesystem having a total size > 0. + if not disk_usage(mountpoint).total: + continue + ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_if_addrs = cext_posix.net_if_addrs +net_io_counters = cext.net_io_counters + + +def net_connections(kind, _pid=-1): + """Return socket connections. If pid == -1 return system-wide + connections (as opposed to connections opened by one process only). + """ + cmap = _common.conn_tmap + if kind not in cmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in cmap]))) + families, types = _common.conn_tmap[kind] + rawlist = cext.net_connections(_pid) + ret = set() + for item in rawlist: + fd, fam, type_, laddr, raddr, status, pid = item + if fam not in families: + continue + if type_ not in types: + continue + status = TCP_STATUSES[status] + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + fam = sockfam_to_enum(fam) + type_ = socktype_to_enum(type_) + if _pid == -1: + nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid) + else: + nt = _common.pconn(fd, fam, type_, laddr, raddr, status) + ret.add(nt) + return list(ret) + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + duplex_map = {"Full": NIC_DUPLEX_FULL, + "Half": NIC_DUPLEX_HALF} + names = set([x[0] for x in net_if_addrs()]) + ret = {} + for name in names: + isup, mtu = cext.net_if_stats(name) + + # try to get speed and duplex + # TODO: rewrite this in C (entstat forks, so use truss -f to follow. + # looks like it is using an undocumented ioctl?) 
+ duplex = "" + speed = 0 + p = subprocess.Popen(["/usr/bin/entstat", "-d", name], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = [x.decode(sys.stdout.encoding) + for x in (stdout, stderr)] + if p.returncode == 0: + re_result = re.search("Running: (\d+) Mbps.*?(\w+) Duplex", stdout) + if re_result is not None: + speed = int(re_result.group(1)) + duplex = re_result.group(2) + + duplex = duplex_map.get(duplex, NIC_DUPLEX_UNKNOWN) + ret[name] = _common.snicstats(isup, duplex, speed, mtu) + return ret + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + localhost = (':0.0', ':0') + for item in rawlist: + user, tty, hostname, tstamp, user_process, pid = item + # note: the underlying C function includes entries about + # system boot, run level and others. We might want + # to use them in the future. + if not user_process: + continue + if hostname in localhost: + hostname = 'localhost' + nt = _common.suser(user, tty, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + return [int(x) for x in os.listdir(get_procfs_path()) if x.isdigit()] + + +def pid_exists(pid): + """Check for the existence of a unix pid.""" + return os.path.exists(os.path.join(get_procfs_path(), str(pid), "psinfo")) + + +def wrap_exceptions(fun): + """Call callable into a try/except clause and translate ENOENT, + EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. + """ + + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except EnvironmentError as err: + # support for private module import + if (NoSuchProcess is None or AccessDenied is None or + ZombieProcess is None): + raise + # ENOENT (no such file or directory) gets raised on open(). + # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. 
+ if err.errno in (errno.ENOENT, errno.ESRCH): + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + raise + return wrapper + + +class Process(object): + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_procfs_path"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + self._procfs_path = get_procfs_path() + + def oneshot_enter(self): + self._proc_name_and_args.cache_activate() + self._proc_basic_info.cache_activate() + self._proc_cred.cache_activate() + + def oneshot_exit(self): + self._proc_name_and_args.cache_deactivate() + self._proc_basic_info.cache_deactivate() + self._proc_cred.cache_deactivate() + + @memoize_when_activated + def _proc_name_and_args(self): + return cext.proc_name_and_args(self.pid, self._procfs_path) + + @memoize_when_activated + def _proc_basic_info(self): + return cext.proc_basic_info(self.pid, self._procfs_path) + + @memoize_when_activated + def _proc_cred(self): + return cext.proc_cred(self.pid, self._procfs_path) + + @wrap_exceptions + def name(self): + if self.pid == 0: + return "swapper" + # note: this is limited to 15 characters + return self._proc_name_and_args()[0].rstrip("\x00") + + @wrap_exceptions + def exe(self): + # there is no way to get executable path in AIX other than to guess, + # and guessing is more complex than what's in the wrapping class + exe = self.cmdline()[0] + if os.path.sep in exe: + # relative or absolute path + if not os.path.isabs(exe): + # if cwd has changed, we're out of luck - this may be wrong! + exe = os.path.abspath(os.path.join(self.cwd(), exe)) + if (os.path.isabs(exe) and + os.path.isfile(exe) and + os.access(exe, os.X_OK)): + return exe + # not found, move to search in PATH using basename only + exe = os.path.basename(exe) + # search for exe name PATH + for path in os.environ["PATH"].split(":"): + possible_exe = os.path.abspath(os.path.join(path, exe)) + if (os.path.isfile(possible_exe) and + os.access(possible_exe, os.X_OK)): + return possible_exe + return '' + + @wrap_exceptions + def cmdline(self): + return self._proc_name_and_args()[1].split(' ') + + @wrap_exceptions + def create_time(self): + return self._proc_basic_info()[proc_info_map['create_time']] + + @wrap_exceptions + def num_threads(self): + return self._proc_basic_info()[proc_info_map['num_threads']] + + if HAS_THREADS: + @wrap_exceptions + def threads(self): + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + # The underlying C implementation retrieves all OS threads + # and filters them by PID. At this point we can't tell whether + # an empty list means there were no connections for process or + # process is no longer active so we force NSP in case the PID + # is no longer there. + if not retlist: + # will raise NSP if process is gone + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return retlist + + @wrap_exceptions + def connections(self, kind='inet'): + ret = net_connections(kind, _pid=self.pid) + # The underlying C implementation retrieves all OS connections + # and filters them by PID. 
At this point we can't tell whether + # an empty list means there were no connections for process or + # process is no longer active so we force NSP in case the PID + # is no longer there. + if not ret: + # will raise NSP if process is gone + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return ret + + @wrap_exceptions + def nice_get(self): + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def ppid(self): + self._ppid = self._proc_basic_info()[proc_info_map['ppid']] + return self._ppid + + @wrap_exceptions + def uids(self): + real, effective, saved, _, _, _ = self._proc_cred() + return _common.puids(real, effective, saved) + + @wrap_exceptions + def gids(self): + _, _, _, real, effective, saved = self._proc_cred() + return _common.puids(real, effective, saved) + + @wrap_exceptions + def cpu_times(self): + cpu_times = cext.proc_cpu_times(self.pid, self._procfs_path) + return _common.pcputimes(*cpu_times) + + @wrap_exceptions + def terminal(self): + ttydev = self._proc_basic_info()[proc_info_map['ttynr']] + # convert from 64-bit dev_t to 32-bit dev_t and then map the device + ttydev = (((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF)) + # try to match rdev of /dev/pts/* files ttydev + for dev in glob.glob("/dev/**/*"): + if os.stat(dev).st_rdev == ttydev: + return dev + return None + + @wrap_exceptions + def cwd(self): + procfs_path = self._procfs_path + try: + result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid)) + return result.rstrip('/') + except OSError as err: + if err.errno == errno.ENOENT: + os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD + return None + raise + + @wrap_exceptions + def memory_info(self): + ret = self._proc_basic_info() + rss = ret[proc_info_map['rss']] * 1024 + vms = ret[proc_info_map['vms']] * 1024 + return pmem(rss, vms) + + memory_full_info = memory_info + + @wrap_exceptions + def status(self): + code = self._proc_basic_info()[proc_info_map['status']] + # XXX is '?' legit? 
(we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + def open_files(self): + # TODO rewrite without using procfiles (stat /proc/pid/fd/* and then + # find matching name of the inode) + p = subprocess.Popen(["/usr/bin/procfiles", "-n", str(self.pid)], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = [x.decode(sys.stdout.encoding) + for x in (stdout, stderr)] + if "no such process" in stderr.lower(): + raise NoSuchProcess(self.pid, self._name) + procfiles = re.findall("(\d+): S_IFREG.*\s*.*name:(.*)\n", stdout) + retlist = [] + for fd, path in procfiles: + path = path.strip() + if path.startswith("//"): + path = path[1:] + if path.lower() == "cannot be retrieved": + continue + retlist.append(_common.popenfile(path, int(fd))) + return retlist + + @wrap_exceptions + def num_fds(self): + if self.pid == 0: # no /proc/0/fd + return 0 + return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) + + @wrap_exceptions + def num_ctx_switches(self): + return _common.pctxsw( + *cext.proc_num_ctx_switches(self.pid)) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def io_counters(self): + try: + rc, wc, rb, wb = cext.proc_io_counters(self.pid) + except OSError: + # if process is terminated, proc_io_counters returns OSError + # instead of NSP + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + raise + return _common.pio(rc, wc, rb, wb) diff --git a/server/www/packages/packages-windows/x86/psutil/_psbsd.py b/server/www/packages/packages-windows/x86/psutil/_psbsd.py new file mode 100644 index 0000000..0553401 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_psbsd.py @@ -0,0 +1,873 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""FreeBSD, OpenBSD and NetBSD platforms implementation.""" + +import contextlib +import errno +import functools +import os +import xml.etree.ElementTree as ET +from collections import namedtuple +from socket import AF_INET + +from . import _common +from . import _psposix +from . import _psutil_bsd as cext +from . import _psutil_posix as cext_posix +from ._common import AF_INET6 +from ._common import conn_tmap +from ._common import FREEBSD +from ._common import memoize +from ._common import memoize_when_activated +from ._common import NETBSD +from ._common import OPENBSD +from ._common import sockfam_to_enum +from ._common import socktype_to_enum +from ._common import usage_percent +from ._compat import which +from ._exceptions import AccessDenied +from ._exceptions import NoSuchProcess +from ._exceptions import ZombieProcess + +__extra__all__ = [] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +if FREEBSD: + PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SRUN: _common.STATUS_RUNNING, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SWAIT: _common.STATUS_WAITING, + cext.SLOCK: _common.STATUS_LOCKED, + } +elif OPENBSD or NETBSD: + PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + # According to /usr/include/sys/proc.h SZOMB is unused. 
+ # test_zombie_process() shows that SDEAD is the right + # equivalent. Also it appears there's no equivalent of + # psutil.STATUS_DEAD. SDEAD really means STATUS_ZOMBIE. + # cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SDEAD: _common.STATUS_ZOMBIE, + cext.SZOMB: _common.STATUS_ZOMBIE, + # From http://www.eecs.harvard.edu/~margo/cs161/videos/proc.h.txt + # OpenBSD has SRUN and SONPROC: SRUN indicates that a process + # is runnable but *not* yet running, i.e. is on a run queue. + # SONPROC indicates that the process is actually executing on + # a CPU, i.e. it is no longer on a run queue. + # As such we'll map SRUN to STATUS_WAKING and SONPROC to + # STATUS_RUNNING + cext.SRUN: _common.STATUS_WAKING, + cext.SONPROC: _common.STATUS_RUNNING, + } +elif NETBSD: + PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SACTIVE: _common.STATUS_RUNNING, + cext.SDYING: _common.STATUS_ZOMBIE, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SDEAD: _common.STATUS_DEAD, + cext.SSUSPENDED: _common.STATUS_SUSPENDED, # unique to NetBSD + } + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +if NETBSD: + PAGESIZE = os.sysconf("SC_PAGESIZE") +else: + PAGESIZE = os.sysconf("SC_PAGE_SIZE") +AF_LINK = cext_posix.AF_LINK + +kinfo_proc_map = dict( + ppid=0, + status=1, + real_uid=2, + effective_uid=3, + saved_uid=4, + real_gid=5, + effective_gid=6, + saved_gid=7, + ttynr=8, + create_time=9, + ctx_switches_vol=10, + ctx_switches_unvol=11, + read_io_count=12, + write_io_count=13, + user_time=14, + sys_time=15, + ch_user_time=16, + ch_sys_time=17, + rss=18, + vms=19, + memtext=20, + memdata=21, + memstack=22, + cpunum=23, + name=24, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.virtual_memory() +svmem = namedtuple( + 'svmem', ['total', 'available', 'percent', 'used', 'free', + 'active', 'inactive', 'buffers', 'cached', 'shared', 'wired']) +# psutil.cpu_times() +scputimes = namedtuple( + 'scputimes', ['user', 'nice', 'system', 'idle', 'irq']) +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms', 'text', 'data', 'stack']) +# psutil.Process.memory_full_info() +pfullmem = pmem +# psutil.Process.cpu_times() +pcputimes = namedtuple('pcputimes', + ['user', 'system', 'children_user', 'children_system']) +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple( + 'pmmap_grouped', 'path rss, private, ref_count, shadow_count') +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count') +# psutil.disk_io_counters() +if FREEBSD: + sdiskio = namedtuple('sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_time', 'write_time', + 'busy_time']) +else: + sdiskio = namedtuple('sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes']) + + +# 
===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """System virtual memory as a namedtuple.""" + mem = cext.virtual_mem() + total, free, active, inactive, wired, cached, buffers, shared = mem + if NETBSD: + # On NetBSD buffers and shared mem is determined via /proc. + # The C ext set them to 0. + with open('/proc/meminfo', 'rb') as f: + for line in f: + if line.startswith(b'Buffers:'): + buffers = int(line.split()[1]) * 1024 + elif line.startswith(b'MemShared:'): + shared = int(line.split()[1]) * 1024 + avail = inactive + cached + free + used = active + wired + cached + percent = usage_percent((total - avail), total, _round=1) + return svmem(total, avail, percent, used, free, + active, inactive, buffers, cached, shared, wired) + + +def swap_memory(): + """System swap memory as (total, used, free, sin, sout) namedtuple.""" + total, used, free, sin, sout = cext.swap_mem() + percent = usage_percent(used, total, _round=1) + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system per-CPU times as a namedtuple""" + user, nice, system, idle, irq = cext.cpu_times() + return scputimes(user, nice, system, idle, irq) + + +if hasattr(cext, "per_cpu_times"): + def per_cpu_times(): + """Return system CPU times as a namedtuple""" + ret = [] + for cpu_t in cext.per_cpu_times(): + user, nice, system, idle, irq = cpu_t + item = scputimes(user, nice, system, idle, irq) + ret.append(item) + return ret +else: + # XXX + # Ok, this is very dirty. + # On FreeBSD < 8 we cannot gather per-cpu information, see: + # https://github.com/giampaolo/psutil/issues/226 + # If num cpus > 1, on first call we return single cpu times to avoid a + # crash at psutil import time. + # Next calls will fail with NotImplementedError + def per_cpu_times(): + """Return system CPU times as a namedtuple""" + if cpu_count_logical() == 1: + return [cpu_times()] + if per_cpu_times.__called__: + raise NotImplementedError("supported only starting from FreeBSD 8") + per_cpu_times.__called__ = True + return [cpu_times()] + + per_cpu_times.__called__ = False + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + return cext.cpu_count_logical() + + +if OPENBSD or NETBSD: + def cpu_count_physical(): + # OpenBSD and NetBSD do not implement this. + return 1 if cpu_count_logical() == 1 else None +else: + def cpu_count_physical(): + """Return the number of physical CPUs in the system.""" + # From the C module we'll get an XML string similar to this: + # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html + # We may get None in case "sysctl kern.sched.topology_spec" + # is not supported on this BSD version, in which case we'll mimic + # os.cpu_count() and return None. + ret = None + s = cext.cpu_count_phys() + if s is not None: + # get rid of padding chars appended at the end of the string + index = s.rfind("") + if index != -1: + s = s[:index + 9] + root = ET.fromstring(s) + try: + ret = len(root.findall('group/children/group/cpu')) or None + finally: + # needed otherwise it will memleak + root.clear() + if not ret: + # If logical CPUs are 1 it's obvious we'll have only 1 + # physical CPU. 
+ if cpu_count_logical() == 1: + return 1 + return ret + + +def cpu_stats(): + """Return various CPU stats as a named tuple.""" + if FREEBSD: + # Note: the C ext is returning some metrics we are not exposing: + # traps. + ctxsw, intrs, soft_intrs, syscalls, traps = cext.cpu_stats() + elif NETBSD: + # XXX + # Note about intrs: the C extension returns 0. intrs + # can be determined via /proc/stat; it has the same value as + # soft_intrs thought so the kernel is faking it (?). + # + # Note about syscalls: the C extension always sets it to 0 (?). + # + # Note: the C ext is returning some metrics we are not exposing: + # traps, faults and forks. + ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \ + cext.cpu_stats() + with open('/proc/stat', 'rb') as f: + for line in f: + if line.startswith(b'intr'): + intrs = int(line.split()[1]) + elif OPENBSD: + # Note: the C ext is returning some metrics we are not exposing: + # traps, faults and forks. + ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \ + cext.cpu_stats() + return _common.scpustats(ctxsw, intrs, soft_intrs, syscalls) + + +# ===================================================================== +# --- disks +# ===================================================================== + + +def disk_partitions(all=False): + """Return mounted disk partitions as a list of namedtuples. + 'all' argument is ignored, see: + https://github.com/giampaolo/psutil/issues/906 + """ + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) + retlist.append(ntuple) + return retlist + + +disk_usage = _psposix.disk_usage +disk_io_counters = cext.disk_io_counters + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_io_counters = cext.net_io_counters +net_if_addrs = cext_posix.net_if_addrs + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + names = net_io_counters().keys() + ret = {} + for name in names: + mtu = cext_posix.net_if_mtu(name) + isup = cext_posix.net_if_flags(name) + duplex, speed = cext_posix.net_if_duplex_speed(name) + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + ret[name] = _common.snicstats(isup, duplex, speed, mtu) + return ret + + +def net_connections(kind): + """System-wide network connections.""" + if OPENBSD: + ret = [] + for pid in pids(): + try: + cons = Process(pid).connections(kind) + except (NoSuchProcess, ZombieProcess): + continue + else: + for conn in cons: + conn = list(conn) + conn.append(pid) + ret.append(_common.sconn(*conn)) + return ret + + if kind not in _common.conn_tmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap]))) + families, types = conn_tmap[kind] + ret = set() + if NETBSD: + rawlist = cext.net_connections(-1) + else: + rawlist = cext.net_connections() + for item in rawlist: + fd, fam, type, laddr, raddr, status, pid = item + # TODO: apply filter at C level + if fam in families and type in types: + try: + status = TCP_STATUSES[status] + except KeyError: + # XXX: Not sure why this happens. I saw this occurring + # with IPv6 sockets opened by 'vim'. Those sockets + # have a very short lifetime so maybe the kernel + # can't initialize their status? 
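+ # Fall back to CONN_NONE so one unrecognized kernel state does
+ # not make the whole net_connections() call fail.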
+ status = TCP_STATUSES[cext.PSUTIL_CONN_NONE] + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + fam = sockfam_to_enum(fam) + type = socktype_to_enum(type) + nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid) + ret.add(nt) + return list(ret) + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +if FREEBSD: + + def sensors_battery(): + """Return battery info.""" + try: + percent, minsleft, power_plugged = cext.sensors_battery() + except NotImplementedError: + # See: https://github.com/giampaolo/psutil/issues/1074 + return None + power_plugged = power_plugged == 1 + if power_plugged: + secsleft = _common.POWER_TIME_UNLIMITED + elif minsleft == -1: + secsleft = _common.POWER_TIME_UNKNOWN + else: + secsleft = minsleft * 60 + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, tty, hostname, tstamp, pid = item + if pid == -1: + assert OPENBSD + pid = None + if tty == '~': + continue # reboot or shutdown + nt = _common.suser(user, tty or None, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +@memoize +def _pid_0_exists(): + try: + Process(0).name() + except NoSuchProcess: + return False + except AccessDenied: + return True + else: + return True + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + ret = cext.pids() + if OPENBSD and (0 not in ret) and _pid_0_exists(): + # On OpenBSD the kernel does not return PID 0 (neither does + # ps) but it's actually querable (Process(0) will succeed). + ret.insert(0, 0) + return ret + + +if OPENBSD or NETBSD: + def pid_exists(pid): + """Return True if pid exists.""" + exists = _psposix.pid_exists(pid) + if not exists: + # We do this because _psposix.pid_exists() lies in case of + # zombie processes. + return pid in pids() + else: + return True +else: + pid_exists = _psposix.pid_exists + + +def wrap_exceptions(fun): + """Decorator which translates bare OSError exceptions into + NoSuchProcess and AccessDenied. + """ + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except OSError as err: + if self.pid == 0: + if 0 in pids(): + raise AccessDenied(self.pid, self._name) + else: + raise + if err.errno == errno.ESRCH: + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + raise + return wrapper + + +@contextlib.contextmanager +def wrap_exceptions_procfs(inst): + """Same as above, for routines relying on reading /proc fs.""" + try: + yield + except EnvironmentError as err: + # ENOENT (no such file or directory) gets raised on open(). 
+ # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. + if err.errno in (errno.ENOENT, errno.ESRCH): + if not pid_exists(inst.pid): + raise NoSuchProcess(inst.pid, inst._name) + else: + raise ZombieProcess(inst.pid, inst._name, inst._ppid) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(inst.pid, inst._name) + raise + + +class Process(object): + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + + @memoize_when_activated + def oneshot(self): + """Retrieves multiple process info in one shot as a raw tuple.""" + ret = cext.proc_oneshot_info(self.pid) + assert len(ret) == len(kinfo_proc_map) + return ret + + def oneshot_enter(self): + self.oneshot.cache_activate() + + def oneshot_exit(self): + self.oneshot.cache_deactivate() + + @wrap_exceptions + def name(self): + name = self.oneshot()[kinfo_proc_map['name']] + return name if name is not None else cext.proc_name(self.pid) + + @wrap_exceptions + def exe(self): + if FREEBSD: + return cext.proc_exe(self.pid) + elif NETBSD: + if self.pid == 0: + # /proc/0 dir exists but /proc/0/exe doesn't + return "" + with wrap_exceptions_procfs(self): + return os.readlink("/proc/%s/exe" % self.pid) + else: + # OpenBSD: exe cannot be determined; references: + # https://chromium.googlesource.com/chromium/src/base/+/ + # master/base_paths_posix.cc + # We try our best guess by using which against the first + # cmdline arg (may return None). + cmdline = self.cmdline() + if cmdline: + return which(cmdline[0]) + else: + return "" + + @wrap_exceptions + def cmdline(self): + if OPENBSD and self.pid == 0: + return [] # ...else it crashes + elif NETBSD: + # XXX - most of the times the underlying sysctl() call on Net + # and Open BSD returns a truncated string. + # Also /proc/pid/cmdline behaves the same so it looks + # like this is a kernel bug. 
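+ # EINVAL from the call below means the process is either gone
+ # or a zombie; pid_exists() is used to tell the two apart.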
+ try: + return cext.proc_cmdline(self.pid) + except OSError as err: + if err.errno == errno.EINVAL: + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + else: + raise + else: + return cext.proc_cmdline(self.pid) + + @wrap_exceptions + def terminal(self): + tty_nr = self.oneshot()[kinfo_proc_map['ttynr']] + tmap = _psposix.get_terminal_map() + try: + return tmap[tty_nr] + except KeyError: + return None + + @wrap_exceptions + def ppid(self): + self._ppid = self.oneshot()[kinfo_proc_map['ppid']] + return self._ppid + + @wrap_exceptions + def uids(self): + rawtuple = self.oneshot() + return _common.puids( + rawtuple[kinfo_proc_map['real_uid']], + rawtuple[kinfo_proc_map['effective_uid']], + rawtuple[kinfo_proc_map['saved_uid']]) + + @wrap_exceptions + def gids(self): + rawtuple = self.oneshot() + return _common.pgids( + rawtuple[kinfo_proc_map['real_gid']], + rawtuple[kinfo_proc_map['effective_gid']], + rawtuple[kinfo_proc_map['saved_gid']]) + + @wrap_exceptions + def cpu_times(self): + rawtuple = self.oneshot() + return _common.pcputimes( + rawtuple[kinfo_proc_map['user_time']], + rawtuple[kinfo_proc_map['sys_time']], + rawtuple[kinfo_proc_map['ch_user_time']], + rawtuple[kinfo_proc_map['ch_sys_time']]) + + if FREEBSD: + @wrap_exceptions + def cpu_num(self): + return self.oneshot()[kinfo_proc_map['cpunum']] + + @wrap_exceptions + def memory_info(self): + rawtuple = self.oneshot() + return pmem( + rawtuple[kinfo_proc_map['rss']], + rawtuple[kinfo_proc_map['vms']], + rawtuple[kinfo_proc_map['memtext']], + rawtuple[kinfo_proc_map['memdata']], + rawtuple[kinfo_proc_map['memstack']]) + + memory_full_info = memory_info + + @wrap_exceptions + def create_time(self): + return self.oneshot()[kinfo_proc_map['create_time']] + + @wrap_exceptions + def num_threads(self): + if hasattr(cext, "proc_num_threads"): + # FreeBSD + return cext.proc_num_threads(self.pid) + else: + return len(self.threads()) + + @wrap_exceptions + def num_ctx_switches(self): + rawtuple = self.oneshot() + return _common.pctxsw( + rawtuple[kinfo_proc_map['ctx_switches_vol']], + rawtuple[kinfo_proc_map['ctx_switches_unvol']]) + + @wrap_exceptions + def threads(self): + # Note: on OpenSBD this (/dev/mem) requires root access. + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + if OPENBSD: + # On OpenBSD the underlying C function does not raise NSP + # in case the process is gone (and the returned list may + # incomplete). 
+ self.name() # raise NSP if the process disappeared on us + return retlist + + @wrap_exceptions + def connections(self, kind='inet'): + if kind not in conn_tmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap]))) + + if NETBSD: + families, types = conn_tmap[kind] + ret = set() + rawlist = cext.net_connections(self.pid) + for item in rawlist: + fd, fam, type, laddr, raddr, status, pid = item + assert pid == self.pid + if fam in families and type in types: + try: + status = TCP_STATUSES[status] + except KeyError: + status = TCP_STATUSES[cext.PSUTIL_CONN_NONE] + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + fam = sockfam_to_enum(fam) + type = socktype_to_enum(type) + nt = _common.pconn(fd, fam, type, laddr, raddr, status) + ret.add(nt) + # On NetBSD the underlying C function does not raise NSP + # in case the process is gone (and the returned list may + # incomplete). + self.name() # raise NSP if the process disappeared on us + return list(ret) + + families, types = conn_tmap[kind] + rawlist = cext.proc_connections(self.pid, families, types) + ret = [] + for item in rawlist: + fd, fam, type, laddr, raddr, status = item + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + fam = sockfam_to_enum(fam) + type = socktype_to_enum(type) + status = TCP_STATUSES[status] + nt = _common.pconn(fd, fam, type, laddr, raddr, status) + ret.append(nt) + if OPENBSD: + # On OpenBSD the underlying C function does not raise NSP + # in case the process is gone (and the returned list may + # incomplete). + self.name() # raise NSP if the process disappeared on us + return ret + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def nice_get(self): + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def status(self): + code = self.oneshot()[kinfo_proc_map['status']] + # XXX is '?' legit? 
(we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + @wrap_exceptions + def io_counters(self): + rawtuple = self.oneshot() + return _common.pio( + rawtuple[kinfo_proc_map['read_io_count']], + rawtuple[kinfo_proc_map['write_io_count']], + -1, + -1) + + @wrap_exceptions + def cwd(self): + """Return process current working directory.""" + # sometimes we get an empty string, in which case we turn + # it into None + if OPENBSD and self.pid == 0: + return None # ...else it would raise EINVAL + elif NETBSD: + with wrap_exceptions_procfs(self): + return os.readlink("/proc/%s/cwd" % self.pid) + elif hasattr(cext, 'proc_open_files'): + # FreeBSD < 8 does not support functions based on + # kinfo_getfile() and kinfo_getvmmap() + return cext.proc_cwd(self.pid) or None + else: + raise NotImplementedError( + "supported only starting from FreeBSD 8" if + FREEBSD else "") + + nt_mmap_grouped = namedtuple( + 'mmap', 'path rss, private, ref_count, shadow_count') + nt_mmap_ext = namedtuple( + 'mmap', 'addr, perms path rss, private, ref_count, shadow_count') + + def _not_implemented(self): + raise NotImplementedError + + # FreeBSD < 8 does not support functions based on kinfo_getfile() + # and kinfo_getvmmap() + if hasattr(cext, 'proc_open_files'): + @wrap_exceptions + def open_files(self): + """Return files opened by process as a list of namedtuples.""" + rawlist = cext.proc_open_files(self.pid) + return [_common.popenfile(path, fd) for path, fd in rawlist] + else: + open_files = _not_implemented + + # FreeBSD < 8 does not support functions based on kinfo_getfile() + # and kinfo_getvmmap() + if hasattr(cext, 'proc_num_fds'): + @wrap_exceptions + def num_fds(self): + """Return the number of file descriptors opened by this process.""" + ret = cext.proc_num_fds(self.pid) + if NETBSD: + # On NetBSD the underlying C function does not raise NSP + # in case the process is gone. + self.name() # raise NSP if the process disappeared on us + return ret + else: + num_fds = _not_implemented + + # --- FreeBSD only APIs + + if FREEBSD: + + @wrap_exceptions + def cpu_affinity_get(self): + return cext.proc_cpu_affinity_get(self.pid) + + @wrap_exceptions + def cpu_affinity_set(self, cpus): + # Pre-emptively check if CPUs are valid because the C + # function has a weird behavior in case of invalid CPUs, + # see: https://github.com/giampaolo/psutil/issues/586 + allcpus = tuple(range(len(per_cpu_times()))) + for cpu in cpus: + if cpu not in allcpus: + raise ValueError("invalid CPU #%i (choose between %s)" + % (cpu, allcpus)) + try: + cext.proc_cpu_affinity_set(self.pid, cpus) + except OSError as err: + # 'man cpuset_setaffinity' about EDEADLK: + # <> + if err.errno in (errno.EINVAL, errno.EDEADLK): + for cpu in cpus: + if cpu not in allcpus: + raise ValueError( + "invalid CPU #%i (choose between %s)" % ( + cpu, allcpus)) + raise + + @wrap_exceptions + def memory_maps(self): + return cext.proc_memory_maps(self.pid) diff --git a/server/www/packages/packages-windows/x86/psutil/_pslinux.py b/server/www/packages/packages-windows/x86/psutil/_pslinux.py new file mode 100644 index 0000000..b9b4334 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_pslinux.py @@ -0,0 +1,2002 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Linux platform implementation.""" + +from __future__ import division + +import base64 +import collections +import errno +import functools +import glob +import os +import re +import socket +import struct +import sys +import traceback +import warnings +from collections import defaultdict +from collections import namedtuple + +from . import _common +from . import _psposix +from . import _psutil_linux as cext +from . import _psutil_posix as cext_posix +from ._common import ENCODING +from ._common import ENCODING_ERRS +from ._common import isfile_strict +from ._common import memoize +from ._common import memoize_when_activated +from ._common import NIC_DUPLEX_FULL +from ._common import NIC_DUPLEX_HALF +from ._common import NIC_DUPLEX_UNKNOWN +from ._common import parse_environ_block +from ._common import path_exists_strict +from ._common import supports_ipv6 +from ._common import usage_percent +from ._compat import b +from ._compat import basestring +from ._compat import long +from ._compat import PY3 +from ._exceptions import AccessDenied +from ._exceptions import NoSuchProcess +from ._exceptions import ZombieProcess + +if sys.version_info >= (3, 4): + import enum +else: + enum = None + + +__extra__all__ = [ + # + 'PROCFS_PATH', + # io prio constants + "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE", + "IOPRIO_CLASS_IDLE", + # connection status constants + "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", + "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", + "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +PROCFS_PATH = None +POWER_SUPPLY_PATH = "/sys/class/power_supply" +HAS_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid()) +HAS_PRLIMIT = hasattr(cext, "linux_prlimit") +_DEFAULT = object() + +# RLIMIT_* constants, not guaranteed to be present on all kernels +if HAS_PRLIMIT: + for name in dir(cext): + if name.startswith('RLIM'): + __extra__all__.append(name) + +# Number of clock ticks per second +CLOCK_TICKS = os.sysconf("SC_CLK_TCK") +PAGESIZE = os.sysconf("SC_PAGE_SIZE") +BOOT_TIME = None # set later +# Used when reading "big" files, namely /proc/{pid}/smaps and /proc/net/*. 
+# On Python 2, using a buffer with open() for such files may result in a +# speedup, see: https://github.com/giampaolo/psutil/issues/708 +BIGFILE_BUFFERING = -1 if PY3 else 8192 +LITTLE_ENDIAN = sys.byteorder == 'little' +SECTOR_SIZE_FALLBACK = 512 +if enum is None: + AF_LINK = socket.AF_PACKET +else: + AddressFamily = enum.IntEnum('AddressFamily', + {'AF_LINK': int(socket.AF_PACKET)}) + AF_LINK = AddressFamily.AF_LINK + +# ioprio_* constants http://linux.die.net/man/2/ioprio_get +if enum is None: + IOPRIO_CLASS_NONE = 0 + IOPRIO_CLASS_RT = 1 + IOPRIO_CLASS_BE = 2 + IOPRIO_CLASS_IDLE = 3 +else: + class IOPriority(enum.IntEnum): + IOPRIO_CLASS_NONE = 0 + IOPRIO_CLASS_RT = 1 + IOPRIO_CLASS_BE = 2 + IOPRIO_CLASS_IDLE = 3 + + globals().update(IOPriority.__members__) + +# taken from /fs/proc/array.c +PROC_STATUSES = { + "R": _common.STATUS_RUNNING, + "S": _common.STATUS_SLEEPING, + "D": _common.STATUS_DISK_SLEEP, + "T": _common.STATUS_STOPPED, + "t": _common.STATUS_TRACING_STOP, + "Z": _common.STATUS_ZOMBIE, + "X": _common.STATUS_DEAD, + "x": _common.STATUS_DEAD, + "K": _common.STATUS_WAKE_KILL, + "W": _common.STATUS_WAKING +} + +# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h +TCP_STATUSES = { + "01": _common.CONN_ESTABLISHED, + "02": _common.CONN_SYN_SENT, + "03": _common.CONN_SYN_RECV, + "04": _common.CONN_FIN_WAIT1, + "05": _common.CONN_FIN_WAIT2, + "06": _common.CONN_TIME_WAIT, + "07": _common.CONN_CLOSE, + "08": _common.CONN_CLOSE_WAIT, + "09": _common.CONN_LAST_ACK, + "0A": _common.CONN_LISTEN, + "0B": _common.CONN_CLOSING +} + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.virtual_memory() +svmem = namedtuple( + 'svmem', ['total', 'available', 'percent', 'used', 'free', + 'active', 'inactive', 'buffers', 'cached', 'shared']) +# psutil.disk_io_counters() +sdiskio = namedtuple( + 'sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_time', 'write_time', + 'read_merged_count', 'write_merged_count', + 'busy_time']) +# psutil.Process().open_files() +popenfile = namedtuple( + 'popenfile', ['path', 'fd', 'position', 'mode', 'flags']) +# psutil.Process().memory_info() +pmem = namedtuple('pmem', 'rss vms shared text lib data dirty') +# psutil.Process().memory_full_info() +pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap')) +# psutil.Process().memory_maps(grouped=True) +pmmap_grouped = namedtuple( + 'pmmap_grouped', + ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty', + 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap']) +# psutil.Process().memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) +# psutil.Process.io_counters() +pio = namedtuple('pio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_chars', 'write_chars']) + + +# ===================================================================== +# --- utils +# ===================================================================== + + +def open_binary(fname, **kwargs): + return open(fname, "rb", **kwargs) + + +def open_text(fname, **kwargs): + """On Python 3 opens a file in text mode by using fs encoding and + a proper en/decoding errors handler. + On Python 2 this is just an alias for open(name, 'rt'). 
+ """ + if PY3: + # See: + # https://github.com/giampaolo/psutil/issues/675 + # https://github.com/giampaolo/psutil/pull/733 + kwargs.setdefault('encoding', ENCODING) + kwargs.setdefault('errors', ENCODING_ERRS) + return open(fname, "rt", **kwargs) + + +if PY3: + def decode(s): + return s.decode(encoding=ENCODING, errors=ENCODING_ERRS) +else: + def decode(s): + return s + + +def get_procfs_path(): + """Return updated PROCFS_PATH constant. + Return value is cached after 10 calls. + """ + global PROCFS_PATH + + if PROCFS_PATH is not None: + return PROCFS_PATH + + path = sys.modules['psutil'].PROCFS_PATH + if path != "/proc": + msg = \ + "you used `psutil.PROCFS_PATH = %s` somewhere in your code; " \ + "that is deprecated and will be ignored in the future; replace " \ + "it with `set_procfs_path(%r)`" % (path, path) + warnings.warn(msg, category=FutureWarning, stacklevel=2) + PROCFS_PATH = path + + # Cache the value if path remained the same after 10 calls. + # This means that from now on any change to psutil.PROCFS_PATH + # will be ignored. + # This is based on the assumption that it's likely that the user + # does "psutil.PROCFS_PATH" at import time, not later. + get_procfs_path.ncalls += 1 + if get_procfs_path.ncalls >= 10: + PROCFS_PATH = path + + return path + + +get_procfs_path.ncalls = 0 + + +def readlink(path): + """Wrapper around os.readlink().""" + assert isinstance(path, basestring), path + path = os.readlink(path) + # readlink() might return paths containing null bytes ('\x00') + # resulting in "TypeError: must be encoded string without NULL + # bytes, not str" errors when the string is passed to other + # fs-related functions (os.*, open(), ...). + # Apparently everything after '\x00' is garbage (we can have + # ' (deleted)', 'new' and possibly others), see: + # https://github.com/giampaolo/psutil/issues/717 + path = path.split('\x00')[0] + # Certain paths have ' (deleted)' appended. Usually this is + # bogus as the file actually exists. Even if it doesn't we + # don't care. + if path.endswith(' (deleted)') and not path_exists_strict(path): + path = path[:-10] + return path + + +def file_flags_to_mode(flags): + """Convert file's open() flags into a readable string. + Used by Process.open_files(). + """ + modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'} + mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)] + if flags & os.O_APPEND: + mode = mode.replace('w', 'a', 1) + mode = mode.replace('w+', 'r+') + # possible values: r, w, a, r+, a+ + return mode + + +def get_sector_size(partition): + """Return the sector size of a partition. + Used by disk_io_counters(). + """ + try: + with open("/sys/block/%s/queue/hw_sector_size" % partition, "rt") as f: + return int(f.read()) + except (IOError, ValueError): + # man iostat states that sectors are equivalent with blocks and + # have a size of 512 bytes since 2.4 kernels. + return SECTOR_SIZE_FALLBACK + + +@memoize +def set_scputimes_ntuple(procfs_path): + """Set a namedtuple of variable fields depending on the CPU times + available on this Linux kernel version which may be: + (user, nice, system, idle, iowait, irq, softirq, [steal, [guest, + [guest_nice]]]) + Used by cpu_times() function. 
+ """ + global scputimes + with open_binary('%s/stat' % procfs_path) as f: + values = f.readline().split()[1:] + fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] + vlen = len(values) + if vlen >= 8: + # Linux >= 2.6.11 + fields.append('steal') + if vlen >= 9: + # Linux >= 2.6.24 + fields.append('guest') + if vlen >= 10: + # Linux >= 3.2.0 + fields.append('guest_nice') + scputimes = namedtuple('scputimes', fields) + + +def cat(fname, fallback=_DEFAULT, binary=True): + """Return file content. + fallback: the value returned in case the file does not exist or + cannot be read + binary: whether to open the file in binary or text mode. + """ + try: + with open_binary(fname) if binary else open_text(fname) as f: + return f.read().strip() + except IOError: + if fallback is not _DEFAULT: + return fallback + else: + raise + + +try: + set_scputimes_ntuple("/proc") +except Exception: + # Don't want to crash at import time. + traceback.print_exc() + scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0) + + +# ===================================================================== +# --- system memory +# ===================================================================== + + +def calculate_avail_vmem(mems): + """Fallback for kernels < 3.14 where /proc/meminfo does not provide + "MemAvailable:" column, see: + https://blog.famzah.net/2014/09/24/ + This code reimplements the algorithm outlined here: + https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ + commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 + + XXX: on recent kernels this calculation differs by ~1.5% than + "MemAvailable:" as it's calculated slightly differently, see: + https://gitlab.com/procps-ng/procps/issues/42 + https://github.com/famzah/linux-memavailable-procfs/issues/2 + It is still way more realistic than doing (free + cached) though. + """ + # Fallback for very old distros. According to + # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ + # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 + # ...long ago "avail" was calculated as (free + cached). + # We might fallback in such cases: + # "Active(file)" not available: 2.6.28 / Dec 2008 + # "Inactive(file)" not available: 2.6.28 / Dec 2008 + # "SReclaimable:" not available: 2.6.19 / Nov 2006 + # /proc/zoneinfo not available: 2.6.13 / Aug 2005 + free = mems[b'MemFree:'] + fallback = free + mems.get(b"Cached:", 0) + try: + lru_active_file = mems[b'Active(file):'] + lru_inactive_file = mems[b'Inactive(file):'] + slab_reclaimable = mems[b'SReclaimable:'] + except KeyError: + return fallback + try: + f = open_binary('%s/zoneinfo' % get_procfs_path()) + except IOError: + return fallback # kernel 2.6.13 + + watermark_low = 0 + with f: + for line in f: + line = line.strip() + if line.startswith(b'low'): + watermark_low += int(line.split()[1]) + watermark_low *= PAGESIZE + watermark_low = watermark_low + + avail = free - watermark_low + pagecache = lru_active_file + lru_inactive_file + pagecache -= min(pagecache / 2, watermark_low) + avail += pagecache + avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low) + return int(avail) + + +def virtual_memory(): + """Report virtual memory stats. 
+ This implementation matches "free" and "vmstat -s" cmdline + utility values and procps-ng-3.3.12 source was used as a reference + (2016-09-18): + https://gitlab.com/procps-ng/procps/blob/ + 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c + For reference, procps-ng-3.3.10 is the version available on Ubuntu + 16.04. + + Note about "available" memory: up until psutil 4.3 it was + calculated as "avail = (free + buffers + cached)". Now + "MemAvailable:" column (kernel 3.14) from /proc/meminfo is used as + it's more accurate. + That matches "available" column in newer versions of "free". + """ + missing_fields = [] + mems = {} + with open_binary('%s/meminfo' % get_procfs_path()) as f: + for line in f: + fields = line.split() + mems[fields[0]] = int(fields[1]) * 1024 + + # /proc doc states that the available fields in /proc/meminfo vary + # by architecture and compile options, but these 3 values are also + # returned by sysinfo(2); as such we assume they are always there. + total = mems[b'MemTotal:'] + free = mems[b'MemFree:'] + try: + buffers = mems[b'Buffers:'] + except KeyError: + # https://github.com/giampaolo/psutil/issues/1010 + buffers = 0 + missing_fields.append('buffers') + try: + cached = mems[b"Cached:"] + except KeyError: + cached = 0 + missing_fields.append('cached') + else: + # "free" cmdline utility sums reclaimable to cached. + # Older versions of procps used to add slab memory instead. + # This got changed in: + # https://gitlab.com/procps-ng/procps/commit/ + # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e + cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19 + + try: + shared = mems[b'Shmem:'] # since kernel 2.6.32 + except KeyError: + try: + shared = mems[b'MemShared:'] # kernels 2.4 + except KeyError: + shared = 0 + missing_fields.append('shared') + + try: + active = mems[b"Active:"] + except KeyError: + active = 0 + missing_fields.append('active') + + try: + inactive = mems[b"Inactive:"] + except KeyError: + try: + inactive = \ + mems[b"Inact_dirty:"] + \ + mems[b"Inact_clean:"] + \ + mems[b"Inact_laundry:"] + except KeyError: + inactive = 0 + missing_fields.append('inactive') + + used = total - free - cached - buffers + if used < 0: + # May be symptomatic of running within a LCX container where such + # values will be dramatically distorted over those of the host. + used = total - free + + # - starting from 4.4.0 we match free's "available" column. + # Before 4.4.0 we calculated it as (free + buffers + cached) + # which matched htop. + # - free and htop available memory differs as per: + # http://askubuntu.com/a/369589 + # http://unix.stackexchange.com/a/65852/168884 + # - MemAvailable has been introduced in kernel 3.14 + try: + avail = mems[b'MemAvailable:'] + except KeyError: + avail = calculate_avail_vmem(mems) + + if avail < 0: + avail = 0 + missing_fields.append('available') + + # If avail is greater than total or our calculation overflows, + # that's symptomatic of running within a LCX container where such + # values will be dramatically distorted over those of the host. + # https://gitlab.com/procps-ng/procps/blob/ + # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764 + if avail > total: + avail = free + + percent = usage_percent((total - avail), total, _round=1) + + # Warn about missing metrics which are set to 0. 
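+ # e.g. "cached, shared memory stats couldn't be determined and
+ # were set to 0" if both fields were absent from /proc/meminfo.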
+ if missing_fields: + msg = "%s memory stats couldn't be determined and %s set to 0" % ( + ", ".join(missing_fields), + "was" if len(missing_fields) == 1 else "were") + warnings.warn(msg, RuntimeWarning) + + return svmem(total, avail, percent, used, free, + active, inactive, buffers, cached, shared) + + +def swap_memory(): + """Return swap memory metrics.""" + mems = {} + with open_binary('%s/meminfo' % get_procfs_path()) as f: + for line in f: + fields = line.split() + mems[fields[0]] = int(fields[1]) * 1024 + # We prefer /proc/meminfo over sysinfo() syscall so that + # psutil.PROCFS_PATH can be used in order to allow retrieval + # for linux containers, see: + # https://github.com/giampaolo/psutil/issues/1015 + try: + total = mems[b'SwapTotal:'] + free = mems[b'SwapFree:'] + except KeyError: + _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo() + total *= unit_multiplier + free *= unit_multiplier + + used = total - free + percent = usage_percent(used, total, _round=1) + # get pgin/pgouts + try: + f = open_binary("%s/vmstat" % get_procfs_path()) + except IOError as err: + # see https://github.com/giampaolo/psutil/issues/722 + msg = "'sin' and 'sout' swap memory stats couldn't " \ + "be determined and were set to 0 (%s)" % str(err) + warnings.warn(msg, RuntimeWarning) + sin = sout = 0 + else: + with f: + sin = sout = None + for line in f: + # values are expressed in 4 kilo bytes, we want + # bytes instead + if line.startswith(b'pswpin'): + sin = int(line.split(b' ')[1]) * 4 * 1024 + elif line.startswith(b'pswpout'): + sout = int(line.split(b' ')[1]) * 4 * 1024 + if sin is not None and sout is not None: + break + else: + # we might get here when dealing with exotic Linux + # flavors, see: + # https://github.com/giampaolo/psutil/issues/313 + msg = "'sin' and 'sout' swap memory stats couldn't " \ + "be determined and were set to 0" + warnings.warn(msg, RuntimeWarning) + sin = sout = 0 + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return a named tuple representing the following system-wide + CPU times: + (user, nice, system, idle, iowait, irq, softirq [steal, [guest, + [guest_nice]]]) + Last 3 fields may not be available on all Linux kernel versions. + """ + procfs_path = get_procfs_path() + set_scputimes_ntuple(procfs_path) + with open_binary('%s/stat' % procfs_path) as f: + values = f.readline().split() + fields = values[1:len(scputimes._fields) + 1] + fields = [float(x) / CLOCK_TICKS for x in fields] + return scputimes(*fields) + + +def per_cpu_times(): + """Return a list of namedtuple representing the CPU times + for every CPU available on the system. 
+ """ + procfs_path = get_procfs_path() + set_scputimes_ntuple(procfs_path) + cpus = [] + with open_binary('%s/stat' % procfs_path) as f: + # get rid of the first line which refers to system wide CPU stats + f.readline() + for line in f: + if line.startswith(b'cpu'): + values = line.split() + fields = values[1:len(scputimes._fields) + 1] + fields = [float(x) / CLOCK_TICKS for x in fields] + entry = scputimes(*fields) + cpus.append(entry) + return cpus + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except ValueError: + # as a second fallback we try to parse /proc/cpuinfo + num = 0 + with open_binary('%s/cpuinfo' % get_procfs_path()) as f: + for line in f: + if line.lower().startswith(b'processor'): + num += 1 + + # unknown format (e.g. amrel/sparc architectures), see: + # https://github.com/giampaolo/psutil/issues/200 + # try to parse /proc/stat as a last resort + if num == 0: + search = re.compile(r'cpu\d') + with open_text('%s/stat' % get_procfs_path()) as f: + for line in f: + line = line.split(' ')[0] + if search.match(line): + num += 1 + + if num == 0: + # mimic os.cpu_count() + return None + return num + + +def cpu_count_physical(): + """Return the number of physical cores in the system.""" + mapping = {} + current_info = {} + with open_binary('%s/cpuinfo' % get_procfs_path()) as f: + for line in f: + line = line.strip().lower() + if not line: + # new section + if (b'physical id' in current_info and + b'cpu cores' in current_info): + mapping[current_info[b'physical id']] = \ + current_info[b'cpu cores'] + current_info = {} + else: + # ongoing section + if (line.startswith(b'physical id') or + line.startswith(b'cpu cores')): + key, value = line.split(b'\t:', 1) + current_info[key] = int(value) + + # mimic os.cpu_count() + return sum(mapping.values()) or None + + +def cpu_stats(): + """Return various CPU stats as a named tuple.""" + with open_binary('%s/stat' % get_procfs_path()) as f: + ctx_switches = None + interrupts = None + soft_interrupts = None + for line in f: + if line.startswith(b'ctxt'): + ctx_switches = int(line.split()[1]) + elif line.startswith(b'intr'): + interrupts = int(line.split()[1]) + elif line.startswith(b'softirq'): + soft_interrupts = int(line.split()[1]) + if ctx_switches is not None and soft_interrupts is not None \ + and interrupts is not None: + break + syscalls = 0 + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls) + + +if os.path.exists("/sys/devices/system/cpu/cpufreq") or \ + os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"): + def cpu_freq(): + """Return frequency metrics for all CPUs. + Contrarily to other OSes, Linux updates these values in + real-time. + """ + # scaling_* files seem preferable to cpuinfo_*, see: + # http://unix.stackexchange.com/a/87537/168884 + ret = [] + ls = glob.glob("/sys/devices/system/cpu/cpufreq/policy*") + if ls: + # Sort the list so that '10' comes after '2'. This should + # ensure the CPU order is consistent with other CPU functions + # having a 'percpu' argument and returning results for multiple + # CPUs (cpu_times(), cpu_percent(), cpu_times_percent()). 
+ ls.sort(key=lambda x: int(os.path.basename(x)[6:])) + else: + # https://github.com/giampaolo/psutil/issues/981 + ls = glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq") + ls.sort(key=lambda x: int(re.search('[0-9]+', x).group(0))) + + pjoin = os.path.join + for path in ls: + curr = cat(pjoin(path, "scaling_cur_freq"), fallback=None) + if curr is None: + # Likely an old RedHat, see: + # https://github.com/giampaolo/psutil/issues/1071 + curr = cat(pjoin(path, "cpuinfo_cur_freq"), fallback=None) + if curr is None: + raise NotImplementedError( + "can't find current frequency file") + curr = int(curr) / 1000 + max_ = int(cat(pjoin(path, "scaling_max_freq"))) / 1000 + min_ = int(cat(pjoin(path, "scaling_min_freq"))) / 1000 + ret.append(_common.scpufreq(curr, min_, max_)) + return ret + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_if_addrs = cext_posix.net_if_addrs + + +class _Ipv6UnsupportedError(Exception): + pass + + +class Connections: + """A wrapper on top of /proc/net/* files, retrieving per-process + and system-wide open connections (TCP, UDP, UNIX) similarly to + "netstat -an". + + Note: in case of UNIX sockets we're only able to determine the + local endpoint/path, not the one it's connected to. + According to [1] it would be possible but not easily. + + [1] http://serverfault.com/a/417946 + """ + + def __init__(self): + tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM) + tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM) + udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM) + udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM) + unix = ("unix", socket.AF_UNIX, None) + self.tmap = { + "all": (tcp4, tcp6, udp4, udp6, unix), + "tcp": (tcp4, tcp6), + "tcp4": (tcp4,), + "tcp6": (tcp6,), + "udp": (udp4, udp6), + "udp4": (udp4,), + "udp6": (udp6,), + "unix": (unix,), + "inet": (tcp4, tcp6, udp4, udp6), + "inet4": (tcp4, udp4), + "inet6": (tcp6, udp6), + } + self._procfs_path = None + + def get_proc_inodes(self, pid): + inodes = defaultdict(list) + for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)): + try: + inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd)) + except OSError as err: + # ENOENT == file which is gone in the meantime; + # os.stat('/proc/%s' % self.pid) will be done later + # to force NSP (if it's the case) + if err.errno in (errno.ENOENT, errno.ESRCH): + continue + elif err.errno == errno.EINVAL: + # not a link + continue + else: + raise + else: + if inode.startswith('socket:['): + # the process is using a socket + inode = inode[8:][:-1] + inodes[inode].append((pid, int(fd))) + return inodes + + def get_all_inodes(self): + inodes = {} + for pid in pids(): + try: + inodes.update(self.get_proc_inodes(pid)) + except OSError as err: + # os.listdir() is gonna raise a lot of access denied + # exceptions in case of unprivileged user; that's fine + # as we'll just end up returning a connection with PID + # and fd set to None anyway. + # Both netstat -an and lsof does the same so it's + # unlikely we can do any better. + # ENOENT just means a PID disappeared on us. 
+ if err.errno not in ( + errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES): + raise + return inodes + + @staticmethod + def decode_address(addr, family): + """Accept an "ip:port" address as displayed in /proc/net/* + and convert it into a human readable form, like: + + "0500000A:0016" -> ("10.0.0.5", 22) + "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521) + + The IP address portion is a little or big endian four-byte + hexadecimal number; that is, the least significant byte is listed + first, so we need to reverse the order of the bytes to convert it + to an IP address. + The port is represented as a two-byte hexadecimal number. + + Reference: + http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html + """ + ip, port = addr.split(':') + port = int(port, 16) + # this usually refers to a local socket in listen mode with + # no end-points connected + if not port: + return () + if PY3: + ip = ip.encode('ascii') + if family == socket.AF_INET: + # see: https://github.com/giampaolo/psutil/issues/201 + if LITTLE_ENDIAN: + ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1]) + else: + ip = socket.inet_ntop(family, base64.b16decode(ip)) + else: # IPv6 + # old version - let's keep it, just in case... + # ip = ip.decode('hex') + # return socket.inet_ntop(socket.AF_INET6, + # ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4))) + ip = base64.b16decode(ip) + try: + # see: https://github.com/giampaolo/psutil/issues/201 + if LITTLE_ENDIAN: + ip = socket.inet_ntop( + socket.AF_INET6, + struct.pack('>4I', *struct.unpack('<4I', ip))) + else: + ip = socket.inet_ntop( + socket.AF_INET6, + struct.pack('<4I', *struct.unpack('<4I', ip))) + except ValueError: + # see: https://github.com/giampaolo/psutil/issues/623 + if not supports_ipv6(): + raise _Ipv6UnsupportedError + else: + raise + return _common.addr(ip, port) + + @staticmethod + def process_inet(file, family, type_, inodes, filter_pid=None): + """Parse /proc/net/tcp* and /proc/net/udp* files.""" + if file.endswith('6') and not os.path.exists(file): + # IPv6 not supported + return + with open_text(file, buffering=BIGFILE_BUFFERING) as f: + f.readline() # skip the first line + for lineno, line in enumerate(f, 1): + try: + _, laddr, raddr, status, _, _, _, _, _, inode = \ + line.split()[:10] + except ValueError: + raise RuntimeError( + "error while parsing %s; malformed line %s %r" % ( + file, lineno, line)) + if inode in inodes: + # # We assume inet sockets are unique, so we error + # # out if there are multiple references to the + # # same inode. We won't do this for UNIX sockets. 
+ # if len(inodes[inode]) > 1 and family != socket.AF_UNIX: + # raise ValueError("ambiguos inode with multiple " + # "PIDs references") + pid, fd = inodes[inode][0] + else: + pid, fd = None, -1 + if filter_pid is not None and filter_pid != pid: + continue + else: + if type_ == socket.SOCK_STREAM: + status = TCP_STATUSES[status] + else: + status = _common.CONN_NONE + try: + laddr = Connections.decode_address(laddr, family) + raddr = Connections.decode_address(raddr, family) + except _Ipv6UnsupportedError: + continue + yield (fd, family, type_, laddr, raddr, status, pid) + + @staticmethod + def process_unix(file, family, inodes, filter_pid=None): + """Parse /proc/net/unix files.""" + with open_text(file, buffering=BIGFILE_BUFFERING) as f: + f.readline() # skip the first line + for line in f: + tokens = line.split() + try: + _, _, _, _, type_, _, inode = tokens[0:7] + except ValueError: + if ' ' not in line: + # see: https://github.com/giampaolo/psutil/issues/766 + continue + raise RuntimeError( + "error while parsing %s; malformed line %r" % ( + file, line)) + if inode in inodes: + # With UNIX sockets we can have a single inode + # referencing many file descriptors. + pairs = inodes[inode] + else: + pairs = [(None, -1)] + for pid, fd in pairs: + if filter_pid is not None and filter_pid != pid: + continue + else: + if len(tokens) == 8: + path = tokens[-1] + else: + path = "" + type_ = int(type_) + # XXX: determining the remote endpoint of a + # UNIX socket on Linux is not possible, see: + # https://serverfault.com/questions/252723/ + raddr = "" + status = _common.CONN_NONE + yield (fd, family, type_, path, raddr, status, pid) + + def retrieve(self, kind, pid=None): + if kind not in self.tmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in self.tmap]))) + self._procfs_path = get_procfs_path() + if pid is not None: + inodes = self.get_proc_inodes(pid) + if not inodes: + # no connections for this process + return [] + else: + inodes = self.get_all_inodes() + ret = set() + for f, family, type_ in self.tmap[kind]: + if family in (socket.AF_INET, socket.AF_INET6): + ls = self.process_inet( + "%s/net/%s" % (self._procfs_path, f), + family, type_, inodes, filter_pid=pid) + else: + ls = self.process_unix( + "%s/net/%s" % (self._procfs_path, f), + family, inodes, filter_pid=pid) + for fd, family, type_, laddr, raddr, status, bound_pid in ls: + if pid: + conn = _common.pconn(fd, family, type_, laddr, raddr, + status) + else: + conn = _common.sconn(fd, family, type_, laddr, raddr, + status, bound_pid) + ret.add(conn) + return list(ret) + + +_connections = Connections() + + +def net_connections(kind='inet'): + """Return system-wide open connections.""" + return _connections.retrieve(kind) + + +def net_io_counters(): + """Return network I/O statistics for every network interface + installed on the system as a dict of raw tuples. 
+ """ + with open_text("%s/net/dev" % get_procfs_path()) as f: + lines = f.readlines() + retdict = {} + for line in lines[2:]: + colon = line.rfind(':') + assert colon > 0, repr(line) + name = line[:colon].strip() + fields = line[colon + 1:].strip().split() + + # in + (bytes_recv, + packets_recv, + errin, + dropin, + fifoin, # unused + framein, # unused + compressedin, # unused + multicastin, # unused + # out + bytes_sent, + packets_sent, + errout, + dropout, + fifoout, # unused + collisionsout, # unused + carrierout, # unused + compressedout) = map(int, fields) + + retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv, + errin, errout, dropin, dropout) + return retdict + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL, + cext.DUPLEX_HALF: NIC_DUPLEX_HALF, + cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN} + names = net_io_counters().keys() + ret = {} + for name in names: + mtu = cext_posix.net_if_mtu(name) + isup = cext_posix.net_if_flags(name) + duplex, speed = cext.net_if_duplex_speed(name) + ret[name] = _common.snicstats(isup, duplex_map[duplex], speed, mtu) + return ret + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_usage = _psposix.disk_usage + + +def disk_io_counters(): + """Return disk I/O statistics for every disk installed on the + system as a dict of raw tuples. + """ + # determine partitions we want to look for + def get_partitions(): + partitions = [] + with open_text("%s/partitions" % get_procfs_path()) as f: + lines = f.readlines()[2:] + for line in reversed(lines): + _, _, _, name = line.split() + if name[-1].isdigit(): + # we're dealing with a partition (e.g. 'sda1'); 'sda' will + # also be around but we want to omit it + partitions.append(name) + else: + if not partitions or not partitions[-1].startswith(name): + # we're dealing with a disk entity for which no + # partitions have been defined (e.g. 'sda' but + # 'sda1' was not around), see: + # https://github.com/giampaolo/psutil/issues/338 + partitions.append(name) + return partitions + + retdict = {} + partitions = get_partitions() + with open_text("%s/diskstats" % get_procfs_path()) as f: + lines = f.readlines() + for line in lines: + # OK, this is a bit confusing. The format of /proc/diskstats can + # have 3 variations. 
+ # On Linux 2.4 each line has always 15 fields, e.g.: + # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8" + # On Linux 2.6+ each line *usually* has 14 fields, and the disk + # name is in another position, like this: + # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8" + # ...unless (Linux 2.6) the line refers to a partition instead + # of a disk, in which case the line has less fields (7): + # "3 1 hda1 8 8 8 8" + # See: + # https://www.kernel.org/doc/Documentation/iostats.txt + # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats + fields = line.split() + fields_len = len(fields) + if fields_len == 15: + # Linux 2.4 + name = fields[3] + reads = int(fields[2]) + (reads_merged, rbytes, rtime, writes, writes_merged, + wbytes, wtime, _, busy_time, _) = map(int, fields[4:14]) + elif fields_len == 14: + # Linux 2.6+, line referring to a disk + name = fields[2] + (reads, reads_merged, rbytes, rtime, writes, writes_merged, + wbytes, wtime, _, busy_time, _) = map(int, fields[3:14]) + elif fields_len == 7: + # Linux 2.6+, line referring to a partition + name = fields[2] + reads, rbytes, writes, wbytes = map(int, fields[3:]) + rtime = wtime = reads_merged = writes_merged = busy_time = 0 + else: + raise ValueError("not sure how to interpret line %r" % line) + + if name in partitions: + ssize = get_sector_size(name) + rbytes *= ssize + wbytes *= ssize + retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime, + reads_merged, writes_merged, busy_time) + return retdict + + +def disk_partitions(all=False): + """Return mounted disk partitions as a list of namedtuples.""" + fstypes = set() + with open_text("%s/filesystems" % get_procfs_path()) as f: + for line in f: + line = line.strip() + if not line.startswith("nodev"): + fstypes.add(line.strip()) + else: + # ignore all lines starting with "nodev" except "nodev zfs" + fstype = line.split("\t")[1] + if fstype == "zfs": + fstypes.add("zfs") + + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + if device == '' or fstype not in fstypes: + continue + ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +def sensors_temperatures(): + """Return hardware (CPU and others) temperatures as a dict + including hardware name, label, current, max and critical + temperatures. + + Implementation notes: + - /sys/class/hwmon looks like the most recent interface to + retrieve this info, and this implementation relies on it + only (old distros will probably use something else) + - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon + - /sys/class/thermal/thermal_zone* is another one but it's more + difficult to parse + """ + ret = collections.defaultdict(list) + basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') + # CentOS has an intermediate /device directory: + # https://github.com/giampaolo/psutil/issues/971 + # https://github.com/nicolargo/glances/issues/1060 + basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) + basenames = sorted(set([x.split('_')[0] for x in basenames])) + + for base in basenames: + try: + current = float(cat(base + '_input')) / 1000.0 + except (IOError, OSError) as err: + # A lot of things can go wrong here, so let's just skip the + # whole entry. 
+ # https://github.com/giampaolo/psutil/issues/1009 + # https://github.com/giampaolo/psutil/issues/1101 + # https://github.com/giampaolo/psutil/issues/1129 + warnings.warn("ignoring %r" % err, RuntimeWarning) + continue + + unit_name = cat(os.path.join(os.path.dirname(base), 'name'), + binary=False) + high = cat(base + '_max', fallback=None) + critical = cat(base + '_crit', fallback=None) + label = cat(base + '_label', fallback='', binary=False) + + if high is not None: + high = float(high) / 1000.0 + if critical is not None: + critical = float(critical) / 1000.0 + + ret[unit_name].append((label, current, high, critical)) + + return ret + + +def sensors_fans(): + """Return hardware fans info (for CPU and other peripherals) as a + dict including hardware label and current speed. + + Implementation notes: + - /sys/class/hwmon looks like the most recent interface to + retrieve this info, and this implementation relies on it + only (old distros will probably use something else) + - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon + """ + ret = collections.defaultdict(list) + basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*') + if not basenames: + # CentOS has an intermediate /device directory: + # https://github.com/giampaolo/psutil/issues/971 + basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*') + + basenames = sorted(set([x.split('_')[0] for x in basenames])) + for base in basenames: + try: + current = int(cat(base + '_input')) + except (IOError, OSError) as err: + warnings.warn("ignoring %r" % err, RuntimeWarning) + continue + unit_name = cat(os.path.join(os.path.dirname(base), 'name'), + binary=False) + label = cat(base + '_label', fallback='', binary=False) + ret[unit_name].append(_common.sfan(label, current)) + + return dict(ret) + + +def sensors_battery(): + """Return battery information. + Implementation note: it appears /sys/class/power_supply/BAT0/ + directory structure may vary and provide files with the same + meaning but under different names, see: + https://github.com/giampaolo/psutil/issues/966 + """ + null = object() + + def multi_cat(*paths): + """Attempt to read the content of multiple files which may + not exist. If none of them exist return None. + """ + for path in paths: + ret = cat(path, fallback=null) + if ret != null: + return int(ret) if ret.isdigit() else ret + return None + + root = os.path.join(POWER_SUPPLY_PATH, "BAT0") + if not os.path.exists(root): + return None + + # Base metrics. + energy_now = multi_cat( + root + "/energy_now", + root + "/charge_now") + power_now = multi_cat( + root + "/power_now", + root + "/current_now") + energy_full = multi_cat( + root + "/energy_full", + root + "/charge_full") + if energy_now is None or power_now is None: + return None + + # Percent. If we have energy_full the percentage will be more + # accurate compared to reading /capacity file (float vs. int). + if energy_full is not None: + try: + percent = 100.0 * energy_now / energy_full + except ZeroDivisionError: + percent = 0.0 + else: + percent = int(cat(root + "/capacity", fallback=-1)) + if percent == -1: + return None + + # Is AC power cable plugged in? + # Note: AC0 is not always available and sometimes (e.g. CentOS7) + # it's called "AC". 
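
A worked example of the battery arithmetic in sensors_battery(): the percentage uses the energy_now/energy_full ratio computed just above, and the discharge estimate uses energy_now/power_now a few lines below. All values here are invented; charge_* files report µAh/µA instead of µWh/µW, but the ratios work the same way.

    energy_now = 24000000    # uWh, from energy_now or charge_now
    energy_full = 50000000   # uWh, from energy_full or charge_full
    power_now = 12000000     # uW,  from power_now or current_now

    percent = 100.0 * energy_now / energy_full     # 48.0
    secsleft = int(energy_now / power_now * 3600)  # 7200 seconds, i.e. ~2 hours left
    print(percent, secsleft)
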
+ power_plugged = None + online = multi_cat( + os.path.join(POWER_SUPPLY_PATH, "AC0/online"), + os.path.join(POWER_SUPPLY_PATH, "AC/online")) + if online is not None: + power_plugged = online == 1 + else: + status = cat(root + "/status", fallback="", binary=False).lower() + if status == "discharging": + power_plugged = False + elif status in ("charging", "full"): + power_plugged = True + + # Seconds left. + # Note to self: we may also calculate the charging ETA as per: + # https://github.com/thialfihar/dotfiles/blob/ + # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55 + if power_plugged: + secsleft = _common.POWER_TIME_UNLIMITED + else: + try: + secsleft = int(energy_now / power_now * 3600) + except ZeroDivisionError: + secsleft = _common.POWER_TIME_UNKNOWN + + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, tty, hostname, tstamp, user_process, pid = item + # note: the underlying C function includes entries about + # system boot, run level and others. We might want + # to use them in the future. + if not user_process: + continue + if hostname in (':0.0', ':0'): + hostname = 'localhost' + nt = _common.suser(user, tty or None, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +def boot_time(): + """Return the system boot time expressed in seconds since the epoch.""" + global BOOT_TIME + path = '%s/stat' % get_procfs_path() + with open_binary(path) as f: + for line in f: + if line.startswith(b'btime'): + ret = float(line.strip().split()[1]) + BOOT_TIME = ret + return ret + raise RuntimeError( + "line 'btime' not found in %s" % path) + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()] + + +def pid_exists(pid): + """Check for the existence of a unix PID. Linux TIDs are not + supported (always return False). + """ + if not _psposix.pid_exists(pid): + return False + else: + # Linux's apparently does not distinguish between PIDs and TIDs + # (thread IDs). + # listdir("/proc") won't show any TID (only PIDs) but + # os.stat("/proc/{tid}") will succeed if {tid} exists. + # os.kill() can also be passed a TID. This is quite confusing. + # In here we want to enforce this distinction and support PIDs + # only, see: + # https://github.com/giampaolo/psutil/issues/687 + try: + # Note: already checked that this is faster than using a + # regular expr. Also (a lot) faster than doing + # 'return pid in pids()' + path = "%s/%s/status" % (get_procfs_path(), pid) + with open_binary(path) as f: + for line in f: + if line.startswith(b"Tgid:"): + tgid = int(line.split()[1]) + # If tgid and pid are the same then we're + # dealing with a process PID. + return tgid == pid + raise ValueError("'Tgid' line not found in %s" % path) + except (EnvironmentError, ValueError): + return pid in pids() + + +def ppid_map(): + """Obtain a {pid: ppid, ...} dict for all running processes in + one shot. Used to speed up Process.children(). 
+ """ + ret = {} + procfs_path = get_procfs_path() + for pid in pids(): + try: + with open_binary("%s/%s/stat" % (procfs_path, pid)) as f: + data = f.read() + except EnvironmentError as err: + # Note: we should be able to access /stat for all processes + # so we won't bump into EPERM, which is good. + if err.errno not in (errno.ENOENT, errno.ESRCH, + errno.EPERM, errno.EACCES): + raise + else: + rpar = data.rfind(b')') + dset = data[rpar + 2:].split() + ppid = int(dset[1]) + ret[pid] = ppid + return ret + + +def wrap_exceptions(fun): + """Decorator which translates bare OSError and IOError exceptions + into NoSuchProcess and AccessDenied. + """ + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except EnvironmentError as err: + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + # ESRCH (no such process) can be raised on read() if + # process is gone in the meantime. + if err.errno == errno.ESRCH: + raise NoSuchProcess(self.pid, self._name) + # ENOENT (no such file or directory) can be raised on open(). + if err.errno == errno.ENOENT and not os.path.exists("%s/%s" % ( + self._procfs_path, self.pid)): + raise NoSuchProcess(self.pid, self._name) + # Note: zombies will keep existing under /proc until they're + # gone so there's no way to distinguish them in here. + raise + return wrapper + + +class Process(object): + """Linux process implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_procfs_path"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + self._procfs_path = get_procfs_path() + + @memoize_when_activated + def _parse_stat_file(self): + """Parse /proc/{pid}/stat file. Return a list of fields where + process name is in position 0. + Using "man proc" as a reference: where "man proc" refers to + position N, always substract 2 (e.g starttime pos 22 in + 'man proc' == pos 20 in the list returned here). + The return value is cached in case oneshot() ctx manager is + in use. + """ + with open_binary("%s/%s/stat" % (self._procfs_path, self.pid)) as f: + data = f.read() + # Process name is between parentheses. It can contain spaces and + # other parentheses. This is taken into account by looking for + # the first occurrence of "(" and the last occurence of ")". + rpar = data.rfind(b')') + name = data[data.find(b'(') + 1:rpar] + others = data[rpar + 2:].split() + return [name] + others + + @memoize_when_activated + def _read_status_file(self): + """Read /proc/{pid}/stat file and return its content. + The return value is cached in case oneshot() ctx manager is + in use. 
+ """ + with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f: + return f.read() + + @memoize_when_activated + def _read_smaps_file(self): + with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid), + buffering=BIGFILE_BUFFERING) as f: + return f.read().strip() + + def oneshot_enter(self): + self._parse_stat_file.cache_activate() + self._read_status_file.cache_activate() + self._read_smaps_file.cache_activate() + + def oneshot_exit(self): + self._parse_stat_file.cache_deactivate() + self._read_status_file.cache_deactivate() + self._read_smaps_file.cache_deactivate() + + @wrap_exceptions + def name(self): + name = self._parse_stat_file()[0] + if PY3: + name = decode(name) + # XXX - gets changed later and probably needs refactoring + return name + + def exe(self): + try: + return readlink("%s/%s/exe" % (self._procfs_path, self.pid)) + except OSError as err: + if err.errno in (errno.ENOENT, errno.ESRCH): + # no such file error; might be raised also if the + # path actually exists for system processes with + # low pids (about 0-20) + if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)): + return "" + else: + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + raise + + @wrap_exceptions + def cmdline(self): + with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f: + data = f.read() + if not data: + # may happen in case of zombie process + return [] + # 'man proc' states that args are separated by null bytes '\0' + # and last char is supposed to be a null byte. Nevertheless + # some processes may change their cmdline after being started + # (via setproctitle() or similar), they are usually not + # compliant with this rule and use spaces instead. Google + # Chrome process is an example. 
See: + # https://github.com/giampaolo/psutil/issues/1179 + sep = '\x00' if data.endswith('\x00') else ' ' + if data.endswith(sep): + data = data[:-1] + return [x for x in data.split(sep)] + + @wrap_exceptions + def environ(self): + with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f: + data = f.read() + return parse_environ_block(data) + + @wrap_exceptions + def terminal(self): + tty_nr = int(self._parse_stat_file()[5]) + tmap = _psposix.get_terminal_map() + try: + return tmap[tty_nr] + except KeyError: + return None + + if os.path.exists('/proc/%s/io' % os.getpid()): + @wrap_exceptions + def io_counters(self): + fname = "%s/%s/io" % (self._procfs_path, self.pid) + fields = {} + with open_binary(fname) as f: + for line in f: + # https://github.com/giampaolo/psutil/issues/1004 + line = line.strip() + if line: + name, value = line.split(b': ') + fields[name] = int(value) + if not fields: + raise RuntimeError("%s file was empty" % fname) + return pio( + fields[b'syscr'], # read syscalls + fields[b'syscw'], # write syscalls + fields[b'read_bytes'], # read bytes + fields[b'write_bytes'], # write bytes + fields[b'rchar'], # read chars + fields[b'wchar'], # write chars + ) + else: + def io_counters(self): + raise NotImplementedError("couldn't find /proc/%s/io (kernel " + "too old?)" % self.pid) + + @wrap_exceptions + def cpu_times(self): + values = self._parse_stat_file() + utime = float(values[12]) / CLOCK_TICKS + stime = float(values[13]) / CLOCK_TICKS + children_utime = float(values[14]) / CLOCK_TICKS + children_stime = float(values[15]) / CLOCK_TICKS + return _common.pcputimes(utime, stime, children_utime, children_stime) + + @wrap_exceptions + def cpu_num(self): + """What CPU the process is on.""" + return int(self._parse_stat_file()[37]) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def create_time(self): + values = self._parse_stat_file() + # According to documentation, starttime is in field 21 and the + # unit is jiffies (clock ticks). + # We first divide it for clock ticks and then add uptime returning + # seconds since the epoch, in UTC. + # Also use cached value if available. + bt = BOOT_TIME or boot_time() + return (float(values[20]) / CLOCK_TICKS) + bt + + @wrap_exceptions + def memory_info(self): + # ============================================================ + # | FIELD | DESCRIPTION | AKA | TOP | + # ============================================================ + # | rss | resident set size | | RES | + # | vms | total program size | size | VIRT | + # | shared | shared pages (from shared mappings) | | SHR | + # | text | text ('code') | trs | CODE | + # | lib | library (unused in Linux 2.6) | lrs | | + # | data | data + stack | drs | DATA | + # | dirty | dirty pages (unused in Linux 2.6) | dt | | + # ============================================================ + with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f: + vms, rss, shared, text, lib, data, dirty = \ + [int(x) * PAGESIZE for x in f.readline().split()[:7]] + return pmem(rss, vms, shared, text, lib, data, dirty) + + # /proc/pid/smaps does not exist on kernels < 2.6.14 or if + # CONFIG_MMU kernel configuration option is not enabled. 
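
To make the "last closing parenthesis" trick in _parse_stat_file() above concrete, a tiny sketch with an invented /proc/<pid>/stat line whose comm field itself contains spaces and a ')'.

    # Invented stat line; only its shape matters here.
    data = b"1234 (my (weird) app) S 1 1234 1234 0 -1 4194560 95 0 0 0 12 7"
    rpar = data.rfind(b')')                # anchor on the *last* ')'
    name = data[data.find(b'(') + 1:rpar]  # b'my (weird) app'
    fields = data[rpar + 2:].split()       # state, ppid, pgrp, ... as in 'man proc'
    print(name, fields[0], fields[1])      # b'my (weird) app' b'S' b'1'
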
+ if HAS_SMAPS: + + @wrap_exceptions + def memory_full_info( + self, + _private_re=re.compile(br"Private.*:\s+(\d+)"), + _pss_re=re.compile(br"Pss.*:\s+(\d+)"), + _swap_re=re.compile(br"Swap.*:\s+(\d+)")): + basic_mem = self.memory_info() + # Note: using 3 regexes is faster than reading the file + # line by line. + # XXX: on Python 3 the 2 regexes are 30% slower than on + # Python 2 though. Figure out why. + # + # You might be tempted to calculate USS by subtracting + # the "shared" value from the "resident" value in + # /proc//statm. But at least on Linux, statm's "shared" + # value actually counts pages backed by files, which has + # little to do with whether the pages are actually shared. + # /proc/self/smaps on the other hand appears to give us the + # correct information. + smaps_data = self._read_smaps_file() + # Note: smaps file can be empty for certain processes. + # The code below will not crash though and will result to 0. + uss = sum(map(int, _private_re.findall(smaps_data))) * 1024 + pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024 + swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024 + return pfullmem(*basic_mem + (uss, pss, swap)) + + else: + memory_full_info = memory_info + + if HAS_SMAPS: + + @wrap_exceptions + def memory_maps(self): + """Return process's mapped memory regions as a list of named + tuples. Fields are explained in 'man proc'; here is an updated + (Apr 2012) version: http://goo.gl/fmebo + """ + def get_blocks(lines, current_block): + data = {} + for line in lines: + fields = line.split(None, 5) + if not fields[0].endswith(b':'): + # new block section + yield (current_block.pop(), data) + current_block.append(line) + else: + try: + data[fields[0]] = int(fields[1]) * 1024 + except ValueError: + if fields[0].startswith(b'VmFlags:'): + # see issue #369 + continue + else: + raise ValueError("don't know how to inte" + "rpret line %r" % line) + yield (current_block.pop(), data) + + data = self._read_smaps_file() + # Note: smaps file can be empty for certain processes. + if not data: + return [] + lines = data.split(b'\n') + ls = [] + first_line = lines.pop(0) + current_block = [first_line] + for header, data in get_blocks(lines, current_block): + hfields = header.split(None, 5) + try: + addr, perms, offset, dev, inode, path = hfields + except ValueError: + addr, perms, offset, dev, inode, path = \ + hfields + [''] + if not path: + path = '[anon]' + else: + if PY3: + path = decode(path) + path = path.strip() + if (path.endswith(' (deleted)') and not + path_exists_strict(path)): + path = path[:-10] + ls.append(( + decode(addr), decode(perms), path, + data[b'Rss:'], + data.get(b'Size:', 0), + data.get(b'Pss:', 0), + data.get(b'Shared_Clean:', 0), + data.get(b'Shared_Dirty:', 0), + data.get(b'Private_Clean:', 0), + data.get(b'Private_Dirty:', 0), + data.get(b'Referenced:', 0), + data.get(b'Anonymous:', 0), + data.get(b'Swap:', 0) + )) + return ls + + else: # pragma: no cover + def memory_maps(self): + raise NotImplementedError( + "/proc/%s/smaps does not exist on kernels < 2.6.14 or " + "if CONFIG_MMU kernel configuration option is not " + "enabled." 
% self.pid) + + @wrap_exceptions + def cwd(self): + try: + return readlink("%s/%s/cwd" % (self._procfs_path, self.pid)) + except OSError as err: + # https://github.com/giampaolo/psutil/issues/986 + if err.errno in (errno.ENOENT, errno.ESRCH): + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + raise + + @wrap_exceptions + def num_ctx_switches(self, + _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')): + data = self._read_status_file() + ctxsw = _ctxsw_re.findall(data) + if not ctxsw: + raise NotImplementedError( + "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'" + "lines were not found in %s/%s/status; the kernel is " + "probably older than 2.6.23" % ( + self._procfs_path, self.pid)) + else: + return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1])) + + @wrap_exceptions + def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')): + # Note: on Python 3 using a re is faster than iterating over file + # line by line. On Python 2 is the exact opposite, and iterating + # over a file on Python 3 is slower than on Python 2. + data = self._read_status_file() + return int(_num_threads_re.findall(data)[0]) + + @wrap_exceptions + def threads(self): + thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid)) + thread_ids.sort() + retlist = [] + hit_enoent = False + for thread_id in thread_ids: + fname = "%s/%s/task/%s/stat" % ( + self._procfs_path, self.pid, thread_id) + try: + with open_binary(fname) as f: + st = f.read().strip() + except IOError as err: + if err.errno == errno.ENOENT: + # no such file or directory; it means thread + # disappeared on us + hit_enoent = True + continue + raise + # ignore the first two values ("pid (exe)") + st = st[st.find(b')') + 2:] + values = st.split(b' ') + utime = float(values[11]) / CLOCK_TICKS + stime = float(values[12]) / CLOCK_TICKS + ntuple = _common.pthread(int(thread_id), utime, stime) + retlist.append(ntuple) + if hit_enoent: + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return retlist + + @wrap_exceptions + def nice_get(self): + # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f: + # data = f.read() + # return int(data.split()[18]) + + # Use C implementation + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def cpu_affinity_get(self): + return cext.proc_cpu_affinity_get(self.pid) + + def _get_eligible_cpus( + self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")): + # See: https://github.com/giampaolo/psutil/issues/956 + data = self._read_status_file() + match = _re.findall(data) + if match: + return list(range(int(match[0][0]), int(match[0][1]) + 1)) + else: + return list(range(len(per_cpu_times()))) + + @wrap_exceptions + def cpu_affinity_set(self, cpus): + try: + cext.proc_cpu_affinity_set(self.pid, cpus) + except (OSError, ValueError) as err: + if isinstance(err, ValueError) or err.errno == errno.EINVAL: + eligible_cpus = self._get_eligible_cpus() + all_cpus = tuple(range(len(per_cpu_times()))) + for cpu in cpus: + if cpu not in all_cpus: + raise ValueError( + "invalid CPU number %r; choose between %s" % ( + cpu, eligible_cpus)) + if cpu not in eligible_cpus: + raise ValueError( + "CPU number %r is not eligible; choose " + "between %s" % (cpu, eligible_cpus)) + raise + + # only starting from kernel 2.6.13 + if hasattr(cext, "proc_ioprio_get"): + + 
@wrap_exceptions + def ionice_get(self): + ioclass, value = cext.proc_ioprio_get(self.pid) + if enum is not None: + ioclass = IOPriority(ioclass) + return _common.pionice(ioclass, value) + + @wrap_exceptions + def ionice_set(self, ioclass, value): + if value is not None: + if not PY3 and not isinstance(value, (int, long)): + msg = "value argument is not an integer (gor %r)" % value + raise TypeError(msg) + if not 0 <= value <= 7: + raise ValueError( + "value argument range expected is between 0 and 7") + + if ioclass in (IOPRIO_CLASS_NONE, None): + if value: + msg = "can't specify value with IOPRIO_CLASS_NONE " \ + "(got %r)" % value + raise ValueError(msg) + ioclass = IOPRIO_CLASS_NONE + value = 0 + elif ioclass == IOPRIO_CLASS_IDLE: + if value: + msg = "can't specify value with IOPRIO_CLASS_IDLE " \ + "(got %r)" % value + raise ValueError(msg) + value = 0 + elif ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE): + if value is None: + # TODO: add comment explaining why this is 4 (?) + value = 4 + else: + # otherwise we would get OSError(EVINAL) + raise ValueError("invalid ioclass argument %r" % ioclass) + + return cext.proc_ioprio_set(self.pid, ioclass, value) + + if HAS_PRLIMIT: + @wrap_exceptions + def rlimit(self, resource, limits=None): + # If pid is 0 prlimit() applies to the calling process and + # we don't want that. We should never get here though as + # PID 0 is not supported on Linux. + if self.pid == 0: + raise ValueError("can't use prlimit() against PID 0 process") + try: + if limits is None: + # get + return cext.linux_prlimit(self.pid, resource) + else: + # set + if len(limits) != 2: + raise ValueError( + "second argument must be a (soft, hard) tuple, " + "got %s" % repr(limits)) + soft, hard = limits + cext.linux_prlimit(self.pid, resource, soft, hard) + except OSError as err: + if err.errno == errno.ENOSYS and pid_exists(self.pid): + # I saw this happening on Travis: + # https://travis-ci.org/giampaolo/psutil/jobs/51368273 + raise ZombieProcess(self.pid, self._name, self._ppid) + else: + raise + + @wrap_exceptions + def status(self): + letter = self._parse_stat_file()[1] + if PY3: + letter = letter.decode() + # XXX is '?' legit? (we're not supposed to return it anyway) + return PROC_STATUSES.get(letter, '?') + + @wrap_exceptions + def open_files(self): + retlist = [] + files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)) + hit_enoent = False + for fd in files: + file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd) + try: + path = readlink(file) + except OSError as err: + # ENOENT == file which is gone in the meantime + if err.errno in (errno.ENOENT, errno.ESRCH): + hit_enoent = True + continue + elif err.errno == errno.EINVAL: + # not a link + continue + else: + raise + else: + # If path is not an absolute there's no way to tell + # whether it's a regular file or not, so we skip it. + # A regular file is always supposed to be have an + # absolute path though. + if path.startswith('/') and isfile_strict(path): + # Get file position and flags. + file = "%s/%s/fdinfo/%s" % ( + self._procfs_path, self.pid, fd) + try: + with open_binary(file) as f: + pos = int(f.readline().split()[1]) + flags = int(f.readline().split()[1], 8) + except IOError as err: + if err.errno == errno.ENOENT: + # fd gone in the meantime; does not + # necessarily mean the process disappeared + # on us. 
+ hit_enoent = True + else: + raise + else: + mode = file_flags_to_mode(flags) + ntuple = popenfile( + path, int(fd), int(pos), mode, flags) + retlist.append(ntuple) + if hit_enoent: + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return retlist + + @wrap_exceptions + def connections(self, kind='inet'): + ret = _connections.retrieve(kind, self.pid) + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return ret + + @wrap_exceptions + def num_fds(self): + return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) + + @wrap_exceptions + def ppid(self): + return int(self._parse_stat_file()[2]) + + @wrap_exceptions + def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')): + data = self._read_status_file() + real, effective, saved = _uids_re.findall(data)[0] + return _common.puids(int(real), int(effective), int(saved)) + + @wrap_exceptions + def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')): + data = self._read_status_file() + real, effective, saved = _gids_re.findall(data)[0] + return _common.pgids(int(real), int(effective), int(saved)) diff --git a/server/www/packages/packages-windows/x86/psutil/_psosx.py b/server/www/packages/packages-windows/x86/psutil/_psosx.py new file mode 100644 index 0000000..4c97af7 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_psosx.py @@ -0,0 +1,572 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""OSX platform implementation.""" + +import contextlib +import errno +import functools +import os +from socket import AF_INET +from collections import namedtuple + +from . import _common +from . import _psposix +from . import _psutil_osx as cext +from . 
import _psutil_posix as cext_posix +from ._common import AF_INET6 +from ._common import conn_tmap +from ._common import isfile_strict +from ._common import memoize_when_activated +from ._common import parse_environ_block +from ._common import sockfam_to_enum +from ._common import socktype_to_enum +from ._common import usage_percent +from ._exceptions import AccessDenied +from ._exceptions import NoSuchProcess +from ._exceptions import ZombieProcess + + +__extra__all__ = [] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +PAGESIZE = os.sysconf("SC_PAGE_SIZE") +AF_LINK = cext_posix.AF_LINK + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SRUN: _common.STATUS_RUNNING, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SZOMB: _common.STATUS_ZOMBIE, +} + +kinfo_proc_map = dict( + ppid=0, + ruid=1, + euid=2, + suid=3, + rgid=4, + egid=5, + sgid=6, + ttynr=7, + ctime=8, + status=9, + name=10, +) + +pidtaskinfo_map = dict( + cpuutime=0, + cpustime=1, + rss=2, + vms=3, + pfaults=4, + pageins=5, + numthreads=6, + volctxsw=7, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.cpu_times() +scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle']) +# psutil.virtual_memory() +svmem = namedtuple( + 'svmem', ['total', 'available', 'percent', 'used', 'free', + 'active', 'inactive', 'wired']) +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms', 'pfaults', 'pageins']) +# psutil.Process.memory_full_info() +pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', )) +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple( + 'pmmap_grouped', + 'path rss private swapped dirtied ref_count shadow_depth') +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """System virtual memory as a namedtuple.""" + total, active, inactive, wired, free = cext.virtual_mem() + avail = inactive + free + used = active + inactive + wired + percent = usage_percent((total - avail), total, _round=1) + return svmem(total, avail, percent, used, free, + active, inactive, wired) + + +def swap_memory(): + """Swap system memory as a (total, used, free, sin, sout) tuple.""" + total, used, free, sin, sout = cext.swap_mem() + percent = usage_percent(used, total, _round=1) + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# 
===================================================================== + + +def cpu_times(): + """Return system CPU times as a namedtuple.""" + user, nice, system, idle = cext.cpu_times() + return scputimes(user, nice, system, idle) + + +def per_cpu_times(): + """Return system CPU times as a named tuple""" + ret = [] + for cpu_t in cext.per_cpu_times(): + user, nice, system, idle = cpu_t + item = scputimes(user, nice, system, idle) + ret.append(item) + return ret + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + return cext.cpu_count_logical() + + +def cpu_count_physical(): + """Return the number of physical CPUs in the system.""" + return cext.cpu_count_phys() + + +def cpu_stats(): + ctx_switches, interrupts, soft_interrupts, syscalls, traps = \ + cext.cpu_stats() + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls) + + +def cpu_freq(): + """Return CPU frequency. + On OSX per-cpu frequency is not supported. + Also, the returned frequency never changes, see: + https://arstechnica.com/civis/viewtopic.php?f=19&t=465002 + """ + curr, min_, max_ = cext.cpu_freq() + return [_common.scpufreq(curr, min_, max_)] + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_usage = _psposix.disk_usage +disk_io_counters = cext.disk_io_counters + + +def disk_partitions(all=False): + """Return mounted disk partitions as a list of namedtuples.""" + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + if not os.path.isabs(device) or not os.path.exists(device): + continue + ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +def sensors_battery(): + """Return battery information. + """ + try: + percent, minsleft, power_plugged = cext.sensors_battery() + except NotImplementedError: + # no power source - return None according to interface + return None + power_plugged = power_plugged == 1 + if power_plugged: + secsleft = _common.POWER_TIME_UNLIMITED + elif minsleft == -1: + secsleft = _common.POWER_TIME_UNKNOWN + else: + secsleft = minsleft * 60 + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_io_counters = cext.net_io_counters +net_if_addrs = cext_posix.net_if_addrs + + +def net_connections(kind='inet'): + """System-wide network connections.""" + # Note: on OSX this will fail with AccessDenied unless + # the process is owned by root. 
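
A minimal usage sketch for the public API that the loop below implements; as noted above, system-wide connections on OSX normally require root, so AccessDenied is handled explicitly. This assumes the psutil package itself is importable.

    import psutil

    try:
        for conn in psutil.net_connections(kind='inet'):
            print(conn.fd, conn.laddr, conn.raddr, conn.status, conn.pid)
    except psutil.AccessDenied:
        print("system-wide connections on OSX require root")
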
+ ret = [] + for pid in pids(): + try: + cons = Process(pid).connections(kind) + except NoSuchProcess: + continue + else: + if cons: + for c in cons: + c = list(c) + [pid] + ret.append(_common.sconn(*c)) + return ret + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + names = net_io_counters().keys() + ret = {} + for name in names: + mtu = cext_posix.net_if_mtu(name) + isup = cext_posix.net_if_flags(name) + duplex, speed = cext_posix.net_if_duplex_speed(name) + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + ret[name] = _common.snicstats(isup, duplex, speed, mtu) + return ret + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, tty, hostname, tstamp, pid = item + if tty == '~': + continue # reboot or shutdown + if not tstamp: + continue + nt = _common.suser(user, tty or None, hostname or None, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + ls = cext.pids() + if 0 not in ls: + # On certain OSX versions pids() C doesn't return PID 0 but + # "ps" does and the process is querable via sysctl(): + # https://travis-ci.org/giampaolo/psutil/jobs/309619941 + try: + Process(0).create_time() + except NoSuchProcess: + return False + except AccessDenied: + ls.append(0) + else: + ls.append(0) + return ls + + +pid_exists = _psposix.pid_exists + + +def wrap_exceptions(fun): + """Decorator which translates bare OSError exceptions into + NoSuchProcess and AccessDenied. + """ + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except OSError as err: + if err.errno == errno.ESRCH: + raise NoSuchProcess(self.pid, self._name) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + raise + return wrapper + + +@contextlib.contextmanager +def catch_zombie(proc): + """There are some poor C APIs which incorrectly raise ESRCH when + the process is still alive or it's a zombie, or even RuntimeError + (those who don't set errno). This is here in order to solve: + https://github.com/giampaolo/psutil/issues/1044 + """ + try: + yield + except (OSError, RuntimeError) as err: + if isinstance(err, RuntimeError) or err.errno == errno.ESRCH: + try: + # status() is not supposed to lie and correctly detect + # zombies so if it raises ESRCH it's true. + status = proc.status() + except NoSuchProcess: + raise err + else: + if status == _common.STATUS_ZOMBIE: + raise ZombieProcess(proc.pid, proc._name, proc._ppid) + else: + raise AccessDenied(proc.pid, proc._name) + else: + raise + + +class Process(object): + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + + @memoize_when_activated + def _get_kinfo_proc(self): + # Note: should work with all PIDs without permission issues. 
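
The cached _get_kinfo_proc() / _get_pidtaskinfo() calls are what make the public oneshot() context manager cheap; a small usage sketch, assuming psutil is importable.

    import psutil

    p = psutil.Process()  # current process
    with p.oneshot():     # kinfo/pidtaskinfo are fetched once, then served from cache
        print(p.name(), p.ppid(), p.create_time(), p.num_threads())
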
+ ret = cext.proc_kinfo_oneshot(self.pid) + assert len(ret) == len(kinfo_proc_map) + return ret + + @memoize_when_activated + def _get_pidtaskinfo(self): + # Note: should work for PIDs owned by user only. + with catch_zombie(self): + ret = cext.proc_pidtaskinfo_oneshot(self.pid) + assert len(ret) == len(pidtaskinfo_map) + return ret + + def oneshot_enter(self): + self._get_kinfo_proc.cache_activate() + self._get_pidtaskinfo.cache_activate() + + def oneshot_exit(self): + self._get_kinfo_proc.cache_deactivate() + self._get_pidtaskinfo.cache_deactivate() + + @wrap_exceptions + def name(self): + name = self._get_kinfo_proc()[kinfo_proc_map['name']] + return name if name is not None else cext.proc_name(self.pid) + + @wrap_exceptions + def exe(self): + with catch_zombie(self): + return cext.proc_exe(self.pid) + + @wrap_exceptions + def cmdline(self): + with catch_zombie(self): + return cext.proc_cmdline(self.pid) + + @wrap_exceptions + def environ(self): + with catch_zombie(self): + return parse_environ_block(cext.proc_environ(self.pid)) + + @wrap_exceptions + def ppid(self): + self._ppid = self._get_kinfo_proc()[kinfo_proc_map['ppid']] + return self._ppid + + @wrap_exceptions + def cwd(self): + with catch_zombie(self): + return cext.proc_cwd(self.pid) + + @wrap_exceptions + def uids(self): + rawtuple = self._get_kinfo_proc() + return _common.puids( + rawtuple[kinfo_proc_map['ruid']], + rawtuple[kinfo_proc_map['euid']], + rawtuple[kinfo_proc_map['suid']]) + + @wrap_exceptions + def gids(self): + rawtuple = self._get_kinfo_proc() + return _common.puids( + rawtuple[kinfo_proc_map['rgid']], + rawtuple[kinfo_proc_map['egid']], + rawtuple[kinfo_proc_map['sgid']]) + + @wrap_exceptions + def terminal(self): + tty_nr = self._get_kinfo_proc()[kinfo_proc_map['ttynr']] + tmap = _psposix.get_terminal_map() + try: + return tmap[tty_nr] + except KeyError: + return None + + @wrap_exceptions + def memory_info(self): + rawtuple = self._get_pidtaskinfo() + return pmem( + rawtuple[pidtaskinfo_map['rss']], + rawtuple[pidtaskinfo_map['vms']], + rawtuple[pidtaskinfo_map['pfaults']], + rawtuple[pidtaskinfo_map['pageins']], + ) + + @wrap_exceptions + def memory_full_info(self): + basic_mem = self.memory_info() + uss = cext.proc_memory_uss(self.pid) + return pfullmem(*basic_mem + (uss, )) + + @wrap_exceptions + def cpu_times(self): + rawtuple = self._get_pidtaskinfo() + return _common.pcputimes( + rawtuple[pidtaskinfo_map['cpuutime']], + rawtuple[pidtaskinfo_map['cpustime']], + # children user / system times are not retrievable (set to 0) + 0.0, 0.0) + + @wrap_exceptions + def create_time(self): + return self._get_kinfo_proc()[kinfo_proc_map['ctime']] + + @wrap_exceptions + def num_ctx_switches(self): + # Unvoluntary value seems not to be available; + # getrusage() numbers seems to confirm this theory. + # We set it to 0. 
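
For reference, a usage sketch of the memory accessors implemented above: memory_info() comes straight from the cached pidtaskinfo tuple, while memory_full_info() adds the more expensive uss value. Assumes psutil is importable.

    import psutil

    p = psutil.Process()
    print(p.memory_info().rss)       # resident set size, in bytes
    print(p.memory_full_info().uss)  # unique set size, in bytes; costs an extra call
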
+ vol = self._get_pidtaskinfo()[pidtaskinfo_map['volctxsw']] + return _common.pctxsw(vol, 0) + + @wrap_exceptions + def num_threads(self): + return self._get_pidtaskinfo()[pidtaskinfo_map['numthreads']] + + @wrap_exceptions + def open_files(self): + if self.pid == 0: + return [] + files = [] + with catch_zombie(self): + rawlist = cext.proc_open_files(self.pid) + for path, fd in rawlist: + if isfile_strict(path): + ntuple = _common.popenfile(path, fd) + files.append(ntuple) + return files + + @wrap_exceptions + def connections(self, kind='inet'): + if kind not in conn_tmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap]))) + families, types = conn_tmap[kind] + with catch_zombie(self): + rawlist = cext.proc_connections(self.pid, families, types) + ret = [] + for item in rawlist: + fd, fam, type, laddr, raddr, status = item + status = TCP_STATUSES[status] + fam = sockfam_to_enum(fam) + type = socktype_to_enum(type) + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + nt = _common.pconn(fd, fam, type, laddr, raddr, status) + ret.append(nt) + return ret + + @wrap_exceptions + def num_fds(self): + if self.pid == 0: + return 0 + with catch_zombie(self): + return cext.proc_num_fds(self.pid) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def nice_get(self): + with catch_zombie(self): + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + with catch_zombie(self): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def status(self): + code = self._get_kinfo_proc()[kinfo_proc_map['status']] + # XXX is '?' legit? (we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + @wrap_exceptions + def threads(self): + with catch_zombie(self): + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + return retlist + + @wrap_exceptions + def memory_maps(self): + with catch_zombie(self): + return cext.proc_memory_maps(self.pid) diff --git a/server/www/packages/packages-windows/x86/psutil/_psposix.py b/server/www/packages/packages-windows/x86/psutil/_psposix.py new file mode 100644 index 0000000..6bb8444 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_psposix.py @@ -0,0 +1,182 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Routines common to all posix systems.""" + +import errno +import glob +import os +import sys +import time + +from ._common import memoize +from ._common import sdiskusage +from ._common import usage_percent +from ._compat import PY3 +from ._compat import unicode +from ._exceptions import TimeoutExpired + + +__all__ = ['pid_exists', 'wait_pid', 'disk_usage', 'get_terminal_map'] + + +def pid_exists(pid): + """Check whether pid exists in the current process table.""" + if pid == 0: + # According to "man 2 kill" PID 0 has a special meaning: + # it refers to <> so we don't want to go any further. + # If we get here it means this UNIX platform *does* have + # a process with id 0. 
+ return True + try: + os.kill(pid, 0) + except OSError as err: + if err.errno == errno.ESRCH: + # ESRCH == No such process + return False + elif err.errno == errno.EPERM: + # EPERM clearly means there's a process to deny access to + return True + else: + # According to "man 2 kill" possible error values are + # (EINVAL, EPERM, ESRCH) therefore we should never get + # here. If we do let's be explicit in considering this + # an error. + raise err + else: + return True + + +def wait_pid(pid, timeout=None, proc_name=None): + """Wait for process with pid 'pid' to terminate and return its + exit status code as an integer. + + If pid is not a children of os.getpid() (current process) just + waits until the process disappears and return None. + + If pid does not exist at all return None immediately. + + Raise TimeoutExpired on timeout expired. + """ + def check_timeout(delay): + if timeout is not None: + if timer() >= stop_at: + raise TimeoutExpired(timeout, pid=pid, name=proc_name) + time.sleep(delay) + return min(delay * 2, 0.04) + + timer = getattr(time, 'monotonic', time.time) + if timeout is not None: + def waitcall(): + return os.waitpid(pid, os.WNOHANG) + stop_at = timer() + timeout + else: + def waitcall(): + return os.waitpid(pid, 0) + + delay = 0.0001 + while True: + try: + retpid, status = waitcall() + except OSError as err: + if err.errno == errno.EINTR: + delay = check_timeout(delay) + continue + elif err.errno == errno.ECHILD: + # This has two meanings: + # - pid is not a child of os.getpid() in which case + # we keep polling until it's gone + # - pid never existed in the first place + # In both cases we'll eventually return None as we + # can't determine its exit status code. + while True: + if pid_exists(pid): + delay = check_timeout(delay) + else: + return + else: + raise + else: + if retpid == 0: + # WNOHANG was used, pid is still running + delay = check_timeout(delay) + continue + # process exited due to a signal; return the integer of + # that signal + if os.WIFSIGNALED(status): + return -os.WTERMSIG(status) + # process exited using exit(2) system call; return the + # integer exit(2) system call has been called with + elif os.WIFEXITED(status): + return os.WEXITSTATUS(status) + else: + # should never happen + raise ValueError("unknown process exit status %r" % status) + + +def disk_usage(path): + """Return disk usage associated with path. + Note: UNIX usually reserves 5% disk space which is not accessible + by user. In this function "total" and "used" values reflect the + total and used disk space whereas "free" and "percent" represent + the "free" and "used percent" user disk space. + """ + if PY3: + st = os.statvfs(path) + else: + # os.statvfs() does not support unicode on Python 2: + # - https://github.com/giampaolo/psutil/issues/416 + # - http://bugs.python.org/issue18695 + try: + st = os.statvfs(path) + except UnicodeEncodeError: + if isinstance(path, unicode): + try: + path = path.encode(sys.getfilesystemencoding()) + except UnicodeEncodeError: + pass + st = os.statvfs(path) + else: + raise + + # Total space which is only available to root (unless changed + # at system level). + total = (st.f_blocks * st.f_frsize) + # Remaining free space usable by root. + avail_to_root = (st.f_bfree * st.f_frsize) + # Remaining free space usable by user. + avail_to_user = (st.f_bavail * st.f_frsize) + # Total space being used in general. + used = (total - avail_to_root) + # Total space which is available to user (same as 'total' but + # for the user). 
+ total_user = used + avail_to_user + # User usage percent compared to the total amount of space + # the user can use. This number would be higher if compared + # to root's because the user has less space (usually -5%). + usage_percent_user = usage_percent(used, total_user, _round=1) + + # NB: the percentage is -5% than what shown by df due to + # reserved blocks that we are currently not considering: + # https://github.com/giampaolo/psutil/issues/829#issuecomment-223750462 + return sdiskusage( + total=total, used=used, free=avail_to_user, percent=usage_percent_user) + + +@memoize +def get_terminal_map(): + """Get a map of device-id -> path as a dict. + Used by Process.terminal() + """ + ret = {} + ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*') + for name in ls: + assert name not in ret, name + try: + ret[os.stat(name).st_rdev] = name + except OSError as err: + if err.errno != errno.ENOENT: + raise + return ret diff --git a/server/www/packages/packages-windows/x86/psutil/_pssunos.py b/server/www/packages/packages-windows/x86/psutil/_pssunos.py new file mode 100644 index 0000000..5471d5a --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/_pssunos.py @@ -0,0 +1,725 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Sun OS Solaris platform implementation.""" + +import errno +import os +import socket +import subprocess +import sys +from collections import namedtuple +from socket import AF_INET + +from . import _common +from . import _psposix +from . import _psutil_posix as cext_posix +from . import _psutil_sunos as cext +from ._common import AF_INET6 +from ._common import isfile_strict +from ._common import memoize_when_activated +from ._common import sockfam_to_enum +from ._common import socktype_to_enum +from ._common import usage_percent +from ._compat import b +from ._compat import PY3 +from ._exceptions import AccessDenied +from ._exceptions import NoSuchProcess +from ._exceptions import ZombieProcess + + +__extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +PAGE_SIZE = os.sysconf('SC_PAGE_SIZE') +AF_LINK = cext_posix.AF_LINK +IS_64_BIT = sys.maxsize > 2**32 + +CONN_IDLE = "IDLE" +CONN_BOUND = "BOUND" + +PROC_STATUSES = { + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SRUN: _common.STATUS_RUNNING, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SIDL: _common.STATUS_IDLE, + cext.SONPROC: _common.STATUS_RUNNING, # same as run + cext.SWAIT: _common.STATUS_WAITING, +} + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, + cext.TCPS_IDLE: CONN_IDLE, # sunos specific + cext.TCPS_BOUND: CONN_BOUND, # sunos specific +} + +proc_info_map = dict( + ppid=0, + rss=1, + vms=2, + create_time=3, + nice=4, + num_threads=5, + status=6, + ttynr=7) + + 
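
Worked numbers for the statvfs arithmetic in _psposix.disk_usage() above, with invented counts; the gap between this percent and what df reports comes from the reserved blocks excluded from f_bavail, as discussed there.

    f_frsize = 4096
    f_blocks, f_bfree, f_bavail = 1000000, 200000, 150000  # invented statvfs counts

    total = f_blocks * f_frsize            # space visible to root
    avail_to_root = f_bfree * f_frsize
    avail_to_user = f_bavail * f_frsize    # excludes the reserved blocks
    used = total - avail_to_root
    total_user = used + avail_to_user
    percent = round(used * 100.0 / total_user, 1)
    print(used, avail_to_user, percent)    # 3276800000 614400000 84.2
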
+# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.cpu_times() +scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) +# psutil.cpu_times(percpu=True) +pcputimes = namedtuple('pcputimes', + ['user', 'system', 'children_user', 'children_system']) +# psutil.virtual_memory() +svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms']) +pfullmem = pmem +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple('pmmap_grouped', + ['path', 'rss', 'anonymous', 'locked']) +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) + + +# ===================================================================== +# --- utils +# ===================================================================== + + +def get_procfs_path(): + """Return updated psutil.PROCFS_PATH constant.""" + return sys.modules['psutil'].PROCFS_PATH + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """Report virtual memory metrics.""" + # we could have done this with kstat, but IMHO this is good enough + total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE + # note: there's no difference on Solaris + free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE + used = total - free + percent = usage_percent(used, total, _round=1) + return svmem(total, avail, percent, used, free) + + +def swap_memory(): + """Report swap memory metrics.""" + sin, sout = cext.swap_mem() + # XXX + # we are supposed to get total/free by doing so: + # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/ + # usr/src/cmd/swap/swap.c + # ...nevertheless I can't manage to obtain the same numbers as 'swap' + # cmdline utility, so let's parse its output (sigh!) 
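
Since the code below shells out to 'swap -l', here is what one line of its output looks like and how the 512-byte block counts in the last two columns map to bytes; the sample line is invented.

    line = "/dev/zvol/dsk/rpool/swap 256,1 16 2097136 2097136".split()
    blocks_total, blocks_free = map(int, line[-2:])
    print(blocks_total * 512, blocks_free * 512)  # 1073733632 1073733632 bytes
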
+ p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' % + os.environ['PATH'], 'swap', '-l'], + stdout=subprocess.PIPE) + stdout, stderr = p.communicate() + if PY3: + stdout = stdout.decode(sys.stdout.encoding) + if p.returncode != 0: + raise RuntimeError("'swap -l' failed (retcode=%s)" % p.returncode) + + lines = stdout.strip().split('\n')[1:] + if not lines: + raise RuntimeError('no swap device(s) configured') + total = free = 0 + for line in lines: + line = line.split() + t, f = line[-2:] + total += int(int(t) * 512) + free += int(int(f) * 512) + used = total - free + percent = usage_percent(used, total, _round=1) + return _common.sswap(total, used, free, percent, + sin * PAGE_SIZE, sout * PAGE_SIZE) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system-wide CPU times as a named tuple""" + ret = cext.per_cpu_times() + return scputimes(*[sum(x) for x in zip(*ret)]) + + +def per_cpu_times(): + """Return system per-CPU times as a list of named tuples""" + ret = cext.per_cpu_times() + return [scputimes(*x) for x in ret] + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except ValueError: + # mimic os.cpu_count() behavior + return None + + +def cpu_count_physical(): + """Return the number of physical CPUs in the system.""" + return cext.cpu_count_phys() + + +def cpu_stats(): + """Return various CPU stats as a named tuple.""" + ctx_switches, interrupts, syscalls, traps = cext.cpu_stats() + soft_interrupts = 0 + return _common.scpustats(ctx_switches, interrupts, soft_interrupts, + syscalls) + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_io_counters = cext.disk_io_counters +disk_usage = _psposix.disk_usage + + +def disk_partitions(all=False): + """Return system disk partitions.""" + # TODO - the filtering logic should be better checked so that + # it tries to reflect 'df' as much as possible + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + # Differently from, say, Linux, we don't have a list of + # common fs types so the best we can do, AFAIK, is to + # filter by filesystem having a total size > 0. + if not disk_usage(mountpoint).total: + continue + ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_io_counters = cext.net_io_counters +net_if_addrs = cext_posix.net_if_addrs + + +def net_connections(kind, _pid=-1): + """Return socket connections. If pid == -1 return system-wide + connections (as opposed to connections opened by one process only). + Only INET sockets are returned (UNIX are not). 
+ """ + cmap = _common.conn_tmap.copy() + if _pid == -1: + cmap.pop('unix', 0) + if kind not in cmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in cmap]))) + families, types = _common.conn_tmap[kind] + rawlist = cext.net_connections(_pid) + ret = set() + for item in rawlist: + fd, fam, type_, laddr, raddr, status, pid = item + if fam not in families: + continue + if type_ not in types: + continue + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + status = TCP_STATUSES[status] + fam = sockfam_to_enum(fam) + type_ = socktype_to_enum(type_) + if _pid == -1: + nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid) + else: + nt = _common.pconn(fd, fam, type_, laddr, raddr, status) + ret.add(nt) + return list(ret) + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + ret = cext.net_if_stats() + for name, items in ret.items(): + isup, duplex, speed, mtu = items + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + ret[name] = _common.snicstats(isup, duplex, speed, mtu) + return ret + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + localhost = (':0.0', ':0') + for item in rawlist: + user, tty, hostname, tstamp, user_process, pid = item + # note: the underlying C function includes entries about + # system boot, run level and others. We might want + # to use them in the future. + if not user_process: + continue + if hostname in localhost: + hostname = 'localhost' + nt = _common.suser(user, tty, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()] + + +def pid_exists(pid): + """Check for the existence of a unix pid.""" + return _psposix.pid_exists(pid) + + +def wrap_exceptions(fun): + """Call callable into a try/except clause and translate ENOENT, + EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. + """ + + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except EnvironmentError as err: + if self.pid == 0: + if 0 in pids(): + raise AccessDenied(self.pid, self._name) + else: + raise + # ENOENT (no such file or directory) gets raised on open(). + # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. 
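# (Editor's note.) The branch below resolves the ambiguous ENOENT/ESRCH
# case: if the PID has disappeared from the process list the error becomes
# NoSuchProcess, while a PID that is still listed but whose procfs files
# cannot be read is reported as ZombieProcess. EPERM/EACCES map to
# AccessDenied, and anything else is re-raised unchanged.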
+ if err.errno in (errno.ENOENT, errno.ESRCH): + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + if err.errno in (errno.EPERM, errno.EACCES): + raise AccessDenied(self.pid, self._name) + raise + return wrapper + + +class Process(object): + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_procfs_path"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + self._procfs_path = get_procfs_path() + + def oneshot_enter(self): + self._proc_name_and_args.cache_activate() + self._proc_basic_info.cache_activate() + self._proc_cred.cache_activate() + + def oneshot_exit(self): + self._proc_name_and_args.cache_deactivate() + self._proc_basic_info.cache_deactivate() + self._proc_cred.cache_deactivate() + + @memoize_when_activated + def _proc_name_and_args(self): + return cext.proc_name_and_args(self.pid, self._procfs_path) + + @memoize_when_activated + def _proc_basic_info(self): + ret = cext.proc_basic_info(self.pid, self._procfs_path) + assert len(ret) == len(proc_info_map) + return ret + + @memoize_when_activated + def _proc_cred(self): + return cext.proc_cred(self.pid, self._procfs_path) + + @wrap_exceptions + def name(self): + # note: max len == 15 + return self._proc_name_and_args()[0] + + @wrap_exceptions + def exe(self): + try: + return os.readlink( + "%s/%s/path/a.out" % (self._procfs_path, self.pid)) + except OSError: + pass # continue and guess the exe name from the cmdline + # Will be guessed later from cmdline but we want to explicitly + # invoke cmdline here in order to get an AccessDenied + # exception if the user has not enough privileges. + self.cmdline() + return "" + + @wrap_exceptions + def cmdline(self): + return self._proc_name_and_args()[1].split(' ') + + @wrap_exceptions + def environ(self): + return cext.proc_environ(self.pid, self._procfs_path) + + @wrap_exceptions + def create_time(self): + return self._proc_basic_info()[proc_info_map['create_time']] + + @wrap_exceptions + def num_threads(self): + return self._proc_basic_info()[proc_info_map['num_threads']] + + @wrap_exceptions + def nice_get(self): + # Note #1: for some reason getpriority(3) return ESRCH (no such + # process) for certain low-pid processes, no matter what (even + # as root). + # The process actually exists though, as it has a name, + # creation time, etc. + # The best thing we can do here appears to be raising AD. + # Note: tested on Solaris 11; on Open Solaris 5 everything is + # fine. + # + # Note #2: we also can get niceness from /proc/pid/psinfo + # but it's wrong, see: + # https://github.com/giampaolo/psutil/issues/1082 + try: + return cext_posix.getpriority(self.pid) + except EnvironmentError as err: + # 48 is 'operation not supported' but errno does not expose + # it. It occurs for low system pids. + if err.errno in (errno.ENOENT, errno.ESRCH, 48): + if pid_exists(self.pid): + raise AccessDenied(self.pid, self._name) + raise + + @wrap_exceptions + def nice_set(self, value): + if self.pid in (2, 3): + # Special case PIDs: internally setpriority(3) return ESRCH + # (no such process), no matter what. + # The process actually exists though, as it has a name, + # creation time, etc. 
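# (Editor's note, hedged.) PIDs 2 and 3 are conventionally the Solaris
# kernel daemons pageout and fsflush; setpriority(3) answers ESRCH for
# them even though they clearly exist, so the code below special-cases
# them as AccessDenied rather than NoSuchProcess.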
+ raise AccessDenied(self.pid, self._name) + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def ppid(self): + self._ppid = self._proc_basic_info()[proc_info_map['ppid']] + return self._ppid + + @wrap_exceptions + def uids(self): + real, effective, saved, _, _, _ = self._proc_cred() + return _common.puids(real, effective, saved) + + @wrap_exceptions + def gids(self): + _, _, _, real, effective, saved = self._proc_cred() + return _common.puids(real, effective, saved) + + @wrap_exceptions + def cpu_times(self): + try: + times = cext.proc_cpu_times(self.pid, self._procfs_path) + except OSError as err: + if err.errno == errno.EOVERFLOW and not IS_64_BIT: + # We may get here if we attempt to query a 64bit process + # with a 32bit python. + # Error originates from read() and also tools like "cat" + # fail in the same way (!). + # Since there simply is no way to determine CPU times we + # return 0.0 as a fallback. See: + # https://github.com/giampaolo/psutil/issues/857 + times = (0.0, 0.0, 0.0, 0.0) + else: + raise + return _common.pcputimes(*times) + + @wrap_exceptions + def cpu_num(self): + return cext.proc_cpu_num(self.pid, self._procfs_path) + + @wrap_exceptions + def terminal(self): + procfs_path = self._procfs_path + hit_enoent = False + tty = wrap_exceptions( + self._proc_basic_info()[proc_info_map['ttynr']]) + if tty != cext.PRNODEV: + for x in (0, 1, 2, 255): + try: + return os.readlink( + '%s/%d/path/%d' % (procfs_path, self.pid, x)) + except OSError as err: + if err.errno == errno.ENOENT: + hit_enoent = True + continue + raise + if hit_enoent: + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (procfs_path, self.pid)) + + @wrap_exceptions + def cwd(self): + # /proc/PID/path/cwd may not be resolved by readlink() even if + # it exists (ls shows it). If that's the case and the process + # is still alive return None (we can return None also on BSD). + # Reference: http://goo.gl/55XgO + procfs_path = self._procfs_path + try: + return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid)) + except OSError as err: + if err.errno == errno.ENOENT: + os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD + return None + raise + + @wrap_exceptions + def memory_info(self): + ret = self._proc_basic_info() + rss = ret[proc_info_map['rss']] * 1024 + vms = ret[proc_info_map['vms']] * 1024 + return pmem(rss, vms) + + memory_full_info = memory_info + + @wrap_exceptions + def status(self): + code = self._proc_basic_info()[proc_info_map['status']] + # XXX is '?' legit? (we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + @wrap_exceptions + def threads(self): + procfs_path = self._procfs_path + ret = [] + tids = os.listdir('%s/%d/lwp' % (procfs_path, self.pid)) + hit_enoent = False + for tid in tids: + tid = int(tid) + try: + utime, stime = cext.query_process_thread( + self.pid, tid, procfs_path) + except EnvironmentError as err: + if err.errno == errno.EOVERFLOW and not IS_64_BIT: + # We may get here if we attempt to query a 64bit process + # with a 32bit python. + # Error originates from read() and also tools like "cat" + # fail in the same way (!). + # Since there simply is no way to determine CPU times we + # return 0.0 as a fallback. 
See: + # https://github.com/giampaolo/psutil/issues/857 + continue + # ENOENT == thread gone in meantime + if err.errno == errno.ENOENT: + hit_enoent = True + continue + raise + else: + nt = _common.pthread(tid, utime, stime) + ret.append(nt) + if hit_enoent: + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (procfs_path, self.pid)) + return ret + + @wrap_exceptions + def open_files(self): + retlist = [] + hit_enoent = False + procfs_path = self._procfs_path + pathdir = '%s/%d/path' % (procfs_path, self.pid) + for fd in os.listdir('%s/%d/fd' % (procfs_path, self.pid)): + path = os.path.join(pathdir, fd) + if os.path.islink(path): + try: + file = os.readlink(path) + except OSError as err: + # ENOENT == file which is gone in the meantime + if err.errno == errno.ENOENT: + hit_enoent = True + continue + raise + else: + if isfile_strict(file): + retlist.append(_common.popenfile(file, int(fd))) + if hit_enoent: + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (procfs_path, self.pid)) + return retlist + + def _get_unix_sockets(self, pid): + """Get UNIX sockets used by process by parsing 'pfiles' output.""" + # TODO: rewrite this in C (...but the damn netstat source code + # does not include this part! Argh!!) + cmd = "pfiles %s" % pid + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = [x.decode(sys.stdout.encoding) + for x in (stdout, stderr)] + if p.returncode != 0: + if 'permission denied' in stderr.lower(): + raise AccessDenied(self.pid, self._name) + if 'no such process' in stderr.lower(): + raise NoSuchProcess(self.pid, self._name) + raise RuntimeError("%r command error\n%s" % (cmd, stderr)) + + lines = stdout.split('\n')[2:] + for i, line in enumerate(lines): + line = line.lstrip() + if line.startswith('sockname: AF_UNIX'): + path = line.split(' ', 2)[2] + type = lines[i - 2].strip() + if type == 'SOCK_STREAM': + type = socket.SOCK_STREAM + elif type == 'SOCK_DGRAM': + type = socket.SOCK_DGRAM + else: + type = -1 + yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE) + + @wrap_exceptions + def connections(self, kind='inet'): + ret = net_connections(kind, _pid=self.pid) + # The underlying C implementation retrieves all OS connections + # and filters them by PID. At this point we can't tell whether + # an empty list means there were no connections for process or + # process is no longer active so we force NSP in case the PID + # is no longer there. + if not ret: + # will raise NSP if process is gone + os.stat('%s/%s' % (self._procfs_path, self.pid)) + + # UNIX sockets + if kind in ('all', 'unix'): + ret.extend([_common.pconn(*conn) for conn in + self._get_unix_sockets(self.pid)]) + return ret + + nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked') + nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked') + + @wrap_exceptions + def memory_maps(self): + def toaddr(start, end): + return '%s-%s' % (hex(start)[2:].strip('L'), + hex(end)[2:].strip('L')) + + procfs_path = self._procfs_path + retlist = [] + try: + rawlist = cext.proc_memory_maps(self.pid, procfs_path) + except OSError as err: + if err.errno == errno.EOVERFLOW and not IS_64_BIT: + # We may get here if we attempt to query a 64bit process + # with a 32bit python. + # Error originates from read() and also tools like "cat" + # fail in the same way (!). + # Since there simply is no way to determine CPU times we + # return 0.0 as a fallback. 
See: + # https://github.com/giampaolo/psutil/issues/857 + return [] + else: + raise + hit_enoent = False + for item in rawlist: + addr, addrsize, perm, name, rss, anon, locked = item + addr = toaddr(addr, addrsize) + if not name.startswith('['): + try: + name = os.readlink( + '%s/%s/path/%s' % (procfs_path, self.pid, name)) + except OSError as err: + if err.errno == errno.ENOENT: + # sometimes the link may not be resolved by + # readlink() even if it exists (ls shows it). + # If that's the case we just return the + # unresolved link path. + # This seems an incosistency with /proc similar + # to: http://goo.gl/55XgO + name = '%s/%s/path/%s' % (procfs_path, self.pid, name) + hit_enoent = True + else: + raise + retlist.append((addr, perm, name, rss, anon, locked)) + if hit_enoent: + # raise NSP if the process disappeared on us + os.stat('%s/%s' % (procfs_path, self.pid)) + return retlist + + @wrap_exceptions + def num_fds(self): + return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) + + @wrap_exceptions + def num_ctx_switches(self): + return _common.pctxsw( + *cext.proc_num_ctx_switches(self.pid, self._procfs_path)) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) diff --git a/server/www/packages/packages-windows/x86/psutil/_psutil_windows.pyd b/server/www/packages/packages-windows/x86/psutil/_psutil_windows.pyd new file mode 100644 index 0000000000000000000000000000000000000000..be0bfa9da52902b1a672b0e6b0c5743b446b61f8 GIT binary patch literal 48640 zcmeFa4_H*!oi~1gK_@z9CUHne#bnZsDNWE0%%H%4LIi~bVVHp#jekaj!3!dg;f{Y& z(@F+h$(7c&Hk)m2t1;Q6eY0=VY&M~dwHSzhTDLWcF{GN*cHQn6NSC%&$<{dU=X=h* zGnWC=KKnes=l4F(yQAmcbI;%J`TjfK^F8NYRy?#)y+S(<{hJYm)YO^%ASxT!tmd)*r zfd#W>&9Ev>fA!gh&%J$Y)+1{8#pRlMaBOwdFWT6j^XgoTGs6M!yA8V4H+YX$vj{AB|Ok8!`5?2*UaW!Nz*2UJ%|(LlW8)ZbR6C zp!pS}>jk`+&|yKw3kY{2d<{YKD;9)-1;IdTdjp!8g^UXbHzQDa&97Jx?$J3H59wS7hrcG6WPMAD z2!fhwqG{*pIgO=`$;n<$5Xpc2__QcsK62ci1PEwxi4URKGB3eqG#oz=V`P9#jfZ(a;4QA z9@6)fT1~E+=vkJS_8dcoLA0>qydVns*C|b+y_Zl#wDSBRUC)3t<9O;$rGL?(rmy1} zDqt!^H6D5HVKh)D&+o_0>pPC_pDYjrdz8^fCTOB>%VR((&}{9iw3=$XEdu?E{Bw&S zgvZ-M8Q$ny%TXX4O$q*DpLhF5FE0|XnRnjTd10(T~_JDS$ddeQ@!&og;hrjyGd zFK?is|m1)(A(*P>DMvTD%l0=Fea(<%X!JO*Seil(^;z;RzFLwpYHM4mxe z@o}p4t-nw$JrjRI|R?Ah01^hVQaQ5C&dc_Z?8IGQTmj`WpC_r)ssSq&wb39%x} zex_H{@c^^>@TdV~(YYWSx*Bm5@Br1E?$nVkJY-Ntc8e$zj2jt@ES<(C3OW-YjSVj= zKD=ABJp+-3LCu)hvj-qQO=KTtq6&aZF%FMB9!-0W4?P@>hVD=XqN(j)&FXhWlcA_W zb5#V0W5ha&6zG*g7@4^SCCbsX7ekuMvBlv82P;TcO7l!$#tb|>&w)^~PC-0?uS_N( zmlHVv7|A3F@wy|85D`V`Fy7A`1d7dsVpC0Dt<~zrqs;}7iWQLTT9Crysi9lNFB7ii zj~EwWteFY8%Y<|j&Ciu$LW0!xoZLR`d}{jF1=D+}CR|Bq&YwiCsprjjj-GM=%K4Mw zQPaCWfB)!vYpi+{0^_QM&{*dCR53H&;urmrKE{}F<;oZ_-(F;-+ljw-%hG=Mb0(~LN5^4i#~tdYBfQt)8KU&kjh#H z44yi`G@}16CbIMl{cokQV-`y1s=p8vB7B4EOj=&%2iN1p_%H;W^S>(Ai8TK_=h3MPi z@kc_dp#Ek6cZymB0-)O8QYXc=K&=U=MFY|==E;R7)K+`p{$s~MsfXGh*DF`KcrzL2RKLi zyeraH$|fQV+$ioN;;+5Jh`(x(=B8h=$?9=FS*;ori#7f7*-AU#mHrL=U0)jS?+NPf z!#_{zujN5j2K{{lCB$+~r+Ul~&>vl+2CPJwh*TzaI=yR%GMN^EUd0q2`5u;JL@LaL zA^kcfU(`YCf$#>GH}V~l$KJm0Wb0HPr}7*J?+| z>aY>g_`;($MvU6T!9kzf6n(xAoTk<1w!xMx1^x3lgz6Nm9T?U3$vdnL1wQmP*8i7q04e7bmj~wy(>LN2(A0b1$?U8?!r`xZP zW!+b;lTXV@-y^qyG-KWcG!-7VY)x7gKB{{l^gHlo26$6Zq88&1j@eJvxWrZsVhr-| zrP^pT${991TCC__T`}5!Ans$d%ZEV)gq`4Uapv>Bu`P{xz(z1p3fT&{TWB)ep}|51O;4E!{*H8f1UW0svLpu5##Ycaq zQ@N2?zqi#|-1A{*Cg;dYUq+o4!CTj&XY-v_4_yi(&weg{jkUwLYd*CdGV?Xbh`#S> zD~)5XTy^ceRHD;+sYH`=%dTnA>Kz^a*|}-MFqMF`mFJd?Y6!H7%ao~CGF|7EU5+nr=?Wnw zM#ygKP7H}?`8<)~Z4%xr$a)L7sn7?c#+O@5qei$VAgeX$KIWcS8a`%o5~3da;CeJF 
zS7E%YAV}C`K|z{`IxQsW8ZJjG3=VB0aU4!=hYC;82u&b_M5X_@9LA*tGjtm!KnOl7 zyny!$M~Q?D9C+WKdD|yBLC=iA`)CGo$bL+|Au<< zifvcRDUZ-k@QF8c0xRGIDo?gN-{XL1P4TW%`{X~ikeX0(3204ObO?hAn7}rM+J{n- z@I$Fnl&67&Vz~e7m>-%rnX@#p6eKaVg_$e$yW_fs63FXGQO#2XRzE>Y?u{v1Lv z{_Y(4PhUX%Jc4jPe;!3#HvpX8$DhrJI}mKu{J9kIH3%jTe{My5C&Ga`{=A3c2z!0} z`FX^L5Q*L0^gRw`}m#h6Yb zPt9`KU;6Uus4Rrx(-RcK{m8IFm-)0b6Lv0k%pke-u}4)oUg zOO`&E*bc)81Ak)sH%c!ptAxuO2)m!grTF-lR##PlYE@;FR#64mDr#ArJKRf_E-mwT zuoF;K0n|=(!dJlsk5gXEG4CmJdn=Zf`Ae(IJZP@gU$zn$q&DFz4i;E6-h8;DZtTxj z%VQ-~Hu2;ZuZ9hX%1R)kw!(|PdE0D3i;X(ikIhqcHEGjes&W$t3bbutVsc~YuxF3#oViNs<@xG@j z9|Ish9y)-OIGm}*GetQ~YsHChFf@6l+9SD<&vLTYGv?!;vYm-IXZA|}G(3Dh=h zaH)rNV1EVJUY{mq>a^xI95HCd(xZNJb1OVn^&zR9&Ym>@cR0L)S5#~hZ%l;I`Y8GGoG$_hG7a@Efmlsp-y{8;xkTl#`?%q{KsVfB3@1YZ;G?$ zPVy4RpVf4_(+|hU*^fT~$9SLWhj;vx>0vePfA0SmVE{Az0T@l6v1QqEZH2b`ZB@3f z+xFX@u|02l(RRf48{2=_{%A|dzA5{bY+H6-wkLaK_Qvdv?48;BvcHx6yX^7oX!f+6 ztek~88**B69?sd8^JvcgoTqYrnDg_TH*!A6`9qG`o@rla-)R4${aO2e*k?Fyaopo5 zbd)>Xjx~;l9breG<6DlW94|UvaU5~{)`352nmZ@=v$@&1dAVh|b-4|>t+`*w4d?F9 zeJc04+#|UsbAOq8HurbAf5Ra^nMc-cZ;-Y~?A1_MD%g$SzSC&_uSC=Q|wdZ{)Z&%(^dC%n?$xF?@ zJ^!x!d-5ImrTGu!x8y&Z|IPfv`LE^wD*re6GYYZ_vI`a!R2O&)zFY8YL4U!o3Zezm zoMp}x&K=G!=Qo{)o#&nZ=Df4;p2C8{&4rzXUoQOn!hb9LL*cc;IYsvtEh*Ykw5w=O z(GQCb7wHzySiE?#Z}Hm24=>)j`0IZN#l}-muy?ov*gJo z-&=Bc$=gf*xJ0lB%m_@i&9tT49JU%;n@zTzwS8!tn|)t)8|d)O?1R}yv*+a8pVN{f zxej#bM?6y zxu4E00j8UP>8{-G=Kdu2bZ+WG^TL9KB?}*2xO?G0FZ}7kw->Hnw0=?3qDL1!zG&Z~ zZ!S8z=*>l@yg7OE@(S}F%v+PUA#ZQqH}n26?{MD9yz_b4`Of_1`GNeM`Gff<^RMSy z3Z5xAUNBma>bw(J+vwcme8zdidCobn&{nvw@cV^nMLk827k#bhABqkZ{ZrA8i(V^w zv*=1u^Wwq9h9%3FtXOgg*m!x#ua;a`^3f8(CI}YrtIbwmd)9W+_Kr=L-IFb6Kbif5 z?2MdG=X^fLm$No!3ovjz$6?=Lf5rY2e1m$#{s(YgiQ_>>zvDL!9Yz|={Zj6?bN?y# zpL0*;-n?+h73e$V-yGvXX`UU#Mx&M2H;=qQv6_ZEJ$@VUZ& zDtx8zRN-5Nzb(98IK3#nD6eR7(W;_v71b?Xz4-4IA6)#)#gWAyFHT)zSTcJ_1GwhN zB`+>{ZON~f2*r4(2KX`B?tmn(v~98-{XgxS|5umg9mYvzVN+us6wEJ~p^=zA_t)pS zpZosYk11OqSeyNjhN3M>j^gG?GAz=D9>k(7W$ICKmgFQk;z<&Q=yYhgNzN~L z#*!r-(P-k@1?%Vi1@RB?a9!8?dR?CnpN~`Tv^jT^DDohoxZfhOlYB(>k!n&)z97fQ zDe@I*As0w1X(R2VgLD#yK1t`%T)K+BMYqtMbU$sN_pmW6o^h7L4zfCSmi?1`#|Gn$ z)A$TNhdr;hqfevAJfr05K=_FC6N)PO|f)GsBtVEOK6V(#cw~0eAL$QrWB7d9s8S z)6wj4md)0&?d%9^WT)8otUn*lr}4#5;%#2aEBP({Yy7>tM2$Emz80Yl$jQYgH+dw+jl@sT1m~x}@6G_o}Zy(2w>X@Z){YpMu_+;m`9I`z!ql zf43io3|gI}m+3;iRs8jvzFSaK<}Row?3(=K_*>&AH{=cEZ7zXfmColV{0%^x8|LfNUkZ$zJjq zIYNfg9rPf5jg_%lc8r~2tt^~J^561eUe25OO)%pn-18UWlxT;_edGW+SVqhHq4fkw z&=dGu>Qw zwY%Oeg)i0a8Moby_Y%E%-X?FKcL53gnR-}FR;lV~RnqIiA8`MNRJ}T`npK;+raILf zcrnNy;t%(y`?LM|evbbld{~P{*y`5t zi%9Y)B$*no*O@0wrdeT%K$vo~75?lo)$r(qxdqyIq3xi|Rdi>JooIbHlV#W0P4=KY zYX54l+b$a!Bn1nCcY3^7iuXz2ML%b#^B@xcs59Gn!CB#~b2fwhd!0Hk<}%1Xj1bVJ zrzOsj+oTWe57sO6!?Sb&ZlHvg(f4UJJw)qhBRxsKq%HIkZKEBulXlT=8pisu2=*{b z0EZT^H&_X)U?V}Fg?tUa!rS@Npw8Q3n>Z|v<36s7+u~k1T*k}kaaKC+b6(HMQv)^0_b%g#M z`T^{}&yKT?!Iio8dHa?v1^sL7X*lzD`U`mi4Renb{8MWAK;e1iMHCo8~IoKGFtlv|AEJ$t&Nx>62;>pO=O8Y zv0SVYYecaq5u3#aVu#oxK1Hh?L9aE7bK)POL)?US;c}21CdbP0*bagGrA(HYGFvW{ zJ>Lf%jc4zosVt3c#1)?eL0VWBi{ddouQCtg^GC>H2AxT>Xf}3a9(H5_Eu=-r*hXZm zoK}ECm9&Z;pf$)^18t(G=~<+$6=`e#F>O6r8^EGi3>(2>u{tM!R~|D+Um{DvZ_Z#d zSr$5b5zAxw*an5Hh!yufPdReAlU1@Rc7WA@Zw;)8o$h_cR@TPav75SBH|xhE_yFGX z%p-U#kK+?K=N=fCfHj)}5@ul0X7Oyki0AQq(6Epfp`kYNGPG0$xLC=nus>>e9dF=G z{4_tyTX-v0SUd0JUA!B+Bti@jQN7&!A2m5vun`GxA_Y!lh?#IB8|x)c= (3, 4): + import enum +else: + enum = None + +# process priority constants, import from __init__.py: +# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx +__extra__all__ = [ + "win_service_iter", "win_service_get", + "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", 
+ "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", + "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS", + "CONN_DELETE_TCB", + "AF_LINK", +] + + +# ===================================================================== +# --- globals +# ===================================================================== + +CONN_DELETE_TCB = "DELETE_TCB" +WAIT_TIMEOUT = 0x00000102 # 258 in decimal +ACCESS_DENIED_ERRSET = frozenset([errno.EPERM, errno.EACCES, + cext.ERROR_ACCESS_DENIED]) +NO_SUCH_SERVICE_ERRSET = frozenset([cext.ERROR_INVALID_NAME, + cext.ERROR_SERVICE_DOES_NOT_EXIST]) + + +if enum is None: + AF_LINK = -1 +else: + AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1}) + AF_LINK = AddressFamily.AF_LINK + +TCP_STATUSES = { + cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED, + cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT, + cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV, + cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1, + cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2, + cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE, + cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK, + cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN, + cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING, + cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +if enum is not None: + class Priority(enum.IntEnum): + ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS + BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS + HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS + IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS + NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS + REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS + + globals().update(Priority.__members__) + +pinfo_map = dict( + num_handles=0, + ctx_switches=1, + user_time=2, + kernel_time=3, + create_time=4, + num_threads=5, + io_rcount=6, + io_wcount=7, + io_rbytes=8, + io_wbytes=9, + io_count_others=10, + io_bytes_others=11, + num_page_faults=12, + peak_wset=13, + wset=14, + peak_paged_pool=15, + paged_pool=16, + peak_non_paged_pool=17, + non_paged_pool=18, + pagefile=19, + peak_pagefile=20, + mem_private=21, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.cpu_times() +scputimes = namedtuple('scputimes', + ['user', 'system', 'idle', 'interrupt', 'dpc']) +# psutil.virtual_memory() +svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) +# psutil.Process.memory_info() +pmem = namedtuple( + 'pmem', ['rss', 'vms', + 'num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool', + 'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool', + 'pagefile', 'peak_pagefile', 'private']) +# psutil.Process.memory_full_info() +pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', )) +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss']) +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) +# psutil.Process.io_counters() +pio = namedtuple('pio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'other_count', 'other_bytes']) + + +# ===================================================================== +# --- utils +# ===================================================================== + + 
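One easy-to-miss idiom in the globals block above: globals().update(Priority.__members__) re-exports every enum member as a module-level constant, so callers can keep using the bare *_PRIORITY_CLASS names while receiving enum values on Python >= 3.4. A minimal standalone sketch of the same pattern (toy stand-in constants, illustrative only, not asserted to be the real Windows values):

import enum

NORMAL = 0x20   # stand-in value for the example
HIGH = 0x80     # stand-in value for the example

class Priority(enum.IntEnum):
    NORMAL_PRIORITY_CLASS = NORMAL
    HIGH_PRIORITY_CLASS = HIGH

# Mirror the patch: push the enum members into the module namespace.
globals().update(Priority.__members__)

print(HIGH_PRIORITY_CLASS)           # Priority.HIGH_PRIORITY_CLASS
print(HIGH_PRIORITY_CLASS == 0x80)   # True, IntEnum compares by value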
+@lru_cache(maxsize=512) +def convert_dos_path(s): + r"""Convert paths using native DOS format like: + "\Device\HarddiskVolume1\Windows\systemew\file.txt" + into: + "C:\Windows\systemew\file.txt" + """ + rawdrive = '\\'.join(s.split('\\')[:3]) + driveletter = cext.win32_QueryDosDevice(rawdrive) + return os.path.join(driveletter, s[len(rawdrive):]) + + +def py2_strencode(s): + """Encode a unicode string to a byte string by using the default fs + encoding + "replace" error handler. + """ + if PY3: + return s + else: + if isinstance(s, str): + return s + else: + return s.encode(ENCODING, errors=ENCODING_ERRS) + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """System virtual memory as a namedtuple.""" + mem = cext.virtual_mem() + totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem + # + total = totphys + avail = availphys + free = availphys + used = total - avail + percent = usage_percent((total - avail), total, _round=1) + return svmem(total, avail, percent, used, free) + + +def swap_memory(): + """Swap system memory as a (total, used, free, sin, sout) tuple.""" + mem = cext.virtual_mem() + total = mem[2] + free = mem[3] + used = total - free + percent = usage_percent(used, total, _round=1) + return _common.sswap(total, used, free, percent, 0, 0) + + +# ===================================================================== +# --- disk +# ===================================================================== + + +disk_io_counters = cext.disk_io_counters + + +def disk_usage(path): + """Return disk usage associated with path.""" + if PY3 and isinstance(path, bytes): + # XXX: do we want to use "strict"? Probably yes, in order + # to fail immediately. After all we are accepting input here... + path = path.decode(ENCODING, errors="strict") + total, free = cext.disk_usage(path) + used = total - free + percent = usage_percent(used, total, _round=1) + return _common.sdiskusage(total, used, free, percent) + + +def disk_partitions(all): + """Return disk partitions.""" + rawlist = cext.disk_partitions(all) + return [_common.sdiskpart(*x) for x in rawlist] + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system CPU times as a named tuple.""" + user, system, idle = cext.cpu_times() + # Internally, GetSystemTimes() is used, and it doesn't return + # interrupt and dpc times. cext.per_cpu_times() does, so we + # rely on it to get those only. 
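# (Editor's note.) In the line below, zip(*cext.per_cpu_times()) regroups
# the per-CPU tuples field by field, so each sum() yields a system-wide
# total; only the resulting .interrupt and .dpc fields are then used,
# because user, system and idle already come from cext.cpu_times() above.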
+ percpu_summed = scputimes(*[sum(n) for n in zip(*cext.per_cpu_times())]) + return scputimes(user, system, idle, + percpu_summed.interrupt, percpu_summed.dpc) + + +def per_cpu_times(): + """Return system per-CPU times as a list of named tuples.""" + ret = [] + for user, system, idle, interrupt, dpc in cext.per_cpu_times(): + item = scputimes(user, system, idle, interrupt, dpc) + ret.append(item) + return ret + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + return cext.cpu_count_logical() + + +def cpu_count_physical(): + """Return the number of physical CPUs in the system.""" + return cext.cpu_count_phys() + + +def cpu_stats(): + """Return CPU statistics.""" + ctx_switches, interrupts, dpcs, syscalls = cext.cpu_stats() + soft_interrupts = 0 + return _common.scpustats(ctx_switches, interrupts, soft_interrupts, + syscalls) + + +def cpu_freq(): + """Return CPU frequency. + On Windows per-cpu frequency is not supported. + """ + curr, max_ = cext.cpu_freq() + min_ = 0.0 + return [_common.scpufreq(float(curr), min_, float(max_))] + + +# ===================================================================== +# --- network +# ===================================================================== + + +def net_connections(kind, _pid=-1): + """Return socket connections. If pid == -1 return system-wide + connections (as opposed to connections opened by one process only). + """ + if kind not in conn_tmap: + raise ValueError("invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap]))) + families, types = conn_tmap[kind] + rawlist = cext.net_connections(_pid, families, types) + ret = set() + for item in rawlist: + fd, fam, type, laddr, raddr, status, pid = item + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + status = TCP_STATUSES[status] + fam = sockfam_to_enum(fam) + type = socktype_to_enum(type) + if _pid == -1: + nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid) + else: + nt = _common.pconn(fd, fam, type, laddr, raddr, status) + ret.add(nt) + return list(ret) + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + ret = {} + rawdict = cext.net_if_stats() + for name, items in rawdict.items(): + if not PY3: + assert isinstance(name, unicode), type(name) + name = py2_strencode(name) + isup, duplex, speed, mtu = items + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + ret[name] = _common.snicstats(isup, duplex, speed, mtu) + return ret + + +def net_io_counters(): + """Return network I/O statistics for every network interface + installed on the system as a dict of raw tuples. 
+ """ + ret = cext.net_io_counters() + return dict([(py2_strencode(k), v) for k, v in ret.items()]) + + +def net_if_addrs(): + """Return the addresses associated to each NIC.""" + ret = [] + for items in cext.net_if_addrs(): + items = list(items) + items[0] = py2_strencode(items[0]) + ret.append(items) + return ret + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +def sensors_battery(): + """Return battery information.""" + # For constants meaning see: + # https://msdn.microsoft.com/en-us/library/windows/desktop/ + # aa373232(v=vs.85).aspx + acline_status, flags, percent, secsleft = cext.sensors_battery() + power_plugged = acline_status == 1 + no_battery = bool(flags & 128) + charging = bool(flags & 8) + + if no_battery: + return None + if power_plugged or charging: + secsleft = _common.POWER_TIME_UNLIMITED + elif secsleft == -1: + secsleft = _common.POWER_TIME_UNKNOWN + + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +_last_btime = 0 + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + # This dirty hack is to adjust the precision of the returned + # value which may have a 1 second fluctuation, see: + # https://github.com/giampaolo/psutil/issues/1007 + global _last_btime + ret = float(cext.boot_time()) + if abs(ret - _last_btime) <= 1: + return _last_btime + else: + _last_btime = ret + return ret + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, hostname, tstamp = item + user = py2_strencode(user) + nt = _common.suser(user, None, hostname, tstamp, None) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- Windows services +# ===================================================================== + + +def win_service_iter(): + """Yields a list of WindowsService instances.""" + for name, display_name in cext.winservice_enumerate(): + yield WindowsService(py2_strencode(name), py2_strencode(display_name)) + + +def win_service_get(name): + """Open a Windows service and return it as a WindowsService instance.""" + service = WindowsService(name, None) + service._display_name = service._query_config()['display_name'] + return service + + +class WindowsService(object): + """Represents an installed Windows service.""" + + def __init__(self, name, display_name): + self._name = name + self._display_name = display_name + + def __str__(self): + details = "(name=%r, display_name=%r)" % ( + self._name, self._display_name) + return "%s%s" % (self.__class__.__name__, details) + + def __repr__(self): + return "<%s at %s>" % (self.__str__(), id(self)) + + def __eq__(self, other): + # Test for equality with another WindosService object based + # on name. + if not isinstance(other, WindowsService): + return NotImplemented + return self._name == other._name + + def __ne__(self, other): + return not self == other + + def _query_config(self): + with self._wrap_exceptions(): + display_name, binpath, username, start_type = \ + cext.winservice_query_config(self._name) + # XXX - update _self.display_name? 
+ return dict( + display_name=py2_strencode(display_name), + binpath=py2_strencode(binpath), + username=py2_strencode(username), + start_type=py2_strencode(start_type)) + + def _query_status(self): + with self._wrap_exceptions(): + status, pid = cext.winservice_query_status(self._name) + if pid == 0: + pid = None + return dict(status=status, pid=pid) + + @contextlib.contextmanager + def _wrap_exceptions(self): + """Ctx manager which translates bare OSError and WindowsError + exceptions into NoSuchProcess and AccessDenied. + """ + try: + yield + except WindowsError as err: + if err.errno in ACCESS_DENIED_ERRSET: + raise AccessDenied( + pid=None, name=self._name, + msg="service %r is not querable (not enough privileges)" % + self._name) + elif err.errno in NO_SUCH_SERVICE_ERRSET or \ + err.winerror in NO_SUCH_SERVICE_ERRSET: + raise NoSuchProcess( + pid=None, name=self._name, + msg="service %r does not exist)" % self._name) + else: + raise + + # config query + + def name(self): + """The service name. This string is how a service is referenced + and can be passed to win_service_get() to get a new + WindowsService instance. + """ + return self._name + + def display_name(self): + """The service display name. The value is cached when this class + is instantiated. + """ + return self._display_name + + def binpath(self): + """The fully qualified path to the service binary/exe file as + a string, including command line arguments. + """ + return self._query_config()['binpath'] + + def username(self): + """The name of the user that owns this service.""" + return self._query_config()['username'] + + def start_type(self): + """A string which can either be "automatic", "manual" or + "disabled". + """ + return self._query_config()['start_type'] + + # status query + + def pid(self): + """The process PID, if any, else None. This can be passed + to Process class to control the service's process. + """ + return self._query_status()['pid'] + + def status(self): + """Service status as a string.""" + return self._query_status()['status'] + + def description(self): + """Service long description.""" + return py2_strencode(cext.winservice_query_descr(self.name())) + + # utils + + def as_dict(self): + """Utility method retrieving all the information above as a + dictionary. + """ + d = self._query_config() + d.update(self._query_status()) + d['name'] = self.name() + d['display_name'] = self.display_name() + d['description'] = self.description() + return d + + # actions + # XXX: the necessary C bindings for start() and stop() are + # implemented but for now I prefer not to expose them. + # I may change my mind in the future. Reasons: + # - they require Administrator privileges + # - can't implement a timeout for stop() (unless by using a thread, + # which sucks) + # - would require adding ServiceAlreadyStarted and + # ServiceAlreadyStopped exceptions, adding two new APIs. 
+ # - we might also want to have modify(), which would basically mean + # rewriting win32serviceutil.ChangeServiceConfig, which involves a + # lot of stuff (and API constants which would pollute the API), see: + # http://pyxr.sourceforge.net/PyXR/c/python24/lib/site-packages/ + # win32/lib/win32serviceutil.py.html#0175 + # - psutil is typically about "read only" monitoring stuff; + # win_service_* APIs should only be used to retrieve a service and + # check whether it's running + + # def start(self, timeout=None): + # with self._wrap_exceptions(): + # cext.winservice_start(self.name()) + # if timeout: + # giveup_at = time.time() + timeout + # while True: + # if self.status() == "running": + # return + # else: + # if time.time() > giveup_at: + # raise TimeoutExpired(timeout) + # else: + # time.sleep(.1) + + # def stop(self): + # # Note: timeout is not implemented because it's just not + # # possible, see: + # # http://stackoverflow.com/questions/11973228/ + # with self._wrap_exceptions(): + # return cext.winservice_stop(self.name()) + + +# ===================================================================== +# --- processes +# ===================================================================== + + +pids = cext.pids +pid_exists = cext.pid_exists +ppid_map = cext.ppid_map # used internally by Process.children() + + +def wrap_exceptions(fun): + """Decorator which translates bare OSError and WindowsError + exceptions into NoSuchProcess and AccessDenied. + """ + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except OSError as err: + if err.errno in ACCESS_DENIED_ERRSET: + raise AccessDenied(self.pid, self._name) + if err.errno == errno.ESRCH: + raise NoSuchProcess(self.pid, self._name) + raise + return wrapper + + +class Process(object): + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + + # --- oneshot() stuff + + def oneshot_enter(self): + self.oneshot_info.cache_activate() + + def oneshot_exit(self): + self.oneshot_info.cache_deactivate() + + @memoize_when_activated + def oneshot_info(self): + """Return multiple information about this process as a + raw tuple. + """ + ret = cext.proc_info(self.pid) + assert len(ret) == len(pinfo_map) + return ret + + @wrap_exceptions + def name(self): + """Return process name, which on Windows is always the final + part of the executable. + """ + # This is how PIDs 0 and 4 are always represented in taskmgr + # and process-hacker. + if self.pid == 0: + return "System Idle Process" + elif self.pid == 4: + return "System" + else: + try: + # Note: this will fail with AD for most PIDs owned + # by another user but it's faster. 
+ return py2_strencode(os.path.basename(self.exe())) + except AccessDenied: + return py2_strencode(cext.proc_name(self.pid)) + + @wrap_exceptions + def exe(self): + # Note: os.path.exists(path) may return False even if the file + # is there, see: + # http://stackoverflow.com/questions/3112546/os-path-exists-lies + + # see https://github.com/giampaolo/psutil/issues/414 + # see https://github.com/giampaolo/psutil/issues/528 + if self.pid in (0, 4): + raise AccessDenied(self.pid, self._name) + return py2_strencode(convert_dos_path(cext.proc_exe(self.pid))) + + @wrap_exceptions + def cmdline(self): + ret = cext.proc_cmdline(self.pid) + if PY3: + return ret + else: + return [py2_strencode(s) for s in ret] + + @wrap_exceptions + def environ(self): + ustr = cext.proc_environ(self.pid) + if ustr and not PY3: + assert isinstance(ustr, unicode), type(ustr) + return parse_environ_block(py2_strencode(ustr)) + + def ppid(self): + try: + return ppid_map()[self.pid] + except KeyError: + raise NoSuchProcess(self.pid, self._name) + + def _get_raw_meminfo(self): + try: + return cext.proc_memory_info(self.pid) + except OSError as err: + if err.errno in ACCESS_DENIED_ERRSET: + # TODO: the C ext can probably be refactored in order + # to get this from cext.proc_info() + info = self.oneshot_info() + return ( + info[pinfo_map['num_page_faults']], + info[pinfo_map['peak_wset']], + info[pinfo_map['wset']], + info[pinfo_map['peak_paged_pool']], + info[pinfo_map['paged_pool']], + info[pinfo_map['peak_non_paged_pool']], + info[pinfo_map['non_paged_pool']], + info[pinfo_map['pagefile']], + info[pinfo_map['peak_pagefile']], + info[pinfo_map['mem_private']], + ) + raise + + @wrap_exceptions + def memory_info(self): + # on Windows RSS == WorkingSetSize and VSM == PagefileUsage. + # Underlying C function returns fields of PROCESS_MEMORY_COUNTERS + # struct. + t = self._get_raw_meminfo() + rss = t[2] # wset + vms = t[7] # pagefile + return pmem(*(rss, vms, ) + t) + + @wrap_exceptions + def memory_full_info(self): + basic_mem = self.memory_info() + uss = cext.proc_memory_uss(self.pid) + return pfullmem(*basic_mem + (uss, )) + + def memory_maps(self): + try: + raw = cext.proc_memory_maps(self.pid) + except OSError as err: + # XXX - can't use wrap_exceptions decorator as we're + # returning a generator; probably needs refactoring. 
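# (Editor's note.) The decorator cannot be used here because memory_maps()
# is a generator function: its body (including this try/except) only runs
# once iteration starts, after the decorated call has already returned, so
# the OSError has to be translated in place.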
+ if err.errno in ACCESS_DENIED_ERRSET: + raise AccessDenied(self.pid, self._name) + if err.errno == errno.ESRCH: + raise NoSuchProcess(self.pid, self._name) + raise + else: + for addr, perm, path, rss in raw: + path = convert_dos_path(path) + if not PY3: + assert isinstance(path, unicode), type(path) + path = py2_strencode(path) + addr = hex(addr) + yield (addr, perm, path, rss) + + @wrap_exceptions + def kill(self): + return cext.proc_kill(self.pid) + + @wrap_exceptions + def send_signal(self, sig): + os.kill(self.pid, sig) + + @wrap_exceptions + def wait(self, timeout=None): + if timeout is None: + cext_timeout = cext.INFINITE + else: + # WaitForSingleObject() expects time in milliseconds + cext_timeout = int(timeout * 1000) + while True: + ret = cext.proc_wait(self.pid, cext_timeout) + if ret == WAIT_TIMEOUT: + raise TimeoutExpired(timeout, self.pid, self._name) + if pid_exists(self.pid): + if timeout is None: + continue + else: + raise TimeoutExpired(timeout, self.pid, self._name) + return ret + + @wrap_exceptions + def username(self): + if self.pid in (0, 4): + return 'NT AUTHORITY\\SYSTEM' + domain, user = cext.proc_username(self.pid) + return py2_strencode(domain) + '\\' + py2_strencode(user) + + @wrap_exceptions + def create_time(self): + # special case for kernel process PIDs; return system boot time + if self.pid in (0, 4): + return boot_time() + try: + return cext.proc_create_time(self.pid) + except OSError as err: + if err.errno in ACCESS_DENIED_ERRSET: + return self.oneshot_info()[pinfo_map['create_time']] + raise + + @wrap_exceptions + def num_threads(self): + return self.oneshot_info()[pinfo_map['num_threads']] + + @wrap_exceptions + def threads(self): + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + return retlist + + @wrap_exceptions + def cpu_times(self): + try: + user, system = cext.proc_cpu_times(self.pid) + except OSError as err: + if err.errno in ACCESS_DENIED_ERRSET: + info = self.oneshot_info() + user = info[pinfo_map['user_time']] + system = info[pinfo_map['kernel_time']] + else: + raise + # Children user/system times are not retrievable (set to 0). + return _common.pcputimes(user, system, 0.0, 0.0) + + @wrap_exceptions + def suspend(self): + return cext.proc_suspend(self.pid) + + @wrap_exceptions + def resume(self): + return cext.proc_resume(self.pid) + + @wrap_exceptions + def cwd(self): + if self.pid in (0, 4): + raise AccessDenied(self.pid, self._name) + # return a normalized pathname since the native C function appends + # "\\" at the and of the path + path = cext.proc_cwd(self.pid) + return py2_strencode(os.path.normpath(path)) + + @wrap_exceptions + def open_files(self): + if self.pid in (0, 4): + return [] + ret = set() + # Filenames come in in native format like: + # "\Device\HarddiskVolume1\Windows\systemew\file.txt" + # Convert the first part in the corresponding drive letter + # (e.g. 
"C:\") by using Windows's QueryDosDevice() + raw_file_names = cext.proc_open_files(self.pid) + for _file in raw_file_names: + _file = convert_dos_path(_file) + if isfile_strict(_file): + if not PY3: + _file = py2_strencode(_file) + ntuple = _common.popenfile(_file, -1) + ret.add(ntuple) + return list(ret) + + @wrap_exceptions + def connections(self, kind='inet'): + return net_connections(kind, _pid=self.pid) + + @wrap_exceptions + def nice_get(self): + value = cext.proc_priority_get(self.pid) + if enum is not None: + value = Priority(value) + return value + + @wrap_exceptions + def nice_set(self, value): + return cext.proc_priority_set(self.pid, value) + + # available on Windows >= Vista + if hasattr(cext, "proc_io_priority_get"): + @wrap_exceptions + def ionice_get(self): + return cext.proc_io_priority_get(self.pid) + + @wrap_exceptions + def ionice_set(self, value, _): + if _: + raise TypeError("set_proc_ionice() on Windows takes only " + "1 argument (2 given)") + if value not in (2, 1, 0): + raise ValueError("value must be 2 (normal), 1 (low) or 0 " + "(very low); got %r" % value) + return cext.proc_io_priority_set(self.pid, value) + + @wrap_exceptions + def io_counters(self): + try: + ret = cext.proc_io_counters(self.pid) + except OSError as err: + if err.errno in ACCESS_DENIED_ERRSET: + info = self.oneshot_info() + ret = ( + info[pinfo_map['io_rcount']], + info[pinfo_map['io_wcount']], + info[pinfo_map['io_rbytes']], + info[pinfo_map['io_wbytes']], + info[pinfo_map['io_count_others']], + info[pinfo_map['io_bytes_others']], + ) + else: + raise + return pio(*ret) + + @wrap_exceptions + def status(self): + suspended = cext.proc_is_suspended(self.pid) + if suspended: + return _common.STATUS_STOPPED + else: + return _common.STATUS_RUNNING + + @wrap_exceptions + def cpu_affinity_get(self): + def from_bitmask(x): + return [i for i in xrange(64) if (1 << i) & x] + bitmask = cext.proc_cpu_affinity_get(self.pid) + return from_bitmask(bitmask) + + @wrap_exceptions + def cpu_affinity_set(self, value): + def to_bitmask(l): + if not l: + raise ValueError("invalid argument %r" % l) + out = 0 + for b in l: + out |= 2 ** b + return out + + # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER + # is returned for an invalid CPU but this seems not to be true, + # therefore we check CPUs validy beforehand. + allcpus = list(range(len(per_cpu_times()))) + for cpu in value: + if cpu not in allcpus: + if not isinstance(cpu, (int, long)): + raise TypeError( + "invalid CPU %r; an integer is required" % cpu) + else: + raise ValueError("invalid CPU %r" % cpu) + + bitmask = to_bitmask(value) + cext.proc_cpu_affinity_set(self.pid, bitmask) + + @wrap_exceptions + def num_handles(self): + try: + return cext.proc_num_handles(self.pid) + except OSError as err: + if err.errno in ACCESS_DENIED_ERRSET: + return self.oneshot_info()[pinfo_map['num_handles']] + raise + + @wrap_exceptions + def num_ctx_switches(self): + ctx_switches = self.oneshot_info()[pinfo_map['ctx_switches']] + # only voluntary ctx switches are supported + return _common.pctxsw(ctx_switches, 0) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/__init__.py b/server/www/packages/packages-windows/x86/psutil/tests/__init__.py new file mode 100644 index 0000000..9e8d859 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/__init__.py @@ -0,0 +1,1198 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Test utilities. +""" + +from __future__ import print_function + +import atexit +import contextlib +import ctypes +import errno +import functools +import os +import random +import re +import select +import shutil +import socket +import stat +import subprocess +import sys +import tempfile +import textwrap +import threading +import time +import traceback +import warnings +from socket import AF_INET +from socket import AF_INET6 +from socket import SOCK_DGRAM +from socket import SOCK_STREAM + +import psutil +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil import WINDOWS +from psutil._common import supports_ipv6 +from psutil._compat import PY3 +from psutil._compat import u +from psutil._compat import unicode +from psutil._compat import which + +if sys.version_info < (2, 7): + import unittest2 as unittest # requires "pip install unittest2" +else: + import unittest + +try: + from unittest import mock # py3 +except ImportError: + import mock # NOQA - requires "pip install mock" + +if sys.version_info >= (3, 4): + import enum +else: + enum = None + + +__all__ = [ + # constants + 'APPVEYOR', 'DEVNULL', 'GLOBAL_TIMEOUT', 'MEMORY_TOLERANCE', 'NO_RETRIES', + 'PYPY', 'PYTHON_EXE', 'ROOT_DIR', 'SCRIPTS_DIR', 'TESTFILE_PREFIX', + 'TESTFN', 'TESTFN_UNICODE', 'TOX', 'TRAVIS', 'VALID_PROC_STATUSES', + 'VERBOSITY', + "HAS_CPU_AFFINITY", "HAS_CPU_FREQ", "HAS_ENVIRON", "HAS_PROC_IO_COUNTERS", + "HAS_IONICE", "HAS_MEMORY_MAPS", "HAS_PROC_CPU_NUM", "HAS_RLIMIT", + "HAS_SENSORS_BATTERY", "HAS_BATTERY", "HAS_SENSORS_FANS", + "HAS_SENSORS_TEMPERATURES", "HAS_MEMORY_FULL_INFO", + # subprocesses + 'pyrun', 'reap_children', 'get_test_subprocess', 'create_zombie_proc', + 'create_proc_children_pair', + # threads + 'ThreadTask' + # test utils + 'unittest', 'skip_on_access_denied', 'skip_on_not_implemented', + 'retry_before_failing', 'run_test_module_by_name', 'get_suite', + 'run_suite', + # install utils + 'install_pip', 'install_test_deps', + # fs utils + 'chdir', 'safe_rmpath', 'create_exe', 'decode_path', 'encode_path', + 'unique_filename', + # os + 'get_winver', 'get_kernel_version', + # sync primitives + 'call_until', 'wait_for_pid', 'wait_for_file', + # network + 'check_connection_ntuple', 'check_net_address', + 'get_free_port', 'unix_socket_path', 'bind_socket', 'bind_unix_socket', + 'tcp_socketpair', 'unix_socketpair', 'create_sockets', + # compat + 'reload_module', 'import_module_by_path', + # others + 'warn', 'copyload_shared_lib', 'is_namedtuple', +] + + +# =================================================================== +# --- constants +# =================================================================== + +# --- platforms + +TOX = os.getenv('TOX') or '' in ('1', 'true') +PYPY = '__pypy__' in sys.builtin_module_names +WIN_VISTA = (6, 0, 0) if WINDOWS else None +# whether we're running this test suite on Travis (https://travis-ci.org/) +TRAVIS = bool(os.environ.get('TRAVIS')) +# whether we're running this test suite on Appveyor for Windows +# (http://www.appveyor.com/) +APPVEYOR = bool(os.environ.get('APPVEYOR')) + +# --- configurable defaults + +# how many times retry_before_failing() decorator will retry +NO_RETRIES = 10 +# bytes tolerance for system-wide memory related tests +MEMORY_TOLERANCE = 500 * 1024 # 500KB +# the timeout used in functions which have to wait +GLOBAL_TIMEOUT = 3 +# test output verbosity +VERBOSITY = 1 if os.getenv('SILENT') or TOX 
else 2 +# be more tolerant if we're on travis / appveyor in order to avoid +# false positives +if TRAVIS or APPVEYOR: + NO_RETRIES *= 3 + GLOBAL_TIMEOUT *= 3 + +# --- files + +TESTFILE_PREFIX = '$testfn' +TESTFN = os.path.join(os.path.realpath(os.getcwd()), TESTFILE_PREFIX) +_TESTFN = TESTFN + '-internal' +TESTFN_UNICODE = TESTFN + u("-ƒőő") +ASCII_FS = sys.getfilesystemencoding().lower() in ('ascii', 'us-ascii') + +# --- paths + +ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) +SCRIPTS_DIR = os.path.join(ROOT_DIR, 'scripts') +HERE = os.path.abspath(os.path.dirname(__file__)) + +# --- support + +HAS_CPU_AFFINITY = hasattr(psutil.Process, "cpu_affinity") +HAS_CPU_FREQ = hasattr(psutil, "cpu_freq") +HAS_CONNECTIONS_UNIX = POSIX and not SUNOS +HAS_ENVIRON = hasattr(psutil.Process, "environ") +HAS_PROC_IO_COUNTERS = hasattr(psutil.Process, "io_counters") +HAS_IONICE = hasattr(psutil.Process, "ionice") +HAS_MEMORY_FULL_INFO = 'uss' in psutil.Process().memory_full_info()._fields +HAS_MEMORY_MAPS = hasattr(psutil.Process, "memory_maps") +HAS_PROC_CPU_NUM = hasattr(psutil.Process, "cpu_num") +HAS_RLIMIT = hasattr(psutil.Process, "rlimit") +HAS_THREADS = hasattr(psutil.Process, "threads") +HAS_SENSORS_BATTERY = hasattr(psutil, "sensors_battery") +HAS_BATTERY = HAS_SENSORS_BATTERY and psutil.sensors_battery() +HAS_SENSORS_FANS = hasattr(psutil, "sensors_fans") +HAS_SENSORS_TEMPERATURES = hasattr(psutil, "sensors_temperatures") + +# --- misc + + +def _get_py_exe(): + def attempt(exe): + try: + subprocess.check_call( + [exe, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except Exception: + return None + else: + return exe + + if OSX: + exe = \ + attempt(sys.executable) or \ + attempt(os.path.realpath(sys.executable)) or \ + attempt(which("python%s.%s" % sys.version_info[:2])) or \ + attempt(psutil.Process().exe()) + if not exe: + raise ValueError("can't find python exe real abspath") + return exe + else: + exe = os.path.realpath(sys.executable) + assert os.path.exists(exe), exe + return exe + + +PYTHON_EXE = _get_py_exe() +DEVNULL = open(os.devnull, 'r+') +VALID_PROC_STATUSES = [getattr(psutil, x) for x in dir(psutil) + if x.startswith('STATUS_')] +AF_UNIX = getattr(socket, "AF_UNIX", object()) +SOCK_SEQPACKET = getattr(socket, "SOCK_SEQPACKET", object()) + +_subprocesses_started = set() +_pids_started = set() +_testfiles_created = set() + + +@atexit.register +def _cleanup_files(): + DEVNULL.close() + for name in os.listdir(u('.')): + if isinstance(name, unicode): + prefix = u(TESTFILE_PREFIX) + else: + prefix = TESTFILE_PREFIX + if name.startswith(prefix): + try: + safe_rmpath(name) + except Exception: + traceback.print_exc() + for path in _testfiles_created: + try: + safe_rmpath(path) + except Exception: + traceback.print_exc() + + +# this is executed first +@atexit.register +def _cleanup_procs(): + reap_children(recursive=True) + + +# =================================================================== +# --- threads +# =================================================================== + + +class ThreadTask(threading.Thread): + """A thread task which does nothing expect staying alive.""" + + def __init__(self): + threading.Thread.__init__(self) + self._running = False + self._interval = 0.001 + self._flag = threading.Event() + + def __repr__(self): + name = self.__class__.__name__ + return '<%s running=%s at %#x>' % (name, self._running, id(self)) + + def __enter__(self): + self.start() + return self + + def __exit__(self, *args, **kwargs): + self.stop() + 
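
[editor's note] The HAS_* constants defined above encode a probing pattern that is also useful outside the test suite: because this vendored psutil exposes different APIs per platform, callers check for the attribute before using it. A minimal sketch of that pattern, assuming only public psutil APIs (cpu_freq, sensors_battery, Process.cpu_affinity); the printed labels are illustrative:

    import psutil

    # Probe for optional APIs the same way the HAS_* constants above do;
    # not every platform build of psutil exposes them.
    if hasattr(psutil, "cpu_freq"):
        print("cpu_freq:", psutil.cpu_freq())        # may still be None on some systems
    if hasattr(psutil, "sensors_battery"):
        print("battery:", psutil.sensors_battery())  # None when no battery is present
    if hasattr(psutil.Process, "cpu_affinity"):
        print("affinity:", psutil.Process().cpu_affinity())

Guarding with hasattr (rather than try/except at call time) mirrors how the tests below decide whether to skip whole test cases.
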
+ def start(self): + """Start thread and keep it running until an explicit + stop() request. Polls for shutdown every 'timeout' seconds. + """ + if self._running: + raise ValueError("already started") + threading.Thread.start(self) + self._flag.wait() + + def run(self): + self._running = True + self._flag.set() + while self._running: + time.sleep(self._interval) + + def stop(self): + """Stop thread execution and and waits until it is stopped.""" + if not self._running: + raise ValueError("already stopped") + self._running = False + self.join() + + +# =================================================================== +# --- subprocesses +# =================================================================== + + +def _cleanup_on_err(fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + try: + return fun(*args, **kwargs) + except Exception: + reap_children() + raise + return wrapper + + +@_cleanup_on_err +def get_test_subprocess(cmd=None, **kwds): + """Creates a python subprocess which does nothing for 60 secs and + return it as subprocess.Popen instance. + If "cmd" is specified that is used instead of python. + By default stdin and stdout are redirected to /dev/null. + It also attemps to make sure the process is in a reasonably + initialized state. + The process is registered for cleanup on reap_children(). + """ + kwds.setdefault("stdin", DEVNULL) + kwds.setdefault("stdout", DEVNULL) + kwds.setdefault("cwd", os.getcwd()) + kwds.setdefault("env", os.environ) + if WINDOWS: + # Prevents the subprocess to open error dialogs. + kwds.setdefault("creationflags", 0x8000000) # CREATE_NO_WINDOW + if cmd is None: + safe_rmpath(_TESTFN) + pyline = "from time import sleep;" \ + "open(r'%s', 'w').close();" \ + "sleep(60);" % _TESTFN + cmd = [PYTHON_EXE, "-c", pyline] + sproc = subprocess.Popen(cmd, **kwds) + _subprocesses_started.add(sproc) + wait_for_file(_TESTFN, delete=True, empty=True) + else: + sproc = subprocess.Popen(cmd, **kwds) + _subprocesses_started.add(sproc) + wait_for_pid(sproc.pid) + return sproc + + +@_cleanup_on_err +def create_proc_children_pair(): + """Create a subprocess which creates another one as in: + A (us) -> B (child) -> C (grandchild). + Return a (child, grandchild) tuple. + The 2 processes are fully initialized and will live for 60 secs + and are registered for cleanup on reap_children(). + """ + _TESTFN2 = os.path.basename(_TESTFN) + '2' # need to be relative + s = textwrap.dedent("""\ + import subprocess, os, sys, time + s = "import os, time;" + s += "f = open('%s', 'w');" + s += "f.write(str(os.getpid()));" + s += "f.close();" + s += "time.sleep(60);" + subprocess.Popen(['%s', '-c', s]) + time.sleep(60) + """ % (_TESTFN2, PYTHON_EXE)) + # On Windows if we create a subprocess with CREATE_NO_WINDOW flag + # set (which is the default) a "conhost.exe" extra process will be + # spawned as a child. We don't want that. 
+ if WINDOWS: + subp = pyrun(s, creationflags=0) + else: + subp = pyrun(s) + child1 = psutil.Process(subp.pid) + data = wait_for_file(_TESTFN2, delete=False, empty=False) + os.remove(_TESTFN2) + child2_pid = int(data) + _pids_started.add(child2_pid) + child2 = psutil.Process(child2_pid) + return (child1, child2) + + +def create_zombie_proc(): + """Create a zombie process and return its PID.""" + assert psutil.POSIX + unix_file = tempfile.mktemp(prefix=TESTFILE_PREFIX) if OSX else TESTFN + src = textwrap.dedent("""\ + import os, sys, time, socket, contextlib + child_pid = os.fork() + if child_pid > 0: + time.sleep(3000) + else: + # this is the zombie process + s = socket.socket(socket.AF_UNIX) + with contextlib.closing(s): + s.connect('%s') + if sys.version_info < (3, ): + pid = str(os.getpid()) + else: + pid = bytes(str(os.getpid()), 'ascii') + s.sendall(pid) + """ % unix_file) + with contextlib.closing(socket.socket(socket.AF_UNIX)) as sock: + sock.settimeout(GLOBAL_TIMEOUT) + sock.bind(unix_file) + sock.listen(1) + pyrun(src) + conn, _ = sock.accept() + try: + select.select([conn.fileno()], [], [], GLOBAL_TIMEOUT) + zpid = int(conn.recv(1024)) + _pids_started.add(zpid) + zproc = psutil.Process(zpid) + call_until(lambda: zproc.status(), "ret == psutil.STATUS_ZOMBIE") + return zpid + finally: + conn.close() + + +@_cleanup_on_err +def pyrun(src, **kwds): + """Run python 'src' code string in a separate interpreter. + Returns a subprocess.Popen instance. + """ + kwds.setdefault("stdout", None) + kwds.setdefault("stderr", None) + with tempfile.NamedTemporaryFile( + prefix=TESTFILE_PREFIX, mode="wt", delete=False) as f: + _testfiles_created.add(f.name) + f.write(src) + f.flush() + subp = get_test_subprocess([PYTHON_EXE, f.name], **kwds) + wait_for_pid(subp.pid) + return subp + + +@_cleanup_on_err +def sh(cmd, **kwds): + """run cmd in a subprocess and return its output. + raises RuntimeError on error. + """ + shell = True if isinstance(cmd, (str, unicode)) else False + # Prevents subprocess to open error dialogs in case of error. + flags = 0x8000000 if WINDOWS and shell else 0 + kwds.setdefault("shell", shell) + kwds.setdefault("stdout", subprocess.PIPE) + kwds.setdefault("stderr", subprocess.PIPE) + kwds.setdefault("universal_newlines", True) + kwds.setdefault("creationflags", flags) + p = subprocess.Popen(cmd, **kwds) + _subprocesses_started.add(p) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise RuntimeError(stderr) + if stderr: + warn(stderr) + if stdout.endswith('\n'): + stdout = stdout[:-1] + return stdout + + +def reap_children(recursive=False): + """Terminate and wait() any subprocess started by this test suite + and ensure that no zombies stick around to hog resources and + create problems when looking for refleaks. + + If resursive is True it also tries to terminate and wait() + all grandchildren started by this process. + """ + # This is here to make sure wait_procs() behaves properly and + # investigate: + # https://ci.appveyor.com/project/giampaolo/psutil/build/job/ + # jiq2cgd6stsbtn60 + def assert_gone(pid): + assert not psutil.pid_exists(pid), pid + assert pid not in psutil.pids(), pid + try: + p = psutil.Process(pid) + assert not p.is_running(), pid + except psutil.NoSuchProcess: + pass + else: + assert 0, "pid %s is not gone" % pid + + # Get the children here, before terminating the children sub + # processes as we don't want to lose the intermediate reference + # in case of grandchildren. 
+ if recursive: + children = set(psutil.Process().children(recursive=True)) + else: + children = set() + + # Terminate subprocess.Popen instances "cleanly" by closing their + # fds and wiat()ing for them in order to avoid zombies. + while _subprocesses_started: + subp = _subprocesses_started.pop() + _pids_started.add(subp.pid) + try: + subp.terminate() + except OSError as err: + if err.errno != errno.ESRCH: + raise + if subp.stdout: + subp.stdout.close() + if subp.stderr: + subp.stderr.close() + try: + # Flushing a BufferedWriter may raise an error. + if subp.stdin: + subp.stdin.close() + finally: + # Wait for the process to terminate, to avoid zombies. + try: + subp.wait() + except OSError as err: + if err.errno != errno.ECHILD: + raise + + # Terminate started pids. + while _pids_started: + pid = _pids_started.pop() + try: + p = psutil.Process(pid) + except psutil.NoSuchProcess: + assert_gone(pid) + else: + children.add(p) + + # Terminate children. + if children: + for p in children: + try: + p.terminate() + except psutil.NoSuchProcess: + pass + gone, alive = psutil.wait_procs(children, timeout=GLOBAL_TIMEOUT) + for p in alive: + warn("couldn't terminate process %r; attempting kill()" % p) + try: + p.kill() + except psutil.NoSuchProcess: + pass + gone, alive = psutil.wait_procs(alive, timeout=GLOBAL_TIMEOUT) + if alive: + for p in alive: + warn("process %r survived kill()" % p) + + for p in children: + assert_gone(p.pid) + + +# =================================================================== +# --- OS +# =================================================================== + + +def get_kernel_version(): + """Return a tuple such as (2, 6, 36).""" + if not POSIX: + raise NotImplementedError("not POSIX") + s = "" + uname = os.uname()[2] + for c in uname: + if c.isdigit() or c == '.': + s += c + else: + break + if not s: + raise ValueError("can't parse %r" % uname) + minor = 0 + micro = 0 + nums = s.split('.') + major = int(nums[0]) + if len(nums) >= 2: + minor = int(nums[1]) + if len(nums) >= 3: + micro = int(nums[2]) + return (major, minor, micro) + + +def get_winver(): + if not WINDOWS: + raise NotImplementedError("not WINDOWS") + wv = sys.getwindowsversion() + if hasattr(wv, 'service_pack_major'): # python >= 2.7 + sp = wv.service_pack_major or 0 + else: + r = re.search(r"\s\d$", wv[4]) + if r: + sp = int(r.group(0)) + else: + sp = 0 + return (wv[0], wv[1], sp) + + +# =================================================================== +# --- sync primitives +# =================================================================== + + +class retry(object): + """A retry decorator.""" + + def __init__(self, + exception=Exception, + timeout=None, + retries=None, + interval=0.001, + logfun=lambda s: print(s, file=sys.stderr), + ): + if timeout and retries: + raise ValueError("timeout and retries args are mutually exclusive") + self.exception = exception + self.timeout = timeout + self.retries = retries + self.interval = interval + self.logfun = logfun + + def __iter__(self): + if self.timeout: + stop_at = time.time() + self.timeout + while time.time() < stop_at: + yield + elif self.retries: + for _ in range(self.retries): + yield + else: + while True: + yield + + def sleep(self): + if self.interval is not None: + time.sleep(self.interval) + + def __call__(self, fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + exc = None + for _ in self: + try: + return fun(*args, **kwargs) + except self.exception as _: + exc = _ + if self.logfun is not None: + self.logfun(exc) + self.sleep() + 
continue + if PY3: + raise exc + else: + raise + + # This way the user of the decorated function can change config + # parameters. + wrapper.decorator = self + return wrapper + + +@retry(exception=psutil.NoSuchProcess, logfun=None, timeout=GLOBAL_TIMEOUT, + interval=0.001) +def wait_for_pid(pid): + """Wait for pid to show up in the process list then return. + Used in the test suite to give time the sub process to initialize. + """ + psutil.Process(pid) + if WINDOWS: + # give it some more time to allow better initialization + time.sleep(0.01) + + +@retry(exception=(EnvironmentError, AssertionError), logfun=None, + timeout=GLOBAL_TIMEOUT, interval=0.001) +def wait_for_file(fname, delete=True, empty=False): + """Wait for a file to be written on disk with some content.""" + with open(fname, "rb") as f: + data = f.read() + if not empty: + assert data + if delete: + os.remove(fname) + return data + + +@retry(exception=AssertionError, logfun=None, timeout=GLOBAL_TIMEOUT, + interval=0.001) +def call_until(fun, expr): + """Keep calling function for timeout secs and exit if eval() + expression is True. + """ + ret = fun() + assert eval(expr) + return ret + + +# =================================================================== +# --- fs +# =================================================================== + + +def safe_rmpath(path): + "Convenience function for removing temporary test files or dirs" + try: + st = os.stat(path) + if stat.S_ISDIR(st.st_mode): + os.rmdir(path) + else: + os.remove(path) + except OSError as err: + if err.errno != errno.ENOENT: + raise + + +def safe_mkdir(dir): + "Convenience function for creating a directory" + try: + os.mkdir(dir) + except OSError as err: + if err.errno != errno.EEXIST: + raise + + +@contextlib.contextmanager +def chdir(dirname): + "Context manager which temporarily changes the current directory." + curdir = os.getcwd() + try: + os.chdir(dirname) + yield + finally: + os.chdir(curdir) + + +def create_exe(outpath, c_code=None): + """Creates an executable file in the given location.""" + assert not os.path.exists(outpath), outpath + if c_code: + if not which("gcc"): + raise ValueError("gcc is not installed") + if isinstance(c_code, bool): # c_code is True + c_code = textwrap.dedent( + """ + #include + int main() { + pause(); + return 1; + } + """) + assert isinstance(c_code, str), c_code + with tempfile.NamedTemporaryFile( + suffix='.c', delete=False, mode='wt') as f: + f.write(c_code) + try: + subprocess.check_call(["gcc", f.name, "-o", outpath]) + finally: + safe_rmpath(f.name) + else: + # copy python executable + shutil.copyfile(PYTHON_EXE, outpath) + if POSIX: + st = os.stat(outpath) + os.chmod(outpath, st.st_mode | stat.S_IEXEC) + + +def unique_filename(prefix=TESTFILE_PREFIX, suffix=""): + return tempfile.mktemp(prefix=prefix, suffix=suffix) + + +# =================================================================== +# --- testing +# =================================================================== + + +class TestCase(unittest.TestCase): + + # Print a full path representation of the single unit tests + # being run. + def __str__(self): + return "%s.%s.%s" % ( + self.__class__.__module__, self.__class__.__name__, + self._testMethodName) + + # assertRaisesRegexp renamed to assertRaisesRegex in 3.3; + # add support for the new name. 
+ if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +# override default unittest.TestCase +unittest.TestCase = TestCase + + +def _setup_tests(): + if 'PSUTIL_TESTING' not in os.environ: + # This won't work on Windows but set_testing() below will do it. + os.environ['PSUTIL_TESTING'] = '1' + psutil._psplatform.cext.set_testing() + + +def get_suite(): + testmods = [os.path.splitext(x)[0] for x in os.listdir(HERE) + if x.endswith('.py') and x.startswith('test_') and not + x.startswith('test_memory_leaks')] + if "WHEELHOUSE_UPLOADER_USERNAME" in os.environ: + testmods = [x for x in testmods if not x.endswith(( + "osx", "posix", "linux"))] + suite = unittest.TestSuite() + for tm in testmods: + # ...so that the full test paths are printed on screen + tm = "psutil.tests.%s" % tm + suite.addTest(unittest.defaultTestLoader.loadTestsFromName(tm)) + return suite + + +def run_suite(): + _setup_tests() + result = unittest.TextTestRunner(verbosity=VERBOSITY).run(get_suite()) + success = result.wasSuccessful() + sys.exit(0 if success else 1) + + +def run_test_module_by_name(name): + # testmodules = [os.path.splitext(x)[0] for x in os.listdir(HERE) + # if x.endswith('.py') and x.startswith('test_')] + _setup_tests() + name = os.path.splitext(os.path.basename(name))[0] + suite = unittest.TestSuite() + suite.addTest(unittest.defaultTestLoader.loadTestsFromName(name)) + result = unittest.TextTestRunner(verbosity=VERBOSITY).run(suite) + success = result.wasSuccessful() + sys.exit(0 if success else 1) + + +def retry_before_failing(retries=NO_RETRIES): + """Decorator which runs a test function and retries N times before + actually failing. + """ + return retry(exception=AssertionError, timeout=None, retries=retries) + + +def skip_on_access_denied(only_if=None): + """Decorator to Ignore AccessDenied exceptions.""" + def decorator(fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + try: + return fun(*args, **kwargs) + except psutil.AccessDenied: + if only_if is not None: + if not only_if: + raise + raise unittest.SkipTest("raises AccessDenied") + return wrapper + return decorator + + +def skip_on_not_implemented(only_if=None): + """Decorator to Ignore NotImplementedError exceptions.""" + def decorator(fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + try: + return fun(*args, **kwargs) + except NotImplementedError: + if only_if is not None: + if not only_if: + raise + msg = "%r was skipped because it raised NotImplementedError" \ + % fun.__name__ + raise unittest.SkipTest(msg) + return wrapper + return decorator + + +# =================================================================== +# --- network +# =================================================================== + + +def get_free_port(host='127.0.0.1'): + """Return an unused TCP port.""" + with contextlib.closing(socket.socket()) as sock: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind((host, 0)) + return sock.getsockname()[1] + + +@contextlib.contextmanager +def unix_socket_path(suffix=""): + """A context manager which returns a non-existent file name + and tries to delete it on exit. 
+ """ + assert psutil.POSIX + path = unique_filename(suffix=suffix) + try: + yield path + finally: + try: + os.unlink(path) + except OSError: + pass + + +def bind_socket(family=AF_INET, type=SOCK_STREAM, addr=None): + """Binds a generic socket.""" + if addr is None and family in (AF_INET, AF_INET6): + addr = ("", 0) + sock = socket.socket(family, type) + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(addr) + if type == socket.SOCK_STREAM: + sock.listen(10) + return sock + except Exception: + sock.close() + raise + + +def bind_unix_socket(name, type=socket.SOCK_STREAM): + """Bind a UNIX socket.""" + assert psutil.POSIX + assert not os.path.exists(name), name + sock = socket.socket(socket.AF_UNIX, type) + try: + sock.bind(name) + if type == socket.SOCK_STREAM: + sock.listen(10) + except Exception: + sock.close() + raise + return sock + + +def tcp_socketpair(family, addr=("", 0)): + """Build a pair of TCP sockets connected to each other. + Return a (server, client) tuple. + """ + with contextlib.closing(socket.socket(family, SOCK_STREAM)) as ll: + ll.bind(addr) + ll.listen(10) + addr = ll.getsockname() + c = socket.socket(family, SOCK_STREAM) + try: + c.connect(addr) + caddr = c.getsockname() + while True: + a, addr = ll.accept() + # check that we've got the correct client + if addr == caddr: + return (a, c) + a.close() + except OSError: + c.close() + raise + + +def unix_socketpair(name): + """Build a pair of UNIX sockets connected to each other through + the same UNIX file name. + Return a (server, client) tuple. + """ + assert psutil.POSIX + server = client = None + try: + server = bind_unix_socket(name, type=socket.SOCK_STREAM) + server.setblocking(0) + client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + client.setblocking(0) + client.connect(name) + # new = server.accept() + except Exception: + if server is not None: + server.close() + if client is not None: + client.close() + raise + return (server, client) + + +@contextlib.contextmanager +def create_sockets(): + """Open as many socket families / types as possible.""" + socks = [] + fname1 = fname2 = None + try: + socks.append(bind_socket(socket.AF_INET, socket.SOCK_STREAM)) + socks.append(bind_socket(socket.AF_INET, socket.SOCK_DGRAM)) + if supports_ipv6(): + socks.append(bind_socket(socket.AF_INET6, socket.SOCK_STREAM)) + socks.append(bind_socket(socket.AF_INET6, socket.SOCK_DGRAM)) + if POSIX and HAS_CONNECTIONS_UNIX: + fname1 = unix_socket_path().__enter__() + fname2 = unix_socket_path().__enter__() + s1, s2 = unix_socketpair(fname1) + s3 = bind_unix_socket(fname2, type=socket.SOCK_DGRAM) + # self.addCleanup(safe_rmpath, fname1) + # self.addCleanup(safe_rmpath, fname2) + for s in (s1, s2, s3): + socks.append(s) + yield socks + finally: + for s in socks: + s.close() + if fname1 is not None: + safe_rmpath(fname1) + if fname2 is not None: + safe_rmpath(fname2) + + +def check_net_address(addr, family): + """Check a net address validity. Supported families are IPv4, + IPv6 and MAC addresses. 
+ """ + import ipaddress # python >= 3.3 / requires "pip install ipaddress" + if enum and PY3: + assert isinstance(family, enum.IntEnum), family + if family == socket.AF_INET: + octs = [int(x) for x in addr.split('.')] + assert len(octs) == 4, addr + for num in octs: + assert 0 <= num <= 255, addr + if not PY3: + addr = unicode(addr) + ipaddress.IPv4Address(addr) + elif family == socket.AF_INET6: + assert isinstance(addr, str), addr + if not PY3: + addr = unicode(addr) + ipaddress.IPv6Address(addr) + elif family == psutil.AF_LINK: + assert re.match(r'([a-fA-F0-9]{2}[:|\-]?){6}', addr) is not None, addr + else: + raise ValueError("unknown family %r", family) + + +def check_connection_ntuple(conn): + """Check validity of a connection namedtuple.""" + # check ntuple + assert len(conn) in (6, 7), conn + has_pid = len(conn) == 7 + has_fd = getattr(conn, 'fd', -1) != -1 + assert conn[0] == conn.fd + assert conn[1] == conn.family + assert conn[2] == conn.type + assert conn[3] == conn.laddr + assert conn[4] == conn.raddr + assert conn[5] == conn.status + if has_pid: + assert conn[6] == conn.pid + + # check fd + if has_fd: + assert conn.fd >= 0, conn + if hasattr(socket, 'fromfd') and not WINDOWS: + try: + dupsock = socket.fromfd(conn.fd, conn.family, conn.type) + except (socket.error, OSError) as err: + if err.args[0] != errno.EBADF: + raise + else: + with contextlib.closing(dupsock): + assert dupsock.family == conn.family + assert dupsock.type == conn.type + + # check family + assert conn.family in (AF_INET, AF_INET6, AF_UNIX), repr(conn.family) + if conn.family in (AF_INET, AF_INET6): + # actually try to bind the local socket; ignore IPv6 + # sockets as their address might be represented as + # an IPv4-mapped-address (e.g. "::127.0.0.1") + # and that's rejected by bind() + if conn.family == AF_INET: + s = socket.socket(conn.family, conn.type) + with contextlib.closing(s): + try: + s.bind((conn.laddr[0], 0)) + except socket.error as err: + if err.errno != errno.EADDRNOTAVAIL: + raise + elif conn.family == AF_UNIX: + assert conn.status == psutil.CONN_NONE, conn.status + + # check type (SOCK_SEQPACKET may happen in case of AF_UNIX socks) + assert conn.type in (SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET), \ + repr(conn.type) + if conn.type == SOCK_DGRAM: + assert conn.status == psutil.CONN_NONE, conn.status + + # check laddr (IP address and port sanity) + for addr in (conn.laddr, conn.raddr): + if conn.family in (AF_INET, AF_INET6): + assert isinstance(addr, tuple), addr + if not addr: + continue + assert isinstance(addr.port, int), addr.port + assert 0 <= addr.port <= 65535, addr.port + check_net_address(addr.ip, conn.family) + elif conn.family == AF_UNIX: + assert isinstance(addr, str), addr + + # check status + assert isinstance(conn.status, str), conn + valids = [getattr(psutil, x) for x in dir(psutil) if x.startswith('CONN_')] + assert conn.status in valids, conn + + +# =================================================================== +# --- compatibility +# =================================================================== + + +def reload_module(module): + """Backport of importlib.reload of Python 3.3+.""" + try: + import importlib + if not hasattr(importlib, 'reload'): # python <=3.3 + raise ImportError + except ImportError: + import imp + return imp.reload(module) + else: + return importlib.reload(module) + + +def import_module_by_path(path): + name = os.path.splitext(os.path.basename(path))[0] + if sys.version_info[0] == 2: + import imp + return imp.load_source(name, path) + elif 
sys.version_info[:2] <= (3, 4): + from importlib.machinery import SourceFileLoader + return SourceFileLoader(name, path).load_module() + else: + import importlib.util + spec = importlib.util.spec_from_file_location(name, path) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + +# =================================================================== +# --- others +# =================================================================== + + +def warn(msg): + """Raise a warning msg.""" + warnings.warn(msg, UserWarning) + + +def is_namedtuple(x): + """Check if object is an instance of namedtuple.""" + t = type(x) + b = t.__bases__ + if len(b) != 1 or b[0] != tuple: + return False + f = getattr(t, '_fields', None) + if not isinstance(f, tuple): + return False + return all(type(n) == str for n in f) + + +if POSIX: + @contextlib.contextmanager + def copyload_shared_lib(dst_prefix=TESTFILE_PREFIX): + """Ctx manager which picks up a random shared CO lib used + by this process, copies it in another location and loads it + in memory via ctypes. Return the new absolutized path. + """ + ext = ".so" + dst = tempfile.mktemp(prefix=dst_prefix, suffix=ext) + libs = [x.path for x in psutil.Process().memory_maps() if + os.path.splitext(x.path)[1] == ext and + 'python' in x.path.lower()] + src = random.choice(libs) + shutil.copyfile(src, dst) + try: + ctypes.CDLL(dst) + yield dst + finally: + safe_rmpath(dst) +else: + @contextlib.contextmanager + def copyload_shared_lib(dst_prefix=TESTFILE_PREFIX): + """Ctx manager which picks up a random shared DLL lib used + by this process, copies it in another location and loads it + in memory via ctypes. + Return the new absolutized, normcased path. + """ + from ctypes import wintypes + from ctypes import WinError + ext = ".dll" + dst = tempfile.mktemp(prefix=dst_prefix, suffix=ext) + libs = [x.path for x in psutil.Process().memory_maps() if + os.path.splitext(x.path)[1].lower() == ext and + 'python' in os.path.basename(x.path).lower() and + 'wow64' not in x.path.lower()] + src = random.choice(libs) + shutil.copyfile(src, dst) + cfile = None + try: + cfile = ctypes.WinDLL(dst) + yield dst + finally: + # Work around OverflowError: + # - https://ci.appveyor.com/project/giampaolo/psutil/build/1207/ + # job/o53330pbnri9bcw7 + # - http://bugs.python.org/issue30286 + # - http://stackoverflow.com/questions/23522055 + if cfile is not None: + FreeLibrary = ctypes.windll.kernel32.FreeLibrary + FreeLibrary.argtypes = [wintypes.HMODULE] + ret = FreeLibrary(cfile._handle) + if ret == 0: + WinError() + safe_rmpath(dst) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/__main__.py b/server/www/packages/packages-windows/x86/psutil/tests/__main__.py new file mode 100644 index 0000000..2cdf5c4 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/__main__.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Run unit tests. 
This is invoked by: + +$ python -m psutil.tests +""" + +import contextlib +import optparse +import os +import ssl +import sys +import tempfile +try: + from urllib.request import urlopen # py3 +except ImportError: + from urllib2 import urlopen + +from psutil.tests import PYTHON_EXE +from psutil.tests import run_suite + + +HERE = os.path.abspath(os.path.dirname(__file__)) +GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py" +TEST_DEPS = [] +if sys.version_info[:2] == (2, 6): + TEST_DEPS.extend(["ipaddress", "unittest2", "argparse", "mock==1.0.1"]) +elif sys.version_info[:2] == (2, 7) or sys.version_info[:2] <= (3, 2): + TEST_DEPS.extend(["ipaddress", "mock"]) +elif sys.version_info[:2] == (3, 3): + TEST_DEPS.extend(["ipaddress"]) + + +def install_pip(): + try: + import pip # NOQA + except ImportError: + f = tempfile.NamedTemporaryFile(suffix='.py') + with contextlib.closing(f): + print("downloading %s to %s" % (GET_PIP_URL, f.name)) + if hasattr(ssl, '_create_unverified_context'): + ctx = ssl._create_unverified_context() + else: + ctx = None + kwargs = dict(context=ctx) if ctx else {} + req = urlopen(GET_PIP_URL, **kwargs) + data = req.read() + f.write(data) + f.flush() + + print("installing pip") + code = os.system('%s %s --user' % (PYTHON_EXE, f.name)) + return code + + +def install_test_deps(deps=None): + """Install test dependencies via pip.""" + if deps is None: + deps = TEST_DEPS + deps = set(deps) + if deps: + is_venv = hasattr(sys, 'real_prefix') + opts = "--user" if not is_venv else "" + install_pip() + code = os.system('%s -m pip install %s --upgrade %s' % ( + PYTHON_EXE, opts, " ".join(deps))) + return code + + +def main(): + usage = "%s -m psutil.tests [opts]" % PYTHON_EXE + parser = optparse.OptionParser(usage=usage, description="run unit tests") + parser.add_option("-i", "--install-deps", + action="store_true", default=False, + help="don't print status messages to stdout") + + opts, args = parser.parse_args() + if opts.install_deps: + install_pip() + install_test_deps() + else: + for dep in TEST_DEPS: + try: + __import__(dep.split("==")[0]) + except ImportError: + sys.exit("%r lib is not installed; run %s -m psutil.tests " + "--install-deps" % (dep, PYTHON_EXE)) + run_suite() + + +main() diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_aix.py b/server/www/packages/packages-windows/x86/psutil/tests/test_aix.py new file mode 100644 index 0000000..7a8a4c3 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_aix.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola' +# Copyright (c) 2017, Arnon Yaari +# All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""AIX specific tests.""" + +import re + +from psutil import AIX +from psutil.tests import run_test_module_by_name +from psutil.tests import sh +from psutil.tests import unittest +import psutil + + +@unittest.skipIf(not AIX, "AIX only") +class AIXSpecificTestCase(unittest.TestCase): + + def test_virtual_memory(self): + out = sh('/usr/bin/svmon -O unit=KB') + re_pattern = "memory\s*" + for field in ("size inuse free pin virtual available mmode").split(): + re_pattern += "(?P<%s>\S+)\s+" % (field,) + matchobj = re.search(re_pattern, out) + + self.assertIsNotNone( + matchobj, "svmon command returned unexpected output") + + KB = 1024 + total = int(matchobj.group("size")) * KB + available = int(matchobj.group("available")) * KB + used = int(matchobj.group("inuse")) * KB + free = int(matchobj.group("free")) * KB + + psutil_result = psutil.virtual_memory() + + # MEMORY_TOLERANCE from psutil.tests is not enough. For some reason + # we're seeing differences of ~1.2 MB. 2 MB is still a good tolerance + # when compared to GBs. + MEMORY_TOLERANCE = 2 * KB * KB # 2 MB + self.assertEqual(psutil_result.total, total) + self.assertAlmostEqual( + psutil_result.used, used, delta=MEMORY_TOLERANCE) + self.assertAlmostEqual( + psutil_result.available, available, delta=MEMORY_TOLERANCE) + self.assertAlmostEqual( + psutil_result.free, free, delta=MEMORY_TOLERANCE) + + def test_swap_memory(self): + out = sh('/usr/sbin/lsps -a') + # From the man page, "The size is given in megabytes" so we assume + # we'll always have 'MB' in the result + # TODO maybe try to use "swap -l" to check "used" too, but its units + # are not guaranteed to be "MB" so parsing may not be consistent + matchobj = re.search("(?P\S+)\s+" + "(?P\S+)\s+" + "(?P\S+)\s+" + "(?P\d+)MB", out) + + self.assertIsNotNone( + matchobj, "lsps command returned unexpected output") + + total_mb = int(matchobj.group("size")) + MB = 1024 ** 2 + psutil_result = psutil.swap_memory() + # we divide our result by MB instead of multiplying the lsps value by + # MB because lsps may round down, so we round down too + self.assertEqual(int(psutil_result.total / MB), total_mb) + + def test_cpu_stats(self): + out = sh('/usr/bin/mpstat -a') + + re_pattern = "ALL\s*" + for field in ("min maj mpcs mpcr dev soft dec ph cs ics bound rq " + "push S3pull S3grd S0rd S1rd S2rd S3rd S4rd S5rd " + "sysc").split(): + re_pattern += "(?P<%s>\S+)\s+" % (field,) + matchobj = re.search(re_pattern, out) + + self.assertIsNotNone( + matchobj, "mpstat command returned unexpected output") + + # numbers are usually in the millions so 1000 is ok for tolerance + CPU_STATS_TOLERANCE = 1000 + psutil_result = psutil.cpu_stats() + self.assertAlmostEqual( + psutil_result.ctx_switches, + int(matchobj.group("cs")), + delta=CPU_STATS_TOLERANCE) + self.assertAlmostEqual( + psutil_result.syscalls, + int(matchobj.group("sysc")), + delta=CPU_STATS_TOLERANCE) + self.assertAlmostEqual( + psutil_result.interrupts, + int(matchobj.group("dev")), + delta=CPU_STATS_TOLERANCE) + self.assertAlmostEqual( + psutil_result.soft_interrupts, + int(matchobj.group("soft")), + delta=CPU_STATS_TOLERANCE) + + def test_cpu_count_logical(self): + out = sh('/usr/bin/mpstat -a') + mpstat_lcpu = int(re.search("lcpu=(\d+)", out).group(1)) + psutil_lcpu = psutil.cpu_count(logical=True) + self.assertEqual(mpstat_lcpu, psutil_lcpu) + + def test_net_if_addrs_names(self): + out = sh('/etc/ifconfig -l') + ifconfig_names = set(out.split()) + psutil_names = set(psutil.net_if_addrs().keys()) + self.assertSetEqual(ifconfig_names, psutil_names) 
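
[editor's note] Every test in this AIX module follows the same recipe: shell out to a native tool, pull numbers out of its output with a regex, and compare them to psutil within a tolerance, since the two readings are never taken at the same instant. A minimal Linux-flavoured sketch of that recipe, assuming a procps-style `free -b` whose "Mem:" line starts with total and used columns (that invocation and format are assumptions, not taken from this test suite):

    import re
    import subprocess

    import psutil

    TOLERANCE = 2 * 1024 * 1024  # 2 MB, same order of magnitude as above

    out = subprocess.check_output(["free", "-b"], universal_newlines=True)
    m = re.search(r"Mem:\s+(\d+)\s+(\d+)", out)   # columns: total, used
    total, used = int(m.group(1)), int(m.group(2))

    vm = psutil.virtual_memory()
    assert vm.total == total                      # both read the same kernel counter
    # "used" definitions can differ slightly between tools, hence the tolerance.
    assert abs(vm.used - used) <= TOLERANCE
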
+ + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_bsd.py b/server/www/packages/packages-windows/x86/psutil/tests/test_bsd.py new file mode 100644 index 0000000..d3868ad --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_bsd.py @@ -0,0 +1,489 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# TODO: (FreeBSD) add test for comparing connections with 'sockstat' cmd. + + +"""Tests specific to all BSD platforms.""" + + +import datetime +import os +import re +import time + +import psutil +from psutil import BSD +from psutil import FREEBSD +from psutil import NETBSD +from psutil import OPENBSD +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_BATTERY +from psutil.tests import MEMORY_TOLERANCE +from psutil.tests import reap_children +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import sh +from psutil.tests import unittest +from psutil.tests import which + + +if BSD: + PAGESIZE = os.sysconf("SC_PAGE_SIZE") + if os.getuid() == 0: # muse requires root privileges + MUSE_AVAILABLE = which('muse') + else: + MUSE_AVAILABLE = False +else: + MUSE_AVAILABLE = False + + +def sysctl(cmdline): + """Expects a sysctl command with an argument and parse the result + returning only the value of interest. + """ + result = sh("sysctl " + cmdline) + if FREEBSD: + result = result[result.find(": ") + 2:] + elif OPENBSD or NETBSD: + result = result[result.find("=") + 1:] + try: + return int(result) + except ValueError: + return result + + +def muse(field): + """Thin wrapper around 'muse' cmdline utility.""" + out = sh('muse') + for line in out.split('\n'): + if line.startswith(field): + break + else: + raise ValueError("line not found") + return int(line.split()[1]) + + +# ===================================================================== +# --- All BSD* +# ===================================================================== + + +@unittest.skipIf(not BSD, "BSD only") +class BSDSpecificTestCase(unittest.TestCase): + """Generic tests common to all BSD variants.""" + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess().pid + + @classmethod + def tearDownClass(cls): + reap_children() + + @unittest.skipIf(NETBSD, "-o lstart doesn't work on NETBSD") + def test_process_create_time(self): + output = sh("ps -o lstart -p %s" % self.pid) + start_ps = output.replace('STARTED', '').strip() + start_psutil = psutil.Process(self.pid).create_time() + start_psutil = time.strftime("%a %b %e %H:%M:%S %Y", + time.localtime(start_psutil)) + self.assertEqual(start_ps, start_psutil) + + def test_disks(self): + # test psutil.disk_usage() and psutil.disk_partitions() + # against "df -a" + def df(path): + out = sh('df -k "%s"' % path).strip() + lines = out.split('\n') + lines.pop(0) + line = lines.pop(0) + dev, total, used, free = line.split()[:4] + if dev == 'none': + dev = '' + total = int(total) * 1024 + used = int(used) * 1024 + free = int(free) * 1024 + return dev, total, used, free + + for part in psutil.disk_partitions(all=False): + usage = psutil.disk_usage(part.mountpoint) + dev, total, used, free = df(part.mountpoint) + self.assertEqual(part.device, dev) + self.assertEqual(usage.total, total) + # 10 MB tollerance + if abs(usage.free - free) > 10 * 1024 * 1024: 
+ self.fail("psutil=%s, df=%s" % (usage.free, free)) + if abs(usage.used - used) > 10 * 1024 * 1024: + self.fail("psutil=%s, df=%s" % (usage.used, used)) + + @unittest.skipIf(not which('sysctl'), "sysctl cmd not available") + def test_cpu_count_logical(self): + syst = sysctl("hw.ncpu") + self.assertEqual(psutil.cpu_count(logical=True), syst) + + @unittest.skipIf(not which('sysctl'), "sysctl cmd not available") + def test_virtual_memory_total(self): + num = sysctl('hw.physmem') + self.assertEqual(num, psutil.virtual_memory().total) + + def test_net_if_stats(self): + for name, stats in psutil.net_if_stats().items(): + try: + out = sh("ifconfig %s" % name) + except RuntimeError: + pass + else: + self.assertEqual(stats.isup, 'RUNNING' in out, msg=out) + if "mtu" in out: + self.assertEqual(stats.mtu, + int(re.findall(r'mtu (\d+)', out)[0])) + + +# ===================================================================== +# --- FreeBSD +# ===================================================================== + + +@unittest.skipIf(not FREEBSD, "FREEBSD only") +class FreeBSDSpecificTestCase(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess().pid + + @classmethod + def tearDownClass(cls): + reap_children() + + @retry_before_failing() + def test_proc_memory_maps(self): + out = sh('procstat -v %s' % self.pid) + maps = psutil.Process(self.pid).memory_maps(grouped=False) + lines = out.split('\n')[1:] + while lines: + line = lines.pop() + fields = line.split() + _, start, stop, perms, res = fields[:5] + map = maps.pop() + self.assertEqual("%s-%s" % (start, stop), map.addr) + self.assertEqual(int(res), map.rss) + if not map.path.startswith('['): + self.assertEqual(fields[10], map.path) + + def test_proc_exe(self): + out = sh('procstat -b %s' % self.pid) + self.assertEqual(psutil.Process(self.pid).exe(), + out.split('\n')[1].split()[-1]) + + def test_proc_cmdline(self): + out = sh('procstat -c %s' % self.pid) + self.assertEqual(' '.join(psutil.Process(self.pid).cmdline()), + ' '.join(out.split('\n')[1].split()[2:])) + + def test_proc_uids_gids(self): + out = sh('procstat -s %s' % self.pid) + euid, ruid, suid, egid, rgid, sgid = out.split('\n')[1].split()[2:8] + p = psutil.Process(self.pid) + uids = p.uids() + gids = p.gids() + self.assertEqual(uids.real, int(ruid)) + self.assertEqual(uids.effective, int(euid)) + self.assertEqual(uids.saved, int(suid)) + self.assertEqual(gids.real, int(rgid)) + self.assertEqual(gids.effective, int(egid)) + self.assertEqual(gids.saved, int(sgid)) + + @retry_before_failing() + def test_proc_ctx_switches(self): + tested = [] + out = sh('procstat -r %s' % self.pid) + p = psutil.Process(self.pid) + for line in out.split('\n'): + line = line.lower().strip() + if ' voluntary context' in line: + pstat_value = int(line.split()[-1]) + psutil_value = p.num_ctx_switches().voluntary + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + elif ' involuntary context' in line: + pstat_value = int(line.split()[-1]) + psutil_value = p.num_ctx_switches().involuntary + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + if len(tested) != 2: + raise RuntimeError("couldn't find lines match in procstat out") + + @retry_before_failing() + def test_proc_cpu_times(self): + tested = [] + out = sh('procstat -r %s' % self.pid) + p = psutil.Process(self.pid) + for line in out.split('\n'): + line = line.lower().strip() + if 'user time' in line: + pstat_value = float('0.' 
+ line.split()[-1].split('.')[-1]) + psutil_value = p.cpu_times().user + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + elif 'system time' in line: + pstat_value = float('0.' + line.split()[-1].split('.')[-1]) + psutil_value = p.cpu_times().system + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + if len(tested) != 2: + raise RuntimeError("couldn't find lines match in procstat out") + + # --- virtual_memory(); tests against sysctl + + @retry_before_failing() + def test_vmem_active(self): + syst = sysctl("vm.stats.vm.v_active_count") * PAGESIZE + self.assertAlmostEqual(psutil.virtual_memory().active, syst, + delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_vmem_inactive(self): + syst = sysctl("vm.stats.vm.v_inactive_count") * PAGESIZE + self.assertAlmostEqual(psutil.virtual_memory().inactive, syst, + delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_vmem_wired(self): + syst = sysctl("vm.stats.vm.v_wire_count") * PAGESIZE + self.assertAlmostEqual(psutil.virtual_memory().wired, syst, + delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_vmem_cached(self): + syst = sysctl("vm.stats.vm.v_cache_count") * PAGESIZE + self.assertAlmostEqual(psutil.virtual_memory().cached, syst, + delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_vmem_free(self): + syst = sysctl("vm.stats.vm.v_free_count") * PAGESIZE + self.assertAlmostEqual(psutil.virtual_memory().free, syst, + delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_vmem_buffers(self): + syst = sysctl("vfs.bufspace") + self.assertAlmostEqual(psutil.virtual_memory().buffers, syst, + delta=MEMORY_TOLERANCE) + + # --- virtual_memory(); tests against muse + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + def test_muse_vmem_total(self): + num = muse('Total') + self.assertEqual(psutil.virtual_memory().total, num) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_before_failing() + def test_muse_vmem_active(self): + num = muse('Active') + self.assertAlmostEqual(psutil.virtual_memory().active, num, + delta=MEMORY_TOLERANCE) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_before_failing() + def test_muse_vmem_inactive(self): + num = muse('Inactive') + self.assertAlmostEqual(psutil.virtual_memory().inactive, num, + delta=MEMORY_TOLERANCE) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_before_failing() + def test_muse_vmem_wired(self): + num = muse('Wired') + self.assertAlmostEqual(psutil.virtual_memory().wired, num, + delta=MEMORY_TOLERANCE) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_before_failing() + def test_muse_vmem_cached(self): + num = muse('Cache') + self.assertAlmostEqual(psutil.virtual_memory().cached, num, + delta=MEMORY_TOLERANCE) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_before_failing() + def test_muse_vmem_free(self): + num = muse('Free') + self.assertAlmostEqual(psutil.virtual_memory().free, num, + delta=MEMORY_TOLERANCE) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_before_failing() + def test_muse_vmem_buffers(self): + num = muse('Buffer') + self.assertAlmostEqual(psutil.virtual_memory().buffers, num, + delta=MEMORY_TOLERANCE) + + def test_cpu_stats_ctx_switches(self): + self.assertAlmostEqual(psutil.cpu_stats().ctx_switches, + sysctl('vm.stats.sys.v_swtch'), delta=1000) + + def test_cpu_stats_interrupts(self): + self.assertAlmostEqual(psutil.cpu_stats().interrupts, + 
sysctl('vm.stats.sys.v_intr'), delta=1000) + + def test_cpu_stats_soft_interrupts(self): + self.assertAlmostEqual(psutil.cpu_stats().soft_interrupts, + sysctl('vm.stats.sys.v_soft'), delta=1000) + + def test_cpu_stats_syscalls(self): + self.assertAlmostEqual(psutil.cpu_stats().syscalls, + sysctl('vm.stats.sys.v_syscall'), delta=1000) + + # def test_cpu_stats_traps(self): + # self.assertAlmostEqual(psutil.cpu_stats().traps, + # sysctl('vm.stats.sys.v_trap'), delta=1000) + + # --- others + + def test_boot_time(self): + s = sysctl('sysctl kern.boottime') + s = s[s.find(" sec = ") + 7:] + s = s[:s.find(',')] + btime = int(s) + self.assertEqual(btime, psutil.boot_time()) + + # --- sensors_battery + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_sensors_battery(self): + def secs2hours(secs): + m, s = divmod(secs, 60) + h, m = divmod(m, 60) + return "%d:%02d" % (h, m) + + out = sh("acpiconf -i 0") + fields = dict([(x.split('\t')[0], x.split('\t')[-1]) + for x in out.split("\n")]) + metrics = psutil.sensors_battery() + percent = int(fields['Remaining capacity:'].replace('%', '')) + remaining_time = fields['Remaining time:'] + self.assertEqual(metrics.percent, percent) + if remaining_time == 'unknown': + self.assertEqual(metrics.secsleft, psutil.POWER_TIME_UNLIMITED) + else: + self.assertEqual(secs2hours(metrics.secsleft), remaining_time) + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_sensors_battery_against_sysctl(self): + self.assertEqual(psutil.sensors_battery().percent, + sysctl("hw.acpi.battery.life")) + self.assertEqual(psutil.sensors_battery().power_plugged, + sysctl("hw.acpi.acline") == 1) + secsleft = psutil.sensors_battery().secsleft + if secsleft < 0: + self.assertEqual(sysctl("hw.acpi.battery.time"), -1) + else: + self.assertEqual(secsleft, sysctl("hw.acpi.battery.time") * 60) + + @unittest.skipIf(HAS_BATTERY, "has battery") + def test_sensors_battery_no_battery(self): + # If no battery is present one of these calls is supposed + # to fail, see: + # https://github.com/giampaolo/psutil/issues/1074 + with self.assertRaises(RuntimeError): + sysctl("hw.acpi.battery.life") + sysctl("hw.acpi.battery.time") + sysctl("hw.acpi.acline") + self.assertIsNone(psutil.sensors_battery()) + + +# ===================================================================== +# --- OpenBSD +# ===================================================================== + + +@unittest.skipIf(not OPENBSD, "OPENBSD only") +class OpenBSDSpecificTestCase(unittest.TestCase): + + def test_boot_time(self): + s = sysctl('kern.boottime') + sys_bt = datetime.datetime.strptime(s, "%a %b %d %H:%M:%S %Y") + psutil_bt = datetime.datetime.fromtimestamp(psutil.boot_time()) + self.assertEqual(sys_bt, psutil_bt) + + +# ===================================================================== +# --- NetBSD +# ===================================================================== + + +@unittest.skipIf(not NETBSD, "NETBSD only") +class NetBSDSpecificTestCase(unittest.TestCase): + + @staticmethod + def parse_meminfo(look_for): + with open('/proc/meminfo', 'rb') as f: + for line in f: + if line.startswith(look_for): + return int(line.split()[1]) * 1024 + raise ValueError("can't find %s" % look_for) + + def test_vmem_total(self): + self.assertEqual( + psutil.virtual_memory().total, self.parse_meminfo("MemTotal:")) + + def test_vmem_free(self): + self.assertAlmostEqual( + psutil.virtual_memory().free, self.parse_meminfo("MemFree:"), + delta=MEMORY_TOLERANCE) + + def test_vmem_buffers(self): + self.assertAlmostEqual( + 
psutil.virtual_memory().buffers, self.parse_meminfo("Buffers:"), + delta=MEMORY_TOLERANCE) + + def test_vmem_shared(self): + self.assertAlmostEqual( + psutil.virtual_memory().shared, self.parse_meminfo("MemShared:"), + delta=MEMORY_TOLERANCE) + + def test_swapmem_total(self): + self.assertAlmostEqual( + psutil.swap_memory().total, self.parse_meminfo("SwapTotal:"), + delta=MEMORY_TOLERANCE) + + def test_swapmem_free(self): + self.assertAlmostEqual( + psutil.swap_memory().free, self.parse_meminfo("SwapFree:"), + delta=MEMORY_TOLERANCE) + + def test_swapmem_used(self): + smem = psutil.swap_memory() + self.assertEqual(smem.used, smem.total - smem.free) + + def test_cpu_stats_interrupts(self): + with open('/proc/stat', 'rb') as f: + for line in f: + if line.startswith(b'intr'): + interrupts = int(line.split()[1]) + break + else: + raise ValueError("couldn't find line") + self.assertAlmostEqual( + psutil.cpu_stats().interrupts, interrupts, delta=1000) + + def test_cpu_stats_ctx_switches(self): + with open('/proc/stat', 'rb') as f: + for line in f: + if line.startswith(b'ctxt'): + ctx_switches = int(line.split()[1]) + break + else: + raise ValueError("couldn't find line") + self.assertAlmostEqual( + psutil.cpu_stats().ctx_switches, ctx_switches, delta=1000) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_connections.py b/server/www/packages/packages-windows/x86/psutil/tests/test_connections.py new file mode 100644 index 0000000..176e266 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_connections.py @@ -0,0 +1,525 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Tests for net_connections() and Process.connections() APIs.""" + +import os +import socket +import textwrap +from contextlib import closing +from socket import AF_INET +from socket import AF_INET6 +from socket import SOCK_DGRAM +from socket import SOCK_STREAM + +import psutil +from psutil import FREEBSD +from psutil import LINUX +from psutil import NETBSD +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil import WINDOWS +from psutil._common import supports_ipv6 +from psutil._compat import PY3 +from psutil.tests import AF_UNIX +from psutil.tests import bind_socket +from psutil.tests import bind_unix_socket +from psutil.tests import check_connection_ntuple +from psutil.tests import create_sockets +from psutil.tests import get_free_port +from psutil.tests import HAS_CONNECTIONS_UNIX +from psutil.tests import pyrun +from psutil.tests import reap_children +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import skip_on_access_denied +from psutil.tests import tcp_socketpair +from psutil.tests import TESTFN +from psutil.tests import TRAVIS +from psutil.tests import unittest +from psutil.tests import unix_socket_path +from psutil.tests import unix_socketpair +from psutil.tests import wait_for_file + + +thisproc = psutil.Process() + + +class Base(object): + + def setUp(self): + if not NETBSD: + # NetBSD opens a UNIX socket to /var/log/run. + cons = thisproc.connections(kind='all') + assert not cons, cons + + def tearDown(self): + safe_rmpath(TESTFN) + reap_children() + if not NETBSD: + # Make sure we closed all resources. 
+ # NetBSD opens a UNIX socket to /var/log/run. + cons = thisproc.connections(kind='all') + assert not cons, cons + + def get_conn_from_sock(self, sock): + cons = thisproc.connections(kind='all') + smap = dict([(c.fd, c) for c in cons]) + if NETBSD: + # NetBSD opens a UNIX socket to /var/log/run + # so there may be more connections. + return smap[sock.fileno()] + else: + self.assertEqual(len(cons), 1) + if cons[0].fd != -1: + self.assertEqual(smap[sock.fileno()].fd, sock.fileno()) + return cons[0] + + def check_socket(self, sock, conn=None): + """Given a socket, makes sure it matches the one obtained + via psutil. It assumes this process created one connection + only (the one supposed to be checked). + """ + if conn is None: + conn = self.get_conn_from_sock(sock) + check_connection_ntuple(conn) + + # fd, family, type + if conn.fd != -1: + self.assertEqual(conn.fd, sock.fileno()) + self.assertEqual(conn.family, sock.family) + # see: http://bugs.python.org/issue30204 + self.assertEqual( + conn.type, sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)) + + # local address + laddr = sock.getsockname() + if not laddr and PY3 and isinstance(laddr, bytes): + # See: http://bugs.python.org/issue30205 + laddr = laddr.decode() + if sock.family == AF_INET6: + laddr = laddr[:2] + if sock.family == AF_UNIX and OPENBSD: + # No addresses are set for UNIX sockets on OpenBSD. + pass + else: + self.assertEqual(conn.laddr, laddr) + + # XXX Solaris can't retrieve system-wide UNIX sockets + if sock.family == AF_UNIX and HAS_CONNECTIONS_UNIX: + cons = thisproc.connections(kind='all') + self.compare_procsys_connections(os.getpid(), cons) + return conn + + def compare_procsys_connections(self, pid, proc_cons, kind='all'): + """Given a process PID and its list of connections compare + those against system-wide connections retrieved via + psutil.net_connections. + """ + try: + sys_cons = psutil.net_connections(kind=kind) + except psutil.AccessDenied: + # On OSX, system-wide connections are retrieved by iterating + # over all processes + if OSX: + return + else: + raise + # Filter for this proc PID and exlucde PIDs from the tuple. 
+ sys_cons = [c[:-1] for c in sys_cons if c.pid == pid] + sys_cons.sort() + proc_cons.sort() + self.assertEqual(proc_cons, sys_cons) + + +# ===================================================================== +# --- Test unconnected sockets +# ===================================================================== + + +class TestUnconnectedSockets(Base, unittest.TestCase): + """Tests sockets which are open but not connected to anything.""" + + def test_tcp_v4(self): + addr = ("127.0.0.1", get_free_port()) + with closing(bind_socket(AF_INET, SOCK_STREAM, addr=addr)) as sock: + conn = self.check_socket(sock) + assert not conn.raddr + self.assertEqual(conn.status, psutil.CONN_LISTEN) + + @unittest.skipIf(not supports_ipv6(), "IPv6 not supported") + def test_tcp_v6(self): + addr = ("::1", get_free_port()) + with closing(bind_socket(AF_INET6, SOCK_STREAM, addr=addr)) as sock: + conn = self.check_socket(sock) + assert not conn.raddr + self.assertEqual(conn.status, psutil.CONN_LISTEN) + + def test_udp_v4(self): + addr = ("127.0.0.1", get_free_port()) + with closing(bind_socket(AF_INET, SOCK_DGRAM, addr=addr)) as sock: + conn = self.check_socket(sock) + assert not conn.raddr + self.assertEqual(conn.status, psutil.CONN_NONE) + + @unittest.skipIf(not supports_ipv6(), "IPv6 not supported") + def test_udp_v6(self): + addr = ("::1", get_free_port()) + with closing(bind_socket(AF_INET6, SOCK_DGRAM, addr=addr)) as sock: + conn = self.check_socket(sock) + assert not conn.raddr + self.assertEqual(conn.status, psutil.CONN_NONE) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_unix_tcp(self): + with unix_socket_path() as name: + with closing(bind_unix_socket(name, type=SOCK_STREAM)) as sock: + conn = self.check_socket(sock) + assert not conn.raddr + self.assertEqual(conn.status, psutil.CONN_NONE) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_unix_udp(self): + with unix_socket_path() as name: + with closing(bind_unix_socket(name, type=SOCK_STREAM)) as sock: + conn = self.check_socket(sock) + assert not conn.raddr + self.assertEqual(conn.status, psutil.CONN_NONE) + + +# ===================================================================== +# --- Test connected sockets +# ===================================================================== + + +class TestConnectedSocketPairs(Base, unittest.TestCase): + """Test socket pairs which are are actually connected to + each other. + """ + + # On SunOS, even after we close() it, the server socket stays around + # in TIME_WAIT state. + @unittest.skipIf(SUNOS, "unreliable on SUONS") + def test_tcp(self): + addr = ("127.0.0.1", get_free_port()) + assert not thisproc.connections(kind='tcp4') + server, client = tcp_socketpair(AF_INET, addr=addr) + try: + cons = thisproc.connections(kind='tcp4') + self.assertEqual(len(cons), 2) + self.assertEqual(cons[0].status, psutil.CONN_ESTABLISHED) + self.assertEqual(cons[1].status, psutil.CONN_ESTABLISHED) + # May not be fast enough to change state so it stays + # commenteed. 
+ # client.close() + # cons = thisproc.connections(kind='all') + # self.assertEqual(len(cons), 1) + # self.assertEqual(cons[0].status, psutil.CONN_CLOSE_WAIT) + finally: + server.close() + client.close() + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_unix(self): + with unix_socket_path() as name: + server, client = unix_socketpair(name) + try: + cons = thisproc.connections(kind='unix') + assert not (cons[0].laddr and cons[0].raddr) + assert not (cons[1].laddr and cons[1].raddr) + if NETBSD: + # On NetBSD creating a UNIX socket will cause + # a UNIX connection to /var/run/log. + cons = [c for c in cons if c.raddr != '/var/run/log'] + self.assertEqual(len(cons), 2) + if LINUX or FREEBSD or SUNOS: + # remote path is never set + self.assertEqual(cons[0].raddr, "") + self.assertEqual(cons[1].raddr, "") + # one local address should though + self.assertEqual(name, cons[0].laddr or cons[1].laddr) + elif OPENBSD: + # No addresses whatsoever here. + for addr in (cons[0].laddr, cons[0].raddr, + cons[1].laddr, cons[1].raddr): + self.assertEqual(addr, "") + else: + # On other systems either the laddr or raddr + # of both peers are set. + self.assertEqual(cons[0].laddr or cons[1].laddr, name) + self.assertEqual(cons[0].raddr or cons[1].raddr, name) + finally: + server.close() + client.close() + + @skip_on_access_denied(only_if=OSX) + def test_combos(self): + def check_conn(proc, conn, family, type, laddr, raddr, status, kinds): + all_kinds = ("all", "inet", "inet4", "inet6", "tcp", "tcp4", + "tcp6", "udp", "udp4", "udp6") + check_connection_ntuple(conn) + self.assertEqual(conn.family, family) + self.assertEqual(conn.type, type) + self.assertEqual(conn.laddr, laddr) + self.assertEqual(conn.raddr, raddr) + self.assertEqual(conn.status, status) + for kind in all_kinds: + cons = proc.connections(kind=kind) + if kind in kinds: + assert cons + else: + assert not cons, cons + # compare against system-wide connections + # XXX Solaris can't retrieve system-wide UNIX + # sockets. 
+ if HAS_CONNECTIONS_UNIX: + self.compare_procsys_connections(proc.pid, [conn]) + + tcp_template = textwrap.dedent(""" + import socket, time + s = socket.socket($family, socket.SOCK_STREAM) + s.bind(('$addr', 0)) + s.listen(1) + with open('$testfn', 'w') as f: + f.write(str(s.getsockname()[:2])) + time.sleep(60) + """) + + udp_template = textwrap.dedent(""" + import socket, time + s = socket.socket($family, socket.SOCK_DGRAM) + s.bind(('$addr', 0)) + with open('$testfn', 'w') as f: + f.write(str(s.getsockname()[:2])) + time.sleep(60) + """) + + from string import Template + testfile = os.path.basename(TESTFN) + tcp4_template = Template(tcp_template).substitute( + family=int(AF_INET), addr="127.0.0.1", testfn=testfile) + udp4_template = Template(udp_template).substitute( + family=int(AF_INET), addr="127.0.0.1", testfn=testfile) + tcp6_template = Template(tcp_template).substitute( + family=int(AF_INET6), addr="::1", testfn=testfile) + udp6_template = Template(udp_template).substitute( + family=int(AF_INET6), addr="::1", testfn=testfile) + + # launch various subprocess instantiating a socket of various + # families and types to enrich psutil results + tcp4_proc = pyrun(tcp4_template) + tcp4_addr = eval(wait_for_file(testfile)) + udp4_proc = pyrun(udp4_template) + udp4_addr = eval(wait_for_file(testfile)) + if supports_ipv6(): + tcp6_proc = pyrun(tcp6_template) + tcp6_addr = eval(wait_for_file(testfile)) + udp6_proc = pyrun(udp6_template) + udp6_addr = eval(wait_for_file(testfile)) + else: + tcp6_proc = None + udp6_proc = None + tcp6_addr = None + udp6_addr = None + + for p in thisproc.children(): + cons = p.connections() + self.assertEqual(len(cons), 1) + for conn in cons: + # TCP v4 + if p.pid == tcp4_proc.pid: + check_conn(p, conn, AF_INET, SOCK_STREAM, tcp4_addr, (), + psutil.CONN_LISTEN, + ("all", "inet", "inet4", "tcp", "tcp4")) + # UDP v4 + elif p.pid == udp4_proc.pid: + check_conn(p, conn, AF_INET, SOCK_DGRAM, udp4_addr, (), + psutil.CONN_NONE, + ("all", "inet", "inet4", "udp", "udp4")) + # TCP v6 + elif p.pid == getattr(tcp6_proc, "pid", None): + check_conn(p, conn, AF_INET6, SOCK_STREAM, tcp6_addr, (), + psutil.CONN_LISTEN, + ("all", "inet", "inet6", "tcp", "tcp6")) + # UDP v6 + elif p.pid == getattr(udp6_proc, "pid", None): + check_conn(p, conn, AF_INET6, SOCK_DGRAM, udp6_addr, (), + psutil.CONN_NONE, + ("all", "inet", "inet6", "udp", "udp6")) + + # err + self.assertRaises(ValueError, p.connections, kind='???') + + def test_multi_sockets_filtering(self): + with create_sockets() as socks: + cons = thisproc.connections(kind='all') + self.assertEqual(len(cons), len(socks)) + # tcp + cons = thisproc.connections(kind='tcp') + self.assertEqual(len(cons), 2 if supports_ipv6() else 1) + for conn in cons: + self.assertIn(conn.family, (AF_INET, AF_INET6)) + self.assertEqual(conn.type, SOCK_STREAM) + # tcp4 + cons = thisproc.connections(kind='tcp4') + self.assertEqual(len(cons), 1) + self.assertEqual(cons[0].family, AF_INET) + self.assertEqual(cons[0].type, SOCK_STREAM) + # tcp6 + if supports_ipv6(): + cons = thisproc.connections(kind='tcp6') + self.assertEqual(len(cons), 1) + self.assertEqual(cons[0].family, AF_INET6) + self.assertEqual(cons[0].type, SOCK_STREAM) + # udp + cons = thisproc.connections(kind='udp') + self.assertEqual(len(cons), 2 if supports_ipv6() else 1) + for conn in cons: + self.assertIn(conn.family, (AF_INET, AF_INET6)) + self.assertEqual(conn.type, SOCK_DGRAM) + # udp4 + cons = thisproc.connections(kind='udp4') + self.assertEqual(len(cons), 1) + 
self.assertEqual(cons[0].family, AF_INET) + self.assertEqual(cons[0].type, SOCK_DGRAM) + # udp6 + if supports_ipv6(): + cons = thisproc.connections(kind='udp6') + self.assertEqual(len(cons), 1) + self.assertEqual(cons[0].family, AF_INET6) + self.assertEqual(cons[0].type, SOCK_DGRAM) + # inet + cons = thisproc.connections(kind='inet') + self.assertEqual(len(cons), 4 if supports_ipv6() else 2) + for conn in cons: + self.assertIn(conn.family, (AF_INET, AF_INET6)) + self.assertIn(conn.type, (SOCK_STREAM, SOCK_DGRAM)) + # inet6 + if supports_ipv6(): + cons = thisproc.connections(kind='inet6') + self.assertEqual(len(cons), 2) + for conn in cons: + self.assertEqual(conn.family, AF_INET6) + self.assertIn(conn.type, (SOCK_STREAM, SOCK_DGRAM)) + # unix + if HAS_CONNECTIONS_UNIX: + cons = thisproc.connections(kind='unix') + self.assertEqual(len(cons), 3) + for conn in cons: + self.assertEqual(conn.family, AF_UNIX) + self.assertIn(conn.type, (SOCK_STREAM, SOCK_DGRAM)) + + +# ===================================================================== +# --- Miscellaneous tests +# ===================================================================== + + +class TestSystemWideConnections(Base, unittest.TestCase): + """Tests for net_connections().""" + + @skip_on_access_denied() + def test_it(self): + def check(cons, families, types_): + AF_UNIX = getattr(socket, 'AF_UNIX', object()) + for conn in cons: + self.assertIn(conn.family, families, msg=conn) + if conn.family != AF_UNIX: + self.assertIn(conn.type, types_, msg=conn) + check_connection_ntuple(conn) + + with create_sockets(): + from psutil._common import conn_tmap + for kind, groups in conn_tmap.items(): + # XXX: SunOS does not retrieve UNIX sockets. + if kind == 'unix' and not HAS_CONNECTIONS_UNIX: + continue + families, types_ = groups + cons = psutil.net_connections(kind) + self.assertEqual(len(cons), len(set(cons))) + check(cons, families, types_) + + self.assertRaises(ValueError, psutil.net_connections, kind='???') + + @skip_on_access_denied() + def test_multi_socks(self): + with create_sockets() as socks: + cons = [x for x in psutil.net_connections(kind='all') + if x.pid == os.getpid()] + self.assertEqual(len(cons), len(socks)) + + @skip_on_access_denied() + # See: https://travis-ci.org/giampaolo/psutil/jobs/237566297 + @unittest.skipIf(OSX and TRAVIS, "unreliable on OSX + TRAVIS") + def test_multi_sockets_procs(self): + # Creates multiple sub processes, each creating different + # sockets. For each process check that proc.connections() + # and net_connections() return the same results. 
+ # This is done mainly to check whether net_connections()'s + # pid is properly set, see: + # https://github.com/giampaolo/psutil/issues/1013 + with create_sockets() as socks: + expected = len(socks) + pids = [] + times = 10 + for i in range(times): + fname = os.path.realpath(TESTFN) + str(i) + src = textwrap.dedent("""\ + import time, os + from psutil.tests import create_sockets + with create_sockets(): + with open('%s', 'w') as f: + f.write(str(os.getpid())) + time.sleep(60) + """ % fname) + sproc = pyrun(src) + pids.append(sproc.pid) + self.addCleanup(safe_rmpath, fname) + + # sync + for i in range(times): + fname = TESTFN + str(i) + wait_for_file(fname) + + syscons = [x for x in psutil.net_connections(kind='all') if x.pid + in pids] + for pid in pids: + self.assertEqual(len([x for x in syscons if x.pid == pid]), + expected) + p = psutil.Process(pid) + self.assertEqual(len(p.connections('all')), expected) + + +# ===================================================================== +# --- Miscellaneous tests +# ===================================================================== + + +class TestMisc(unittest.TestCase): + + def test_connection_constants(self): + ints = [] + strs = [] + for name in dir(psutil): + if name.startswith('CONN_'): + num = getattr(psutil, name) + str_ = str(num) + assert str_.isupper(), str_ + self.assertNotIn(str, strs) + self.assertNotIn(num, ints) + ints.append(num) + strs.append(str_) + if SUNOS: + psutil.CONN_IDLE + psutil.CONN_BOUND + if WINDOWS: + psutil.CONN_DELETE_TCB + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_contracts.py b/server/www/packages/packages-windows/x86/psutil/tests/test_contracts.py new file mode 100644 index 0000000..855b53b --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_contracts.py @@ -0,0 +1,651 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Contracts tests. These tests mainly check API sanity in terms of +returned types and APIs availability. 
+Some of these are duplicates of tests test_system.py and test_process.py +""" + +import errno +import os +import stat +import time +import traceback +import warnings +from contextlib import closing + +from psutil import AIX +from psutil import BSD +from psutil import FREEBSD +from psutil import LINUX +from psutil import NETBSD +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil import WINDOWS +from psutil._compat import callable +from psutil._compat import long +from psutil.tests import bind_unix_socket +from psutil.tests import check_connection_ntuple +from psutil.tests import get_kernel_version +from psutil.tests import HAS_CONNECTIONS_UNIX +from psutil.tests import HAS_RLIMIT +from psutil.tests import HAS_SENSORS_FANS +from psutil.tests import HAS_SENSORS_TEMPERATURES +from psutil.tests import is_namedtuple +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import skip_on_access_denied +from psutil.tests import TESTFN +from psutil.tests import unittest +from psutil.tests import unix_socket_path +from psutil.tests import VALID_PROC_STATUSES +from psutil.tests import warn +import psutil + + +# =================================================================== +# --- APIs availability +# =================================================================== + + +class TestAvailability(unittest.TestCase): + """Make sure code reflects what doc promises in terms of APIs + availability. + """ + + def test_cpu_affinity(self): + hasit = LINUX or WINDOWS or FREEBSD + self.assertEqual(hasattr(psutil.Process, "cpu_affinity"), hasit) + + def test_win_service(self): + self.assertEqual(hasattr(psutil, "win_service_iter"), WINDOWS) + self.assertEqual(hasattr(psutil, "win_service_get"), WINDOWS) + + def test_PROCFS_PATH(self): + self.assertEqual(hasattr(psutil, "PROCFS_PATH"), + LINUX or SUNOS or AIX) + + def test_win_priority(self): + ae = self.assertEqual + ae(hasattr(psutil, "ABOVE_NORMAL_PRIORITY_CLASS"), WINDOWS) + ae(hasattr(psutil, "BELOW_NORMAL_PRIORITY_CLASS"), WINDOWS) + ae(hasattr(psutil, "HIGH_PRIORITY_CLASS"), WINDOWS) + ae(hasattr(psutil, "IDLE_PRIORITY_CLASS"), WINDOWS) + ae(hasattr(psutil, "NORMAL_PRIORITY_CLASS"), WINDOWS) + ae(hasattr(psutil, "REALTIME_PRIORITY_CLASS"), WINDOWS) + + def test_linux_ioprio(self): + ae = self.assertEqual + ae(hasattr(psutil, "IOPRIO_CLASS_NONE"), LINUX) + ae(hasattr(psutil, "IOPRIO_CLASS_RT"), LINUX) + ae(hasattr(psutil, "IOPRIO_CLASS_BE"), LINUX) + ae(hasattr(psutil, "IOPRIO_CLASS_IDLE"), LINUX) + + def test_linux_rlimit(self): + ae = self.assertEqual + hasit = LINUX and get_kernel_version() >= (2, 6, 36) + ae(hasattr(psutil.Process, "rlimit"), hasit) + ae(hasattr(psutil, "RLIM_INFINITY"), hasit) + ae(hasattr(psutil, "RLIMIT_AS"), hasit) + ae(hasattr(psutil, "RLIMIT_CORE"), hasit) + ae(hasattr(psutil, "RLIMIT_CPU"), hasit) + ae(hasattr(psutil, "RLIMIT_DATA"), hasit) + ae(hasattr(psutil, "RLIMIT_FSIZE"), hasit) + ae(hasattr(psutil, "RLIMIT_LOCKS"), hasit) + ae(hasattr(psutil, "RLIMIT_MEMLOCK"), hasit) + ae(hasattr(psutil, "RLIMIT_NOFILE"), hasit) + ae(hasattr(psutil, "RLIMIT_NPROC"), hasit) + ae(hasattr(psutil, "RLIMIT_RSS"), hasit) + ae(hasattr(psutil, "RLIMIT_STACK"), hasit) + + hasit = LINUX and get_kernel_version() >= (3, 0) + ae(hasattr(psutil, "RLIMIT_MSGQUEUE"), hasit) + ae(hasattr(psutil, "RLIMIT_NICE"), hasit) + ae(hasattr(psutil, "RLIMIT_RTPRIO"), hasit) + ae(hasattr(psutil, "RLIMIT_RTTIME"), hasit) + ae(hasattr(psutil, 
"RLIMIT_SIGPENDING"), hasit) + + def test_cpu_freq(self): + linux = (LINUX and + (os.path.exists("/sys/devices/system/cpu/cpufreq") or + os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"))) + self.assertEqual(hasattr(psutil, "cpu_freq"), linux or OSX or WINDOWS) + + def test_sensors_temperatures(self): + self.assertEqual(hasattr(psutil, "sensors_temperatures"), LINUX) + + def test_sensors_fans(self): + self.assertEqual(hasattr(psutil, "sensors_fans"), LINUX) + + def test_battery(self): + self.assertEqual(hasattr(psutil, "sensors_battery"), + LINUX or WINDOWS or FREEBSD or OSX) + + def test_proc_environ(self): + self.assertEqual(hasattr(psutil.Process, "environ"), + LINUX or OSX or WINDOWS) + + def test_proc_uids(self): + self.assertEqual(hasattr(psutil.Process, "uids"), POSIX) + + def test_proc_gids(self): + self.assertEqual(hasattr(psutil.Process, "uids"), POSIX) + + def test_proc_terminal(self): + self.assertEqual(hasattr(psutil.Process, "terminal"), POSIX) + + def test_proc_ionice(self): + self.assertEqual(hasattr(psutil.Process, "ionice"), LINUX or WINDOWS) + + def test_proc_rlimit(self): + self.assertEqual(hasattr(psutil.Process, "rlimit"), LINUX) + + def test_proc_io_counters(self): + hasit = hasattr(psutil.Process, "io_counters") + self.assertEqual(hasit, False if OSX or SUNOS else True) + + def test_proc_num_fds(self): + self.assertEqual(hasattr(psutil.Process, "num_fds"), POSIX) + + def test_proc_num_handles(self): + self.assertEqual(hasattr(psutil.Process, "num_handles"), WINDOWS) + + def test_proc_cpu_affinity(self): + self.assertEqual(hasattr(psutil.Process, "cpu_affinity"), + LINUX or WINDOWS or FREEBSD) + + def test_proc_cpu_num(self): + self.assertEqual(hasattr(psutil.Process, "cpu_num"), + LINUX or FREEBSD or SUNOS) + + def test_proc_memory_maps(self): + hasit = hasattr(psutil.Process, "memory_maps") + self.assertEqual(hasit, False if OPENBSD or NETBSD or AIX else True) + + +# =================================================================== +# --- Test deprecations +# =================================================================== + + +class TestDeprecations(unittest.TestCase): + + def test_memory_info_ex(self): + with warnings.catch_warnings(record=True) as ws: + psutil.Process().memory_info_ex() + w = ws[0] + self.assertIsInstance(w.category(), FutureWarning) + self.assertIn("memory_info_ex() is deprecated", str(w.message)) + self.assertIn("use memory_info() instead", str(w.message)) + + +# =================================================================== +# --- System API types +# =================================================================== + + +class TestSystem(unittest.TestCase): + """Check the return types of system related APIs. + Mainly we want to test we never return unicode on Python 2, see: + https://github.com/giampaolo/psutil/issues/1039 + """ + + @classmethod + def setUpClass(cls): + cls.proc = psutil.Process() + + def tearDown(self): + safe_rmpath(TESTFN) + + def test_cpu_times(self): + # Duplicate of test_system.py. Keep it anyway. + ret = psutil.cpu_times() + assert is_namedtuple(ret) + for n in ret: + self.assertIsInstance(n, float) + self.assertGreaterEqual(n, 0) + + def test_io_counters(self): + # Duplicate of test_system.py. Keep it anyway. + for k in psutil.disk_io_counters(perdisk=True): + self.assertIsInstance(k, str) + + def test_disk_partitions(self): + # Duplicate of test_system.py. Keep it anyway. 
+ for disk in psutil.disk_partitions(): + self.assertIsInstance(disk.device, str) + self.assertIsInstance(disk.mountpoint, str) + self.assertIsInstance(disk.fstype, str) + self.assertIsInstance(disk.opts, str) + + @unittest.skipIf(not POSIX, 'POSIX only') + @unittest.skipIf(not HAS_CONNECTIONS_UNIX, "can't list UNIX sockets") + @skip_on_access_denied(only_if=OSX) + def test_net_connections(self): + with unix_socket_path() as name: + with closing(bind_unix_socket(name)): + cons = psutil.net_connections(kind='unix') + assert cons + for conn in cons: + self.assertIsInstance(conn.laddr, str) + + def test_net_if_addrs(self): + # Duplicate of test_system.py. Keep it anyway. + for ifname, addrs in psutil.net_if_addrs().items(): + self.assertIsInstance(ifname, str) + for addr in addrs: + self.assertIsInstance(addr.address, str) + self.assertIsInstance(addr.netmask, (str, type(None))) + self.assertIsInstance(addr.broadcast, (str, type(None))) + + def test_net_if_stats(self): + # Duplicate of test_system.py. Keep it anyway. + for ifname, _ in psutil.net_if_stats().items(): + self.assertIsInstance(ifname, str) + + def test_net_io_counters(self): + # Duplicate of test_system.py. Keep it anyway. + for ifname, _ in psutil.net_io_counters(pernic=True).items(): + self.assertIsInstance(ifname, str) + + @unittest.skipIf(not HAS_SENSORS_FANS, "not supported") + def test_sensors_fans(self): + # Duplicate of test_system.py. Keep it anyway. + for name, units in psutil.sensors_fans().items(): + self.assertIsInstance(name, str) + for unit in units: + self.assertIsInstance(unit.label, str) + + @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported") + def test_sensors_temperatures(self): + # Duplicate of test_system.py. Keep it anyway. + for name, units in psutil.sensors_temperatures().items(): + self.assertIsInstance(name, str) + for unit in units: + self.assertIsInstance(unit.label, str) + + def test_users(self): + # Duplicate of test_system.py. Keep it anyway. + for user in psutil.users(): + self.assertIsInstance(user.name, str) + self.assertIsInstance(user.terminal, (str, type(None))) + self.assertIsInstance(user.host, (str, type(None))) + self.assertIsInstance(user.pid, (int, type(None))) + + +# =================================================================== +# --- Featch all processes test +# =================================================================== + + +class TestFetchAllProcesses(unittest.TestCase): + """Test which iterates over all running processes and performs + some sanity checks against Process API's returned values. 
+ """ + + def setUp(self): + if POSIX: + import pwd + import grp + users = pwd.getpwall() + groups = grp.getgrall() + self.all_uids = set([x.pw_uid for x in users]) + self.all_usernames = set([x.pw_name for x in users]) + self.all_gids = set([x.gr_gid for x in groups]) + + def test_fetch_all(self): + valid_procs = 0 + excluded_names = set([ + 'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait', + 'as_dict', 'parent', 'children', 'memory_info_ex', 'oneshot', + ]) + if LINUX and not HAS_RLIMIT: + excluded_names.add('rlimit') + attrs = [] + for name in dir(psutil.Process): + if name.startswith("_"): + continue + if name in excluded_names: + continue + attrs.append(name) + + default = object() + failures = [] + for p in psutil.process_iter(): + with p.oneshot(): + for name in attrs: + ret = default + try: + args = () + kwargs = {} + attr = getattr(p, name, None) + if attr is not None and callable(attr): + if name == 'rlimit': + args = (psutil.RLIMIT_NOFILE,) + elif name == 'memory_maps': + kwargs = {'grouped': False} + ret = attr(*args, **kwargs) + else: + ret = attr + valid_procs += 1 + except NotImplementedError: + msg = "%r was skipped because not implemented" % ( + self.__class__.__name__ + '.test_' + name) + warn(msg) + except (psutil.NoSuchProcess, psutil.AccessDenied) as err: + self.assertEqual(err.pid, p.pid) + if err.name: + # make sure exception's name attr is set + # with the actual process name + self.assertEqual(err.name, p.name()) + assert str(err) + assert err.msg + except Exception as err: + s = '\n' + '=' * 70 + '\n' + s += "FAIL: test_%s (proc=%s" % (name, p) + if ret != default: + s += ", ret=%s)" % repr(ret) + s += ')\n' + s += '-' * 70 + s += "\n%s" % traceback.format_exc() + s = "\n".join((" " * 4) + i for i in s.splitlines()) + s += '\n' + failures.append(s) + break + else: + if ret not in (0, 0.0, [], None, '', {}): + assert ret, ret + meth = getattr(self, name) + meth(ret, p) + + if failures: + self.fail(''.join(failures)) + + # we should always have a non-empty list, not including PID 0 etc. + # special cases. 
+ assert valid_procs + + def cmdline(self, ret, proc): + self.assertIsInstance(ret, list) + for part in ret: + self.assertIsInstance(part, str) + + def exe(self, ret, proc): + self.assertIsInstance(ret, (str, type(None))) + if not ret: + self.assertEqual(ret, '') + else: + assert os.path.isabs(ret), ret + # Note: os.stat() may return False even if the file is there + # hence we skip the test, see: + # http://stackoverflow.com/questions/3112546/os-path-exists-lies + if POSIX and os.path.isfile(ret): + if hasattr(os, 'access') and hasattr(os, "X_OK"): + # XXX may fail on OSX + assert os.access(ret, os.X_OK) + + def pid(self, ret, proc): + self.assertIsInstance(ret, int) + self.assertGreaterEqual(ret, 0) + + def ppid(self, ret, proc): + self.assertIsInstance(ret, (int, long)) + self.assertGreaterEqual(ret, 0) + + def name(self, ret, proc): + self.assertIsInstance(ret, str) + # on AIX, "" processes don't have names + if not AIX: + assert ret + + def create_time(self, ret, proc): + self.assertIsInstance(ret, float) + try: + self.assertGreaterEqual(ret, 0) + except AssertionError: + # XXX + if OPENBSD and proc.status() == psutil.STATUS_ZOMBIE: + pass + else: + raise + # this can't be taken for granted on all platforms + # self.assertGreaterEqual(ret, psutil.boot_time()) + # make sure returned value can be pretty printed + # with strftime + time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret)) + + def uids(self, ret, proc): + assert is_namedtuple(ret) + for uid in ret: + self.assertIsInstance(uid, int) + self.assertGreaterEqual(uid, 0) + self.assertIn(uid, self.all_uids) + + def gids(self, ret, proc): + assert is_namedtuple(ret) + # note: testing all gids as above seems not to be reliable for + # gid == 30 (nodoby); not sure why. + for gid in ret: + self.assertIsInstance(gid, int) + if not OSX and not NETBSD: + self.assertGreaterEqual(gid, 0) + self.assertIn(gid, self.all_gids) + + def username(self, ret, proc): + self.assertIsInstance(ret, str) + assert ret + if POSIX: + self.assertIn(ret, self.all_usernames) + + def status(self, ret, proc): + self.assertIsInstance(ret, str) + assert ret + self.assertNotEqual(ret, '?') # XXX + self.assertIn(ret, VALID_PROC_STATUSES) + + def io_counters(self, ret, proc): + assert is_namedtuple(ret) + for field in ret: + self.assertIsInstance(field, (int, long)) + if field != -1: + self.assertGreaterEqual(field, 0) + + def ionice(self, ret, proc): + if POSIX: + assert is_namedtuple(ret) + for field in ret: + self.assertIsInstance(field, int) + if LINUX: + self.assertGreaterEqual(ret.ioclass, 0) + self.assertGreaterEqual(ret.value, 0) + else: + self.assertGreaterEqual(ret, 0) + self.assertIn(ret, (0, 1, 2)) + + def num_threads(self, ret, proc): + self.assertIsInstance(ret, int) + self.assertGreaterEqual(ret, 1) + + def threads(self, ret, proc): + self.assertIsInstance(ret, list) + for t in ret: + assert is_namedtuple(t) + self.assertGreaterEqual(t.id, 0) + self.assertGreaterEqual(t.user_time, 0) + self.assertGreaterEqual(t.system_time, 0) + for field in t: + self.assertIsInstance(field, (int, float)) + + def cpu_times(self, ret, proc): + assert is_namedtuple(ret) + for n in ret: + self.assertIsInstance(n, float) + self.assertGreaterEqual(n, 0) + # TODO: check ntuple fields + + def cpu_percent(self, ret, proc): + self.assertIsInstance(ret, float) + assert 0.0 <= ret <= 100.0, ret + + def cpu_num(self, ret, proc): + self.assertIsInstance(ret, int) + if FREEBSD and ret == -1: + return + self.assertGreaterEqual(ret, 0) + if psutil.cpu_count() == 1: + 
self.assertEqual(ret, 0) + self.assertIn(ret, list(range(psutil.cpu_count()))) + + def memory_info(self, ret, proc): + assert is_namedtuple(ret) + for value in ret: + self.assertIsInstance(value, (int, long)) + self.assertGreaterEqual(value, 0) + if POSIX and not AIX and ret.vms != 0: + # VMS is always supposed to be the highest + for name in ret._fields: + if name != 'vms': + value = getattr(ret, name) + self.assertGreater(ret.vms, value, msg=ret) + elif WINDOWS: + self.assertGreaterEqual(ret.peak_wset, ret.wset) + self.assertGreaterEqual(ret.peak_paged_pool, ret.paged_pool) + self.assertGreaterEqual(ret.peak_nonpaged_pool, ret.nonpaged_pool) + self.assertGreaterEqual(ret.peak_pagefile, ret.pagefile) + + def memory_full_info(self, ret, proc): + assert is_namedtuple(ret) + total = psutil.virtual_memory().total + for name in ret._fields: + value = getattr(ret, name) + self.assertIsInstance(value, (int, long)) + self.assertGreaterEqual(value, 0, msg=(name, value)) + self.assertLessEqual(value, total, msg=(name, value, total)) + + if LINUX: + self.assertGreaterEqual(ret.pss, ret.uss) + + def open_files(self, ret, proc): + self.assertIsInstance(ret, list) + for f in ret: + self.assertIsInstance(f.fd, int) + self.assertIsInstance(f.path, str) + if WINDOWS: + self.assertEqual(f.fd, -1) + elif LINUX: + self.assertIsInstance(f.position, int) + self.assertIsInstance(f.mode, str) + self.assertIsInstance(f.flags, int) + self.assertGreaterEqual(f.position, 0) + self.assertIn(f.mode, ('r', 'w', 'a', 'r+', 'a+')) + self.assertGreater(f.flags, 0) + elif BSD and not f.path: + # XXX see: https://github.com/giampaolo/psutil/issues/595 + continue + assert os.path.isabs(f.path), f + assert os.path.isfile(f.path), f + + def num_fds(self, ret, proc): + self.assertIsInstance(ret, int) + self.assertGreaterEqual(ret, 0) + + def connections(self, ret, proc): + self.assertEqual(len(ret), len(set(ret))) + for conn in ret: + check_connection_ntuple(conn) + + def cwd(self, ret, proc): + if ret: # 'ret' can be None or empty + self.assertIsInstance(ret, str) + assert os.path.isabs(ret), ret + try: + st = os.stat(ret) + except OSError as err: + if WINDOWS and err.errno in \ + psutil._psplatform.ACCESS_DENIED_SET: + pass + # directory has been removed in mean time + elif err.errno != errno.ENOENT: + raise + else: + assert stat.S_ISDIR(st.st_mode) + + def memory_percent(self, ret, proc): + self.assertIsInstance(ret, float) + assert 0 <= ret <= 100, ret + + def is_running(self, ret, proc): + self.assertIsInstance(ret, bool) + + def cpu_affinity(self, ret, proc): + self.assertIsInstance(ret, list) + assert ret != [], ret + cpus = range(psutil.cpu_count()) + for n in ret: + self.assertIsInstance(n, int) + self.assertIn(n, cpus) + + def terminal(self, ret, proc): + self.assertIsInstance(ret, (str, type(None))) + if ret is not None: + assert os.path.isabs(ret), ret + assert os.path.exists(ret), ret + + def memory_maps(self, ret, proc): + for nt in ret: + self.assertIsInstance(nt.addr, str) + self.assertIsInstance(nt.perms, str) + self.assertIsInstance(nt.path, str) + for fname in nt._fields: + value = getattr(nt, fname) + if fname == 'path': + if not value.startswith('['): + assert os.path.isabs(nt.path), nt.path + # commented as on Linux we might get + # '/foo/bar (deleted)' + # assert os.path.exists(nt.path), nt.path + elif fname in ('addr', 'perms'): + assert value + else: + self.assertIsInstance(value, (int, long)) + self.assertGreaterEqual(value, 0) + + def num_handles(self, ret, proc): + self.assertIsInstance(ret, int) + 
self.assertGreaterEqual(ret, 0) + + def nice(self, ret, proc): + self.assertIsInstance(ret, int) + if POSIX: + assert -20 <= ret <= 20, ret + else: + priorities = [getattr(psutil, x) for x in dir(psutil) + if x.endswith('_PRIORITY_CLASS')] + self.assertIn(ret, priorities) + + def num_ctx_switches(self, ret, proc): + assert is_namedtuple(ret) + for value in ret: + self.assertIsInstance(value, (int, long)) + self.assertGreaterEqual(value, 0) + + def rlimit(self, ret, proc): + self.assertIsInstance(ret, tuple) + self.assertEqual(len(ret), 2) + self.assertGreaterEqual(ret[0], -1) + self.assertGreaterEqual(ret[1], -1) + + def environ(self, ret, proc): + self.assertIsInstance(ret, dict) + for k, v in ret.items(): + self.assertIsInstance(k, str) + self.assertIsInstance(v, str) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_linux.py b/server/www/packages/packages-windows/x86/psutil/tests/test_linux.py new file mode 100644 index 0000000..5d345ae --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_linux.py @@ -0,0 +1,1911 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Linux specific tests.""" + +from __future__ import division +import collections +import contextlib +import errno +import glob +import io +import os +import pprint +import re +import shutil +import socket +import struct +import tempfile +import textwrap +import time +import warnings + +import psutil +from psutil import LINUX +from psutil._compat import PY3 +from psutil._compat import u +from psutil.tests import call_until +from psutil.tests import HAS_BATTERY +from psutil.tests import HAS_CPU_FREQ +from psutil.tests import HAS_RLIMIT +from psutil.tests import MEMORY_TOLERANCE +from psutil.tests import mock +from psutil.tests import PYPY +from psutil.tests import pyrun +from psutil.tests import reap_children +from psutil.tests import reload_module +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import sh +from psutil.tests import skip_on_not_implemented +from psutil.tests import TESTFN +from psutil.tests import ThreadTask +from psutil.tests import TRAVIS +from psutil.tests import unittest +from psutil.tests import which + + +HERE = os.path.abspath(os.path.dirname(__file__)) +SIOCGIFADDR = 0x8915 +SIOCGIFCONF = 0x8912 +SIOCGIFHWADDR = 0x8927 +if LINUX: + SECTOR_SIZE = 512 + + +# ===================================================================== +# --- utils +# ===================================================================== + + +def get_ipv4_address(ifname): + import fcntl + ifname = ifname[:15] + if PY3: + ifname = bytes(ifname, 'ascii') + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + with contextlib.closing(s): + return socket.inet_ntoa( + fcntl.ioctl(s.fileno(), + SIOCGIFADDR, + struct.pack('256s', ifname))[20:24]) + + +def get_mac_address(ifname): + import fcntl + ifname = ifname[:15] + if PY3: + ifname = bytes(ifname, 'ascii') + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + with contextlib.closing(s): + info = fcntl.ioctl( + s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname)) + if PY3: + def ord(x): + return x + else: + import __builtin__ + ord = __builtin__.ord + return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1] + 
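The two ioctl-based helpers above (get_ipv4_address via SIOCGIFADDR, get_mac_address via SIOCGIFHWADDR) exist only so the network tests further down can cross-check psutil's answers against the kernel directly. A minimal sketch of that comparison follows, assuming a Linux host and the two helpers defined above being in scope; the crosscheck_net_if_addrs name is illustrative and not part of psutil or of this patch.

import socket

import psutil


def crosscheck_net_if_addrs():
    # Walk every interface psutil knows about and compare each address
    # against what the raw ioctl helpers return for the same interface name.
    for ifname, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            if addr.family == psutil.AF_LINK:
                # Link-layer (MAC) address, as reported by SIOCGIFHWADDR.
                assert addr.address == get_mac_address(ifname)
            elif addr.family == socket.AF_INET:
                # IPv4 address, as reported by SIOCGIFADDR.
                assert addr.address == get_ipv4_address(ifname)

This mirrors what TestSystemNetwork.test_net_if_addrs_ips does later in this file: psutil.AF_LINK entries carry the MAC address, while socket.AF_INET entries carry the IPv4 address.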
+ +def free_swap(): + """Parse 'free' cmd and return swap memory's s total, used and free + values. + """ + out = sh('free -b') + lines = out.split('\n') + for line in lines: + if line.startswith('Swap'): + _, total, used, free = line.split() + nt = collections.namedtuple('free', 'total used free') + return nt(int(total), int(used), int(free)) + raise ValueError( + "can't find 'Swap' in 'free' output:\n%s" % '\n'.join(lines)) + + +def free_physmem(): + """Parse 'free' cmd and return physical memory's total, used + and free values. + """ + # Note: free can have 2 different formats, invalidating 'shared' + # and 'cached' memory which may have different positions so we + # do not return them. + # https://github.com/giampaolo/psutil/issues/538#issuecomment-57059946 + out = sh('free -b') + lines = out.split('\n') + for line in lines: + if line.startswith('Mem'): + total, used, free, shared = \ + [int(x) for x in line.split()[1:5]] + nt = collections.namedtuple( + 'free', 'total used free shared output') + return nt(total, used, free, shared, out) + raise ValueError( + "can't find 'Mem' in 'free' output:\n%s" % '\n'.join(lines)) + + +def vmstat(stat): + out = sh("vmstat -s") + for line in out.split("\n"): + line = line.strip() + if stat in line: + return int(line.split(' ')[0]) + raise ValueError("can't find %r in 'vmstat' output" % stat) + + +def get_free_version_info(): + out = sh("free -V").strip() + return tuple(map(int, out.split()[-1].split('.'))) + + +# ===================================================================== +# --- system virtual memory +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSystemVirtualMemory(unittest.TestCase): + + def test_total(self): + # free_value = free_physmem().total + # psutil_value = psutil.virtual_memory().total + # self.assertEqual(free_value, psutil_value) + vmstat_value = vmstat('total memory') * 1024 + psutil_value = psutil.virtual_memory().total + self.assertAlmostEqual(vmstat_value, psutil_value) + + # Older versions of procps used slab memory to calculate used memory. 
+ # This got changed in: + # https://gitlab.com/procps-ng/procps/commit/ + # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e + @unittest.skipIf(LINUX and get_free_version_info() < (3, 3, 12), + "old free version") + @retry_before_failing() + def test_used(self): + free = free_physmem() + free_value = free.used + psutil_value = psutil.virtual_memory().used + self.assertAlmostEqual( + free_value, psutil_value, delta=MEMORY_TOLERANCE, + msg='%s %s \n%s' % (free_value, psutil_value, free.output)) + + @unittest.skipIf(TRAVIS, "unreliable on TRAVIS") + @retry_before_failing() + def test_free(self): + # _, _, free_value, _ = free_physmem() + # psutil_value = psutil.virtual_memory().free + # self.assertAlmostEqual( + # free_value, psutil_value, delta=MEMORY_TOLERANCE) + vmstat_value = vmstat('free memory') * 1024 + psutil_value = psutil.virtual_memory().free + self.assertAlmostEqual( + vmstat_value, psutil_value, delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_buffers(self): + vmstat_value = vmstat('buffer memory') * 1024 + psutil_value = psutil.virtual_memory().buffers + self.assertAlmostEqual( + vmstat_value, psutil_value, delta=MEMORY_TOLERANCE) + + # https://travis-ci.org/giampaolo/psutil/jobs/226719664 + @unittest.skipIf(TRAVIS, "unreliable on TRAVIS") + @retry_before_failing() + def test_active(self): + vmstat_value = vmstat('active memory') * 1024 + psutil_value = psutil.virtual_memory().active + self.assertAlmostEqual( + vmstat_value, psutil_value, delta=MEMORY_TOLERANCE) + + # https://travis-ci.org/giampaolo/psutil/jobs/227242952 + @unittest.skipIf(TRAVIS, "unreliable on TRAVIS") + @retry_before_failing() + def test_inactive(self): + vmstat_value = vmstat('inactive memory') * 1024 + psutil_value = psutil.virtual_memory().inactive + self.assertAlmostEqual( + vmstat_value, psutil_value, delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_shared(self): + free = free_physmem() + free_value = free.shared + if free_value == 0: + raise unittest.SkipTest("free does not support 'shared' column") + psutil_value = psutil.virtual_memory().shared + self.assertAlmostEqual( + free_value, psutil_value, delta=MEMORY_TOLERANCE, + msg='%s %s \n%s' % (free_value, psutil_value, free.output)) + + @retry_before_failing() + def test_available(self): + # "free" output format has changed at some point: + # https://github.com/giampaolo/psutil/issues/538#issuecomment-147192098 + out = sh("free -b") + lines = out.split('\n') + if 'available' not in lines[0]: + raise unittest.SkipTest("free does not support 'available' column") + else: + free_value = int(lines[1].split()[-1]) + psutil_value = psutil.virtual_memory().available + self.assertAlmostEqual( + free_value, psutil_value, delta=MEMORY_TOLERANCE, + msg='%s %s \n%s' % (free_value, psutil_value, out)) + + def test_warnings_on_misses(self): + # Emulate a case where /proc/meminfo provides few info. + # psutil is supposed to set the missing fields to 0 and + # raise a warning. 
+ def open_mock(name, *args, **kwargs): + if name == '/proc/meminfo': + return io.BytesIO(textwrap.dedent("""\ + Active(anon): 6145416 kB + Active(file): 2950064 kB + Inactive(anon): 574764 kB + Inactive(file): 1567648 kB + MemAvailable: -1 kB + MemFree: 2057400 kB + MemTotal: 16325648 kB + SReclaimable: 346648 kB + """).encode()) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, create=True, side_effect=open_mock) as m: + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") + ret = psutil.virtual_memory() + assert m.called + self.assertEqual(len(ws), 1) + w = ws[0] + assert w.filename.endswith('psutil/_pslinux.py') + self.assertIn( + "memory stats couldn't be determined", str(w.message)) + self.assertIn("cached", str(w.message)) + self.assertIn("shared", str(w.message)) + self.assertIn("active", str(w.message)) + self.assertIn("inactive", str(w.message)) + self.assertIn("buffers", str(w.message)) + self.assertIn("available", str(w.message)) + self.assertEqual(ret.cached, 0) + self.assertEqual(ret.active, 0) + self.assertEqual(ret.inactive, 0) + self.assertEqual(ret.shared, 0) + self.assertEqual(ret.buffers, 0) + self.assertEqual(ret.available, 0) + + def test_avail_old_percent(self): + # Make sure that our calculation of avail mem for old kernels + # is off by max 10%. + from psutil._pslinux import calculate_avail_vmem + from psutil._pslinux import open_binary + + mems = {} + with open_binary('/proc/meminfo') as f: + for line in f: + fields = line.split() + mems[fields[0]] = int(fields[1]) * 1024 + + a = calculate_avail_vmem(mems) + if b'MemAvailable:' in mems: + b = mems[b'MemAvailable:'] + diff_percent = abs(a - b) / a * 100 + self.assertLess(diff_percent, 10) + + def test_avail_old_comes_from_kernel(self): + # Make sure "MemAvailable:" coluimn is used instead of relying + # on our internal algorithm to calculate avail mem. 
+ def open_mock(name, *args, **kwargs): + if name == "/proc/meminfo": + return io.BytesIO(textwrap.dedent("""\ + Active: 9444728 kB + Active(anon): 6145416 kB + Active(file): 2950064 kB + Buffers: 287952 kB + Cached: 4818144 kB + Inactive(file): 1578132 kB + Inactive(anon): 574764 kB + Inactive(file): 1567648 kB + MemAvailable: 6574984 kB + MemFree: 2057400 kB + MemTotal: 16325648 kB + Shmem: 577588 kB + SReclaimable: 346648 kB + """).encode()) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, create=True, side_effect=open_mock) as m: + with warnings.catch_warnings(record=True) as ws: + ret = psutil.virtual_memory() + assert m.called + self.assertEqual(ret.available, 6574984 * 1024) + w = ws[0] + self.assertIn( + "inactive memory stats couldn't be determined", str(w.message)) + + def test_avail_old_missing_fields(self): + # Remove Active(file), Inactive(file) and SReclaimable + # from /proc/meminfo and make sure the fallback is used + # (free + cached), + def open_mock(name, *args, **kwargs): + if name == "/proc/meminfo": + return io.BytesIO(textwrap.dedent("""\ + Active: 9444728 kB + Active(anon): 6145416 kB + Buffers: 287952 kB + Cached: 4818144 kB + Inactive(file): 1578132 kB + Inactive(anon): 574764 kB + MemFree: 2057400 kB + MemTotal: 16325648 kB + Shmem: 577588 kB + """).encode()) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, create=True, side_effect=open_mock) as m: + with warnings.catch_warnings(record=True) as ws: + ret = psutil.virtual_memory() + assert m.called + self.assertEqual(ret.available, 2057400 * 1024 + 4818144 * 1024) + w = ws[0] + self.assertIn( + "inactive memory stats couldn't be determined", str(w.message)) + + def test_avail_old_missing_zoneinfo(self): + # Remove /proc/zoneinfo file. Make sure fallback is used + # (free + cached). 
+ def open_mock(name, *args, **kwargs): + if name == "/proc/meminfo": + return io.BytesIO(textwrap.dedent("""\ + Active: 9444728 kB + Active(anon): 6145416 kB + Active(file): 2950064 kB + Buffers: 287952 kB + Cached: 4818144 kB + Inactive(file): 1578132 kB + Inactive(anon): 574764 kB + Inactive(file): 1567648 kB + MemFree: 2057400 kB + MemTotal: 16325648 kB + Shmem: 577588 kB + SReclaimable: 346648 kB + """).encode()) + elif name == "/proc/zoneinfo": + raise IOError(errno.ENOENT, 'no such file or directory') + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, create=True, side_effect=open_mock) as m: + with warnings.catch_warnings(record=True) as ws: + ret = psutil.virtual_memory() + assert m.called + self.assertEqual(ret.available, 2057400 * 1024 + 4818144 * 1024) + w = ws[0] + self.assertIn( + "inactive memory stats couldn't be determined", str(w.message)) + + +# ===================================================================== +# --- system swap memory +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSystemSwapMemory(unittest.TestCase): + + @staticmethod + def meminfo_has_swap_info(): + """Return True if /proc/meminfo provides swap metrics.""" + with open("/proc/meminfo") as f: + data = f.read() + return 'SwapTotal:' in data and 'SwapFree:' in data + + def test_total(self): + free_value = free_swap().total + psutil_value = psutil.swap_memory().total + return self.assertAlmostEqual( + free_value, psutil_value, delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_used(self): + free_value = free_swap().used + psutil_value = psutil.swap_memory().used + return self.assertAlmostEqual( + free_value, psutil_value, delta=MEMORY_TOLERANCE) + + @retry_before_failing() + def test_free(self): + free_value = free_swap().free + psutil_value = psutil.swap_memory().free + return self.assertAlmostEqual( + free_value, psutil_value, delta=MEMORY_TOLERANCE) + + def test_missing_sin_sout(self): + with mock.patch('psutil._pslinux.open', create=True) as m: + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") + ret = psutil.swap_memory() + assert m.called + self.assertEqual(len(ws), 1) + w = ws[0] + assert w.filename.endswith('psutil/_pslinux.py') + self.assertIn( + "'sin' and 'sout' swap memory stats couldn't " + "be determined", str(w.message)) + self.assertEqual(ret.sin, 0) + self.assertEqual(ret.sout, 0) + + def test_no_vmstat_mocked(self): + # see https://github.com/giampaolo/psutil/issues/722 + def open_mock(name, *args, **kwargs): + if name == "/proc/vmstat": + raise IOError(errno.ENOENT, 'no such file or directory') + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, create=True, side_effect=open_mock) as m: + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") + ret = psutil.swap_memory() + assert m.called + self.assertEqual(len(ws), 1) + w = ws[0] + assert w.filename.endswith('psutil/_pslinux.py') + self.assertIn( + "'sin' and 'sout' swap memory stats couldn't " + "be determined and were set to 0", + str(w.message)) + self.assertEqual(ret.sin, 0) + self.assertEqual(ret.sout, 0) + + def test_meminfo_against_sysinfo(self): + # Make sure the content of /proc/meminfo about swap memory + # matches sysinfo() syscall, see: + # 
https://github.com/giampaolo/psutil/issues/1015 + if not self.meminfo_has_swap_info(): + return unittest.skip("/proc/meminfo has no swap metrics") + with mock.patch('psutil._pslinux.cext.linux_sysinfo') as m: + swap = psutil.swap_memory() + assert not m.called + import psutil._psutil_linux as cext + _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo() + total *= unit_multiplier + free *= unit_multiplier + self.assertEqual(swap.total, total) + self.assertEqual(swap.free, free) + + def test_emulate_meminfo_has_no_metrics(self): + # Emulate a case where /proc/meminfo provides no swap metrics + # in which case sysinfo() syscall is supposed to be used + # as a fallback. + def open_mock(name, *args, **kwargs): + if name == "/proc/meminfo": + return io.BytesIO(b"") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, create=True, side_effect=open_mock) as m: + psutil.swap_memory() + assert m.called + + +# ===================================================================== +# --- system CPU +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSystemCPU(unittest.TestCase): + + @unittest.skipIf(TRAVIS, "unknown failure on travis") + def test_cpu_times(self): + fields = psutil.cpu_times()._fields + kernel_ver = re.findall(r'\d+\.\d+\.\d+', os.uname()[2])[0] + kernel_ver_info = tuple(map(int, kernel_ver.split('.'))) + if kernel_ver_info >= (2, 6, 11): + self.assertIn('steal', fields) + else: + self.assertNotIn('steal', fields) + if kernel_ver_info >= (2, 6, 24): + self.assertIn('guest', fields) + else: + self.assertNotIn('guest', fields) + if kernel_ver_info >= (3, 2, 0): + self.assertIn('guest_nice', fields) + else: + self.assertNotIn('guest_nice', fields) + + @unittest.skipIf(not os.path.exists("/sys/devices/system/cpu/online"), + "/sys/devices/system/cpu/online does not exist") + def test_cpu_count_logical_w_sysdev_cpu_online(self): + with open("/sys/devices/system/cpu/online") as f: + value = f.read().strip() + if "-" in str(value): + value = int(value.split('-')[1]) + 1 + self.assertEqual(psutil.cpu_count(), value) + + @unittest.skipIf(not os.path.exists("/sys/devices/system/cpu"), + "/sys/devices/system/cpu does not exist") + def test_cpu_count_logical_w_sysdev_cpu_num(self): + ls = os.listdir("/sys/devices/system/cpu") + count = len([x for x in ls if re.search(r"cpu\d+$", x) is not None]) + self.assertEqual(psutil.cpu_count(), count) + + @unittest.skipIf(not which("nproc"), "nproc utility not available") + def test_cpu_count_logical_w_nproc(self): + num = int(sh("nproc --all")) + self.assertEqual(psutil.cpu_count(logical=True), num) + + @unittest.skipIf(not which("lscpu"), "lscpu utility not available") + def test_cpu_count_logical_w_lscpu(self): + out = sh("lscpu -p") + num = len([x for x in out.split('\n') if not x.startswith('#')]) + self.assertEqual(psutil.cpu_count(logical=True), num) + + def test_cpu_count_logical_mocked(self): + import psutil._pslinux + original = psutil._pslinux.cpu_count_logical() + # Here we want to mock os.sysconf("SC_NPROCESSORS_ONLN") in + # order to cause the parsing of /proc/cpuinfo and /proc/stat. 
+        with mock.patch(
+                'psutil._pslinux.os.sysconf', side_effect=ValueError) as m:
+            self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
+            assert m.called
+
+        # Let's have open() return empty data and make sure None is
+        # returned ('cause we mimic os.cpu_count()).
+        with mock.patch('psutil._pslinux.open', create=True) as m:
+            self.assertIsNone(psutil._pslinux.cpu_count_logical())
+            self.assertEqual(m.call_count, 2)
+            # /proc/stat should be the last one
+            self.assertEqual(m.call_args[0][0], '/proc/stat')
+
+        # Let's push this a bit further and make sure /proc/cpuinfo
+        # parsing works as expected.
+        with open('/proc/cpuinfo', 'rb') as f:
+            cpuinfo_data = f.read()
+        fake_file = io.BytesIO(cpuinfo_data)
+        with mock.patch('psutil._pslinux.open',
+                        return_value=fake_file, create=True) as m:
+            self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
+
+        # Finally, let's make /proc/cpuinfo return meaningless data;
+        # this way we'll fall back on relying on /proc/stat
+        def open_mock(name, *args, **kwargs):
+            if name.startswith('/proc/cpuinfo'):
+                return io.BytesIO(b"")
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock, create=True):
+            self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
+
+    def test_cpu_count_physical_mocked(self):
+        # Have open() return empty data and make sure None is returned
+        # ('cause we want to mimic os.cpu_count())
+        with mock.patch('psutil._pslinux.open', create=True) as m:
+            self.assertIsNone(psutil._pslinux.cpu_count_physical())
+            assert m.called
+
+    @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+    def test_cpu_freq_no_result(self):
+        with mock.patch("psutil._pslinux.glob.glob", return_value=[]):
+            self.assertIsNone(psutil.cpu_freq())
+
+    @unittest.skipIf(TRAVIS, "fails on Travis")
+    @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+    def test_cpu_freq_use_second_file(self):
+        # https://github.com/giampaolo/psutil/issues/981
+        def glob_mock(pattern):
+            if pattern.startswith("/sys/devices/system/cpu/cpufreq/policy"):
+                flags.append(None)
+                return []
+            else:
+                flags.append(None)
+                return orig_glob(pattern)
+
+        flags = []
+        orig_glob = glob.glob
+        with mock.patch("psutil._pslinux.glob.glob", side_effect=glob_mock,
+                        create=True):
+            assert psutil.cpu_freq()
+            self.assertEqual(len(flags), 2)
+
+    @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+    def test_cpu_freq_emulate_data(self):
+        def open_mock(name, *args, **kwargs):
+            if name.endswith('/scaling_cur_freq'):
+                return io.BytesIO(b"500000")
+            elif name.endswith('/scaling_min_freq'):
+                return io.BytesIO(b"600000")
+            elif name.endswith('/scaling_max_freq'):
+                return io.BytesIO(b"700000")
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock):
+            with mock.patch(
+                    'glob.glob',
+                    return_value=['/sys/devices/system/cpu/cpufreq/policy0']):
+                freq = psutil.cpu_freq()
+                self.assertEqual(freq.current, 500.0)
+                self.assertEqual(freq.min, 600.0)
+                self.assertEqual(freq.max, 700.0)
+
+    @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+    def test_cpu_freq_emulate_multi_cpu(self):
+        def open_mock(name, *args, **kwargs):
+            if name.endswith('/scaling_cur_freq'):
+                return io.BytesIO(b"100000")
+            elif name.endswith('/scaling_min_freq'):
+                return io.BytesIO(b"200000")
+            elif name.endswith('/scaling_max_freq'):
+                return
io.BytesIO(b"300000") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + policies = ['/sys/devices/system/cpu/cpufreq/policy0', + '/sys/devices/system/cpu/cpufreq/policy1', + '/sys/devices/system/cpu/cpufreq/policy2'] + with mock.patch(patch_point, side_effect=open_mock): + with mock.patch('glob.glob', return_value=policies): + freq = psutil.cpu_freq() + self.assertEqual(freq.current, 100.0) + self.assertEqual(freq.min, 200.0) + self.assertEqual(freq.max, 300.0) + + @unittest.skipIf(TRAVIS, "fails on Travis") + @unittest.skipIf(not HAS_CPU_FREQ, "not supported") + def test_cpu_freq_no_scaling_cur_freq_file(self): + # See: https://github.com/giampaolo/psutil/issues/1071 + def open_mock(name, *args, **kwargs): + if name.endswith('/scaling_cur_freq'): + raise IOError(errno.ENOENT, "") + elif name.endswith('/cpuinfo_cur_freq'): + return io.BytesIO(b"200000") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + policies = ['/sys/devices/system/cpu/cpufreq/policy0', + '/sys/devices/system/cpu/cpufreq/policy1', + '/sys/devices/system/cpu/cpufreq/policy2'] + + with mock.patch(patch_point, side_effect=open_mock): + with mock.patch('glob.glob', return_value=policies): + freq = psutil.cpu_freq() + self.assertEqual(freq.current, 200) + + # Also test that NotImplementedError is raised in case no + # current freq file is present. + + def open_mock(name, *args, **kwargs): + if name.endswith('/scaling_cur_freq'): + raise IOError(errno.ENOENT, "") + elif name.endswith('/cpuinfo_cur_freq'): + raise IOError(errno.ENOENT, "") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + with mock.patch('glob.glob', return_value=policies): + self.assertRaises(NotImplementedError, psutil.cpu_freq) + + +# ===================================================================== +# --- system CPU stats +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSystemCPUStats(unittest.TestCase): + + @unittest.skipIf(TRAVIS, "fails on Travis") + def test_ctx_switches(self): + vmstat_value = vmstat("context switches") + psutil_value = psutil.cpu_stats().ctx_switches + self.assertAlmostEqual(vmstat_value, psutil_value, delta=500) + + @unittest.skipIf(TRAVIS, "fails on Travis") + def test_interrupts(self): + vmstat_value = vmstat("interrupts") + psutil_value = psutil.cpu_stats().interrupts + self.assertAlmostEqual(vmstat_value, psutil_value, delta=500) + + +# ===================================================================== +# --- system network +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSystemNetwork(unittest.TestCase): + + def test_net_if_addrs_ips(self): + for name, addrs in psutil.net_if_addrs().items(): + for addr in addrs: + if addr.family == psutil.AF_LINK: + self.assertEqual(addr.address, get_mac_address(name)) + elif addr.family == socket.AF_INET: + self.assertEqual(addr.address, get_ipv4_address(name)) + # TODO: test for AF_INET6 family + + def test_net_if_stats(self): + for name, stats in psutil.net_if_stats().items(): + try: + out = sh("ifconfig %s" % name) + except RuntimeError: + pass + else: + # Not always reliable. 
+ # self.assertEqual(stats.isup, 'RUNNING' in out, msg=out) + self.assertEqual(stats.mtu, + int(re.findall(r'(?i)MTU[: ](\d+)', out)[0])) + + @retry_before_failing() + def test_net_io_counters(self): + def ifconfig(nic): + ret = {} + out = sh("ifconfig %s" % name) + ret['packets_recv'] = int( + re.findall(r'RX packets[: ](\d+)', out)[0]) + ret['packets_sent'] = int( + re.findall(r'TX packets[: ](\d+)', out)[0]) + ret['errin'] = int(re.findall(r'errors[: ](\d+)', out)[0]) + ret['errout'] = int(re.findall(r'errors[: ](\d+)', out)[1]) + ret['dropin'] = int(re.findall(r'dropped[: ](\d+)', out)[0]) + ret['dropout'] = int(re.findall(r'dropped[: ](\d+)', out)[1]) + ret['bytes_recv'] = int( + re.findall(r'RX (?:packets \d+ +)?bytes[: ](\d+)', out)[0]) + ret['bytes_sent'] = int( + re.findall(r'TX (?:packets \d+ +)?bytes[: ](\d+)', out)[0]) + return ret + + nio = psutil.net_io_counters(pernic=True, nowrap=False) + for name, stats in nio.items(): + try: + ifconfig_ret = ifconfig(name) + except RuntimeError: + continue + self.assertAlmostEqual( + stats.bytes_recv, ifconfig_ret['bytes_recv'], delta=1024 * 5) + self.assertAlmostEqual( + stats.bytes_sent, ifconfig_ret['bytes_sent'], delta=1024 * 5) + self.assertAlmostEqual( + stats.packets_recv, ifconfig_ret['packets_recv'], delta=1024) + self.assertAlmostEqual( + stats.packets_sent, ifconfig_ret['packets_sent'], delta=1024) + self.assertAlmostEqual( + stats.errin, ifconfig_ret['errin'], delta=10) + self.assertAlmostEqual( + stats.errout, ifconfig_ret['errout'], delta=10) + self.assertAlmostEqual( + stats.dropin, ifconfig_ret['dropin'], delta=10) + self.assertAlmostEqual( + stats.dropout, ifconfig_ret['dropout'], delta=10) + + @unittest.skipIf(not which('ip'), "'ip' utility not available") + @unittest.skipIf(TRAVIS, "skipped on Travis") + def test_net_if_names(self): + out = sh("ip addr").strip() + nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x] + found = 0 + for line in out.split('\n'): + line = line.strip() + if re.search(r"^\d+:", line): + found += 1 + name = line.split(':')[1].strip() + self.assertIn(name, nics) + self.assertEqual(len(nics), found, msg="%s\n---\n%s" % ( + pprint.pformat(nics), out)) + + @mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError) + @mock.patch('psutil._pslinux.supports_ipv6', return_value=False) + def test_net_connections_ipv6_unsupported(self, supports_ipv6, inet_ntop): + # see: https://github.com/giampaolo/psutil/issues/623 + try: + s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + self.addCleanup(s.close) + s.bind(("::1", 0)) + except socket.error: + pass + psutil.net_connections(kind='inet6') + + def test_net_connections_mocked(self): + def open_mock(name, *args, **kwargs): + if name == '/proc/net/unix': + return io.StringIO(textwrap.dedent(u"""\ + 0: 00000003 000 000 0001 03 462170 @/tmp/dbus-Qw2hMPIU3n + 0: 00000003 000 000 0001 03 35010 @/tmp/dbus-tB2X8h69BQ + 0: 00000003 000 000 0001 03 34424 @/tmp/dbus-cHy80Y8O + 000000000000000000000000000000000000000000000000000000 + """)) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + psutil.net_connections(kind='unix') + assert m.called + + +# ===================================================================== +# --- system disk +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSystemDisks(unittest.TestCase): 
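+    # Validates psutil.disk_partitions(), disk_usage() and disk_io_counters()
+    # against 'df' output and against mocked /proc/partitions and
+    # /proc/diskstats content.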
+ + @unittest.skipIf(not hasattr(os, 'statvfs'), "os.statvfs() not available") + @skip_on_not_implemented() + def test_disk_partitions_and_usage(self): + # test psutil.disk_usage() and psutil.disk_partitions() + # against "df -a" + def df(path): + out = sh('df -P -B 1 "%s"' % path).strip() + lines = out.split('\n') + lines.pop(0) + line = lines.pop(0) + dev, total, used, free = line.split()[:4] + if dev == 'none': + dev = '' + total, used, free = int(total), int(used), int(free) + return dev, total, used, free + + for part in psutil.disk_partitions(all=False): + usage = psutil.disk_usage(part.mountpoint) + dev, total, used, free = df(part.mountpoint) + self.assertEqual(usage.total, total) + # 10 MB tollerance + if abs(usage.free - free) > 10 * 1024 * 1024: + self.fail("psutil=%s, df=%s" % (usage.free, free)) + if abs(usage.used - used) > 10 * 1024 * 1024: + self.fail("psutil=%s, df=%s" % (usage.used, used)) + + def test_disk_partitions_mocked(self): + # Test that ZFS partitions are returned. + with open("/proc/filesystems", "r") as f: + data = f.read() + if 'zfs' in data: + for part in psutil.disk_partitions(): + if part.fstype == 'zfs': + break + else: + self.fail("couldn't find any ZFS partition") + else: + # No ZFS partitions on this system. Let's fake one. + fake_file = io.StringIO(u("nodev\tzfs\n")) + with mock.patch('psutil._pslinux.open', + return_value=fake_file, create=True) as m1: + with mock.patch( + 'psutil._pslinux.cext.disk_partitions', + return_value=[('/dev/sdb3', '/', 'zfs', 'rw')]) as m2: + ret = psutil.disk_partitions() + assert m1.called + assert m2.called + assert ret + self.assertEqual(ret[0].fstype, 'zfs') + + def test_disk_io_counters_kernel_2_4_mocked(self): + # Tests /proc/diskstats parsing format for 2.4 kernels, see: + # https://github.com/giampaolo/psutil/issues/767 + def open_mock(name, *args, **kwargs): + if name == '/proc/partitions': + return io.StringIO(textwrap.dedent(u"""\ + major minor #blocks name + + 8 0 488386584 hda + """)) + elif name == '/proc/diskstats': + return io.StringIO( + u(" 3 0 1 hda 2 3 4 5 6 7 8 9 10 11 12")) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + ret = psutil.disk_io_counters(nowrap=False) + assert m.called + self.assertEqual(ret.read_count, 1) + self.assertEqual(ret.read_merged_count, 2) + self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE) + self.assertEqual(ret.read_time, 4) + self.assertEqual(ret.write_count, 5) + self.assertEqual(ret.write_merged_count, 6) + self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE) + self.assertEqual(ret.write_time, 8) + self.assertEqual(ret.busy_time, 10) + + def test_disk_io_counters_kernel_2_6_full_mocked(self): + # Tests /proc/diskstats parsing format for 2.6 kernels, + # lines reporting all metrics: + # https://github.com/giampaolo/psutil/issues/767 + def open_mock(name, *args, **kwargs): + if name == '/proc/partitions': + return io.StringIO(textwrap.dedent(u"""\ + major minor #blocks name + + 8 0 488386584 hda + """)) + elif name == '/proc/diskstats': + return io.StringIO( + u(" 3 0 hda 1 2 3 4 5 6 7 8 9 10 11")) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + ret = psutil.disk_io_counters(nowrap=False) + assert m.called + self.assertEqual(ret.read_count, 1) + self.assertEqual(ret.read_merged_count, 
2) + self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE) + self.assertEqual(ret.read_time, 4) + self.assertEqual(ret.write_count, 5) + self.assertEqual(ret.write_merged_count, 6) + self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE) + self.assertEqual(ret.write_time, 8) + self.assertEqual(ret.busy_time, 10) + + def test_disk_io_counters_kernel_2_6_limited_mocked(self): + # Tests /proc/diskstats parsing format for 2.6 kernels, + # where one line of /proc/partitions return a limited + # amount of metrics when it bumps into a partition + # (instead of a disk). See: + # https://github.com/giampaolo/psutil/issues/767 + def open_mock(name, *args, **kwargs): + if name == '/proc/partitions': + return io.StringIO(textwrap.dedent(u"""\ + major minor #blocks name + + 8 0 488386584 hda + """)) + elif name == '/proc/diskstats': + return io.StringIO( + u(" 3 1 hda 1 2 3 4")) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + ret = psutil.disk_io_counters(nowrap=False) + assert m.called + self.assertEqual(ret.read_count, 1) + self.assertEqual(ret.read_bytes, 2 * SECTOR_SIZE) + self.assertEqual(ret.write_count, 3) + self.assertEqual(ret.write_bytes, 4 * SECTOR_SIZE) + + self.assertEqual(ret.read_merged_count, 0) + self.assertEqual(ret.read_time, 0) + self.assertEqual(ret.write_merged_count, 0) + self.assertEqual(ret.write_time, 0) + self.assertEqual(ret.busy_time, 0) + + +# ===================================================================== +# --- misc +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestMisc(unittest.TestCase): + + def test_boot_time(self): + vmstat_value = vmstat('boot time') + psutil_value = psutil.boot_time() + self.assertEqual(int(vmstat_value), int(psutil_value)) + + @mock.patch('psutil.traceback.print_exc') + def test_no_procfs_on_import(self, tb): + my_procfs = tempfile.mkdtemp() + + with open(os.path.join(my_procfs, 'stat'), 'w') as f: + f.write('cpu 0 0 0 0 0 0 0 0 0 0\n') + f.write('cpu0 0 0 0 0 0 0 0 0 0 0\n') + f.write('cpu1 0 0 0 0 0 0 0 0 0 0\n') + + try: + orig_open = open + + def open_mock(name, *args, **kwargs): + if name.startswith('/proc'): + raise IOError(errno.ENOENT, 'rejecting access for test') + return orig_open(name, *args, **kwargs) + + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + reload_module(psutil) + assert tb.called + + self.assertRaises(IOError, psutil.cpu_times) + self.assertRaises(IOError, psutil.cpu_times, percpu=True) + self.assertRaises(IOError, psutil.cpu_percent) + self.assertRaises(IOError, psutil.cpu_percent, percpu=True) + self.assertRaises(IOError, psutil.cpu_times_percent) + self.assertRaises( + IOError, psutil.cpu_times_percent, percpu=True) + + psutil.set_procfs_path(my_procfs) + + self.assertEqual(psutil.cpu_percent(), 0) + self.assertEqual(sum(psutil.cpu_times_percent()), 0) + + # since we don't know the number of CPUs at import time, + # we awkwardly say there are none until the second call + per_cpu_percent = psutil.cpu_percent(percpu=True) + self.assertEqual(sum(per_cpu_percent), 0) + + # ditto awkward length + per_cpu_times_percent = psutil.cpu_times_percent(percpu=True) + self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0) + + # much user, very busy + with open(os.path.join(my_procfs, 'stat'), 'w') as f: + f.write('cpu 1 0 0 0 0 0 0 0 0 
0\n') + f.write('cpu0 1 0 0 0 0 0 0 0 0 0\n') + f.write('cpu1 1 0 0 0 0 0 0 0 0 0\n') + + self.assertNotEqual(psutil.cpu_percent(), 0) + self.assertNotEqual( + sum(psutil.cpu_percent(percpu=True)), 0) + self.assertNotEqual(sum(psutil.cpu_times_percent()), 0) + self.assertNotEqual( + sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0) + finally: + psutil.set_procfs_path("/proc") + shutil.rmtree(my_procfs) + reload_module(psutil) + + self.assertEqual(psutil.PROCFS_PATH, '/proc') + + def test_boot_time_mocked(self): + with mock.patch('psutil._pslinux.open', create=True) as m: + self.assertRaises( + RuntimeError, + psutil._pslinux.boot_time) + assert m.called + + def test_users_mocked(self): + # Make sure ':0' and ':0.0' (returned by C ext) are converted + # to 'localhost'. + with mock.patch('psutil._pslinux.cext.users', + return_value=[('giampaolo', 'pts/2', ':0', + 1436573184.0, True, 2)]) as m: + self.assertEqual(psutil.users()[0].host, 'localhost') + assert m.called + with mock.patch('psutil._pslinux.cext.users', + return_value=[('giampaolo', 'pts/2', ':0.0', + 1436573184.0, True, 2)]) as m: + self.assertEqual(psutil.users()[0].host, 'localhost') + assert m.called + # ...otherwise it should be returned as-is + with mock.patch('psutil._pslinux.cext.users', + return_value=[('giampaolo', 'pts/2', 'foo', + 1436573184.0, True, 2)]) as m: + self.assertEqual(psutil.users()[0].host, 'foo') + assert m.called + + def test_procfs_path(self): + tdir = tempfile.mkdtemp() + try: + psutil.set_procfs_path(tdir) + self.assertRaises(IOError, psutil.virtual_memory) + self.assertRaises(IOError, psutil.cpu_times) + self.assertRaises(IOError, psutil.cpu_times, percpu=True) + self.assertRaises(IOError, psutil.boot_time) + # self.assertRaises(IOError, psutil.pids) + self.assertRaises(IOError, psutil.net_connections) + self.assertRaises(IOError, psutil.net_io_counters) + self.assertRaises(IOError, psutil.net_if_stats) + self.assertRaises(IOError, psutil.disk_io_counters) + self.assertRaises(IOError, psutil.disk_partitions) + self.assertRaises(psutil.NoSuchProcess, psutil.Process) + finally: + psutil.set_procfs_path("/proc") + os.rmdir(tdir) + + def test_sector_size_mock(self): + # Test SECTOR_SIZE fallback in case 'hw_sector_size' file + # does not exist. + def open_mock(name, *args, **kwargs): + if PY3 and isinstance(name, bytes): + name = name.decode() + if "hw_sector_size" in name: + flag.append(None) + raise IOError(errno.ENOENT, '') + else: + return orig_open(name, *args, **kwargs) + + flag = [] + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + psutil.disk_io_counters() + assert flag + + def test_issue_687(self): + # In case of thread ID: + # - pid_exists() is supposed to return False + # - Process(tid) is supposed to work + # - pids() should not return the TID + # See: https://github.com/giampaolo/psutil/issues/687 + t = ThreadTask() + t.start() + try: + p = psutil.Process() + tid = p.threads()[1].id + assert not psutil.pid_exists(tid), tid + pt = psutil.Process(tid) + pt.as_dict() + self.assertNotIn(tid, psutil.pids()) + finally: + t.stop() + + def test_pid_exists_no_proc_status(self): + # Internally pid_exists relies on /proc/{pid}/status. + # Emulate a case where this file is empty in which case + # psutil is supposed to fall back on using pids(). 
+ def open_mock(name, *args, **kwargs): + if name == "/proc/%s/status" % os.getpid(): + return io.StringIO(u("")) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + assert psutil.pid_exists(os.getpid()) + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +@unittest.skipIf(not HAS_BATTERY, "no battery") +class TestSensorsBattery(unittest.TestCase): + + @unittest.skipIf(not which("acpi"), "acpi utility not available") + def test_percent(self): + out = sh("acpi -b") + acpi_value = int(out.split(",")[1].strip().replace('%', '')) + psutil_value = psutil.sensors_battery().percent + self.assertAlmostEqual(acpi_value, psutil_value, delta=1) + + @unittest.skipIf(not which("acpi"), "acpi utility not available") + def test_power_plugged(self): + out = sh("acpi -b") + if 'unknown' in out.lower(): + return unittest.skip("acpi output not reliable") + if 'discharging at zero rate' in out: + plugged = True + else: + plugged = "Charging" in out.split('\n')[0] + self.assertEqual(psutil.sensors_battery().power_plugged, plugged) + + def test_emulate_power_plugged(self): + # Pretend the AC power cable is connected. + def open_mock(name, *args, **kwargs): + if name.endswith("AC0/online") or name.endswith("AC/online"): + return io.BytesIO(b"1") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + self.assertEqual(psutil.sensors_battery().power_plugged, True) + self.assertEqual( + psutil.sensors_battery().secsleft, psutil.POWER_TIME_UNLIMITED) + assert m.called + + def test_emulate_power_plugged_2(self): + # Same as above but pretend /AC0/online does not exist in which + # case code relies on /status file. + def open_mock(name, *args, **kwargs): + if name.endswith("AC0/online") or name.endswith("AC/online"): + raise IOError(errno.ENOENT, "") + elif name.endswith("/status"): + return io.StringIO(u("charging")) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + self.assertEqual(psutil.sensors_battery().power_plugged, True) + assert m.called + + def test_emulate_power_not_plugged(self): + # Pretend the AC power cable is not connected. + def open_mock(name, *args, **kwargs): + if name.endswith("AC0/online") or name.endswith("AC/online"): + return io.BytesIO(b"0") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + self.assertEqual(psutil.sensors_battery().power_plugged, False) + assert m.called + + def test_emulate_power_not_plugged_2(self): + # Same as above but pretend /AC0/online does not exist in which + # case code relies on /status file. 
+        def open_mock(name, *args, **kwargs):
+            if name.endswith("AC0/online") or name.endswith("AC/online"):
+                raise IOError(errno.ENOENT, "")
+            elif name.endswith("/status"):
+                return io.StringIO(u("discharging"))
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock) as m:
+            self.assertEqual(psutil.sensors_battery().power_plugged, False)
+            assert m.called
+
+    def test_emulate_power_undetermined(self):
+        # Pretend we can't know whether the AC power cable is
+        # connected (assert fallback to None).
+        def open_mock(name, *args, **kwargs):
+            if name.startswith("/sys/class/power_supply/AC0/online") or \
+                    name.startswith("/sys/class/power_supply/AC/online"):
+                raise IOError(errno.ENOENT, "")
+            elif name.startswith("/sys/class/power_supply/BAT0/status"):
+                return io.BytesIO(b"???")
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock) as m:
+            self.assertIsNone(psutil.sensors_battery().power_plugged)
+            assert m.called
+
+    def test_emulate_no_base_files(self):
+        # Emulate a case where base metrics files are not present,
+        # in which case we're supposed to get None.
+        def open_mock(name, *args, **kwargs):
+            if name.startswith("/sys/class/power_supply/BAT0/energy_now") or \
+                    name.startswith("/sys/class/power_supply/BAT0/charge_now"):
+                raise IOError(errno.ENOENT, "")
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock) as m:
+            self.assertIsNone(psutil.sensors_battery())
+            assert m.called
+
+    def test_emulate_energy_full_0(self):
+        # Emulate a case where the energy_full file returns 0.
+        def open_mock(name, *args, **kwargs):
+            if name.startswith("/sys/class/power_supply/BAT0/energy_full"):
+                return io.BytesIO(b"0")
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock) as m:
+            self.assertEqual(psutil.sensors_battery().percent, 0)
+            assert m.called
+
+    def test_emulate_energy_full_not_avail(self):
+        # Emulate a case where the energy_full file does not exist.
+        # Expected fallback on /capacity.
+        def open_mock(name, *args, **kwargs):
+            energy_full = "/sys/class/power_supply/BAT0/energy_full"
+            charge_full = "/sys/class/power_supply/BAT0/charge_full"
+            if name.startswith(energy_full) or name.startswith(charge_full):
+                raise IOError(errno.ENOENT, "")
+            elif name.startswith("/sys/class/power_supply/BAT0/capacity"):
+                return io.BytesIO(b"88")
+            else:
+                return orig_open(name, *args, **kwargs)
+
+        orig_open = open
+        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+        with mock.patch(patch_point, side_effect=open_mock) as m:
+            self.assertEqual(psutil.sensors_battery().percent, 88)
+            assert m.called
+
+    def test_emulate_no_ac0_online(self):
+        # Emulate a case where the /AC0/online file does not exist.
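+        # The mock below only hides the file from os.path.exists();
+        # sensors_battery() is still expected to complete without raising.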
+ def path_exists_mock(name): + if name.startswith("/sys/class/power_supply/AC0/online"): + return False + else: + return orig_path_exists(name) + + orig_path_exists = os.path.exists + with mock.patch("psutil._pslinux.os.path.exists", + side_effect=path_exists_mock) as m: + psutil.sensors_battery() + assert m.called + + def test_emulate_no_power(self): + # Emulate a case where /AC0/online file nor /BAT0/status exist. + def open_mock(name, *args, **kwargs): + if name.startswith("/sys/class/power_supply/AC/online") or \ + name.startswith("/sys/class/power_supply/AC0/online") or \ + name.startswith("/sys/class/power_supply/BAT0/status"): + raise IOError(errno.ENOENT, "") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + self.assertIsNone(psutil.sensors_battery().power_plugged) + assert m.called + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSensorsTemperatures(unittest.TestCase): + + @unittest.skipIf(TRAVIS, "unreliable on TRAVIS") + def test_emulate_eio_error(self): + def open_mock(name, *args, **kwargs): + if name.endswith("_input"): + raise OSError(errno.EIO, "") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + with warnings.catch_warnings(record=True) as ws: + self.assertEqual(psutil.sensors_temperatures(), {}) + assert m.called + self.assertIn("ignoring", str(ws[0].message)) + + def test_emulate_data(self): + def open_mock(name, *args, **kwargs): + if name.endswith('/name'): + return io.StringIO(u("name")) + elif name.endswith('/temp1_label'): + return io.StringIO(u("label")) + elif name.endswith('/temp1_input'): + return io.BytesIO(b"30000") + elif name.endswith('/temp1_max'): + return io.BytesIO(b"40000") + elif name.endswith('/temp1_crit'): + return io.BytesIO(b"50000") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + with mock.patch('glob.glob', + return_value=['/sys/class/hwmon/hwmon0/temp1']): + temp = psutil.sensors_temperatures()['name'][0] + self.assertEqual(temp.label, 'label') + self.assertEqual(temp.current, 30.0) + self.assertEqual(temp.high, 40.0) + self.assertEqual(temp.critical, 50.0) + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestSensorsFans(unittest.TestCase): + + def test_emulate_data(self): + def open_mock(name, *args, **kwargs): + if name.endswith('/name'): + return io.StringIO(u("name")) + elif name.endswith('/fan1_label'): + return io.StringIO(u("label")) + elif name.endswith('/fan1_input'): + return io.StringIO(u("2000")) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + with mock.patch('glob.glob', + return_value=['/sys/class/hwmon/hwmon2/fan1']): + fan = psutil.sensors_fans()['name'][0] + self.assertEqual(fan.label, 'label') + self.assertEqual(fan.current, 2000) + + +# ===================================================================== +# --- test process +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestProcess(unittest.TestCase): + + def setUp(self): + safe_rmpath(TESTFN) + + tearDown = setUp 
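+    # (setUp doubles as tearDown: each test starts and ends with TESTFN
+    # removed.)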
+ + def test_memory_full_info(self): + src = textwrap.dedent(""" + import time + with open("%s", "w") as f: + time.sleep(10) + """ % TESTFN) + sproc = pyrun(src) + self.addCleanup(reap_children) + call_until(lambda: os.listdir('.'), "'%s' not in ret" % TESTFN) + p = psutil.Process(sproc.pid) + time.sleep(.1) + mem = p.memory_full_info() + maps = p.memory_maps(grouped=False) + self.assertAlmostEqual( + mem.uss, sum([x.private_dirty + x.private_clean for x in maps]), + delta=4096) + self.assertAlmostEqual( + mem.pss, sum([x.pss for x in maps]), delta=4096) + self.assertAlmostEqual( + mem.swap, sum([x.swap for x in maps]), delta=4096) + + # On PYPY file descriptors are not closed fast enough. + @unittest.skipIf(PYPY, "unreliable on PYPY") + def test_open_files_mode(self): + def get_test_file(): + p = psutil.Process() + giveup_at = time.time() + 2 + while True: + for file in p.open_files(): + if file.path == os.path.abspath(TESTFN): + return file + elif time.time() > giveup_at: + break + raise RuntimeError("timeout looking for test file") + + # + with open(TESTFN, "w"): + self.assertEqual(get_test_file().mode, "w") + with open(TESTFN, "r"): + self.assertEqual(get_test_file().mode, "r") + with open(TESTFN, "a"): + self.assertEqual(get_test_file().mode, "a") + # + with open(TESTFN, "r+"): + self.assertEqual(get_test_file().mode, "r+") + with open(TESTFN, "w+"): + self.assertEqual(get_test_file().mode, "r+") + with open(TESTFN, "a+"): + self.assertEqual(get_test_file().mode, "a+") + # note: "x" bit is not supported + if PY3: + safe_rmpath(TESTFN) + with open(TESTFN, "x"): + self.assertEqual(get_test_file().mode, "w") + safe_rmpath(TESTFN) + with open(TESTFN, "x+"): + self.assertEqual(get_test_file().mode, "r+") + + def test_open_files_file_gone(self): + # simulates a file which gets deleted during open_files() + # execution + p = psutil.Process() + files = p.open_files() + with tempfile.NamedTemporaryFile(): + # give the kernel some time to see the new file + call_until(p.open_files, "len(ret) != %i" % len(files)) + with mock.patch('psutil._pslinux.os.readlink', + side_effect=OSError(errno.ENOENT, "")) as m: + files = p.open_files() + assert not files + assert m.called + # also simulate the case where os.readlink() returns EINVAL + # in which case psutil is supposed to 'continue' + with mock.patch('psutil._pslinux.os.readlink', + side_effect=OSError(errno.EINVAL, "")) as m: + self.assertEqual(p.open_files(), []) + assert m.called + + def test_open_files_fd_gone(self): + # Simulate a case where /proc/{pid}/fdinfo/{fd} disappears + # while iterating through fds. + # https://travis-ci.org/giampaolo/psutil/jobs/225694530 + p = psutil.Process() + files = p.open_files() + with tempfile.NamedTemporaryFile(): + # give the kernel some time to see the new file + call_until(p.open_files, "len(ret) != %i" % len(files)) + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, + side_effect=IOError(errno.ENOENT, "")) as m: + files = p.open_files() + assert not files + assert m.called + + # --- mocked tests + + def test_terminal_mocked(self): + with mock.patch('psutil._pslinux._psposix.get_terminal_map', + return_value={}) as m: + self.assertIsNone(psutil._pslinux.Process(os.getpid()).terminal()) + assert m.called + + # TODO: re-enable this test. 
+ # def test_num_ctx_switches_mocked(self): + # with mock.patch('psutil._pslinux.open', create=True) as m: + # self.assertRaises( + # NotImplementedError, + # psutil._pslinux.Process(os.getpid()).num_ctx_switches) + # assert m.called + + def test_cmdline_mocked(self): + # see: https://github.com/giampaolo/psutil/issues/639 + p = psutil.Process() + fake_file = io.StringIO(u('foo\x00bar\x00')) + with mock.patch('psutil._pslinux.open', + return_value=fake_file, create=True) as m: + self.assertEqual(p.cmdline(), ['foo', 'bar']) + assert m.called + fake_file = io.StringIO(u('foo\x00bar\x00\x00')) + with mock.patch('psutil._pslinux.open', + return_value=fake_file, create=True) as m: + self.assertEqual(p.cmdline(), ['foo', 'bar', '']) + assert m.called + + def test_cmdline_spaces_mocked(self): + # see: https://github.com/giampaolo/psutil/issues/1179 + p = psutil.Process() + fake_file = io.StringIO(u('foo bar ')) + with mock.patch('psutil._pslinux.open', + return_value=fake_file, create=True) as m: + self.assertEqual(p.cmdline(), ['foo', 'bar']) + assert m.called + fake_file = io.StringIO(u('foo bar ')) + with mock.patch('psutil._pslinux.open', + return_value=fake_file, create=True) as m: + self.assertEqual(p.cmdline(), ['foo', 'bar', '']) + assert m.called + + def test_readlink_path_deleted_mocked(self): + with mock.patch('psutil._pslinux.os.readlink', + return_value='/home/foo (deleted)'): + self.assertEqual(psutil.Process().exe(), "/home/foo") + self.assertEqual(psutil.Process().cwd(), "/home/foo") + + def test_threads_mocked(self): + # Test the case where os.listdir() returns a file (thread) + # which no longer exists by the time we open() it (race + # condition). threads() is supposed to ignore that instead + # of raising NSP. + def open_mock(name, *args, **kwargs): + if name.startswith('/proc/%s/task' % os.getpid()): + raise IOError(errno.ENOENT, "") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + ret = psutil.Process().threads() + assert m.called + self.assertEqual(ret, []) + + # ...but if it bumps into something != ENOENT we want an + # exception. + def open_mock(name, *args, **kwargs): + if name.startswith('/proc/%s/task' % os.getpid()): + raise IOError(errno.EPERM, "") + else: + return orig_open(name, *args, **kwargs) + + with mock.patch(patch_point, side_effect=open_mock): + self.assertRaises(psutil.AccessDenied, psutil.Process().threads) + + def test_exe_mocked(self): + with mock.patch('psutil._pslinux.readlink', + side_effect=OSError(errno.ENOENT, "")) as m1: + with mock.patch('psutil.Process.cmdline', + side_effect=psutil.AccessDenied(0, "")) as m2: + # No such file error; might be raised also if /proc/pid/exe + # path actually exists for system processes with low pids + # (about 0-20). In this case psutil is supposed to return + # an empty string. + ret = psutil.Process().exe() + assert m1.called + assert m2.called + self.assertEqual(ret, "") + + # ...but if /proc/pid no longer exist we're supposed to treat + # it as an alias for zombie process + with mock.patch('psutil._pslinux.os.path.lexists', + return_value=False): + self.assertRaises( + psutil.ZombieProcess, psutil.Process().exe) + + def test_issue_1014(self): + # Emulates a case where smaps file does not exist. In this case + # wrap_exception decorator should not raise NoSuchProcess. 
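+        # Instead, the original ENOENT IOError is expected to propagate,
+        # which is what the assertRaises block below verifies.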
+ def open_mock(name, *args, **kwargs): + if name.startswith('/proc/%s/smaps' % os.getpid()): + raise IOError(errno.ENOENT, "") + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock) as m: + p = psutil.Process() + with self.assertRaises(IOError) as err: + p.memory_maps() + self.assertEqual(err.exception.errno, errno.ENOENT) + assert m.called + + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_zombie(self): + # Emulate a case where rlimit() raises ENOSYS, which may + # happen in case of zombie process: + # https://travis-ci.org/giampaolo/psutil/jobs/51368273 + with mock.patch("psutil._pslinux.cext.linux_prlimit", + side_effect=OSError(errno.ENOSYS, "")) as m: + p = psutil.Process() + p.name() + with self.assertRaises(psutil.ZombieProcess) as exc: + p.rlimit(psutil.RLIMIT_NOFILE) + assert m.called + self.assertEqual(exc.exception.pid, p.pid) + self.assertEqual(exc.exception.name, p.name()) + + def test_cwd_zombie(self): + with mock.patch("psutil._pslinux.os.readlink", + side_effect=OSError(errno.ENOENT, "")) as m: + p = psutil.Process() + p.name() + with self.assertRaises(psutil.ZombieProcess) as exc: + p.cwd() + assert m.called + self.assertEqual(exc.exception.pid, p.pid) + self.assertEqual(exc.exception.name, p.name()) + + def test_stat_file_parsing(self): + from psutil._pslinux import CLOCK_TICKS + + def open_mock(name, *args, **kwargs): + if name.startswith('/proc/%s/stat' % os.getpid()): + args = [ + "0", # pid + "(cat)", # name + "Z", # status + "1", # ppid + "0", # pgrp + "0", # session + "0", # tty + "0", # tpgid + "0", # flags + "0", # minflt + "0", # cminflt + "0", # majflt + "0", # cmajflt + "2", # utime + "3", # stime + "4", # cutime + "5", # cstime + "0", # priority + "0", # nice + "0", # num_threads + "0", # itrealvalue + "6", # starttime + "0", # vsize + "0", # rss + "0", # rsslim + "0", # startcode + "0", # endcode + "0", # startstack + "0", # kstkesp + "0", # kstkeip + "0", # signal + "0", # blocked + "0", # sigignore + "0", # sigcatch + "0", # wchan + "0", # nswap + "0", # cnswap + "0", # exit_signal + "6", # processor + ] + return io.BytesIO(" ".join(args).encode()) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + p = psutil.Process() + self.assertEqual(p.name(), 'cat') + self.assertEqual(p.status(), psutil.STATUS_ZOMBIE) + self.assertEqual(p.ppid(), 1) + self.assertEqual( + p.create_time(), 6 / CLOCK_TICKS + psutil.boot_time()) + cpu = p.cpu_times() + self.assertEqual(cpu.user, 2 / CLOCK_TICKS) + self.assertEqual(cpu.system, 3 / CLOCK_TICKS) + self.assertEqual(cpu.children_user, 4 / CLOCK_TICKS) + self.assertEqual(cpu.children_system, 5 / CLOCK_TICKS) + self.assertEqual(p.cpu_num(), 6) + + def test_status_file_parsing(self): + def open_mock(name, *args, **kwargs): + if name.startswith('/proc/%s/status' % os.getpid()): + return io.BytesIO(textwrap.dedent("""\ + Uid:\t1000\t1001\t1002\t1003 + Gid:\t1004\t1005\t1006\t1007 + Threads:\t66 + Cpus_allowed:\tf + Cpus_allowed_list:\t0-7 + voluntary_ctxt_switches:\t12 + nonvoluntary_ctxt_switches:\t13""").encode()) + else: + return orig_open(name, *args, **kwargs) + + orig_open = open + patch_point = 'builtins.open' if PY3 else '__builtin__.open' + with mock.patch(patch_point, side_effect=open_mock): + p = psutil.Process() + 
self.assertEqual(p.num_ctx_switches().voluntary, 12) + self.assertEqual(p.num_ctx_switches().involuntary, 13) + self.assertEqual(p.num_threads(), 66) + uids = p.uids() + self.assertEqual(uids.real, 1000) + self.assertEqual(uids.effective, 1001) + self.assertEqual(uids.saved, 1002) + gids = p.gids() + self.assertEqual(gids.real, 1004) + self.assertEqual(gids.effective, 1005) + self.assertEqual(gids.saved, 1006) + self.assertEqual(p._proc._get_eligible_cpus(), list(range(0, 8))) + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestProcessAgainstStatus(unittest.TestCase): + """/proc/pid/stat and /proc/pid/status have many values in common. + Whenever possible, psutil uses /proc/pid/stat (it's faster). + For all those cases we check that the value found in + /proc/pid/stat (by psutil) matches the one found in + /proc/pid/status. + """ + + @classmethod + def setUpClass(cls): + cls.proc = psutil.Process() + + def read_status_file(self, linestart): + with psutil._psplatform.open_text( + '/proc/%s/status' % self.proc.pid) as f: + for line in f: + line = line.strip() + if line.startswith(linestart): + value = line.partition('\t')[2] + try: + return int(value) + except ValueError: + return value + raise ValueError("can't find %r" % linestart) + + def test_name(self): + value = self.read_status_file("Name:") + self.assertEqual(self.proc.name(), value) + + def test_status(self): + value = self.read_status_file("State:") + value = value[value.find('(') + 1:value.rfind(')')] + value = value.replace(' ', '-') + self.assertEqual(self.proc.status(), value) + + def test_ppid(self): + value = self.read_status_file("PPid:") + self.assertEqual(self.proc.ppid(), value) + + def test_num_threads(self): + value = self.read_status_file("Threads:") + self.assertEqual(self.proc.num_threads(), value) + + def test_uids(self): + value = self.read_status_file("Uid:") + value = tuple(map(int, value.split()[1:4])) + self.assertEqual(self.proc.uids(), value) + + def test_gids(self): + value = self.read_status_file("Gid:") + value = tuple(map(int, value.split()[1:4])) + self.assertEqual(self.proc.gids(), value) + + @retry_before_failing() + def test_num_ctx_switches(self): + value = self.read_status_file("voluntary_ctxt_switches:") + self.assertEqual(self.proc.num_ctx_switches().voluntary, value) + value = self.read_status_file("nonvoluntary_ctxt_switches:") + self.assertEqual(self.proc.num_ctx_switches().involuntary, value) + + def test_cpu_affinity(self): + value = self.read_status_file("Cpus_allowed_list:") + if '-' in str(value): + min_, max_ = map(int, value.split('-')) + self.assertEqual( + self.proc.cpu_affinity(), list(range(min_, max_ + 1))) + + def test_cpu_affinity_eligible_cpus(self): + value = self.read_status_file("Cpus_allowed_list:") + with mock.patch("psutil._pslinux.per_cpu_times") as m: + self.proc._proc._get_eligible_cpus() + if '-' in str(value): + assert not m.called + else: + assert m.called + + +# ===================================================================== +# --- test utils +# ===================================================================== + + +@unittest.skipIf(not LINUX, "LINUX only") +class TestUtils(unittest.TestCase): + + def test_open_text(self): + with psutil._psplatform.open_text(__file__) as f: + self.assertEqual(f.mode, 'rt') + + def test_open_binary(self): + with psutil._psplatform.open_binary(__file__) as f: + self.assertEqual(f.mode, 'rb') + + def test_readlink(self): + with mock.patch("os.readlink", return_value="foo (deleted)") as m: + 
self.assertEqual(psutil._psplatform.readlink("bar"), "foo") + assert m.called + + def test_cat(self): + fname = os.path.abspath(TESTFN) + with open(fname, "wt") as f: + f.write("foo ") + self.assertEqual(psutil._psplatform.cat(TESTFN, binary=False), "foo") + self.assertEqual(psutil._psplatform.cat(TESTFN, binary=True), b"foo") + self.assertEqual( + psutil._psplatform.cat(TESTFN + '??', fallback="bar"), "bar") + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_memory_leaks.py b/server/www/packages/packages-windows/x86/psutil/tests/test_memory_leaks.py new file mode 100644 index 0000000..680fe78 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_memory_leaks.py @@ -0,0 +1,599 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Tests for detecting function memory leaks (typically the ones +implemented in C). It does so by calling a function many times and +checking whether process memory usage keeps increasing between +calls or over time. +Note that this may produce false positives (especially on Windows +for some reason). +""" + +from __future__ import print_function +import errno +import functools +import gc +import os +import sys +import threading +import time + +import psutil +import psutil._common +from psutil import LINUX +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil import WINDOWS +from psutil._compat import xrange +from psutil.tests import create_sockets +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_CPU_AFFINITY +from psutil.tests import HAS_CPU_FREQ +from psutil.tests import HAS_ENVIRON +from psutil.tests import HAS_IONICE +from psutil.tests import HAS_MEMORY_MAPS +from psutil.tests import HAS_PROC_CPU_NUM +from psutil.tests import HAS_PROC_IO_COUNTERS +from psutil.tests import HAS_RLIMIT +from psutil.tests import HAS_SENSORS_BATTERY +from psutil.tests import HAS_SENSORS_FANS +from psutil.tests import HAS_SENSORS_TEMPERATURES +from psutil.tests import reap_children +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import skip_on_access_denied +from psutil.tests import TESTFN +from psutil.tests import TRAVIS +from psutil.tests import unittest + + +LOOPS = 1000 +MEMORY_TOLERANCE = 4096 +RETRY_FOR = 3 + +SKIP_PYTHON_IMPL = True if TRAVIS else False +cext = psutil._psplatform.cext +thisproc = psutil.Process() +SKIP_PYTHON_IMPL = True if TRAVIS else False + + +# =================================================================== +# utils +# =================================================================== + + +def skip_if_linux(): + return unittest.skipIf(LINUX and SKIP_PYTHON_IMPL, + "worthless on LINUX (pure python)") + + +def bytes2human(n): + """ + http://code.activestate.com/recipes/578019 + >>> bytes2human(10000) + '9.8K' + >>> bytes2human(100001221) + '95.4M' + """ + symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') + prefix = {} + for i, s in enumerate(symbols): + prefix[s] = 1 << (i + 1) * 10 + for s in reversed(symbols): + if n >= prefix[s]: + value = float(n) / prefix[s] + return '%.2f%s' % (value, s) + return "%sB" % n + + +class TestMemLeak(unittest.TestCase): + """Base framework class which calls a function many times and + produces a failure if process 
memory usage keeps increasing
+    between calls or over time.
+    """
+    tolerance = MEMORY_TOLERANCE
+    loops = LOOPS
+    retry_for = RETRY_FOR
+
+    def setUp(self):
+        gc.collect()
+
+    def execute(self, fun, *args, **kwargs):
+        """Test a callable."""
+        def call_many_times():
+            for x in xrange(loops):
+                self._call(fun, *args, **kwargs)
+            del x
+            gc.collect()
+
+        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
+        loops = kwargs.pop('loops_', None) or self.loops
+        retry_for = kwargs.pop('retry_for_', None) or self.retry_for
+
+        # warm up
+        for x in range(10):
+            self._call(fun, *args, **kwargs)
+        self.assertEqual(gc.garbage, [])
+        self.assertEqual(threading.active_count(), 1)
+        self.assertEqual(thisproc.children(), [])
+
+        # Get 2 distinct memory samples, before and after having
+        # called fun repeatedly.
+        # step 1
+        call_many_times()
+        mem1 = self._get_mem()
+        # step 2
+        call_many_times()
+        mem2 = self._get_mem()
+
+        diff1 = mem2 - mem1
+        if diff1 > tolerance:
+            # This doesn't necessarily mean we have a leak yet.
+            # At this point we assume that after having called the
+            # function so many times the memory usage is stabilized
+            # and if there are no leaks it should not increase
+            # anymore.
+            # Let's keep calling fun for 3 more seconds and fail if
+            # we notice any difference.
+            ncalls = 0
+            stop_at = time.time() + retry_for
+            while time.time() <= stop_at:
+                self._call(fun, *args, **kwargs)
+                ncalls += 1
+
+            del stop_at
+            gc.collect()
+            mem3 = self._get_mem()
+            diff2 = mem3 - mem2
+
+            if mem3 > mem2:
+                # failure
+                extra_proc_mem = bytes2human(diff1 + diff2)
+                print("extra proc mem: %s" % extra_proc_mem, file=sys.stderr)
+                msg = "+%s after %s calls, +%s after another %s calls, "
+                msg += "+%s extra proc mem"
+                msg = msg % (
+                    bytes2human(diff1), loops, bytes2human(diff2), ncalls,
+                    extra_proc_mem)
+                self.fail(msg)
+
+    def execute_w_exc(self, exc, fun, *args, **kwargs):
+        """Convenience function which tests a callable raising
+        an exception.
+        """
+        def call():
+            self.assertRaises(exc, fun, *args, **kwargs)
+
+        self.execute(call)
+
+    @staticmethod
+    def _get_mem():
+        # By using USS memory it seems less likely to bump
+        # into false positives.
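+        # (USS, the "unique set size", is the memory that is unique to this
+        # process and would be freed if the process terminated, so shared
+        # pages do not add noise to the measurement.)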
+ if LINUX or WINDOWS or OSX: + return thisproc.memory_full_info().uss + else: + return thisproc.memory_info().rss + + @staticmethod + def _call(fun, *args, **kwargs): + fun(*args, **kwargs) + + +# =================================================================== +# Process class +# =================================================================== + + +class TestProcessObjectLeaks(TestMemLeak): + """Test leaks of Process class methods.""" + + proc = thisproc + + def test_coverage(self): + skip = set(( + "pid", "as_dict", "children", "cpu_affinity", "cpu_percent", + "ionice", "is_running", "kill", "memory_info_ex", "memory_percent", + "nice", "oneshot", "parent", "rlimit", "send_signal", "suspend", + "terminate", "wait")) + for name in dir(psutil.Process): + if name.startswith('_'): + continue + if name in skip: + continue + self.assertTrue(hasattr(self, "test_" + name), msg=name) + + @skip_if_linux() + def test_name(self): + self.execute(self.proc.name) + + @skip_if_linux() + def test_cmdline(self): + self.execute(self.proc.cmdline) + + @skip_if_linux() + def test_exe(self): + self.execute(self.proc.exe) + + @skip_if_linux() + def test_ppid(self): + self.execute(self.proc.ppid) + + @unittest.skipIf(not POSIX, "POSIX only") + @skip_if_linux() + def test_uids(self): + self.execute(self.proc.uids) + + @unittest.skipIf(not POSIX, "POSIX only") + @skip_if_linux() + def test_gids(self): + self.execute(self.proc.gids) + + @skip_if_linux() + def test_status(self): + self.execute(self.proc.status) + + def test_nice_get(self): + self.execute(self.proc.nice) + + def test_nice_set(self): + niceness = thisproc.nice() + self.execute(self.proc.nice, niceness) + + @unittest.skipIf(not HAS_IONICE, "not supported") + def test_ionice_get(self): + self.execute(self.proc.ionice) + + @unittest.skipIf(not HAS_IONICE, "not supported") + def test_ionice_set(self): + if WINDOWS: + value = thisproc.ionice() + self.execute(self.proc.ionice, value) + else: + self.execute(self.proc.ionice, psutil.IOPRIO_CLASS_NONE) + fun = functools.partial(cext.proc_ioprio_set, os.getpid(), -1, 0) + self.execute_w_exc(OSError, fun) + + @unittest.skipIf(not HAS_PROC_IO_COUNTERS, "not supported") + @skip_if_linux() + def test_io_counters(self): + self.execute(self.proc.io_counters) + + @unittest.skipIf(POSIX, "worthless on POSIX") + def test_username(self): + self.execute(self.proc.username) + + @skip_if_linux() + def test_create_time(self): + self.execute(self.proc.create_time) + + @skip_if_linux() + @skip_on_access_denied(only_if=OPENBSD) + def test_num_threads(self): + self.execute(self.proc.num_threads) + + @unittest.skipIf(not WINDOWS, "WINDOWS only") + def test_num_handles(self): + self.execute(self.proc.num_handles) + + @unittest.skipIf(not POSIX, "POSIX only") + @skip_if_linux() + def test_num_fds(self): + self.execute(self.proc.num_fds) + + @skip_if_linux() + def test_num_ctx_switches(self): + self.execute(self.proc.num_ctx_switches) + + @skip_if_linux() + @skip_on_access_denied(only_if=OPENBSD) + def test_threads(self): + self.execute(self.proc.threads) + + @skip_if_linux() + def test_cpu_times(self): + self.execute(self.proc.cpu_times) + + @skip_if_linux() + @unittest.skipIf(not HAS_PROC_CPU_NUM, "not supported") + def test_cpu_num(self): + self.execute(self.proc.cpu_num) + + @skip_if_linux() + def test_memory_info(self): + self.execute(self.proc.memory_info) + + @skip_if_linux() + def test_memory_full_info(self): + self.execute(self.proc.memory_full_info) + + @unittest.skipIf(not POSIX, "POSIX only") + @skip_if_linux() + 
def test_terminal(self): + self.execute(self.proc.terminal) + + @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL, + "worthless on POSIX (pure python)") + def test_resume(self): + self.execute(self.proc.resume) + + @skip_if_linux() + def test_cwd(self): + self.execute(self.proc.cwd) + + @unittest.skipIf(not HAS_CPU_AFFINITY, "not supported") + def test_cpu_affinity_get(self): + self.execute(self.proc.cpu_affinity) + + @unittest.skipIf(not HAS_CPU_AFFINITY, "not supported") + def test_cpu_affinity_set(self): + affinity = thisproc.cpu_affinity() + self.execute(self.proc.cpu_affinity, affinity) + if not TRAVIS: + self.execute_w_exc(ValueError, self.proc.cpu_affinity, [-1]) + + @skip_if_linux() + def test_open_files(self): + safe_rmpath(TESTFN) # needed after UNIX socket test has run + with open(TESTFN, 'w'): + self.execute(self.proc.open_files) + + # OSX implementation is unbelievably slow + @unittest.skipIf(OSX, "too slow on OSX") + @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported") + @skip_if_linux() + def test_memory_maps(self): + self.execute(self.proc.memory_maps) + + @unittest.skipIf(not LINUX, "LINUX only") + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_get(self): + self.execute(self.proc.rlimit, psutil.RLIMIT_NOFILE) + + @unittest.skipIf(not LINUX, "LINUX only") + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_set(self): + limit = thisproc.rlimit(psutil.RLIMIT_NOFILE) + self.execute(self.proc.rlimit, psutil.RLIMIT_NOFILE, limit) + self.execute_w_exc(OSError, self.proc.rlimit, -1) + + @skip_if_linux() + # Windows implementation is based on a single system-wide + # function (tested later). + @unittest.skipIf(WINDOWS, "worthless on WINDOWS") + def test_connections(self): + # TODO: UNIX sockets are temporarily implemented by parsing + # 'pfiles' cmd output; we don't want that part of the code to + # be executed. + with create_sockets(): + kind = 'inet' if SUNOS else 'all' + self.execute(self.proc.connections, kind) + + @unittest.skipIf(not HAS_ENVIRON, "not supported") + def test_environ(self): + self.execute(self.proc.environ) + + @unittest.skipIf(not WINDOWS, "WINDOWS only") + def test_proc_info(self): + self.execute(cext.proc_info, os.getpid()) + + +class TestTerminatedProcessLeaks(TestProcessObjectLeaks): + """Repeat the tests above looking for leaks occurring when dealing + with terminated processes raising NoSuchProcess exception. + The C functions are still invoked but will follow different code + paths. We'll check those code paths. 
+ """ + + @classmethod + def setUpClass(cls): + super(TestTerminatedProcessLeaks, cls).setUpClass() + p = get_test_subprocess() + cls.proc = psutil.Process(p.pid) + cls.proc.kill() + cls.proc.wait() + + @classmethod + def tearDownClass(cls): + super(TestTerminatedProcessLeaks, cls).tearDownClass() + reap_children() + + def _call(self, fun, *args, **kwargs): + try: + fun(*args, **kwargs) + except psutil.NoSuchProcess: + pass + + if WINDOWS: + + def test_kill(self): + self.execute(self.proc.kill) + + def test_terminate(self): + self.execute(self.proc.terminate) + + def test_suspend(self): + self.execute(self.proc.suspend) + + def test_resume(self): + self.execute(self.proc.resume) + + def test_wait(self): + self.execute(self.proc.wait) + + def test_proc_info(self): + # test dual implementation + def call(): + try: + return cext.proc_info(self.proc.pid) + except OSError as err: + if err.errno != errno.ESRCH: + raise + + self.execute(call) + + +# =================================================================== +# system APIs +# =================================================================== + + +class TestModuleFunctionsLeaks(TestMemLeak): + """Test leaks of psutil module functions.""" + + def test_coverage(self): + skip = set(( + "version_info", "__version__", "process_iter", "wait_procs", + "cpu_percent", "cpu_times_percent", "cpu_count")) + for name in psutil.__all__: + if not name.islower(): + continue + if name in skip: + continue + self.assertTrue(hasattr(self, "test_" + name), msg=name) + + # --- cpu + + @skip_if_linux() + def test_cpu_count_logical(self): + self.execute(psutil.cpu_count, logical=True) + + @skip_if_linux() + def test_cpu_count_physical(self): + self.execute(psutil.cpu_count, logical=False) + + @skip_if_linux() + def test_cpu_times(self): + self.execute(psutil.cpu_times) + + @skip_if_linux() + def test_per_cpu_times(self): + self.execute(psutil.cpu_times, percpu=True) + + def test_cpu_stats(self): + self.execute(psutil.cpu_stats) + + @skip_if_linux() + @unittest.skipIf(not HAS_CPU_FREQ, "not supported") + def test_cpu_freq(self): + self.execute(psutil.cpu_freq) + + # --- mem + + def test_virtual_memory(self): + self.execute(psutil.virtual_memory) + + # TODO: remove this skip when this gets fixed + @unittest.skipIf(SUNOS, + "worthless on SUNOS (uses a subprocess)") + def test_swap_memory(self): + self.execute(psutil.swap_memory) + + @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL, + "worthless on POSIX (pure python)") + def test_pid_exists(self): + self.execute(psutil.pid_exists, os.getpid()) + + # --- disk + + @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL, + "worthless on POSIX (pure python)") + def test_disk_usage(self): + self.execute(psutil.disk_usage, '.') + + def test_disk_partitions(self): + self.execute(psutil.disk_partitions) + + @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'), + '/proc/diskstats not available on this Linux version') + @skip_if_linux() + def test_disk_io_counters(self): + self.execute(psutil.disk_io_counters, nowrap=False) + + # --- proc + + @skip_if_linux() + def test_pids(self): + self.execute(psutil.pids) + + # --- net + + @skip_if_linux() + def test_net_io_counters(self): + self.execute(psutil.net_io_counters, nowrap=False) + + @unittest.skipIf(LINUX, + "worthless on Linux (pure python)") + @unittest.skipIf(OSX and os.getuid() != 0, "need root access") + def test_net_connections(self): + with create_sockets(): + self.execute(psutil.net_connections) + + def test_net_if_addrs(self): + # Note: verified that on Windows this was 
a false positive. + self.execute(psutil.net_if_addrs, + tolerance_=80 * 1024 if WINDOWS else None) + + @unittest.skipIf(TRAVIS, "EPERM on travis") + def test_net_if_stats(self): + self.execute(psutil.net_if_stats) + + # --- sensors + + @skip_if_linux() + @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported") + def test_sensors_battery(self): + self.execute(psutil.sensors_battery) + + @skip_if_linux() + @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported") + def test_sensors_temperatures(self): + self.execute(psutil.sensors_temperatures) + + @skip_if_linux() + @unittest.skipIf(not HAS_SENSORS_FANS, "not supported") + def test_sensors_fans(self): + self.execute(psutil.sensors_fans) + + # --- others + + @skip_if_linux() + def test_boot_time(self): + self.execute(psutil.boot_time) + + # XXX - on Windows this produces a false positive + @unittest.skipIf(WINDOWS, "XXX produces a false positive on Windows") + def test_users(self): + self.execute(psutil.users) + + if WINDOWS: + + # --- win services + + def test_win_service_iter(self): + self.execute(cext.winservice_enumerate) + + def test_win_service_get(self): + pass + + def test_win_service_get_config(self): + name = next(psutil.win_service_iter()).name() + self.execute(cext.winservice_query_config, name) + + def test_win_service_get_status(self): + name = next(psutil.win_service_iter()).name() + self.execute(cext.winservice_query_status, name) + + def test_win_service_get_description(self): + name = next(psutil.win_service_iter()).name() + self.execute(cext.winservice_query_descr, name) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_misc.py b/server/www/packages/packages-windows/x86/psutil/tests/test_misc.py new file mode 100644 index 0000000..f67c0e4 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_misc.py @@ -0,0 +1,1039 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Miscellaneous tests. 
+""" + +import ast +import collections +import contextlib +import errno +import json +import os +import pickle +import socket +import stat + +from psutil import LINUX +from psutil import POSIX +from psutil import WINDOWS +from psutil._common import memoize +from psutil._common import memoize_when_activated +from psutil._common import supports_ipv6 +from psutil._common import wrap_numbers +from psutil._compat import PY3 +from psutil.tests import APPVEYOR +from psutil.tests import bind_socket +from psutil.tests import bind_unix_socket +from psutil.tests import call_until +from psutil.tests import chdir +from psutil.tests import create_proc_children_pair +from psutil.tests import create_sockets +from psutil.tests import create_zombie_proc +from psutil.tests import DEVNULL +from psutil.tests import get_free_port +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_BATTERY +from psutil.tests import HAS_CONNECTIONS_UNIX +from psutil.tests import HAS_MEMORY_FULL_INFO +from psutil.tests import HAS_MEMORY_MAPS +from psutil.tests import HAS_SENSORS_BATTERY +from psutil.tests import HAS_SENSORS_FANS +from psutil.tests import HAS_SENSORS_TEMPERATURES +from psutil.tests import import_module_by_path +from psutil.tests import is_namedtuple +from psutil.tests import mock +from psutil.tests import PYTHON_EXE +from psutil.tests import reap_children +from psutil.tests import reload_module +from psutil.tests import retry +from psutil.tests import ROOT_DIR +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import SCRIPTS_DIR +from psutil.tests import sh +from psutil.tests import tcp_socketpair +from psutil.tests import TESTFN +from psutil.tests import TOX +from psutil.tests import TRAVIS +from psutil.tests import unittest +from psutil.tests import unix_socket_path +from psutil.tests import unix_socketpair +from psutil.tests import wait_for_file +from psutil.tests import wait_for_pid +import psutil +import psutil.tests + + +# =================================================================== +# --- Misc / generic tests. 
+# =================================================================== + + +class TestMisc(unittest.TestCase): + + def test_process__repr__(self, func=repr): + p = psutil.Process() + r = func(p) + self.assertIn("psutil.Process", r) + self.assertIn("pid=%s" % p.pid, r) + self.assertIn("name=", r) + self.assertIn(p.name(), r) + with mock.patch.object(psutil.Process, "name", + side_effect=psutil.ZombieProcess(os.getpid())): + p = psutil.Process() + r = func(p) + self.assertIn("pid=%s" % p.pid, r) + self.assertIn("zombie", r) + self.assertNotIn("name=", r) + with mock.patch.object(psutil.Process, "name", + side_effect=psutil.NoSuchProcess(os.getpid())): + p = psutil.Process() + r = func(p) + self.assertIn("pid=%s" % p.pid, r) + self.assertIn("terminated", r) + self.assertNotIn("name=", r) + with mock.patch.object(psutil.Process, "name", + side_effect=psutil.AccessDenied(os.getpid())): + p = psutil.Process() + r = func(p) + self.assertIn("pid=%s" % p.pid, r) + self.assertNotIn("name=", r) + + def test_process__str__(self): + self.test_process__repr__(func=str) + + def test_no_such_process__repr__(self, func=repr): + self.assertEqual( + repr(psutil.NoSuchProcess(321)), + "psutil.NoSuchProcess process no longer exists (pid=321)") + self.assertEqual( + repr(psutil.NoSuchProcess(321, name='foo')), + "psutil.NoSuchProcess process no longer exists (pid=321, " + "name='foo')") + self.assertEqual( + repr(psutil.NoSuchProcess(321, msg='foo')), + "psutil.NoSuchProcess foo") + + def test_zombie_process__repr__(self, func=repr): + self.assertEqual( + repr(psutil.ZombieProcess(321)), + "psutil.ZombieProcess process still exists but it's a zombie " + "(pid=321)") + self.assertEqual( + repr(psutil.ZombieProcess(321, name='foo')), + "psutil.ZombieProcess process still exists but it's a zombie " + "(pid=321, name='foo')") + self.assertEqual( + repr(psutil.ZombieProcess(321, name='foo', ppid=1)), + "psutil.ZombieProcess process still exists but it's a zombie " + "(pid=321, name='foo', ppid=1)") + self.assertEqual( + repr(psutil.ZombieProcess(321, msg='foo')), + "psutil.ZombieProcess foo") + + def test_access_denied__repr__(self, func=repr): + self.assertEqual( + repr(psutil.AccessDenied(321)), + "psutil.AccessDenied (pid=321)") + self.assertEqual( + repr(psutil.AccessDenied(321, name='foo')), + "psutil.AccessDenied (pid=321, name='foo')") + self.assertEqual( + repr(psutil.AccessDenied(321, msg='foo')), + "psutil.AccessDenied foo") + + def test_timeout_expired__repr__(self, func=repr): + self.assertEqual( + repr(psutil.TimeoutExpired(321)), + "psutil.TimeoutExpired timeout after 321 seconds") + self.assertEqual( + repr(psutil.TimeoutExpired(321, pid=111)), + "psutil.TimeoutExpired timeout after 321 seconds (pid=111)") + self.assertEqual( + repr(psutil.TimeoutExpired(321, pid=111, name='foo')), + "psutil.TimeoutExpired timeout after 321 seconds " + "(pid=111, name='foo')") + + def test_process__eq__(self): + p1 = psutil.Process() + p2 = psutil.Process() + self.assertEqual(p1, p2) + p2._ident = (0, 0) + self.assertNotEqual(p1, p2) + self.assertNotEqual(p1, 'foo') + + def test_process__hash__(self): + s = set([psutil.Process(), psutil.Process()]) + self.assertEqual(len(s), 1) + + def test__all__(self): + dir_psutil = dir(psutil) + for name in dir_psutil: + if name in ('callable', 'error', 'namedtuple', 'tests', + 'long', 'test', 'NUM_CPUS', 'BOOT_TIME', + 'TOTAL_PHYMEM'): + continue + if not name.startswith('_'): + try: + __import__(name) + except ImportError: + if name not in psutil.__all__: + fun = getattr(psutil, 
name) + if fun is None: + continue + if (fun.__doc__ is not None and + 'deprecated' not in fun.__doc__.lower()): + self.fail('%r not in psutil.__all__' % name) + + # Import 'star' will break if __all__ is inconsistent, see: + # https://github.com/giampaolo/psutil/issues/656 + # Can't do `from psutil import *` as it won't work on python 3 + # so we simply iterate over __all__. + for name in psutil.__all__: + self.assertIn(name, dir_psutil) + + def test_version(self): + self.assertEqual('.'.join([str(x) for x in psutil.version_info]), + psutil.__version__) + + def test_process_as_dict_no_new_names(self): + # See https://github.com/giampaolo/psutil/issues/813 + p = psutil.Process() + p.foo = '1' + self.assertNotIn('foo', p.as_dict()) + + def test_memoize(self): + @memoize + def foo(*args, **kwargs): + "foo docstring" + calls.append(None) + return (args, kwargs) + + calls = [] + # no args + for x in range(2): + ret = foo() + expected = ((), {}) + self.assertEqual(ret, expected) + self.assertEqual(len(calls), 1) + # with args + for x in range(2): + ret = foo(1) + expected = ((1, ), {}) + self.assertEqual(ret, expected) + self.assertEqual(len(calls), 2) + # with args + kwargs + for x in range(2): + ret = foo(1, bar=2) + expected = ((1, ), {'bar': 2}) + self.assertEqual(ret, expected) + self.assertEqual(len(calls), 3) + # clear cache + foo.cache_clear() + ret = foo() + expected = ((), {}) + self.assertEqual(ret, expected) + self.assertEqual(len(calls), 4) + # docstring + self.assertEqual(foo.__doc__, "foo docstring") + + def test_memoize_when_activated(self): + class Foo: + + @memoize_when_activated + def foo(self): + calls.append(None) + + f = Foo() + calls = [] + f.foo() + f.foo() + self.assertEqual(len(calls), 2) + + # activate + calls = [] + f.foo.cache_activate() + f.foo() + f.foo() + self.assertEqual(len(calls), 1) + + # deactivate + calls = [] + f.foo.cache_deactivate() + f.foo() + f.foo() + self.assertEqual(len(calls), 2) + + def test_parse_environ_block(self): + from psutil._common import parse_environ_block + + def k(s): + return s.upper() if WINDOWS else s + + self.assertEqual(parse_environ_block("a=1\0"), + {k("a"): "1"}) + self.assertEqual(parse_environ_block("a=1\0b=2\0\0"), + {k("a"): "1", k("b"): "2"}) + self.assertEqual(parse_environ_block("a=1\0b=\0\0"), + {k("a"): "1", k("b"): ""}) + # ignore everything after \0\0 + self.assertEqual(parse_environ_block("a=1\0b=2\0\0c=3\0"), + {k("a"): "1", k("b"): "2"}) + # ignore everything that is not an assignment + self.assertEqual(parse_environ_block("xxx\0a=1\0"), {k("a"): "1"}) + self.assertEqual(parse_environ_block("a=1\0=b=2\0"), {k("a"): "1"}) + # do not fail if the block is incomplete + self.assertEqual(parse_environ_block("a=1\0b=2"), {k("a"): "1"}) + + def test_supports_ipv6(self): + self.addCleanup(supports_ipv6.cache_clear) + if supports_ipv6(): + with mock.patch('psutil._common.socket') as s: + s.has_ipv6 = False + supports_ipv6.cache_clear() + assert not supports_ipv6() + + supports_ipv6.cache_clear() + with mock.patch('psutil._common.socket.socket', + side_effect=socket.error) as s: + assert not supports_ipv6() + assert s.called + + supports_ipv6.cache_clear() + with mock.patch('psutil._common.socket.socket', + side_effect=socket.gaierror) as s: + assert not supports_ipv6() + supports_ipv6.cache_clear() + assert s.called + + supports_ipv6.cache_clear() + with mock.patch('psutil._common.socket.socket.bind', + side_effect=socket.gaierror) as s: + assert not supports_ipv6() + supports_ipv6.cache_clear() + assert s.called + else: 
+ with self.assertRaises(Exception): + sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + sock.bind(("::1", 0)) + + def test_isfile_strict(self): + from psutil._common import isfile_strict + this_file = os.path.abspath(__file__) + assert isfile_strict(this_file) + assert not isfile_strict(os.path.dirname(this_file)) + with mock.patch('psutil._common.os.stat', + side_effect=OSError(errno.EPERM, "foo")): + self.assertRaises(OSError, isfile_strict, this_file) + with mock.patch('psutil._common.os.stat', + side_effect=OSError(errno.EACCES, "foo")): + self.assertRaises(OSError, isfile_strict, this_file) + with mock.patch('psutil._common.os.stat', + side_effect=OSError(errno.EINVAL, "foo")): + assert not isfile_strict(this_file) + with mock.patch('psutil._common.stat.S_ISREG', return_value=False): + assert not isfile_strict(this_file) + + def test_serialization(self): + def check(ret): + if json is not None: + json.loads(json.dumps(ret)) + a = pickle.dumps(ret) + b = pickle.loads(a) + self.assertEqual(ret, b) + + check(psutil.Process().as_dict()) + check(psutil.virtual_memory()) + check(psutil.swap_memory()) + check(psutil.cpu_times()) + check(psutil.cpu_times_percent(interval=0)) + check(psutil.net_io_counters()) + if LINUX and not os.path.exists('/proc/diskstats'): + pass + else: + if not APPVEYOR: + check(psutil.disk_io_counters()) + check(psutil.disk_partitions()) + check(psutil.disk_usage(os.getcwd())) + check(psutil.users()) + + def test_setup_script(self): + setup_py = os.path.join(ROOT_DIR, 'setup.py') + if TRAVIS and not os.path.exists(setup_py): + return self.skipTest("can't find setup.py") + module = import_module_by_path(setup_py) + self.assertRaises(SystemExit, module.setup) + self.assertEqual(module.get_version(), psutil.__version__) + + def test_ad_on_process_creation(self): + # We are supposed to be able to instantiate Process also in case + # of zombie processes or access denied. + with mock.patch.object(psutil.Process, 'create_time', + side_effect=psutil.AccessDenied) as meth: + psutil.Process() + assert meth.called + with mock.patch.object(psutil.Process, 'create_time', + side_effect=psutil.ZombieProcess(1)) as meth: + psutil.Process() + assert meth.called + with mock.patch.object(psutil.Process, 'create_time', + side_effect=ValueError) as meth: + with self.assertRaises(ValueError): + psutil.Process() + assert meth.called + + def test_sanity_version_check(self): + # see: https://github.com/giampaolo/psutil/issues/564 + with mock.patch( + "psutil._psplatform.cext.version", return_value="0.0.0"): + with self.assertRaises(ImportError) as cm: + reload_module(psutil) + self.assertIn("version conflict", str(cm.exception).lower()) + + +# =================================================================== +# --- Tests for wrap_numbers() function. 
+# =================================================================== + + +nt = collections.namedtuple('foo', 'a b c') + + +class TestWrapNumbers(unittest.TestCase): + + def setUp(self): + wrap_numbers.cache_clear() + + tearDown = setUp + + def test_first_call(self): + input = {'disk1': nt(5, 5, 5)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + + def test_input_hasnt_changed(self): + input = {'disk1': nt(5, 5, 5)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + + def test_increase_but_no_wrap(self): + input = {'disk1': nt(5, 5, 5)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + input = {'disk1': nt(10, 15, 20)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + input = {'disk1': nt(20, 25, 30)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + input = {'disk1': nt(20, 25, 30)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + + def test_wrap(self): + # let's say 100 is the threshold + input = {'disk1': nt(100, 100, 100)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + # first wrap restarts from 10 + input = {'disk1': nt(100, 100, 10)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(100, 100, 110)}) + # then it remains the same + input = {'disk1': nt(100, 100, 10)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(100, 100, 110)}) + # then it goes up + input = {'disk1': nt(100, 100, 90)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(100, 100, 190)}) + # then it wraps again + input = {'disk1': nt(100, 100, 20)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(100, 100, 210)}) + # and remains the same + input = {'disk1': nt(100, 100, 20)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(100, 100, 210)}) + # now wrap another num + input = {'disk1': nt(50, 100, 20)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(150, 100, 210)}) + # and again + input = {'disk1': nt(40, 100, 20)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(190, 100, 210)}) + # keep it the same + input = {'disk1': nt(40, 100, 20)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(190, 100, 210)}) + + def test_changing_keys(self): + # Emulate a case where the second call to disk_io() + # (or whatever) provides a new disk, then the new disk + # disappears on the third call. + input = {'disk1': nt(5, 5, 5)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + input = {'disk1': nt(5, 5, 5), + 'disk2': nt(7, 7, 7)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + input = {'disk1': nt(8, 8, 8)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + + def test_changing_keys_w_wrap(self): + input = {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 100)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + # disk 2 wraps + input = {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 10)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 110)}) + # disk 2 disappears + input = {'disk1': nt(50, 50, 50)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + + # then it appears again; the old wrap is supposed to be + # gone. 
+ input = {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 100)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + # remains the same + input = {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 100)} + self.assertEqual(wrap_numbers(input, 'disk_io'), input) + # and then wraps again + input = {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 10)} + self.assertEqual(wrap_numbers(input, 'disk_io'), + {'disk1': nt(50, 50, 50), + 'disk2': nt(100, 100, 110)}) + + def test_real_data(self): + d = {'nvme0n1': (300, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048), + 'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8), + 'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28), + 'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)} + self.assertEqual(wrap_numbers(d, 'disk_io'), d) + self.assertEqual(wrap_numbers(d, 'disk_io'), d) + # decrease this ↓ + d = {'nvme0n1': (100, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048), + 'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8), + 'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28), + 'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)} + out = wrap_numbers(d, 'disk_io') + self.assertEqual(out['nvme0n1'][0], 400) + + # --- cache tests + + def test_cache_first_call(self): + input = {'disk1': nt(5, 5, 5)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + self.assertEqual(cache[1], {'disk_io': {}}) + self.assertEqual(cache[2], {'disk_io': {}}) + + def test_cache_call_twice(self): + input = {'disk1': nt(5, 5, 5)} + wrap_numbers(input, 'disk_io') + input = {'disk1': nt(10, 10, 10)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + self.assertEqual( + cache[1], + {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}}) + self.assertEqual(cache[2], {'disk_io': {}}) + + def test_cache_wrap(self): + # let's say 100 is the threshold + input = {'disk1': nt(100, 100, 100)} + wrap_numbers(input, 'disk_io') + + # first wrap restarts from 10 + input = {'disk1': nt(100, 100, 10)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + self.assertEqual( + cache[1], + {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 100}}) + self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}}) + + def assert_(): + cache = wrap_numbers.cache_info() + self.assertEqual( + cache[1], + {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, + ('disk1', 2): 100}}) + self.assertEqual(cache[2], + {'disk_io': {'disk1': set([('disk1', 2)])}}) + + # then it remains the same + input = {'disk1': nt(100, 100, 10)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + assert_() + + # then it goes up + input = {'disk1': nt(100, 100, 90)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + assert_() + + # then it wraps again + input = {'disk1': nt(100, 100, 20)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + self.assertEqual( + cache[1], + {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 190}}) + self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}}) + + def test_cache_changing_keys(self): + input = {'disk1': nt(5, 5, 5)} + wrap_numbers(input, 'disk_io') + input = {'disk1': nt(5, 5, 5), 
+ 'disk2': nt(7, 7, 7)} + wrap_numbers(input, 'disk_io') + cache = wrap_numbers.cache_info() + self.assertEqual(cache[0], {'disk_io': input}) + self.assertEqual( + cache[1], + {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}}) + self.assertEqual(cache[2], {'disk_io': {}}) + + def test_cache_clear(self): + input = {'disk1': nt(5, 5, 5)} + wrap_numbers(input, 'disk_io') + wrap_numbers(input, 'disk_io') + wrap_numbers.cache_clear('disk_io') + self.assertEqual(wrap_numbers.cache_info(), ({}, {}, {})) + wrap_numbers.cache_clear('disk_io') + wrap_numbers.cache_clear('?!?') + + @unittest.skipIf( + not psutil.disk_io_counters() or not psutil.net_io_counters(), + "no disks or NICs available") + def test_cache_clear_public_apis(self): + psutil.disk_io_counters() + psutil.net_io_counters() + caches = wrap_numbers.cache_info() + for cache in caches: + self.assertIn('psutil.disk_io_counters', cache) + self.assertIn('psutil.net_io_counters', cache) + + psutil.disk_io_counters.cache_clear() + caches = wrap_numbers.cache_info() + for cache in caches: + self.assertIn('psutil.net_io_counters', cache) + self.assertNotIn('psutil.disk_io_counters', cache) + + psutil.net_io_counters.cache_clear() + caches = wrap_numbers.cache_info() + self.assertEqual(caches, ({}, {}, {})) + + +# =================================================================== +# --- Example script tests +# =================================================================== + + +@unittest.skipIf(TOX, "can't test on TOX") +# See: https://travis-ci.org/giampaolo/psutil/jobs/295224806 +@unittest.skipIf(TRAVIS and not os.path.exists(SCRIPTS_DIR), + "can't locate scripts directory") +class TestScripts(unittest.TestCase): + """Tests for scripts in the "scripts" directory.""" + + @staticmethod + def assert_stdout(exe, *args, **kwargs): + exe = '%s' % os.path.join(SCRIPTS_DIR, exe) + cmd = [PYTHON_EXE, exe] + for arg in args: + cmd.append(arg) + try: + out = sh(cmd, **kwargs).strip() + except RuntimeError as err: + if 'AccessDenied' in str(err): + return str(err) + else: + raise + assert out, out + return out + + @staticmethod + def assert_syntax(exe, args=None): + exe = os.path.join(SCRIPTS_DIR, exe) + if PY3: + f = open(exe, 'rt', encoding='utf8') + else: + f = open(exe, 'rt') + with f: + src = f.read() + ast.parse(src) + + def test_coverage(self): + # make sure all example scripts have a test method defined + meths = dir(self) + for name in os.listdir(SCRIPTS_DIR): + if name.endswith('.py'): + if 'test_' + os.path.splitext(name)[0] not in meths: + # self.assert_stdout(name) + self.fail('no test defined for %r script' + % os.path.join(SCRIPTS_DIR, name)) + + @unittest.skipIf(not POSIX, "POSIX only") + def test_executable(self): + for name in os.listdir(SCRIPTS_DIR): + if name.endswith('.py'): + path = os.path.join(SCRIPTS_DIR, name) + if not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]: + self.fail('%r is not executable' % path) + + def test_disk_usage(self): + self.assert_stdout('disk_usage.py') + + def test_free(self): + self.assert_stdout('free.py') + + def test_meminfo(self): + self.assert_stdout('meminfo.py') + + def test_procinfo(self): + self.assert_stdout('procinfo.py', str(os.getpid())) + + # can't find users on APPVEYOR or TRAVIS + @unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(), + "unreliable on APPVEYOR or TRAVIS") + def test_who(self): + self.assert_stdout('who.py') + + def test_ps(self): + self.assert_stdout('ps.py') + + def test_pstree(self): + self.assert_stdout('pstree.py') + + def test_netstat(self): + 
self.assert_stdout('netstat.py')
+
+    # permission denied on travis
+    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
+    def test_ifconfig(self):
+        self.assert_stdout('ifconfig.py')
+
+    @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
+    def test_pmap(self):
+        self.assert_stdout('pmap.py', str(os.getpid()))
+
+    @unittest.skipIf(not HAS_MEMORY_FULL_INFO, "not supported")
+    def test_procsmem(self):
+        self.assert_stdout('procsmem.py', stderr=DEVNULL)
+
+    def test_killall(self):
+        self.assert_syntax('killall.py')
+
+    def test_nettop(self):
+        self.assert_syntax('nettop.py')
+
+    def test_top(self):
+        self.assert_syntax('top.py')
+
+    def test_iotop(self):
+        self.assert_syntax('iotop.py')
+
+    def test_pidof(self):
+        output = self.assert_stdout('pidof.py', psutil.Process().name())
+        self.assertIn(str(os.getpid()), output)
+
+    @unittest.skipIf(not WINDOWS, "WINDOWS only")
+    def test_winservices(self):
+        self.assert_stdout('winservices.py')
+
+    def test_cpu_distribution(self):
+        self.assert_syntax('cpu_distribution.py')
+
+    @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
+    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
+    def test_temperatures(self):
+        self.assert_stdout('temperatures.py')
+
+    @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
+    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
+    def test_fans(self):
+        self.assert_stdout('fans.py')
+
+    @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
+    @unittest.skipIf(not HAS_BATTERY, "no battery")
+    def test_battery(self):
+        self.assert_stdout('battery.py')
+
+    def test_sensors(self):
+        self.assert_stdout('sensors.py')
+
+
+# ===================================================================
+# --- Unit tests for test utilities.
+# ===================================================================
+
+
+class TestRetryDecorator(unittest.TestCase):
+
+    @mock.patch('time.sleep')
+    def test_retry_success(self, sleep):
+        # Fail 3 times out of 5; make sure the decorated fun returns.
+
+        @retry(retries=5, interval=1, logfun=None)
+        def foo():
+            while queue:
+                queue.pop()
+                1 / 0
+            return 1
+
+        queue = list(range(3))
+        self.assertEqual(foo(), 1)
+        self.assertEqual(sleep.call_count, 3)
+
+    @mock.patch('time.sleep')
+    def test_retry_failure(self, sleep):
+        # Fail 6 times out of 5; the function is supposed to raise exc.
+ + @retry(retries=5, interval=1, logfun=None) + def foo(): + while queue: + queue.pop() + 1 / 0 + return 1 + + queue = list(range(6)) + self.assertRaises(ZeroDivisionError, foo) + self.assertEqual(sleep.call_count, 5) + + @mock.patch('time.sleep') + def test_exception_arg(self, sleep): + @retry(exception=ValueError, interval=1) + def foo(): + raise TypeError + + self.assertRaises(TypeError, foo) + self.assertEqual(sleep.call_count, 0) + + @mock.patch('time.sleep') + def test_no_interval_arg(self, sleep): + # if interval is not specified sleep is not supposed to be called + + @retry(retries=5, interval=None, logfun=None) + def foo(): + 1 / 0 + + self.assertRaises(ZeroDivisionError, foo) + self.assertEqual(sleep.call_count, 0) + + @mock.patch('time.sleep') + def test_retries_arg(self, sleep): + + @retry(retries=5, interval=1, logfun=None) + def foo(): + 1 / 0 + + self.assertRaises(ZeroDivisionError, foo) + self.assertEqual(sleep.call_count, 5) + + @mock.patch('time.sleep') + def test_retries_and_timeout_args(self, sleep): + self.assertRaises(ValueError, retry, retries=5, timeout=1) + + +class TestSyncTestUtils(unittest.TestCase): + + def tearDown(self): + safe_rmpath(TESTFN) + + def test_wait_for_pid(self): + wait_for_pid(os.getpid()) + nopid = max(psutil.pids()) + 99999 + with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])): + self.assertRaises(psutil.NoSuchProcess, wait_for_pid, nopid) + + def test_wait_for_file(self): + with open(TESTFN, 'w') as f: + f.write('foo') + wait_for_file(TESTFN) + assert not os.path.exists(TESTFN) + + def test_wait_for_file_empty(self): + with open(TESTFN, 'w'): + pass + wait_for_file(TESTFN, empty=True) + assert not os.path.exists(TESTFN) + + def test_wait_for_file_no_file(self): + with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])): + self.assertRaises(IOError, wait_for_file, TESTFN) + + def test_wait_for_file_no_delete(self): + with open(TESTFN, 'w') as f: + f.write('foo') + wait_for_file(TESTFN, delete=False) + assert os.path.exists(TESTFN) + + def test_call_until(self): + ret = call_until(lambda: 1, "ret == 1") + self.assertEqual(ret, 1) + + +class TestFSTestUtils(unittest.TestCase): + + def setUp(self): + safe_rmpath(TESTFN) + + tearDown = setUp + + def test_safe_rmpath(self): + # test file is removed + open(TESTFN, 'w').close() + safe_rmpath(TESTFN) + assert not os.path.exists(TESTFN) + # test no exception if path does not exist + safe_rmpath(TESTFN) + # test dir is removed + os.mkdir(TESTFN) + safe_rmpath(TESTFN) + assert not os.path.exists(TESTFN) + # test other exceptions are raised + with mock.patch('psutil.tests.os.stat', + side_effect=OSError(errno.EINVAL, "")) as m: + with self.assertRaises(OSError): + safe_rmpath(TESTFN) + assert m.called + + def test_chdir(self): + base = os.getcwd() + os.mkdir(TESTFN) + with chdir(TESTFN): + self.assertEqual(os.getcwd(), os.path.join(base, TESTFN)) + self.assertEqual(os.getcwd(), base) + + +class TestProcessUtils(unittest.TestCase): + + def test_reap_children(self): + subp = get_test_subprocess() + p = psutil.Process(subp.pid) + assert p.is_running() + reap_children() + assert not p.is_running() + assert not psutil.tests._pids_started + assert not psutil.tests._subprocesses_started + + def test_create_proc_children_pair(self): + p1, p2 = create_proc_children_pair() + self.assertNotEqual(p1.pid, p2.pid) + assert p1.is_running() + assert p2.is_running() + children = psutil.Process().children(recursive=True) + self.assertEqual(len(children), 2) + self.assertIn(p1, children) + 
self.assertIn(p2, children) + self.assertEqual(p1.ppid(), os.getpid()) + self.assertEqual(p2.ppid(), p1.pid) + + # make sure both of them are cleaned up + reap_children() + assert not p1.is_running() + assert not p2.is_running() + assert not psutil.tests._pids_started + assert not psutil.tests._subprocesses_started + + @unittest.skipIf(not POSIX, "POSIX only") + def test_create_zombie_proc(self): + zpid = create_zombie_proc() + self.addCleanup(reap_children, recursive=True) + p = psutil.Process(zpid) + self.assertEqual(p.status(), psutil.STATUS_ZOMBIE) + + +class TestNetUtils(unittest.TestCase): + + def bind_socket(self): + port = get_free_port() + with contextlib.closing(bind_socket(addr=('', port))) as s: + self.assertEqual(s.getsockname()[1], port) + + @unittest.skipIf(not POSIX, "POSIX only") + def test_bind_unix_socket(self): + with unix_socket_path() as name: + sock = bind_unix_socket(name) + with contextlib.closing(sock): + self.assertEqual(sock.family, socket.AF_UNIX) + self.assertEqual(sock.type, socket.SOCK_STREAM) + self.assertEqual(sock.getsockname(), name) + assert os.path.exists(name) + assert stat.S_ISSOCK(os.stat(name).st_mode) + # UDP + with unix_socket_path() as name: + sock = bind_unix_socket(name, type=socket.SOCK_DGRAM) + with contextlib.closing(sock): + self.assertEqual(sock.type, socket.SOCK_DGRAM) + + def tcp_tcp_socketpair(self): + addr = ("127.0.0.1", get_free_port()) + server, client = tcp_socketpair(socket.AF_INET, addr=addr) + with contextlib.closing(server): + with contextlib.closing(client): + # Ensure they are connected and the positions are + # correct. + self.assertEqual(server.getsockname(), addr) + self.assertEqual(client.getpeername(), addr) + self.assertNotEqual(client.getsockname(), addr) + + @unittest.skipIf(not POSIX, "POSIX only") + def test_unix_socketpair(self): + p = psutil.Process() + num_fds = p.num_fds() + assert not p.connections(kind='unix') + with unix_socket_path() as name: + server, client = unix_socketpair(name) + try: + assert os.path.exists(name) + assert stat.S_ISSOCK(os.stat(name).st_mode) + self.assertEqual(p.num_fds() - num_fds, 2) + self.assertEqual(len(p.connections(kind='unix')), 2) + self.assertEqual(server.getsockname(), name) + self.assertEqual(client.getpeername(), name) + finally: + client.close() + server.close() + + def test_create_sockets(self): + with create_sockets() as socks: + fams = collections.defaultdict(int) + types = collections.defaultdict(int) + for s in socks: + fams[s.family] += 1 + # work around http://bugs.python.org/issue30204 + types[s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)] += 1 + self.assertGreaterEqual(fams[socket.AF_INET], 2) + if supports_ipv6(): + self.assertGreaterEqual(fams[socket.AF_INET6], 2) + if POSIX and HAS_CONNECTIONS_UNIX: + self.assertGreaterEqual(fams[socket.AF_UNIX], 2) + self.assertGreaterEqual(types[socket.SOCK_STREAM], 2) + self.assertGreaterEqual(types[socket.SOCK_DGRAM], 2) + + +class TestOtherUtils(unittest.TestCase): + + def test_is_namedtuple(self): + assert is_namedtuple(collections.namedtuple('foo', 'a b c')(1, 2, 3)) + assert not is_namedtuple(tuple()) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_osx.py b/server/www/packages/packages-windows/x86/psutil/tests/test_osx.py new file mode 100644 index 0000000..bcb2ba4 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_osx.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo 
Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""OSX specific tests.""" + +import os +import re +import time + +import psutil +from psutil import OSX +from psutil.tests import create_zombie_proc +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_BATTERY +from psutil.tests import MEMORY_TOLERANCE +from psutil.tests import reap_children +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import sh +from psutil.tests import unittest + + +PAGESIZE = os.sysconf("SC_PAGE_SIZE") if OSX else None + + +def sysctl(cmdline): + """Expects a sysctl command with an argument and parse the result + returning only the value of interest. + """ + out = sh(cmdline) + result = out.split()[1] + try: + return int(result) + except ValueError: + return result + + +def vm_stat(field): + """Wrapper around 'vm_stat' cmdline utility.""" + out = sh('vm_stat') + for line in out.split('\n'): + if field in line: + break + else: + raise ValueError("line not found") + return int(re.search(r'\d+', line).group(0)) * PAGESIZE + + +# http://code.activestate.com/recipes/578019/ +def human2bytes(s): + SYMBOLS = { + 'customary': ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'), + } + init = s + num = "" + while s and s[0:1].isdigit() or s[0:1] == '.': + num += s[0] + s = s[1:] + num = float(num) + letter = s.strip() + for name, sset in SYMBOLS.items(): + if letter in sset: + break + else: + if letter == 'k': + sset = SYMBOLS['customary'] + letter = letter.upper() + else: + raise ValueError("can't interpret %r" % init) + prefix = {sset[0]: 1} + for i, s in enumerate(sset[1:]): + prefix[s] = 1 << (i + 1) * 10 + return int(num * prefix[letter]) + + +@unittest.skipIf(not OSX, "OSX only") +class TestProcess(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess().pid + + @classmethod + def tearDownClass(cls): + reap_children() + + def test_process_create_time(self): + output = sh("ps -o lstart -p %s" % self.pid) + start_ps = output.replace('STARTED', '').strip() + hhmmss = start_ps.split(' ')[-2] + year = start_ps.split(' ')[-1] + start_psutil = psutil.Process(self.pid).create_time() + self.assertEqual( + hhmmss, + time.strftime("%H:%M:%S", time.localtime(start_psutil))) + self.assertEqual( + year, + time.strftime("%Y", time.localtime(start_psutil))) + + +@unittest.skipIf(not OSX, "OSX only") +class TestZombieProcessAPIs(unittest.TestCase): + + @classmethod + def setUpClass(cls): + zpid = create_zombie_proc() + cls.p = psutil.Process(zpid) + + @classmethod + def tearDownClass(cls): + reap_children(recursive=True) + + def test_pidtask_info(self): + self.assertEqual(self.p.status(), psutil.STATUS_ZOMBIE) + self.p.ppid() + self.p.uids() + self.p.gids() + self.p.terminal() + self.p.create_time() + + def test_exe(self): + self.assertRaises(psutil.ZombieProcess, self.p.exe) + + def test_cmdline(self): + self.assertRaises(psutil.ZombieProcess, self.p.cmdline) + + def test_environ(self): + self.assertRaises(psutil.ZombieProcess, self.p.environ) + + def test_cwd(self): + self.assertRaises(psutil.ZombieProcess, self.p.cwd) + + def test_memory_full_info(self): + self.assertRaises(psutil.ZombieProcess, self.p.memory_full_info) + + def test_cpu_times(self): + self.assertRaises(psutil.ZombieProcess, self.p.cpu_times) + + def test_num_ctx_switches(self): + self.assertRaises(psutil.ZombieProcess, self.p.num_ctx_switches) + + def 
test_num_threads(self):
+        self.assertRaises(psutil.ZombieProcess, self.p.num_threads)
+
+    def test_open_files(self):
+        self.assertRaises(psutil.ZombieProcess, self.p.open_files)
+
+    def test_connections(self):
+        self.assertRaises(psutil.ZombieProcess, self.p.connections)
+
+    def test_num_fds(self):
+        self.assertRaises(psutil.ZombieProcess, self.p.num_fds)
+
+    def test_threads(self):
+        self.assertRaises((psutil.ZombieProcess, psutil.AccessDenied),
+                          self.p.threads)
+
+    def test_memory_maps(self):
+        self.assertRaises(psutil.ZombieProcess, self.p.memory_maps)
+
+
+@unittest.skipIf(not OSX, "OSX only")
+class TestSystemAPIs(unittest.TestCase):
+
+    # --- disk
+
+    def test_disks(self):
+        # test psutil.disk_usage() and psutil.disk_partitions()
+        # against "df -a"
+        def df(path):
+            out = sh('df -k "%s"' % path).strip()
+            lines = out.split('\n')
+            lines.pop(0)
+            line = lines.pop(0)
+            dev, total, used, free = line.split()[:4]
+            if dev == 'none':
+                dev = ''
+            total = int(total) * 1024
+            used = int(used) * 1024
+            free = int(free) * 1024
+            return dev, total, used, free
+
+        for part in psutil.disk_partitions(all=False):
+            usage = psutil.disk_usage(part.mountpoint)
+            dev, total, used, free = df(part.mountpoint)
+            self.assertEqual(part.device, dev)
+            self.assertEqual(usage.total, total)
+            # 10 MB tolerance
+            if abs(usage.free - free) > 10 * 1024 * 1024:
+                self.fail("psutil=%s, df=%s" % (usage.free, free))
+            if abs(usage.used - used) > 10 * 1024 * 1024:
+                self.fail("psutil=%s, df=%s" % (usage.used, used))
+
+    # --- cpu
+
+    def test_cpu_count_logical(self):
+        num = sysctl("sysctl hw.logicalcpu")
+        self.assertEqual(num, psutil.cpu_count(logical=True))
+
+    def test_cpu_count_physical(self):
+        num = sysctl("sysctl hw.physicalcpu")
+        self.assertEqual(num, psutil.cpu_count(logical=False))
+
+    def test_cpu_freq(self):
+        freq = psutil.cpu_freq()
+        self.assertEqual(
+            freq.current * 1000 * 1000, sysctl("sysctl hw.cpufrequency"))
+        self.assertEqual(
+            freq.min * 1000 * 1000, sysctl("sysctl hw.cpufrequency_min"))
+        self.assertEqual(
+            freq.max * 1000 * 1000, sysctl("sysctl hw.cpufrequency_max"))
+
+    # --- virtual mem
+
+    def test_vmem_total(self):
+        sysctl_hwphymem = sysctl('sysctl hw.memsize')
+        self.assertEqual(sysctl_hwphymem, psutil.virtual_memory().total)
+
+    @retry_before_failing()
+    def test_vmem_free(self):
+        vmstat_val = vm_stat("free")
+        psutil_val = psutil.virtual_memory().free
+        self.assertAlmostEqual(psutil_val, vmstat_val, delta=MEMORY_TOLERANCE)
+
+    @retry_before_failing()
+    def test_vmem_available(self):
+        vmstat_val = vm_stat("inactive") + vm_stat("free")
+        psutil_val = psutil.virtual_memory().available
+        self.assertAlmostEqual(psutil_val, vmstat_val, delta=MEMORY_TOLERANCE)
+
+    @retry_before_failing()
+    def test_vmem_active(self):
+        vmstat_val = vm_stat("active")
+        psutil_val = psutil.virtual_memory().active
+        self.assertAlmostEqual(psutil_val, vmstat_val, delta=MEMORY_TOLERANCE)
+
+    @retry_before_failing()
+    def test_vmem_inactive(self):
+        vmstat_val = vm_stat("inactive")
+        psutil_val = psutil.virtual_memory().inactive
+        self.assertAlmostEqual(psutil_val, vmstat_val, delta=MEMORY_TOLERANCE)
+
+    @retry_before_failing()
+    def test_vmem_wired(self):
+        vmstat_val = vm_stat("wired")
+        psutil_val = psutil.virtual_memory().wired
+        self.assertAlmostEqual(psutil_val, vmstat_val, delta=MEMORY_TOLERANCE)
+
+    # --- swap mem
+
+    @retry_before_failing()
+    def test_swapmem_sin(self):
+        vmstat_val = vm_stat("Pageins")
+        psutil_val = psutil.swap_memory().sin
+        self.assertEqual(psutil_val, vmstat_val)
+
+
@retry_before_failing() + def test_swapmem_sout(self): + vmstat_val = vm_stat("Pageout") + psutil_val = psutil.swap_memory().sout + self.assertEqual(psutil_val, vmstat_val) + + # Not very reliable. + # def test_swapmem_total(self): + # out = sh('sysctl vm.swapusage') + # out = out.replace('vm.swapusage: ', '') + # total, used, free = re.findall('\d+.\d+\w', out) + # psutil_smem = psutil.swap_memory() + # self.assertEqual(psutil_smem.total, human2bytes(total)) + # self.assertEqual(psutil_smem.used, human2bytes(used)) + # self.assertEqual(psutil_smem.free, human2bytes(free)) + + # --- network + + def test_net_if_stats(self): + for name, stats in psutil.net_if_stats().items(): + try: + out = sh("ifconfig %s" % name) + except RuntimeError: + pass + else: + self.assertEqual(stats.isup, 'RUNNING' in out, msg=out) + self.assertEqual(stats.mtu, + int(re.findall(r'mtu (\d+)', out)[0])) + + # --- sensors_battery + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_sensors_battery(self): + out = sh("pmset -g batt") + percent = re.search("(\d+)%", out).group(1) + drawing_from = re.search("Now drawing from '([^']+)'", out).group(1) + power_plugged = drawing_from == "AC Power" + psutil_result = psutil.sensors_battery() + self.assertEqual(psutil_result.power_plugged, power_plugged) + self.assertEqual(psutil_result.percent, int(percent)) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_posix.py b/server/www/packages/packages-windows/x86/psutil/tests/test_posix.py new file mode 100644 index 0000000..c59f9a1 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_posix.py @@ -0,0 +1,419 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""POSIX specific tests.""" + +import datetime +import errno +import os +import re +import subprocess +import sys +import time + +import psutil +from psutil import AIX +from psutil import BSD +from psutil import LINUX +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil._compat import callable +from psutil._compat import PY3 +from psutil.tests import APPVEYOR +from psutil.tests import get_kernel_version +from psutil.tests import get_test_subprocess +from psutil.tests import mock +from psutil.tests import PYTHON_EXE +from psutil.tests import reap_children +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import sh +from psutil.tests import skip_on_access_denied +from psutil.tests import TRAVIS +from psutil.tests import unittest +from psutil.tests import wait_for_pid +from psutil.tests import which + + +def ps(cmd): + """Expects a ps command with a -o argument and parse the result + returning only the value of interest. + """ + if not LINUX: + cmd = cmd.replace(" --no-headers ", " ") + if SUNOS: + cmd = cmd.replace("-o start", "-o stime") + if AIX: + cmd = cmd.replace("-o rss", "-o rssize") + output = sh(cmd) + if not LINUX: + output = output.split('\n')[1].strip() + try: + return int(output) + except ValueError: + return output + +# ps "-o" field names differ wildly between platforms. +# "comm" means "only executable name" but is not available on BSD platforms. +# "args" means "command with all its arguments", and is also not available +# on BSD platforms. 
+# "command" is like "args" on most platforms, but like "comm" on AIX, +# and not available on SUNOS. +# so for the executable name we can use "comm" on Solaris and split "command" +# on other platforms. +# to get the cmdline (with args) we have to use "args" on AIX and +# Solaris, and can use "command" on all others. + + +def ps_name(pid): + field = "command" + if SUNOS: + field = "comm" + return ps("ps --no-headers -o %s -p %s" % (field, pid)).split(' ')[0] + + +def ps_args(pid): + field = "command" + if AIX or SUNOS: + field = "args" + return ps("ps --no-headers -o %s -p %s" % (field, pid)) + + +@unittest.skipIf(not POSIX, "POSIX only") +class TestProcess(unittest.TestCase): + """Compare psutil results against 'ps' command line utility (mainly).""" + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess([PYTHON_EXE, "-E", "-O"], + stdin=subprocess.PIPE).pid + wait_for_pid(cls.pid) + + @classmethod + def tearDownClass(cls): + reap_children() + + def test_ppid(self): + ppid_ps = ps("ps --no-headers -o ppid -p %s" % self.pid) + ppid_psutil = psutil.Process(self.pid).ppid() + self.assertEqual(ppid_ps, ppid_psutil) + + def test_uid(self): + uid_ps = ps("ps --no-headers -o uid -p %s" % self.pid) + uid_psutil = psutil.Process(self.pid).uids().real + self.assertEqual(uid_ps, uid_psutil) + + def test_gid(self): + gid_ps = ps("ps --no-headers -o rgid -p %s" % self.pid) + gid_psutil = psutil.Process(self.pid).gids().real + self.assertEqual(gid_ps, gid_psutil) + + def test_username(self): + username_ps = ps("ps --no-headers -o user -p %s" % self.pid) + username_psutil = psutil.Process(self.pid).username() + self.assertEqual(username_ps, username_psutil) + + def test_username_no_resolution(self): + # Emulate a case where the system can't resolve the uid to + # a username in which case psutil is supposed to return + # the stringified uid. + p = psutil.Process() + with mock.patch("psutil.pwd.getpwuid", side_effect=KeyError) as fun: + self.assertEqual(p.username(), str(p.uids().real)) + assert fun.called + + @skip_on_access_denied() + @retry_before_failing() + def test_rss_memory(self): + # give python interpreter some time to properly initialize + # so that the results are the same + time.sleep(0.1) + rss_ps = ps("ps --no-headers -o rss -p %s" % self.pid) + rss_psutil = psutil.Process(self.pid).memory_info()[0] / 1024 + self.assertEqual(rss_ps, rss_psutil) + + @skip_on_access_denied() + @retry_before_failing() + def test_vsz_memory(self): + # give python interpreter some time to properly initialize + # so that the results are the same + time.sleep(0.1) + vsz_ps = ps("ps --no-headers -o vsz -p %s" % self.pid) + vsz_psutil = psutil.Process(self.pid).memory_info()[1] / 1024 + self.assertEqual(vsz_ps, vsz_psutil) + + def test_name(self): + name_ps = ps_name(self.pid) + # remove path if there is any, from the command + name_ps = os.path.basename(name_ps).lower() + name_psutil = psutil.Process(self.pid).name().lower() + # ...because of how we calculate PYTHON_EXE; on OSX this may + # be "pythonX.Y". + name_ps = re.sub(r"\d.\d", "", name_ps) + name_psutil = re.sub(r"\d.\d", "", name_psutil) + self.assertEqual(name_ps, name_psutil) + + def test_name_long(self): + # On UNIX the kernel truncates the name to the first 15 + # characters. In such a case psutil tries to determine the + # full name from the cmdline. 
+ name = "long-program-name" + cmdline = ["long-program-name-extended", "foo", "bar"] + with mock.patch("psutil._psplatform.Process.name", + return_value=name): + with mock.patch("psutil._psplatform.Process.cmdline", + return_value=cmdline): + p = psutil.Process() + self.assertEqual(p.name(), "long-program-name-extended") + + def test_name_long_cmdline_ad_exc(self): + # Same as above but emulates a case where cmdline() raises + # AccessDenied in which case psutil is supposed to return + # the truncated name instead of crashing. + name = "long-program-name" + with mock.patch("psutil._psplatform.Process.name", + return_value=name): + with mock.patch("psutil._psplatform.Process.cmdline", + side_effect=psutil.AccessDenied(0, "")): + p = psutil.Process() + self.assertEqual(p.name(), "long-program-name") + + def test_name_long_cmdline_nsp_exc(self): + # Same as above but emulates a case where cmdline() raises NSP + # which is supposed to propagate. + name = "long-program-name" + with mock.patch("psutil._psplatform.Process.name", + return_value=name): + with mock.patch("psutil._psplatform.Process.cmdline", + side_effect=psutil.NoSuchProcess(0, "")): + p = psutil.Process() + self.assertRaises(psutil.NoSuchProcess, p.name) + + @unittest.skipIf(OSX or BSD, 'ps -o start not available') + def test_create_time(self): + time_ps = ps("ps --no-headers -o start -p %s" % self.pid).split(' ')[0] + time_psutil = psutil.Process(self.pid).create_time() + time_psutil_tstamp = datetime.datetime.fromtimestamp( + time_psutil).strftime("%H:%M:%S") + # sometimes ps shows the time rounded up instead of down, so we check + # for both possible values + round_time_psutil = round(time_psutil) + round_time_psutil_tstamp = datetime.datetime.fromtimestamp( + round_time_psutil).strftime("%H:%M:%S") + self.assertIn(time_ps, [time_psutil_tstamp, round_time_psutil_tstamp]) + + def test_exe(self): + ps_pathname = ps_name(self.pid) + psutil_pathname = psutil.Process(self.pid).exe() + try: + self.assertEqual(ps_pathname, psutil_pathname) + except AssertionError: + # certain platforms such as BSD are more accurate returning: + # "/usr/local/bin/python2.7" + # ...instead of: + # "/usr/local/bin/python" + # We do not want to consider this difference in accuracy + # an error. 
+ adjusted_ps_pathname = ps_pathname[:len(ps_pathname)] + self.assertEqual(ps_pathname, adjusted_ps_pathname) + + def test_cmdline(self): + ps_cmdline = ps_args(self.pid) + psutil_cmdline = " ".join(psutil.Process(self.pid).cmdline()) + self.assertEqual(ps_cmdline, psutil_cmdline) + + # On SUNOS "ps" reads niceness /proc/pid/psinfo which returns an + # incorrect value (20); the real deal is getpriority(2) which + # returns 0; psutil relies on it, see: + # https://github.com/giampaolo/psutil/issues/1082 + # AIX has the same issue + @unittest.skipIf(SUNOS, "not reliable on SUNOS") + @unittest.skipIf(AIX, "not reliable on AIX") + def test_nice(self): + ps_nice = ps("ps --no-headers -o nice -p %s" % self.pid) + psutil_nice = psutil.Process().nice() + self.assertEqual(ps_nice, psutil_nice) + + def test_num_fds(self): + # Note: this fails from time to time; I'm keen on thinking + # it doesn't mean something is broken + def call(p, attr): + args = () + attr = getattr(p, name, None) + if attr is not None and callable(attr): + if name == 'rlimit': + args = (psutil.RLIMIT_NOFILE,) + attr(*args) + else: + attr + + p = psutil.Process(os.getpid()) + failures = [] + ignored_names = ['terminate', 'kill', 'suspend', 'resume', 'nice', + 'send_signal', 'wait', 'children', 'as_dict', + 'memory_info_ex'] + if LINUX and get_kernel_version() < (2, 6, 36): + ignored_names.append('rlimit') + if LINUX and get_kernel_version() < (2, 6, 23): + ignored_names.append('num_ctx_switches') + for name in dir(psutil.Process): + if (name.startswith('_') or name in ignored_names): + continue + else: + try: + num1 = p.num_fds() + for x in range(2): + call(p, name) + num2 = p.num_fds() + except psutil.AccessDenied: + pass + else: + if abs(num2 - num1) > 1: + fail = "failure while processing Process.%s method " \ + "(before=%s, after=%s)" % (name, num1, num2) + failures.append(fail) + if failures: + self.fail('\n' + '\n'.join(failures)) + + +@unittest.skipIf(not POSIX, "POSIX only") +class TestSystemAPIs(unittest.TestCase): + """Test some system APIs.""" + + @retry_before_failing() + def test_pids(self): + # Note: this test might fail if the OS is starting/killing + # other processes in the meantime + if SUNOS or AIX: + cmd = ["ps", "-A", "-o", "pid"] + else: + cmd = ["ps", "ax", "-o", "pid"] + p = get_test_subprocess(cmd, stdout=subprocess.PIPE) + output = p.communicate()[0].strip() + assert p.poll() == 0 + if PY3: + output = str(output, sys.stdout.encoding) + pids_ps = [] + for line in output.split('\n')[1:]: + if line: + pid = int(line.split()[0].strip()) + pids_ps.append(pid) + # remove ps subprocess pid which is supposed to be dead in meantime + pids_ps.remove(p.pid) + pids_psutil = psutil.pids() + pids_ps.sort() + pids_psutil.sort() + + # on OSX and OPENBSD ps doesn't show pid 0 + if OSX or OPENBSD and 0 not in pids_ps: + pids_ps.insert(0, 0) + self.assertEqual(pids_ps, pids_psutil) + + # for some reason ifconfig -a does not report all interfaces + # returned by psutil + @unittest.skipIf(SUNOS, "unreliable on SUNOS") + @unittest.skipIf(TRAVIS, "unreliable on TRAVIS") + @unittest.skipIf(not which('ifconfig'), "no ifconfig cmd") + def test_nic_names(self): + output = sh("ifconfig -a") + for nic in psutil.net_io_counters(pernic=True).keys(): + for line in output.split(): + if line.startswith(nic): + break + else: + self.fail( + "couldn't find %s nic in 'ifconfig -a' output\n%s" % ( + nic, output)) + + # can't find users on APPVEYOR or TRAVIS + @unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(), + "unreliable on 
APPVEYOR or TRAVIS") + @retry_before_failing() + def test_users(self): + out = sh("who") + lines = out.split('\n') + users = [x.split()[0] for x in lines] + terminals = [x.split()[1] for x in lines] + self.assertEqual(len(users), len(psutil.users())) + for u in psutil.users(): + self.assertIn(u.name, users) + self.assertIn(u.terminal, terminals) + + def test_pid_exists_let_raise(self): + # According to "man 2 kill" possible error values for kill + # are (EINVAL, EPERM, ESRCH). Test that any other errno + # results in an exception. + with mock.patch("psutil._psposix.os.kill", + side_effect=OSError(errno.EBADF, "")) as m: + self.assertRaises(OSError, psutil._psposix.pid_exists, os.getpid()) + assert m.called + + def test_os_waitpid_let_raise(self): + # os.waitpid() is supposed to catch EINTR and ECHILD only. + # Test that any other errno results in an exception. + with mock.patch("psutil._psposix.os.waitpid", + side_effect=OSError(errno.EBADF, "")) as m: + self.assertRaises(OSError, psutil._psposix.wait_pid, os.getpid()) + assert m.called + + def test_os_waitpid_eintr(self): + # os.waitpid() is supposed to "retry" on EINTR. + with mock.patch("psutil._psposix.os.waitpid", + side_effect=OSError(errno.EINTR, "")) as m: + self.assertRaises( + psutil._psposix.TimeoutExpired, + psutil._psposix.wait_pid, os.getpid(), timeout=0.01) + assert m.called + + def test_os_waitpid_bad_ret_status(self): + # Simulate os.waitpid() returning a bad status. + with mock.patch("psutil._psposix.os.waitpid", + return_value=(1, -1)) as m: + self.assertRaises(ValueError, + psutil._psposix.wait_pid, os.getpid()) + assert m.called + + # AIX can return '-' in df output instead of numbers, e.g. for /proc + @unittest.skipIf(AIX, "unreliable on AIX") + def test_disk_usage(self): + def df(device): + out = sh("df -k %s" % device).strip() + line = out.split('\n')[1] + fields = line.split() + total = int(fields[1]) * 1024 + used = int(fields[2]) * 1024 + free = int(fields[3]) * 1024 + percent = float(fields[4].replace('%', '')) + return (total, used, free, percent) + + tolerance = 4 * 1024 * 1024 # 4MB + for part in psutil.disk_partitions(all=False): + usage = psutil.disk_usage(part.mountpoint) + try: + total, used, free, percent = df(part.device) + except RuntimeError as err: + # see: + # https://travis-ci.org/giampaolo/psutil/jobs/138338464 + # https://travis-ci.org/giampaolo/psutil/jobs/138343361 + err = str(err).lower() + if "no such file or directory" in err or \ + "raw devices not supported" in err or \ + "permission denied" in err: + continue + else: + raise + else: + self.assertAlmostEqual(usage.total, total, delta=tolerance) + self.assertAlmostEqual(usage.used, used, delta=tolerance) + self.assertAlmostEqual(usage.free, free, delta=tolerance) + self.assertAlmostEqual(usage.percent, percent, delta=1) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_process.py b/server/www/packages/packages-windows/x86/psutil/tests/test_process.py new file mode 100644 index 0000000..d5c2cd8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_process.py @@ -0,0 +1,1548 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Tests for psutil.Process class.""" + +import collections +import errno +import getpass +import os +import signal +import socket +import subprocess +import sys +import tempfile +import textwrap +import time +import types + +import psutil + +from psutil import AIX +from psutil import BSD +from psutil import LINUX +from psutil import NETBSD +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil import WINDOWS +from psutil._compat import long +from psutil._compat import PY3 +from psutil.tests import APPVEYOR +from psutil.tests import call_until +from psutil.tests import copyload_shared_lib +from psutil.tests import create_exe +from psutil.tests import create_proc_children_pair +from psutil.tests import create_zombie_proc +from psutil.tests import enum +from psutil.tests import get_test_subprocess +from psutil.tests import get_winver +from psutil.tests import HAS_CPU_AFFINITY +from psutil.tests import HAS_ENVIRON +from psutil.tests import HAS_IONICE +from psutil.tests import HAS_MEMORY_MAPS +from psutil.tests import HAS_PROC_CPU_NUM +from psutil.tests import HAS_PROC_IO_COUNTERS +from psutil.tests import HAS_RLIMIT +from psutil.tests import HAS_THREADS +from psutil.tests import mock +from psutil.tests import PYPY +from psutil.tests import PYTHON_EXE +from psutil.tests import reap_children +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import sh +from psutil.tests import skip_on_access_denied +from psutil.tests import skip_on_not_implemented +from psutil.tests import TESTFILE_PREFIX +from psutil.tests import TESTFN +from psutil.tests import ThreadTask +from psutil.tests import TRAVIS +from psutil.tests import unittest +from psutil.tests import wait_for_pid +from psutil.tests import WIN_VISTA + + +# =================================================================== +# --- psutil.Process class tests +# =================================================================== + +class TestProcess(unittest.TestCase): + """Tests for psutil.Process class.""" + + def setUp(self): + safe_rmpath(TESTFN) + + def tearDown(self): + reap_children() + + def test_pid(self): + p = psutil.Process() + self.assertEqual(p.pid, os.getpid()) + sproc = get_test_subprocess() + self.assertEqual(psutil.Process(sproc.pid).pid, sproc.pid) + with self.assertRaises(AttributeError): + p.pid = 33 + + def test_kill(self): + sproc = get_test_subprocess() + test_pid = sproc.pid + p = psutil.Process(test_pid) + p.kill() + sig = p.wait() + self.assertFalse(psutil.pid_exists(test_pid)) + if POSIX: + self.assertEqual(sig, -signal.SIGKILL) + + def test_terminate(self): + sproc = get_test_subprocess() + test_pid = sproc.pid + p = psutil.Process(test_pid) + p.terminate() + sig = p.wait() + self.assertFalse(psutil.pid_exists(test_pid)) + if POSIX: + self.assertEqual(sig, -signal.SIGTERM) + + def test_send_signal(self): + sig = signal.SIGKILL if POSIX else signal.SIGTERM + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.send_signal(sig) + exit_sig = p.wait() + self.assertFalse(psutil.pid_exists(p.pid)) + if POSIX: + self.assertEqual(exit_sig, -sig) + # + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.send_signal(sig) + with mock.patch('psutil.os.kill', + side_effect=OSError(errno.ESRCH, "")): + with self.assertRaises(psutil.NoSuchProcess): + p.send_signal(sig) + # + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + 
p.send_signal(sig) + with mock.patch('psutil.os.kill', + side_effect=OSError(errno.EPERM, "")): + with self.assertRaises(psutil.AccessDenied): + psutil.Process().send_signal(sig) + # Sending a signal to process with PID 0 is not allowed as + # it would affect every process in the process group of + # the calling process (os.getpid()) instead of PID 0"). + if 0 in psutil.pids(): + p = psutil.Process(0) + self.assertRaises(ValueError, p.send_signal, signal.SIGTERM) + + def test_wait(self): + # check exit code signal + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.kill() + code = p.wait() + if POSIX: + self.assertEqual(code, -signal.SIGKILL) + else: + self.assertEqual(code, signal.SIGTERM) + self.assertFalse(p.is_running()) + + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.terminate() + code = p.wait() + if POSIX: + self.assertEqual(code, -signal.SIGTERM) + else: + self.assertEqual(code, signal.SIGTERM) + self.assertFalse(p.is_running()) + + # check sys.exit() code + code = "import time, sys; time.sleep(0.01); sys.exit(5);" + sproc = get_test_subprocess([PYTHON_EXE, "-c", code]) + p = psutil.Process(sproc.pid) + self.assertEqual(p.wait(), 5) + self.assertFalse(p.is_running()) + + # Test wait() issued twice. + # It is not supposed to raise NSP when the process is gone. + # On UNIX this should return None, on Windows it should keep + # returning the exit code. + sproc = get_test_subprocess([PYTHON_EXE, "-c", code]) + p = psutil.Process(sproc.pid) + self.assertEqual(p.wait(), 5) + self.assertIn(p.wait(), (5, None)) + + # test timeout + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.name() + self.assertRaises(psutil.TimeoutExpired, p.wait, 0.01) + + # timeout < 0 not allowed + self.assertRaises(ValueError, p.wait, -1) + + def test_wait_non_children(self): + # Test wait() against a process which is not our direct + # child. + p1, p2 = create_proc_children_pair() + self.assertRaises(psutil.TimeoutExpired, p1.wait, 0.01) + self.assertRaises(psutil.TimeoutExpired, p2.wait, 0.01) + # We also terminate the direct child otherwise the + # grandchild will hang until the parent is gone. + p1.terminate() + p2.terminate() + ret1 = p1.wait() + ret2 = p2.wait() + if POSIX: + self.assertEqual(ret1, -signal.SIGTERM) + # For processes which are not our children we're supposed + # to get None. 
+ self.assertEqual(ret2, None) + else: + self.assertEqual(ret1, signal.SIGTERM) + self.assertEqual(ret1, signal.SIGTERM) + + def test_wait_timeout_0(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + self.assertRaises(psutil.TimeoutExpired, p.wait, 0) + p.kill() + stop_at = time.time() + 2 + while True: + try: + code = p.wait(0) + except psutil.TimeoutExpired: + if time.time() >= stop_at: + raise + else: + break + if POSIX: + self.assertEqual(code, -signal.SIGKILL) + else: + self.assertEqual(code, signal.SIGTERM) + self.assertFalse(p.is_running()) + + def test_cpu_percent(self): + p = psutil.Process() + p.cpu_percent(interval=0.001) + p.cpu_percent(interval=0.001) + for x in range(100): + percent = p.cpu_percent(interval=None) + self.assertIsInstance(percent, float) + self.assertGreaterEqual(percent, 0.0) + if not POSIX: + self.assertLessEqual(percent, 100.0) + else: + self.assertGreaterEqual(percent, 0.0) + with self.assertRaises(ValueError): + p.cpu_percent(interval=-1) + + def test_cpu_percent_numcpus_none(self): + # See: https://github.com/giampaolo/psutil/issues/1087 + with mock.patch('psutil.cpu_count', return_value=None) as m: + psutil.Process().cpu_percent() + assert m.called + + def test_cpu_times(self): + times = psutil.Process().cpu_times() + assert (times.user > 0.0) or (times.system > 0.0), times + assert (times.children_user >= 0.0), times + assert (times.children_system >= 0.0), times + # make sure returned values can be pretty printed with strftime + for name in times._fields: + time.strftime("%H:%M:%S", time.localtime(getattr(times, name))) + + def test_cpu_times_2(self): + user_time, kernel_time = psutil.Process().cpu_times()[:2] + utime, ktime = os.times()[:2] + + # Use os.times()[:2] as base values to compare our results + # using a tolerance of +/- 0.1 seconds. + # It will fail if the difference between the values is > 0.1s. + if (max([user_time, utime]) - min([user_time, utime])) > 0.1: + self.fail("expected: %s, found: %s" % (utime, user_time)) + + if (max([kernel_time, ktime]) - min([kernel_time, ktime])) > 0.1: + self.fail("expected: %s, found: %s" % (ktime, kernel_time)) + + @unittest.skipIf(not HAS_PROC_CPU_NUM, "not supported") + def test_cpu_num(self): + p = psutil.Process() + num = p.cpu_num() + self.assertGreaterEqual(num, 0) + if psutil.cpu_count() == 1: + self.assertEqual(num, 0) + self.assertIn(p.cpu_num(), range(psutil.cpu_count())) + + def test_create_time(self): + sproc = get_test_subprocess() + now = time.time() + p = psutil.Process(sproc.pid) + create_time = p.create_time() + + # Use time.time() as base value to compare our result using a + # tolerance of +/- 1 second. + # It will fail if the difference between the values is > 2s. 
+ difference = abs(create_time - now) + if difference > 2: + self.fail("expected: %s, found: %s, difference: %s" + % (now, create_time, difference)) + + # make sure returned value can be pretty printed with strftime + time.strftime("%Y %m %d %H:%M:%S", time.localtime(p.create_time())) + + @unittest.skipIf(not POSIX, 'POSIX only') + @unittest.skipIf(TRAVIS, 'not reliable on TRAVIS') + def test_terminal(self): + terminal = psutil.Process().terminal() + if sys.stdin.isatty() or sys.stdout.isatty(): + tty = os.path.realpath(sh('tty')) + self.assertEqual(terminal, tty) + else: + self.assertIsNone(terminal) + + @unittest.skipIf(not HAS_PROC_IO_COUNTERS, 'not supported') + @skip_on_not_implemented(only_if=LINUX) + def test_io_counters(self): + p = psutil.Process() + + # test reads + io1 = p.io_counters() + with open(PYTHON_EXE, 'rb') as f: + f.read() + io2 = p.io_counters() + if not BSD and not AIX: + self.assertGreater(io2.read_count, io1.read_count) + self.assertEqual(io2.write_count, io1.write_count) + if LINUX: + self.assertGreater(io2.read_chars, io1.read_chars) + self.assertEqual(io2.write_chars, io1.write_chars) + else: + self.assertGreaterEqual(io2.read_bytes, io1.read_bytes) + self.assertGreaterEqual(io2.write_bytes, io1.write_bytes) + + # test writes + io1 = p.io_counters() + with tempfile.TemporaryFile(prefix=TESTFILE_PREFIX) as f: + if PY3: + f.write(bytes("x" * 1000000, 'ascii')) + else: + f.write("x" * 1000000) + io2 = p.io_counters() + self.assertGreaterEqual(io2.write_count, io1.write_count) + self.assertGreaterEqual(io2.write_bytes, io1.write_bytes) + self.assertGreaterEqual(io2.read_count, io1.read_count) + self.assertGreaterEqual(io2.read_bytes, io1.read_bytes) + if LINUX: + self.assertGreater(io2.write_chars, io1.write_chars) + self.assertGreaterEqual(io2.read_chars, io1.read_chars) + + # sanity check + for i in range(len(io2)): + if BSD and i >= 2: + # On BSD read_bytes and write_bytes are always set to -1. 
+ continue + self.assertGreaterEqual(io2[i], 0) + self.assertGreaterEqual(io2[i], 0) + + @unittest.skipIf(not HAS_IONICE, "not supported") + @unittest.skipIf(WINDOWS and get_winver() < WIN_VISTA, 'not supported') + def test_ionice(self): + if LINUX: + from psutil import (IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE) + self.assertEqual(IOPRIO_CLASS_NONE, 0) + self.assertEqual(IOPRIO_CLASS_RT, 1) + self.assertEqual(IOPRIO_CLASS_BE, 2) + self.assertEqual(IOPRIO_CLASS_IDLE, 3) + p = psutil.Process() + try: + p.ionice(2) + ioclass, value = p.ionice() + if enum is not None: + self.assertIsInstance(ioclass, enum.IntEnum) + self.assertEqual(ioclass, 2) + self.assertEqual(value, 4) + # + p.ionice(3) + ioclass, value = p.ionice() + self.assertEqual(ioclass, 3) + self.assertEqual(value, 0) + # + p.ionice(2, 0) + ioclass, value = p.ionice() + self.assertEqual(ioclass, 2) + self.assertEqual(value, 0) + p.ionice(2, 7) + ioclass, value = p.ionice() + self.assertEqual(ioclass, 2) + self.assertEqual(value, 7) + finally: + p.ionice(IOPRIO_CLASS_NONE) + else: + p = psutil.Process() + original = p.ionice() + self.assertIsInstance(original, int) + try: + value = 0 # very low + if original == value: + value = 1 # low + p.ionice(value) + self.assertEqual(p.ionice(), value) + finally: + p.ionice(original) + + @unittest.skipIf(not HAS_IONICE, "not supported") + @unittest.skipIf(WINDOWS and get_winver() < WIN_VISTA, 'not supported') + def test_ionice_errs(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + if LINUX: + self.assertRaises(ValueError, p.ionice, 2, 10) + self.assertRaises(ValueError, p.ionice, 2, -1) + self.assertRaises(ValueError, p.ionice, 4) + self.assertRaises(TypeError, p.ionice, 2, "foo") + self.assertRaisesRegex( + ValueError, "can't specify value with IOPRIO_CLASS_NONE", + p.ionice, psutil.IOPRIO_CLASS_NONE, 1) + self.assertRaisesRegex( + ValueError, "can't specify value with IOPRIO_CLASS_IDLE", + p.ionice, psutil.IOPRIO_CLASS_IDLE, 1) + self.assertRaisesRegex( + ValueError, "'ioclass' argument must be specified", + p.ionice, value=1) + else: + self.assertRaises(ValueError, p.ionice, 3) + self.assertRaises(TypeError, p.ionice, 2, 1) + + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_get(self): + import resource + p = psutil.Process(os.getpid()) + names = [x for x in dir(psutil) if x.startswith('RLIMIT')] + assert names, names + for name in names: + value = getattr(psutil, name) + self.assertGreaterEqual(value, 0) + if name in dir(resource): + self.assertEqual(value, getattr(resource, name)) + # XXX - On PyPy RLIMIT_INFINITY returned by + # resource.getrlimit() is reported as a very big long + # number instead of -1. It looks like a bug with PyPy. + if PYPY: + continue + self.assertEqual(p.rlimit(value), resource.getrlimit(value)) + else: + ret = p.rlimit(value) + self.assertEqual(len(ret), 2) + self.assertGreaterEqual(ret[0], -1) + self.assertGreaterEqual(ret[1], -1) + + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_set(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.rlimit(psutil.RLIMIT_NOFILE, (5, 5)) + self.assertEqual(p.rlimit(psutil.RLIMIT_NOFILE), (5, 5)) + # If pid is 0 prlimit() applies to the calling process and + # we don't want that. 
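+ # prlimit(2) interprets pid 0 as "the calling process", so the wrapper is
+ # expected to reject it rather than silently altering our own limits, e.g.
+ # (illustrative; the exact error message is platform specific):
+ # >>> psutil._psplatform.Process(0).rlimit(psutil.RLIMIT_NOFILE)
+ # ValueError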
+ with self.assertRaises(ValueError): + psutil._psplatform.Process(0).rlimit(0) + with self.assertRaises(ValueError): + p.rlimit(psutil.RLIMIT_NOFILE, (5, 5, 5)) + + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit(self): + p = psutil.Process() + soft, hard = p.rlimit(psutil.RLIMIT_FSIZE) + try: + p.rlimit(psutil.RLIMIT_FSIZE, (1024, hard)) + with open(TESTFN, "wb") as f: + f.write(b"X" * 1024) + # write() or flush() doesn't always cause the exception + # but close() will. + with self.assertRaises(IOError) as exc: + with open(TESTFN, "wb") as f: + f.write(b"X" * 1025) + self.assertEqual(exc.exception.errno if PY3 else exc.exception[0], + errno.EFBIG) + finally: + p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard)) + self.assertEqual(p.rlimit(psutil.RLIMIT_FSIZE), (soft, hard)) + + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_infinity(self): + # First set a limit, then re-set it by specifying INFINITY + # and assume we overridden the previous limit. + p = psutil.Process() + soft, hard = p.rlimit(psutil.RLIMIT_FSIZE) + try: + p.rlimit(psutil.RLIMIT_FSIZE, (1024, hard)) + p.rlimit(psutil.RLIMIT_FSIZE, (psutil.RLIM_INFINITY, hard)) + with open(TESTFN, "wb") as f: + f.write(b"X" * 2048) + finally: + p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard)) + self.assertEqual(p.rlimit(psutil.RLIMIT_FSIZE), (soft, hard)) + + @unittest.skipIf(not HAS_RLIMIT, "not supported") + def test_rlimit_infinity_value(self): + # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really + # big number on a platform with large file support. On these + # platforms we need to test that the get/setrlimit functions + # properly convert the number to a C long long and that the + # conversion doesn't raise an error. + p = psutil.Process() + soft, hard = p.rlimit(psutil.RLIMIT_FSIZE) + self.assertEqual(psutil.RLIM_INFINITY, hard) + p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard)) + + def test_num_threads(self): + # on certain platforms such as Linux we might test for exact + # thread number, since we always have with 1 thread per process, + # but this does not apply across all platforms (OSX, Windows) + p = psutil.Process() + if OPENBSD: + try: + step1 = p.num_threads() + except psutil.AccessDenied: + raise unittest.SkipTest("on OpenBSD this requires root access") + else: + step1 = p.num_threads() + + with ThreadTask(): + step2 = p.num_threads() + self.assertEqual(step2, step1 + 1) + + @unittest.skipIf(not WINDOWS, 'WINDOWS only') + def test_num_handles(self): + # a better test is done later into test/_windows.py + p = psutil.Process() + self.assertGreater(p.num_handles(), 0) + + @unittest.skipIf(not HAS_THREADS, 'not supported') + def test_threads(self): + p = psutil.Process() + if OPENBSD: + try: + step1 = p.threads() + except psutil.AccessDenied: + raise unittest.SkipTest("on OpenBSD this requires root access") + else: + step1 = p.threads() + + with ThreadTask(): + step2 = p.threads() + self.assertEqual(len(step2), len(step1) + 1) + # on Linux, first thread id is supposed to be this process + if LINUX: + self.assertEqual(step2[0].id, os.getpid()) + athread = step2[0] + # test named tuple + self.assertEqual(athread.id, athread[0]) + self.assertEqual(athread.user_time, athread[1]) + self.assertEqual(athread.system_time, athread[2]) + + @retry_before_failing() + @skip_on_access_denied(only_if=OSX) + @unittest.skipIf(not HAS_THREADS, 'not supported') + def test_threads_2(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + if OPENBSD: + try: + p.threads() + except 
psutil.AccessDenied: + raise unittest.SkipTest( + "on OpenBSD this requires root access") + self.assertAlmostEqual( + p.cpu_times().user, + sum([x.user_time for x in p.threads()]), delta=0.1) + self.assertAlmostEqual( + p.cpu_times().system, + sum([x.system_time for x in p.threads()]), delta=0.1) + + def test_memory_info(self): + p = psutil.Process() + + # step 1 - get a base value to compare our results + rss1, vms1 = p.memory_info()[:2] + percent1 = p.memory_percent() + self.assertGreater(rss1, 0) + self.assertGreater(vms1, 0) + + # step 2 - allocate some memory + memarr = [None] * 1500000 + + rss2, vms2 = p.memory_info()[:2] + percent2 = p.memory_percent() + + # step 3 - make sure that the memory usage bumped up + self.assertGreater(rss2, rss1) + self.assertGreaterEqual(vms2, vms1) # vms might be equal + self.assertGreater(percent2, percent1) + del memarr + + if WINDOWS: + mem = p.memory_info() + self.assertEqual(mem.rss, mem.wset) + self.assertEqual(mem.vms, mem.pagefile) + + mem = p.memory_info() + for name in mem._fields: + self.assertGreaterEqual(getattr(mem, name), 0) + + def test_memory_full_info(self): + total = psutil.virtual_memory().total + mem = psutil.Process().memory_full_info() + for name in mem._fields: + value = getattr(mem, name) + self.assertGreaterEqual(value, 0, msg=(name, value)) + self.assertLessEqual(value, total, msg=(name, value, total)) + if LINUX or WINDOWS or OSX: + self.assertGreaterEqual(mem.uss, 0) + if LINUX: + self.assertGreaterEqual(mem.pss, 0) + self.assertGreaterEqual(mem.swap, 0) + + @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported") + def test_memory_maps(self): + p = psutil.Process() + maps = p.memory_maps() + paths = [x for x in maps] + self.assertEqual(len(paths), len(set(paths))) + ext_maps = p.memory_maps(grouped=False) + + for nt in maps: + if not nt.path.startswith('['): + assert os.path.isabs(nt.path), nt.path + if POSIX: + try: + assert os.path.exists(nt.path) or \ + os.path.islink(nt.path), nt.path + except AssertionError: + if not LINUX: + raise + else: + # https://github.com/giampaolo/psutil/issues/759 + with open('/proc/self/smaps') as f: + data = f.read() + if "%s (deleted)" % nt.path not in data: + raise + else: + # XXX - On Windows we have this strange behavior with + # 64 bit dlls: they are visible via explorer but cannot + # be accessed via os.stat() (wtf?). + if '64' not in os.path.basename(nt.path): + assert os.path.exists(nt.path), nt.path + for nt in ext_maps: + for fname in nt._fields: + value = getattr(nt, fname) + if fname == 'path': + continue + elif fname in ('addr', 'perms'): + assert value, value + else: + self.assertIsInstance(value, (int, long)) + assert value >= 0, value + + @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported") + def test_memory_maps_lists_lib(self): + # Make sure a newly loaded shared lib is listed. 
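+ # memory_maps() yields one namedtuple per mapped region, so the path of a
+ # freshly dlopen()ed library should show up, e.g. (path is illustrative only):
+ # >>> [m.path for m in psutil.Process().memory_maps()][:1]
+ # ['/usr/lib/x86_64-linux-gnu/libc-2.27.so']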
+ with copyload_shared_lib() as path: + def normpath(p): + return os.path.realpath(os.path.normcase(p)) + libpaths = [normpath(x.path) + for x in psutil.Process().memory_maps()] + self.assertIn(normpath(path), libpaths) + + def test_memory_percent(self): + p = psutil.Process() + ret = p.memory_percent() + assert 0 <= ret <= 100, ret + ret = p.memory_percent(memtype='vms') + assert 0 <= ret <= 100, ret + assert 0 <= ret <= 100, ret + self.assertRaises(ValueError, p.memory_percent, memtype="?!?") + if LINUX or OSX or WINDOWS: + ret = p.memory_percent(memtype='uss') + assert 0 <= ret <= 100, ret + assert 0 <= ret <= 100, ret + + def test_is_running(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + assert p.is_running() + assert p.is_running() + p.kill() + p.wait() + assert not p.is_running() + assert not p.is_running() + + def test_exe(self): + sproc = get_test_subprocess() + exe = psutil.Process(sproc.pid).exe() + try: + self.assertEqual(exe, PYTHON_EXE) + except AssertionError: + if WINDOWS and len(exe) == len(PYTHON_EXE): + # on Windows we don't care about case sensitivity + normcase = os.path.normcase + self.assertEqual(normcase(exe), normcase(PYTHON_EXE)) + else: + # certain platforms such as BSD are more accurate returning: + # "/usr/local/bin/python2.7" + # ...instead of: + # "/usr/local/bin/python" + # We do not want to consider this difference in accuracy + # an error. + ver = "%s.%s" % (sys.version_info[0], sys.version_info[1]) + try: + self.assertEqual(exe.replace(ver, ''), + PYTHON_EXE.replace(ver, '')) + except AssertionError: + # Tipically OSX. Really not sure what to do here. + pass + + out = sh([exe, "-c", "import os; print('hey')"]) + self.assertEqual(out, 'hey') + + def test_cmdline(self): + cmdline = [PYTHON_EXE, "-c", "import time; time.sleep(60)"] + sproc = get_test_subprocess(cmdline) + try: + self.assertEqual(' '.join(psutil.Process(sproc.pid).cmdline()), + ' '.join(cmdline)) + except AssertionError: + # XXX - most of the times the underlying sysctl() call on Net + # and Open BSD returns a truncated string. + # Also /proc/pid/cmdline behaves the same so it looks + # like this is a kernel bug. 
+ # XXX - AIX truncates long arguments in /proc/pid/cmdline + if NETBSD or OPENBSD or AIX: + self.assertEqual( + psutil.Process(sproc.pid).cmdline()[0], PYTHON_EXE) + else: + raise + + def test_name(self): + sproc = get_test_subprocess(PYTHON_EXE) + name = psutil.Process(sproc.pid).name().lower() + pyexe = os.path.basename(os.path.realpath(sys.executable)).lower() + assert pyexe.startswith(name), (pyexe, name) + + # XXX + @unittest.skipIf(SUNOS, "broken on SUNOS") + @unittest.skipIf(AIX, "broken on AIX") + def test_prog_w_funky_name(self): + # Test that name(), exe() and cmdline() correctly handle programs + # with funky chars such as spaces and ")", see: + # https://github.com/giampaolo/psutil/issues/628 + + def rm(): + # Try to limit occasional failures on Appveyor: + # https://ci.appveyor.com/project/giampaolo/psutil/build/1350/ + # job/lbo3bkju55le850n + try: + safe_rmpath(funky_path) + except OSError: + pass + + funky_path = TESTFN + 'foo bar )' + create_exe(funky_path) + self.addCleanup(rm) + cmdline = [funky_path, "-c", + "import time; [time.sleep(0.01) for x in range(3000)];" + "arg1", "arg2", "", "arg3", ""] + sproc = get_test_subprocess(cmdline) + p = psutil.Process(sproc.pid) + # ...in order to try to prevent occasional failures on travis + if TRAVIS: + wait_for_pid(p.pid) + self.assertEqual(p.cmdline(), cmdline) + self.assertEqual(p.name(), os.path.basename(funky_path)) + self.assertEqual(os.path.normcase(p.exe()), + os.path.normcase(funky_path)) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_uids(self): + p = psutil.Process() + real, effective, saved = p.uids() + # os.getuid() refers to "real" uid + self.assertEqual(real, os.getuid()) + # os.geteuid() refers to "effective" uid + self.assertEqual(effective, os.geteuid()) + # No such thing as os.getsuid() ("saved" uid), but starting + # from python 2.7 we have os.getresuid() which returns all + # of them. + if hasattr(os, "getresuid"): + self.assertEqual(os.getresuid(), p.uids()) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_gids(self): + p = psutil.Process() + real, effective, saved = p.gids() + # os.getuid() refers to "real" uid + self.assertEqual(real, os.getgid()) + # os.geteuid() refers to "effective" uid + self.assertEqual(effective, os.getegid()) + # No such thing as os.getsgid() ("saved" gid), but starting + # from python 2.7 we have os.getresgid() which returns all + # of them. 
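+ # Illustrative output (ids are examples only):
+ # >>> psutil.Process().gids()
+ # pgids(real=1000, effective=1000, saved=1000)
+ # >>> os.getresgid()
+ # (1000, 1000, 1000)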
+ if hasattr(os, "getresuid"): + self.assertEqual(os.getresgid(), p.gids()) + + def test_nice(self): + p = psutil.Process() + self.assertRaises(TypeError, p.nice, "str") + if WINDOWS: + try: + init = p.nice() + if sys.version_info > (3, 4): + self.assertIsInstance(init, enum.IntEnum) + else: + self.assertIsInstance(init, int) + self.assertEqual(init, psutil.NORMAL_PRIORITY_CLASS) + p.nice(psutil.HIGH_PRIORITY_CLASS) + self.assertEqual(p.nice(), psutil.HIGH_PRIORITY_CLASS) + p.nice(psutil.NORMAL_PRIORITY_CLASS) + self.assertEqual(p.nice(), psutil.NORMAL_PRIORITY_CLASS) + finally: + p.nice(psutil.NORMAL_PRIORITY_CLASS) + else: + first_nice = p.nice() + try: + if hasattr(os, "getpriority"): + self.assertEqual( + os.getpriority(os.PRIO_PROCESS, os.getpid()), p.nice()) + p.nice(1) + self.assertEqual(p.nice(), 1) + if hasattr(os, "getpriority"): + self.assertEqual( + os.getpriority(os.PRIO_PROCESS, os.getpid()), p.nice()) + # XXX - going back to previous nice value raises + # AccessDenied on OSX + if not OSX: + p.nice(0) + self.assertEqual(p.nice(), 0) + except psutil.AccessDenied: + pass + finally: + try: + p.nice(first_nice) + except psutil.AccessDenied: + pass + + def test_status(self): + p = psutil.Process() + self.assertEqual(p.status(), psutil.STATUS_RUNNING) + + def test_username(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + username = p.username() + if WINDOWS: + domain, username = username.split('\\') + self.assertEqual(username, getpass.getuser()) + if 'USERDOMAIN' in os.environ: + self.assertEqual(domain, os.environ['USERDOMAIN']) + else: + self.assertEqual(username, getpass.getuser()) + + def test_cwd(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + self.assertEqual(p.cwd(), os.getcwd()) + + def test_cwd_2(self): + cmd = [PYTHON_EXE, "-c", + "import os, time; os.chdir('..'); time.sleep(60)"] + sproc = get_test_subprocess(cmd) + p = psutil.Process(sproc.pid) + call_until(p.cwd, "ret == os.path.dirname(os.getcwd())") + + @unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported') + def test_cpu_affinity(self): + p = psutil.Process() + initial = p.cpu_affinity() + assert initial, initial + self.addCleanup(p.cpu_affinity, initial) + + if hasattr(os, "sched_getaffinity"): + self.assertEqual(initial, list(os.sched_getaffinity(p.pid))) + self.assertEqual(len(initial), len(set(initial))) + + all_cpus = list(range(len(psutil.cpu_percent(percpu=True)))) + # Work around travis failure: + # https://travis-ci.org/giampaolo/psutil/builds/284173194 + for n in all_cpus if not TRAVIS else initial: + p.cpu_affinity([n]) + self.assertEqual(p.cpu_affinity(), [n]) + if hasattr(os, "sched_getaffinity"): + self.assertEqual(p.cpu_affinity(), + list(os.sched_getaffinity(p.pid))) + # also test num_cpu() + if hasattr(p, "num_cpu"): + self.assertEqual(p.cpu_affinity()[0], p.num_cpu()) + + # [] is an alias for "all eligible CPUs"; on Linux this may + # not be equal to all available CPUs, see: + # https://github.com/giampaolo/psutil/issues/956 + p.cpu_affinity([]) + if LINUX: + self.assertEqual(p.cpu_affinity(), p._proc._get_eligible_cpus()) + else: + self.assertEqual(p.cpu_affinity(), all_cpus) + if hasattr(os, "sched_getaffinity"): + self.assertEqual(p.cpu_affinity(), + list(os.sched_getaffinity(p.pid))) + # + self.assertRaises(TypeError, p.cpu_affinity, 1) + p.cpu_affinity(initial) + # it should work with all iterables, not only lists + p.cpu_affinity(set(all_cpus)) + p.cpu_affinity(tuple(all_cpus)) + + # TODO: temporary, see: 
https://github.com/MacPython/psutil/issues/1 + @unittest.skipIf(LINUX, "temporary") + @unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported') + def test_cpu_affinity_errs(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + invalid_cpu = [len(psutil.cpu_times(percpu=True)) + 10] + self.assertRaises(ValueError, p.cpu_affinity, invalid_cpu) + self.assertRaises(ValueError, p.cpu_affinity, range(10000, 11000)) + self.assertRaises(TypeError, p.cpu_affinity, [0, "1"]) + self.assertRaises(ValueError, p.cpu_affinity, [0, -1]) + + # TODO: #595 + @unittest.skipIf(BSD, "broken on BSD") + # can't find any process file on Appveyor + @unittest.skipIf(APPVEYOR, "unreliable on APPVEYOR") + def test_open_files(self): + # current process + p = psutil.Process() + files = p.open_files() + self.assertFalse(TESTFN in files) + with open(TESTFN, 'wb') as f: + f.write(b'x' * 1024) + f.flush() + # give the kernel some time to see the new file + files = call_until(p.open_files, "len(ret) != %i" % len(files)) + for file in files: + if file.path == TESTFN: + if LINUX: + self.assertEqual(file.position, 1024) + break + else: + self.fail("no file found; files=%s" % repr(files)) + for file in files: + assert os.path.isfile(file.path), file + + # another process + cmdline = "import time; f = open(r'%s', 'r'); time.sleep(60);" % TESTFN + sproc = get_test_subprocess([PYTHON_EXE, "-c", cmdline]) + p = psutil.Process(sproc.pid) + + for x in range(100): + filenames = [x.path for x in p.open_files()] + if TESTFN in filenames: + break + time.sleep(.01) + else: + self.assertIn(TESTFN, filenames) + for file in filenames: + assert os.path.isfile(file), file + + # TODO: #595 + @unittest.skipIf(BSD, "broken on BSD") + # can't find any process file on Appveyor + @unittest.skipIf(APPVEYOR, "unreliable on APPVEYOR") + def test_open_files_2(self): + # test fd and path fields + with open(TESTFN, 'w') as fileobj: + p = psutil.Process() + for file in p.open_files(): + if file.path == fileobj.name or file.fd == fileobj.fileno(): + break + else: + self.fail("no file found; files=%s" % repr(p.open_files())) + self.assertEqual(file.path, fileobj.name) + if WINDOWS: + self.assertEqual(file.fd, -1) + else: + self.assertEqual(file.fd, fileobj.fileno()) + # test positions + ntuple = p.open_files()[0] + self.assertEqual(ntuple[0], ntuple.path) + self.assertEqual(ntuple[1], ntuple.fd) + # test file is gone + self.assertNotIn(fileobj.name, p.open_files()) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_num_fds(self): + p = psutil.Process() + start = p.num_fds() + file = open(TESTFN, 'w') + self.addCleanup(file.close) + self.assertEqual(p.num_fds(), start + 1) + sock = socket.socket() + self.addCleanup(sock.close) + self.assertEqual(p.num_fds(), start + 2) + file.close() + sock.close() + self.assertEqual(p.num_fds(), start) + + @skip_on_not_implemented(only_if=LINUX) + @unittest.skipIf(OPENBSD or NETBSD, "not reliable on OPENBSD & NETBSD") + def test_num_ctx_switches(self): + p = psutil.Process() + before = sum(p.num_ctx_switches()) + for x in range(500000): + after = sum(p.num_ctx_switches()) + if after > before: + return + self.fail("num ctx switches still the same after 50.000 iterations") + + def test_ppid(self): + if hasattr(os, 'getppid'): + self.assertEqual(psutil.Process().ppid(), os.getppid()) + this_parent = os.getpid() + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + self.assertEqual(p.ppid(), this_parent) + # no other process is supposed to have us as parent + reap_children(recursive=True) + if 
APPVEYOR: + # Occasional failures, see: + # https://ci.appveyor.com/project/giampaolo/psutil/build/ + # job/0hs623nenj7w4m33 + return + for p in psutil.process_iter(): + if p.pid == sproc.pid: + continue + # XXX: sometimes this fails on Windows; not sure why. + self.assertNotEqual(p.ppid(), this_parent, msg=p) + + def test_parent(self): + this_parent = os.getpid() + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + self.assertEqual(p.parent().pid, this_parent) + + def test_parent_disappeared(self): + # Emulate a case where the parent process disappeared. + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + with mock.patch("psutil.Process", + side_effect=psutil.NoSuchProcess(0, 'foo')): + self.assertIsNone(p.parent()) + + def test_children(self): + p = psutil.Process() + self.assertEqual(p.children(), []) + self.assertEqual(p.children(recursive=True), []) + # On Windows we set the flag to 0 in order to cancel out the + # CREATE_NO_WINDOW flag (enabled by default) which creates + # an extra "conhost.exe" child. + sproc = get_test_subprocess(creationflags=0) + children1 = p.children() + children2 = p.children(recursive=True) + for children in (children1, children2): + self.assertEqual(len(children), 1) + self.assertEqual(children[0].pid, sproc.pid) + self.assertEqual(children[0].ppid(), os.getpid()) + + def test_children_recursive(self): + # Test children() against two sub processes, p1 and p2, where + # p1 (our child) spawned p2 (our grandchild). + p1, p2 = create_proc_children_pair() + p = psutil.Process() + self.assertEqual(p.children(), [p1]) + self.assertEqual(p.children(recursive=True), [p1, p2]) + # If the intermediate process is gone there's no way for + # children() to recursively find it. + p1.terminate() + p1.wait() + self.assertEqual(p.children(recursive=True), []) + + def test_children_duplicates(self): + # find the process which has the highest number of children + table = collections.defaultdict(int) + for p in psutil.process_iter(): + try: + table[p.ppid()] += 1 + except psutil.Error: + pass + # this is the one, now let's make sure there are no duplicates + pid = sorted(table.items(), key=lambda x: x[1])[-1][0] + p = psutil.Process(pid) + try: + c = p.children(recursive=True) + except psutil.AccessDenied: # windows + pass + else: + self.assertEqual(len(c), len(set(c))) + + def test_suspend_resume(self): + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.suspend() + for x in range(100): + if p.status() == psutil.STATUS_STOPPED: + break + time.sleep(0.01) + p.resume() + self.assertNotEqual(p.status(), psutil.STATUS_STOPPED) + + def test_invalid_pid(self): + self.assertRaises(TypeError, psutil.Process, "1") + self.assertRaises(ValueError, psutil.Process, -1) + + def test_as_dict(self): + p = psutil.Process() + d = p.as_dict(attrs=['exe', 'name']) + self.assertEqual(sorted(d.keys()), ['exe', 'name']) + + p = psutil.Process(min(psutil.pids())) + d = p.as_dict(attrs=['connections'], ad_value='foo') + if not isinstance(d['connections'], list): + self.assertEqual(d['connections'], 'foo') + + # Test ad_value is set on AccessDenied. + with mock.patch('psutil.Process.nice', create=True, + side_effect=psutil.AccessDenied): + self.assertEqual( + p.as_dict(attrs=["nice"], ad_value=1), {"nice": 1}) + + # Test that NoSuchProcess bubbles up. 
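+ # For reference, an unpatched call looks like this (values illustrative):
+ # >>> p.as_dict(attrs=["nice"], ad_value=None)
+ # {'nice': 0}
+ # With Process.nice patched to raise NoSuchProcess, the as_dict() call below
+ # must propagate the exception instead of swallowing it.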
+ with mock.patch('psutil.Process.nice', create=True, + side_effect=psutil.NoSuchProcess(p.pid, "name")): + self.assertRaises( + psutil.NoSuchProcess, p.as_dict, attrs=["nice"]) + + # Test that ZombieProcess is swallowed. + with mock.patch('psutil.Process.nice', create=True, + side_effect=psutil.ZombieProcess(p.pid, "name")): + self.assertEqual( + p.as_dict(attrs=["nice"], ad_value="foo"), {"nice": "foo"}) + + # By default APIs raising NotImplementedError are + # supposed to be skipped. + with mock.patch('psutil.Process.nice', create=True, + side_effect=NotImplementedError): + d = p.as_dict() + self.assertNotIn('nice', list(d.keys())) + # ...unless the user explicitly asked for some attr. + with self.assertRaises(NotImplementedError): + p.as_dict(attrs=["nice"]) + + # errors + with self.assertRaises(TypeError): + p.as_dict('name') + with self.assertRaises(ValueError): + p.as_dict(['foo']) + with self.assertRaises(ValueError): + p.as_dict(['foo', 'bar']) + + def test_oneshot(self): + with mock.patch("psutil._psplatform.Process.cpu_times") as m: + p = psutil.Process() + with p.oneshot(): + p.cpu_times() + p.cpu_times() + self.assertEqual(m.call_count, 1) + + with mock.patch("psutil._psplatform.Process.cpu_times") as m: + p.cpu_times() + p.cpu_times() + self.assertEqual(m.call_count, 2) + + def test_oneshot_twice(self): + # Test the case where the ctx manager is __enter__ed twice. + # The second __enter__ is supposed to resut in a NOOP. + with mock.patch("psutil._psplatform.Process.cpu_times") as m1: + with mock.patch("psutil._psplatform.Process.oneshot_enter") as m2: + p = psutil.Process() + with p.oneshot(): + p.cpu_times() + p.cpu_times() + with p.oneshot(): + p.cpu_times() + p.cpu_times() + self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 1) + + with mock.patch("psutil._psplatform.Process.cpu_times") as m: + p.cpu_times() + p.cpu_times() + self.assertEqual(m.call_count, 2) + + def test_halfway_terminated_process(self): + # Test that NoSuchProcess exception gets raised in case the + # process dies after we create the Process object. 
+ # Example: + # >>> proc = Process(1234) + # >>> time.sleep(2) # time-consuming task, process dies in meantime + # >>> proc.name() + # Refers to Issue #15 + sproc = get_test_subprocess() + p = psutil.Process(sproc.pid) + p.terminate() + p.wait() + if WINDOWS: + call_until(psutil.pids, "%s not in ret" % p.pid) + self.assertFalse(p.is_running()) + # self.assertFalse(p.pid in psutil.pids(), msg="retcode = %s" % + # retcode) + + excluded_names = ['pid', 'is_running', 'wait', 'create_time', + 'oneshot', 'memory_info_ex'] + if LINUX and not HAS_RLIMIT: + excluded_names.append('rlimit') + for name in dir(p): + if (name.startswith('_') or + name in excluded_names): + continue + try: + meth = getattr(p, name) + # get/set methods + if name == 'nice': + if POSIX: + ret = meth(1) + else: + ret = meth(psutil.NORMAL_PRIORITY_CLASS) + elif name == 'ionice': + ret = meth() + ret = meth(2) + elif name == 'rlimit': + ret = meth(psutil.RLIMIT_NOFILE) + ret = meth(psutil.RLIMIT_NOFILE, (5, 5)) + elif name == 'cpu_affinity': + ret = meth() + ret = meth([0]) + elif name == 'send_signal': + ret = meth(signal.SIGTERM) + else: + ret = meth() + except psutil.ZombieProcess: + self.fail("ZombieProcess for %r was not supposed to happen" % + name) + except psutil.NoSuchProcess: + pass + except psutil.AccessDenied: + if OPENBSD and name in ('threads', 'num_threads'): + pass + else: + raise + except NotImplementedError: + pass + else: + self.fail( + "NoSuchProcess exception not raised for %r, retval=%s" % ( + name, ret)) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_zombie_process(self): + def succeed_or_zombie_p_exc(fun, *args, **kwargs): + try: + return fun(*args, **kwargs) + except (psutil.ZombieProcess, psutil.AccessDenied): + pass + + zpid = create_zombie_proc() + self.addCleanup(reap_children, recursive=True) + # A zombie process should always be instantiable + zproc = psutil.Process(zpid) + # ...and at least its status always be querable + self.assertEqual(zproc.status(), psutil.STATUS_ZOMBIE) + # ...and it should be considered 'running' + self.assertTrue(zproc.is_running()) + # ...and as_dict() shouldn't crash + zproc.as_dict() + # if cmdline succeeds it should be an empty list + ret = succeed_or_zombie_p_exc(zproc.suspend) + if ret is not None: + self.assertEqual(ret, []) + + if hasattr(zproc, "rlimit"): + succeed_or_zombie_p_exc(zproc.rlimit, psutil.RLIMIT_NOFILE) + succeed_or_zombie_p_exc(zproc.rlimit, psutil.RLIMIT_NOFILE, + (5, 5)) + # set methods + succeed_or_zombie_p_exc(zproc.parent) + if hasattr(zproc, 'cpu_affinity'): + try: + succeed_or_zombie_p_exc(zproc.cpu_affinity, [0]) + except ValueError as err: + if TRAVIS and LINUX and "not eligible" in str(err): + # https://travis-ci.org/giampaolo/psutil/jobs/279890461 + pass + else: + raise + + succeed_or_zombie_p_exc(zproc.nice, 0) + if hasattr(zproc, 'ionice'): + if LINUX: + succeed_or_zombie_p_exc(zproc.ionice, 2, 0) + else: + succeed_or_zombie_p_exc(zproc.ionice, 0) # Windows + if hasattr(zproc, 'rlimit'): + succeed_or_zombie_p_exc(zproc.rlimit, + psutil.RLIMIT_NOFILE, (5, 5)) + succeed_or_zombie_p_exc(zproc.suspend) + succeed_or_zombie_p_exc(zproc.resume) + succeed_or_zombie_p_exc(zproc.terminate) + succeed_or_zombie_p_exc(zproc.kill) + + # ...its parent should 'see' it + # edit: not true on BSD and OSX + # descendants = [x.pid for x in psutil.Process().children( + # recursive=True)] + # self.assertIn(zpid, descendants) + # XXX should we also assume ppid be usable? 
Note: this + # would be an important use case as the only way to get + # rid of a zombie is to kill its parent. + # self.assertEqual(zpid.ppid(), os.getpid()) + # ...and all other APIs should be able to deal with it + self.assertTrue(psutil.pid_exists(zpid)) + if not TRAVIS and OSX: + # For some reason this started failing all of the sudden. + # Maybe they upgraded OSX version? + # https://travis-ci.org/giampaolo/psutil/jobs/310896404 + self.assertIn(zpid, psutil.pids()) + self.assertIn(zpid, [x.pid for x in psutil.process_iter()]) + psutil._pmap = {} + self.assertIn(zpid, [x.pid for x in psutil.process_iter()]) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_zombie_process_is_running_w_exc(self): + # Emulate a case where internally is_running() raises + # ZombieProcess. + p = psutil.Process() + with mock.patch("psutil.Process", + side_effect=psutil.ZombieProcess(0)) as m: + assert p.is_running() + assert m.called + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_zombie_process_status_w_exc(self): + # Emulate a case where internally status() raises + # ZombieProcess. + p = psutil.Process() + with mock.patch("psutil._psplatform.Process.status", + side_effect=psutil.ZombieProcess(0)) as m: + self.assertEqual(p.status(), psutil.STATUS_ZOMBIE) + assert m.called + + def test_pid_0(self): + # Process(0) is supposed to work on all platforms except Linux + if 0 not in psutil.pids(): + self.assertRaises(psutil.NoSuchProcess, psutil.Process, 0) + return + + # test all methods + p = psutil.Process(0) + for name in psutil._as_dict_attrnames: + if name == 'pid': + continue + meth = getattr(p, name) + try: + ret = meth() + except psutil.AccessDenied: + pass + else: + if name in ("uids", "gids"): + self.assertEqual(ret.real, 0) + elif name == "username": + if POSIX: + self.assertEqual(p.username(), 'root') + elif WINDOWS: + self.assertEqual(p.username(), 'NT AUTHORITY\\SYSTEM') + elif name == "name": + assert name, name + + if hasattr(p, 'rlimit'): + try: + p.rlimit(psutil.RLIMIT_FSIZE) + except psutil.AccessDenied: + pass + + p.as_dict() + + if not OPENBSD: + self.assertIn(0, psutil.pids()) + self.assertTrue(psutil.pid_exists(0)) + + @unittest.skipIf(not HAS_ENVIRON, "not supported") + def test_environ(self): + def clean_dict(d): + # Most of these are problematic on Travis. + d.pop("PSUTIL_TESTING", None) + d.pop("PLAT", None) + d.pop("HOME", None) + if OSX: + d.pop("__CF_USER_TEXT_ENCODING", None) + d.pop("VERSIONER_PYTHON_PREFER_32_BIT", None) + d.pop("VERSIONER_PYTHON_VERSION", None) + return dict( + [(k.rstrip("\r\n"), v.rstrip("\r\n")) for k, v in d.items()]) + + self.maxDiff = None + p = psutil.Process() + d1 = clean_dict(p.environ()) + d2 = clean_dict(os.environ.copy()) + self.assertEqual(d1, d2) + + @unittest.skipIf(not HAS_ENVIRON, "not supported") + @unittest.skipIf(not POSIX, "POSIX only") + def test_weird_environ(self): + # environment variables can contain values without an equals sign + code = textwrap.dedent(""" + #include + #include + char * const argv[] = {"cat", 0}; + char * const envp[] = {"A=1", "X", "C=3", 0}; + int main(void) { + /* Close stderr on exec so parent can wait for the execve to + * finish. 
*/ + if (fcntl(2, F_SETFD, FD_CLOEXEC) != 0) + return 0; + return execve("/bin/cat", argv, envp); + } + """) + path = TESTFN + create_exe(path, c_code=code) + self.addCleanup(safe_rmpath, path) + sproc = get_test_subprocess([path], + stdin=subprocess.PIPE, + stderr=subprocess.PIPE) + p = psutil.Process(sproc.pid) + wait_for_pid(p.pid) + self.assertTrue(p.is_running()) + # Wait for process to exec or exit. + self.assertEqual(sproc.stderr.read(), b"") + self.assertEqual(p.environ(), {"A": "1", "C": "3"}) + sproc.communicate() + self.assertEqual(sproc.returncode, 0) + + +# =================================================================== +# --- Limited user tests +# =================================================================== + + +if POSIX and os.getuid() == 0: + class LimitedUserTestCase(TestProcess): + """Repeat the previous tests by using a limited user. + Executed only on UNIX and only if the user who run the test script + is root. + """ + # the uid/gid the test suite runs under + if hasattr(os, 'getuid'): + PROCESS_UID = os.getuid() + PROCESS_GID = os.getgid() + + def __init__(self, *args, **kwargs): + TestProcess.__init__(self, *args, **kwargs) + # re-define all existent test methods in order to + # ignore AccessDenied exceptions + for attr in [x for x in dir(self) if x.startswith('test')]: + meth = getattr(self, attr) + + def test_(self): + try: + meth() + except psutil.AccessDenied: + pass + setattr(self, attr, types.MethodType(test_, self)) + + def setUp(self): + safe_rmpath(TESTFN) + TestProcess.setUp(self) + os.setegid(1000) + os.seteuid(1000) + + def tearDown(self): + os.setegid(self.PROCESS_UID) + os.seteuid(self.PROCESS_GID) + TestProcess.tearDown(self) + + def test_nice(self): + try: + psutil.Process().nice(-1) + except psutil.AccessDenied: + pass + else: + self.fail("exception not raised") + + def test_zombie_process(self): + # causes problems if test test suite is run as root + pass + + +# =================================================================== +# --- psutil.Popen tests +# =================================================================== + + +class TestPopen(unittest.TestCase): + """Tests for psutil.Popen class.""" + + def tearDown(self): + reap_children() + + def test_misc(self): + # XXX this test causes a ResourceWarning on Python 3 because + # psutil.__subproc instance doesn't get propertly freed. + # Not sure what to do though. + cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"] + with psutil.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as proc: + proc.name() + proc.cpu_times() + proc.stdin + self.assertTrue(dir(proc)) + self.assertRaises(AttributeError, getattr, proc, 'foo') + proc.terminate() + + def test_ctx_manager(self): + with psutil.Popen([PYTHON_EXE, "-V"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) as proc: + proc.communicate() + assert proc.stdout.closed + assert proc.stderr.closed + assert proc.stdin.closed + self.assertEqual(proc.returncode, 0) + + def test_kill_terminate(self): + # subprocess.Popen()'s terminate(), kill() and send_signal() do + # not raise exception after the process is gone. psutil.Popen + # diverges from that. 
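+ # Illustrative contrast once the child has already exited and been reaped:
+ # >>> subprocess.Popen([...]).terminate()   # plain Popen: silently ignored
+ # >>> psutil.Popen([...]).terminate()       # psutil.Popen: raises NoSuchProcess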
+ cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"] + with psutil.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as proc: + proc.terminate() + proc.wait() + self.assertRaises(psutil.NoSuchProcess, proc.terminate) + self.assertRaises(psutil.NoSuchProcess, proc.kill) + self.assertRaises(psutil.NoSuchProcess, proc.send_signal, + signal.SIGTERM) + if WINDOWS and sys.version_info >= (2, 7): + self.assertRaises(psutil.NoSuchProcess, proc.send_signal, + signal.CTRL_C_EVENT) + self.assertRaises(psutil.NoSuchProcess, proc.send_signal, + signal.CTRL_BREAK_EVENT) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_sunos.py b/server/www/packages/packages-windows/x86/psutil/tests/test_sunos.py new file mode 100644 index 0000000..ea9afcd --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_sunos.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Sun OS specific tests.""" + +import os + +import psutil +from psutil import SUNOS +from psutil.tests import run_test_module_by_name +from psutil.tests import sh +from psutil.tests import unittest + + +@unittest.skipIf(not SUNOS, "SUNOS only") +class SunOSSpecificTestCase(unittest.TestCase): + + def test_swap_memory(self): + out = sh('env PATH=/usr/sbin:/sbin:%s swap -l' % os.environ['PATH']) + lines = out.strip().split('\n')[1:] + if not lines: + raise ValueError('no swap device(s) configured') + total = free = 0 + for line in lines: + line = line.split() + t, f = line[-2:] + total += int(int(t) * 512) + free += int(int(f) * 512) + used = total - free + + psutil_swap = psutil.swap_memory() + self.assertEqual(psutil_swap.total, total) + self.assertEqual(psutil_swap.used, used) + self.assertEqual(psutil_swap.free, free) + + def test_cpu_count(self): + out = sh("/usr/sbin/psrinfo") + self.assertEqual(psutil.cpu_count(), len(out.split('\n'))) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_system.py b/server/www/packages/packages-windows/x86/psutil/tests/test_system.py new file mode 100644 index 0000000..20b132a --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_system.py @@ -0,0 +1,862 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
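+# A few illustrative calls to the module-level APIs exercised below
+# (numbers are example output only):
+# >>> import psutil
+# >>> psutil.cpu_percent(interval=0.1)
+# 7.3
+# >>> psutil.virtual_memory().percent
+# 42.1
+# >>> psutil.disk_usage('/').percent
+# 61.2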
+ +"""Tests for system APIS.""" + +import contextlib +import datetime +import errno +import os +import pprint +import shutil +import signal +import socket +import sys +import tempfile +import time + +import psutil +from psutil import AIX +from psutil import BSD +from psutil import FREEBSD +from psutil import LINUX +from psutil import NETBSD +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import SUNOS +from psutil import WINDOWS +from psutil._compat import long +from psutil.tests import APPVEYOR +from psutil.tests import ASCII_FS +from psutil.tests import check_net_address +from psutil.tests import DEVNULL +from psutil.tests import enum +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_BATTERY +from psutil.tests import HAS_CPU_FREQ +from psutil.tests import HAS_SENSORS_BATTERY +from psutil.tests import HAS_SENSORS_FANS +from psutil.tests import HAS_SENSORS_TEMPERATURES +from psutil.tests import mock +from psutil.tests import reap_children +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_rmpath +from psutil.tests import TESTFN +from psutil.tests import TESTFN_UNICODE +from psutil.tests import TRAVIS +from psutil.tests import unittest + + +# =================================================================== +# --- System-related API tests +# =================================================================== + + +class TestSystemAPIs(unittest.TestCase): + """Tests for system-related APIs.""" + + def setUp(self): + safe_rmpath(TESTFN) + + def tearDown(self): + reap_children() + + def test_process_iter(self): + self.assertIn(os.getpid(), [x.pid for x in psutil.process_iter()]) + sproc = get_test_subprocess() + self.assertIn(sproc.pid, [x.pid for x in psutil.process_iter()]) + p = psutil.Process(sproc.pid) + p.kill() + p.wait() + self.assertNotIn(sproc.pid, [x.pid for x in psutil.process_iter()]) + + with mock.patch('psutil.Process', + side_effect=psutil.NoSuchProcess(os.getpid())): + self.assertEqual(list(psutil.process_iter()), []) + with mock.patch('psutil.Process', + side_effect=psutil.AccessDenied(os.getpid())): + with self.assertRaises(psutil.AccessDenied): + list(psutil.process_iter()) + + def test_prcess_iter_w_params(self): + for p in psutil.process_iter(attrs=['pid']): + self.assertEqual(list(p.info.keys()), ['pid']) + with self.assertRaises(ValueError): + list(psutil.process_iter(attrs=['foo'])) + with mock.patch("psutil._psplatform.Process.cpu_times", + side_effect=psutil.AccessDenied(0, "")) as m: + for p in psutil.process_iter(attrs=["pid", "cpu_times"]): + self.assertIsNone(p.info['cpu_times']) + self.assertGreaterEqual(p.info['pid'], 0) + assert m.called + with mock.patch("psutil._psplatform.Process.cpu_times", + side_effect=psutil.AccessDenied(0, "")) as m: + flag = object() + for p in psutil.process_iter( + attrs=["pid", "cpu_times"], ad_value=flag): + self.assertIs(p.info['cpu_times'], flag) + self.assertGreaterEqual(p.info['pid'], 0) + assert m.called + + def test_wait_procs(self): + def callback(p): + pids.append(p.pid) + + pids = [] + sproc1 = get_test_subprocess() + sproc2 = get_test_subprocess() + sproc3 = get_test_subprocess() + procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)] + self.assertRaises(ValueError, psutil.wait_procs, procs, timeout=-1) + self.assertRaises(TypeError, psutil.wait_procs, procs, callback=1) + t = time.time() + gone, alive = psutil.wait_procs(procs, timeout=0.01, callback=callback) + + 
self.assertLess(time.time() - t, 0.5) + self.assertEqual(gone, []) + self.assertEqual(len(alive), 3) + self.assertEqual(pids, []) + for p in alive: + self.assertFalse(hasattr(p, 'returncode')) + + @retry_before_failing(30) + def test(procs, callback): + gone, alive = psutil.wait_procs(procs, timeout=0.03, + callback=callback) + self.assertEqual(len(gone), 1) + self.assertEqual(len(alive), 2) + return gone, alive + + sproc3.terminate() + gone, alive = test(procs, callback) + self.assertIn(sproc3.pid, [x.pid for x in gone]) + if POSIX: + self.assertEqual(gone.pop().returncode, -signal.SIGTERM) + else: + self.assertEqual(gone.pop().returncode, 1) + self.assertEqual(pids, [sproc3.pid]) + for p in alive: + self.assertFalse(hasattr(p, 'returncode')) + + @retry_before_failing(30) + def test(procs, callback): + gone, alive = psutil.wait_procs(procs, timeout=0.03, + callback=callback) + self.assertEqual(len(gone), 3) + self.assertEqual(len(alive), 0) + return gone, alive + + sproc1.terminate() + sproc2.terminate() + gone, alive = test(procs, callback) + self.assertEqual(set(pids), set([sproc1.pid, sproc2.pid, sproc3.pid])) + for p in gone: + self.assertTrue(hasattr(p, 'returncode')) + + def test_wait_procs_no_timeout(self): + sproc1 = get_test_subprocess() + sproc2 = get_test_subprocess() + sproc3 = get_test_subprocess() + procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)] + for p in procs: + p.terminate() + gone, alive = psutil.wait_procs(procs) + + def test_boot_time(self): + bt = psutil.boot_time() + self.assertIsInstance(bt, float) + self.assertGreater(bt, 0) + self.assertLess(bt, time.time()) + + @unittest.skipIf(not POSIX, 'POSIX only') + def test_PAGESIZE(self): + # pagesize is used internally to perform different calculations + # and it's determined by using SC_PAGE_SIZE; make sure + # getpagesize() returns the same value. 
+ import resource + self.assertEqual(os.sysconf("SC_PAGE_SIZE"), resource.getpagesize()) + + def test_virtual_memory(self): + mem = psutil.virtual_memory() + assert mem.total > 0, mem + assert mem.available > 0, mem + assert 0 <= mem.percent <= 100, mem + assert mem.used > 0, mem + assert mem.free >= 0, mem + for name in mem._fields: + value = getattr(mem, name) + if name != 'percent': + self.assertIsInstance(value, (int, long)) + if name != 'total': + if not value >= 0: + self.fail("%r < 0 (%s)" % (name, value)) + if value > mem.total: + self.fail("%r > total (total=%s, %s=%s)" + % (name, mem.total, name, value)) + + def test_swap_memory(self): + mem = psutil.swap_memory() + self.assertEqual( + mem._fields, ('total', 'used', 'free', 'percent', 'sin', 'sout')) + + assert mem.total >= 0, mem + assert mem.used >= 0, mem + if mem.total > 0: + # likely a system with no swap partition + assert mem.free > 0, mem + else: + assert mem.free == 0, mem + assert 0 <= mem.percent <= 100, mem + assert mem.sin >= 0, mem + assert mem.sout >= 0, mem + + def test_pid_exists(self): + sproc = get_test_subprocess() + self.assertTrue(psutil.pid_exists(sproc.pid)) + p = psutil.Process(sproc.pid) + p.kill() + p.wait() + self.assertFalse(psutil.pid_exists(sproc.pid)) + self.assertFalse(psutil.pid_exists(-1)) + self.assertEqual(psutil.pid_exists(0), 0 in psutil.pids()) + + def test_pid_exists_2(self): + reap_children() + pids = psutil.pids() + for pid in pids: + try: + assert psutil.pid_exists(pid) + except AssertionError: + # in case the process disappeared in meantime fail only + # if it is no longer in psutil.pids() + time.sleep(.1) + if pid in psutil.pids(): + self.fail(pid) + pids = range(max(pids) + 5000, max(pids) + 6000) + for pid in pids: + self.assertFalse(psutil.pid_exists(pid), msg=pid) + + def test_pids(self): + plist = [x.pid for x in psutil.process_iter()] + pidlist = psutil.pids() + self.assertEqual(plist.sort(), pidlist.sort()) + # make sure every pid is unique + self.assertEqual(len(pidlist), len(set(pidlist))) + + def test_test(self): + # test for psutil.test() function + stdout = sys.stdout + sys.stdout = DEVNULL + try: + psutil.test() + finally: + sys.stdout = stdout + + def test_cpu_count(self): + logical = psutil.cpu_count() + self.assertEqual(logical, len(psutil.cpu_times(percpu=True))) + self.assertGreaterEqual(logical, 1) + # + if os.path.exists("/proc/cpuinfo"): + with open("/proc/cpuinfo") as fd: + cpuinfo_data = fd.read() + if "physical id" not in cpuinfo_data: + raise unittest.SkipTest("cpuinfo doesn't include physical id") + physical = psutil.cpu_count(logical=False) + self.assertGreaterEqual(physical, 1) + self.assertGreaterEqual(logical, physical) + + def test_cpu_count_none(self): + # https://github.com/giampaolo/psutil/issues/1085 + for val in (-1, 0, None): + with mock.patch('psutil._psplatform.cpu_count_logical', + return_value=val) as m: + self.assertIsNone(psutil.cpu_count()) + assert m.called + with mock.patch('psutil._psplatform.cpu_count_physical', + return_value=val) as m: + self.assertIsNone(psutil.cpu_count(logical=False)) + assert m.called + + def test_cpu_times(self): + # Check type, value >= 0, str(). + total = 0 + times = psutil.cpu_times() + sum(times) + for cp_time in times: + self.assertIsInstance(cp_time, float) + self.assertGreaterEqual(cp_time, 0.0) + total += cp_time + self.assertEqual(total, sum(times)) + str(times) + # CPU times are always supposed to increase over time + # or at least remain the same and that's because time + # cannot go backwards. 
+ # Surprisingly sometimes this might not be the case (at + # least on Windows and Linux), see: + # https://github.com/giampaolo/psutil/issues/392 + # https://github.com/giampaolo/psutil/issues/645 + # if not WINDOWS: + # last = psutil.cpu_times() + # for x in range(100): + # new = psutil.cpu_times() + # for field in new._fields: + # new_t = getattr(new, field) + # last_t = getattr(last, field) + # self.assertGreaterEqual(new_t, last_t, + # msg="%s %s" % (new_t, last_t)) + # last = new + + def test_cpu_times_time_increases(self): + # Make sure time increases between calls. + t1 = sum(psutil.cpu_times()) + time.sleep(0.1) + t2 = sum(psutil.cpu_times()) + difference = t2 - t1 + if not difference >= 0.05: + self.fail("difference %s" % difference) + + def test_per_cpu_times(self): + # Check type, value >= 0, str(). + for times in psutil.cpu_times(percpu=True): + total = 0 + sum(times) + for cp_time in times: + self.assertIsInstance(cp_time, float) + self.assertGreaterEqual(cp_time, 0.0) + total += cp_time + self.assertEqual(total, sum(times)) + str(times) + self.assertEqual(len(psutil.cpu_times(percpu=True)[0]), + len(psutil.cpu_times(percpu=False))) + + # Note: in theory CPU times are always supposed to increase over + # time or remain the same but never go backwards. In practice + # sometimes this is not the case. + # This issue seemd to be afflict Windows: + # https://github.com/giampaolo/psutil/issues/392 + # ...but it turns out also Linux (rarely) behaves the same. + # last = psutil.cpu_times(percpu=True) + # for x in range(100): + # new = psutil.cpu_times(percpu=True) + # for index in range(len(new)): + # newcpu = new[index] + # lastcpu = last[index] + # for field in newcpu._fields: + # new_t = getattr(newcpu, field) + # last_t = getattr(lastcpu, field) + # self.assertGreaterEqual( + # new_t, last_t, msg="%s %s" % (lastcpu, newcpu)) + # last = new + + def test_per_cpu_times_2(self): + # Simulate some work load then make sure time have increased + # between calls. + tot1 = psutil.cpu_times(percpu=True) + stop_at = time.time() + 0.1 + while True: + if time.time() >= stop_at: + break + tot2 = psutil.cpu_times(percpu=True) + for t1, t2 in zip(tot1, tot2): + t1, t2 = sum(t1), sum(t2) + difference = t2 - t1 + if difference >= 0.05: + return + self.fail() + + def test_cpu_times_comparison(self): + # Make sure the sum of all per cpu times is almost equal to + # base "one cpu" times. 
+ base = psutil.cpu_times() + per_cpu = psutil.cpu_times(percpu=True) + summed_values = base._make([sum(num) for num in zip(*per_cpu)]) + for field in base._fields: + self.assertAlmostEqual( + getattr(base, field), getattr(summed_values, field), delta=1) + + def _test_cpu_percent(self, percent, last_ret, new_ret): + try: + self.assertIsInstance(percent, float) + self.assertGreaterEqual(percent, 0.0) + self.assertIsNot(percent, -0.0) + self.assertLessEqual(percent, 100.0 * psutil.cpu_count()) + except AssertionError as err: + raise AssertionError("\n%s\nlast=%s\nnew=%s" % ( + err, pprint.pformat(last_ret), pprint.pformat(new_ret))) + + def test_cpu_percent(self): + last = psutil.cpu_percent(interval=0.001) + for x in range(100): + new = psutil.cpu_percent(interval=None) + self._test_cpu_percent(new, last, new) + last = new + with self.assertRaises(ValueError): + psutil.cpu_percent(interval=-1) + + def test_per_cpu_percent(self): + last = psutil.cpu_percent(interval=0.001, percpu=True) + self.assertEqual(len(last), psutil.cpu_count()) + for x in range(100): + new = psutil.cpu_percent(interval=None, percpu=True) + for percent in new: + self._test_cpu_percent(percent, last, new) + last = new + with self.assertRaises(ValueError): + psutil.cpu_percent(interval=-1, percpu=True) + + def test_cpu_times_percent(self): + last = psutil.cpu_times_percent(interval=0.001) + for x in range(100): + new = psutil.cpu_times_percent(interval=None) + for percent in new: + self._test_cpu_percent(percent, last, new) + self._test_cpu_percent(sum(new), last, new) + last = new + + def test_per_cpu_times_percent(self): + last = psutil.cpu_times_percent(interval=0.001, percpu=True) + self.assertEqual(len(last), psutil.cpu_count()) + for x in range(100): + new = psutil.cpu_times_percent(interval=None, percpu=True) + for cpu in new: + for percent in cpu: + self._test_cpu_percent(percent, last, new) + self._test_cpu_percent(sum(cpu), last, new) + last = new + + def test_per_cpu_times_percent_negative(self): + # see: https://github.com/giampaolo/psutil/issues/645 + psutil.cpu_times_percent(percpu=True) + zero_times = [x._make([0 for x in range(len(x._fields))]) + for x in psutil.cpu_times(percpu=True)] + with mock.patch('psutil.cpu_times', return_value=zero_times): + for cpu in psutil.cpu_times_percent(percpu=True): + for percent in cpu: + self._test_cpu_percent(percent, None, None) + + def test_disk_usage(self): + usage = psutil.disk_usage(os.getcwd()) + self.assertEqual(usage._fields, ('total', 'used', 'free', 'percent')) + + assert usage.total > 0, usage + assert usage.used > 0, usage + assert usage.free > 0, usage + assert usage.total > usage.used, usage + assert usage.total > usage.free, usage + assert 0 <= usage.percent <= 100, usage.percent + if hasattr(shutil, 'disk_usage'): + # py >= 3.3, see: http://bugs.python.org/issue12442 + shutil_usage = shutil.disk_usage(os.getcwd()) + tolerance = 5 * 1024 * 1024 # 5MB + self.assertEqual(usage.total, shutil_usage.total) + self.assertAlmostEqual(usage.free, shutil_usage.free, + delta=tolerance) + self.assertAlmostEqual(usage.used, shutil_usage.used, + delta=tolerance) + + # if path does not exist OSError ENOENT is expected across + # all platforms + fname = tempfile.mktemp() + with self.assertRaises(OSError) as exc: + psutil.disk_usage(fname) + self.assertEqual(exc.exception.errno, errno.ENOENT) + + def test_disk_usage_unicode(self): + # See: https://github.com/giampaolo/psutil/issues/416 + if ASCII_FS: + with self.assertRaises(UnicodeEncodeError): + 
psutil.disk_usage(TESTFN_UNICODE) + + def test_disk_usage_bytes(self): + psutil.disk_usage(b'.') + + def test_disk_partitions(self): + # all = False + ls = psutil.disk_partitions(all=False) + # on travis we get: + # self.assertEqual(p.cpu_affinity(), [n]) + # AssertionError: Lists differ: [0, 1, 2, 3, 4, 5, 6, 7,... != [0] + self.assertTrue(ls, msg=ls) + for disk in ls: + self.assertIsInstance(disk.device, str) + self.assertIsInstance(disk.mountpoint, str) + self.assertIsInstance(disk.fstype, str) + self.assertIsInstance(disk.opts, str) + if WINDOWS and 'cdrom' in disk.opts: + continue + if not POSIX: + assert os.path.exists(disk.device), disk + else: + # we cannot make any assumption about this, see: + # http://goo.gl/p9c43 + disk.device + if SUNOS or TRAVIS: + # on solaris apparently mount points can also be files + assert os.path.exists(disk.mountpoint), disk + else: + assert os.path.isdir(disk.mountpoint), disk + assert disk.fstype, disk + + # all = True + ls = psutil.disk_partitions(all=True) + self.assertTrue(ls, msg=ls) + for disk in psutil.disk_partitions(all=True): + if not WINDOWS: + try: + os.stat(disk.mountpoint) + except OSError as err: + if TRAVIS and OSX and err.errno == errno.EIO: + continue + # http://mail.python.org/pipermail/python-dev/ + # 2012-June/120787.html + if err.errno not in (errno.EPERM, errno.EACCES): + raise + else: + if SUNOS or TRAVIS: + # on solaris apparently mount points can also be files + assert os.path.exists(disk.mountpoint), disk + else: + assert os.path.isdir(disk.mountpoint), disk + self.assertIsInstance(disk.fstype, str) + self.assertIsInstance(disk.opts, str) + + def find_mount_point(path): + path = os.path.abspath(path) + while not os.path.ismount(path): + path = os.path.dirname(path) + return path.lower() + + mount = find_mount_point(__file__) + mounts = [x.mountpoint.lower() for x in + psutil.disk_partitions(all=True)] + self.assertIn(mount, mounts) + psutil.disk_usage(mount) + + def test_net_io_counters(self): + def check_ntuple(nt): + self.assertEqual(nt[0], nt.bytes_sent) + self.assertEqual(nt[1], nt.bytes_recv) + self.assertEqual(nt[2], nt.packets_sent) + self.assertEqual(nt[3], nt.packets_recv) + self.assertEqual(nt[4], nt.errin) + self.assertEqual(nt[5], nt.errout) + self.assertEqual(nt[6], nt.dropin) + self.assertEqual(nt[7], nt.dropout) + assert nt.bytes_sent >= 0, nt + assert nt.bytes_recv >= 0, nt + assert nt.packets_sent >= 0, nt + assert nt.packets_recv >= 0, nt + assert nt.errin >= 0, nt + assert nt.errout >= 0, nt + assert nt.dropin >= 0, nt + assert nt.dropout >= 0, nt + + ret = psutil.net_io_counters(pernic=False) + check_ntuple(ret) + ret = psutil.net_io_counters(pernic=True) + self.assertNotEqual(ret, []) + for key in ret: + self.assertTrue(key) + self.assertIsInstance(key, str) + check_ntuple(ret[key]) + + def test_net_io_counters_no_nics(self): + # Emulate a case where no NICs are installed, see: + # https://github.com/giampaolo/psutil/issues/1062 + with mock.patch('psutil._psplatform.net_io_counters', + return_value={}) as m: + self.assertIsNone(psutil.net_io_counters(pernic=False)) + self.assertEqual(psutil.net_io_counters(pernic=True), {}) + assert m.called + + def test_net_if_addrs(self): + nics = psutil.net_if_addrs() + assert nics, nics + + nic_stats = psutil.net_if_stats() + + # Not reliable on all platforms (net_if_addrs() reports more + # interfaces). 
+ # self.assertEqual(sorted(nics.keys()), + # sorted(psutil.net_io_counters(pernic=True).keys())) + + families = set([socket.AF_INET, socket.AF_INET6, psutil.AF_LINK]) + for nic, addrs in nics.items(): + self.assertIsInstance(nic, str) + self.assertEqual(len(set(addrs)), len(addrs)) + for addr in addrs: + self.assertIsInstance(addr.family, int) + self.assertIsInstance(addr.address, str) + self.assertIsInstance(addr.netmask, (str, type(None))) + self.assertIsInstance(addr.broadcast, (str, type(None))) + self.assertIn(addr.family, families) + if sys.version_info >= (3, 4): + self.assertIsInstance(addr.family, enum.IntEnum) + if nic_stats[nic].isup: + # Do not test binding to addresses of interfaces + # that are down + if addr.family == socket.AF_INET: + s = socket.socket(addr.family) + with contextlib.closing(s): + s.bind((addr.address, 0)) + elif addr.family == socket.AF_INET6: + info = socket.getaddrinfo( + addr.address, 0, socket.AF_INET6, + socket.SOCK_STREAM, 0, socket.AI_PASSIVE)[0] + af, socktype, proto, canonname, sa = info + s = socket.socket(af, socktype, proto) + with contextlib.closing(s): + s.bind(sa) + for ip in (addr.address, addr.netmask, addr.broadcast, + addr.ptp): + if ip is not None: + # TODO: skip AF_INET6 for now because I get: + # AddressValueError: Only hex digits permitted in + # u'c6f3%lxcbr0' in u'fe80::c8e0:fff:fe54:c6f3%lxcbr0' + if addr.family != socket.AF_INET6: + check_net_address(ip, addr.family) + # broadcast and ptp addresses are mutually exclusive + if addr.broadcast: + self.assertIsNone(addr.ptp) + elif addr.ptp: + self.assertIsNone(addr.broadcast) + + if BSD or OSX or SUNOS: + if hasattr(socket, "AF_LINK"): + self.assertEqual(psutil.AF_LINK, socket.AF_LINK) + elif LINUX: + self.assertEqual(psutil.AF_LINK, socket.AF_PACKET) + elif WINDOWS: + self.assertEqual(psutil.AF_LINK, -1) + + def test_net_if_addrs_mac_null_bytes(self): + # Simulate that the underlying C function returns an incomplete + # MAC address. psutil is supposed to fill it with null bytes. 
+ # https://github.com/giampaolo/psutil/issues/786 + if POSIX: + ret = [('em1', psutil.AF_LINK, '06:3d:29', None, None, None)] + else: + ret = [('em1', -1, '06-3d-29', None, None, None)] + with mock.patch('psutil._psplatform.net_if_addrs', + return_value=ret) as m: + addr = psutil.net_if_addrs()['em1'][0] + assert m.called + if POSIX: + self.assertEqual(addr.address, '06:3d:29:00:00:00') + else: + self.assertEqual(addr.address, '06-3d-29-00-00-00') + + @unittest.skipIf(TRAVIS, "unreliable on TRAVIS") # raises EPERM + def test_net_if_stats(self): + nics = psutil.net_if_stats() + assert nics, nics + all_duplexes = (psutil.NIC_DUPLEX_FULL, + psutil.NIC_DUPLEX_HALF, + psutil.NIC_DUPLEX_UNKNOWN) + for name, stats in nics.items(): + self.assertIsInstance(name, str) + isup, duplex, speed, mtu = stats + self.assertIsInstance(isup, bool) + self.assertIn(duplex, all_duplexes) + self.assertIn(duplex, all_duplexes) + self.assertGreaterEqual(speed, 0) + self.assertGreaterEqual(mtu, 0) + + @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'), + '/proc/diskstats not available on this linux version') + @unittest.skipIf(APPVEYOR and psutil.disk_io_counters() is None, + "unreliable on APPVEYOR") # no visible disks + def test_disk_io_counters(self): + def check_ntuple(nt): + self.assertEqual(nt[0], nt.read_count) + self.assertEqual(nt[1], nt.write_count) + self.assertEqual(nt[2], nt.read_bytes) + self.assertEqual(nt[3], nt.write_bytes) + if not (OPENBSD or NETBSD): + self.assertEqual(nt[4], nt.read_time) + self.assertEqual(nt[5], nt.write_time) + if LINUX: + self.assertEqual(nt[6], nt.read_merged_count) + self.assertEqual(nt[7], nt.write_merged_count) + self.assertEqual(nt[8], nt.busy_time) + elif FREEBSD: + self.assertEqual(nt[6], nt.busy_time) + for name in nt._fields: + assert getattr(nt, name) >= 0, nt + + ret = psutil.disk_io_counters(perdisk=False) + assert ret is not None, "no disks on this system?" + check_ntuple(ret) + ret = psutil.disk_io_counters(perdisk=True) + # make sure there are no duplicates + self.assertEqual(len(ret), len(set(ret))) + for key in ret: + assert key, key + check_ntuple(ret[key]) + if LINUX and key[-1].isdigit(): + # if 'sda1' is listed 'sda' shouldn't, see: + # https://github.com/giampaolo/psutil/issues/338 + while key[-1].isdigit(): + key = key[:-1] + self.assertNotIn(key, ret.keys()) + + def test_disk_io_counters_no_disks(self): + # Emulate a case where no disks are installed, see: + # https://github.com/giampaolo/psutil/issues/1062 + with mock.patch('psutil._psplatform.disk_io_counters', + return_value={}) as m: + self.assertIsNone(psutil.disk_io_counters(perdisk=False)) + self.assertEqual(psutil.disk_io_counters(perdisk=True), {}) + assert m.called + + # can't find users on APPVEYOR or TRAVIS + @unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(), + "unreliable on APPVEYOR or TRAVIS") + def test_users(self): + users = psutil.users() + self.assertNotEqual(users, []) + for user in users: + assert user.name, user + self.assertIsInstance(user.name, str) + self.assertIsInstance(user.terminal, (str, type(None))) + if user.host is not None: + self.assertIsInstance(user.host, (str, type(None))) + user.terminal + user.host + assert user.started > 0.0, user + datetime.datetime.fromtimestamp(user.started) + if WINDOWS or OPENBSD: + self.assertIsNone(user.pid) + else: + psutil.Process(user.pid) + + def test_cpu_stats(self): + # Tested more extensively in per-platform test modules. 
+ infos = psutil.cpu_stats() + self.assertEqual( + infos._fields, + ('ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls')) + for name in infos._fields: + value = getattr(infos, name) + self.assertGreaterEqual(value, 0) + # on AIX, ctx_switches is always 0 + if not AIX and name in ('ctx_switches', 'interrupts'): + self.assertGreater(value, 0) + + @unittest.skipIf(not HAS_CPU_FREQ, "not suported") + def test_cpu_freq(self): + def check_ls(ls): + for nt in ls: + self.assertEqual(nt._fields, ('current', 'min', 'max')) + self.assertLessEqual(nt.current, nt.max) + for name in nt._fields: + value = getattr(nt, name) + self.assertIsInstance(value, (int, long, float)) + self.assertGreaterEqual(value, 0) + + ls = psutil.cpu_freq(percpu=True) + if TRAVIS and not ls: + return + + assert ls, ls + check_ls([psutil.cpu_freq(percpu=False)]) + + if LINUX: + self.assertEqual(len(ls), psutil.cpu_count()) + + def test_os_constants(self): + names = ["POSIX", "WINDOWS", "LINUX", "OSX", "FREEBSD", "OPENBSD", + "NETBSD", "BSD", "SUNOS"] + for name in names: + self.assertIsInstance(getattr(psutil, name), bool, msg=name) + + if os.name == 'posix': + assert psutil.POSIX + assert not psutil.WINDOWS + names.remove("POSIX") + if "linux" in sys.platform.lower(): + assert psutil.LINUX + names.remove("LINUX") + elif "bsd" in sys.platform.lower(): + assert psutil.BSD + self.assertEqual([psutil.FREEBSD, psutil.OPENBSD, + psutil.NETBSD].count(True), 1) + names.remove("BSD") + names.remove("FREEBSD") + names.remove("OPENBSD") + names.remove("NETBSD") + elif "sunos" in sys.platform.lower() or \ + "solaris" in sys.platform.lower(): + assert psutil.SUNOS + names.remove("SUNOS") + elif "darwin" in sys.platform.lower(): + assert psutil.OSX + names.remove("OSX") + else: + assert psutil.WINDOWS + assert not psutil.POSIX + names.remove("WINDOWS") + + # assert all other constants are set to False + for name in names: + self.assertIs(getattr(psutil, name), False, msg=name) + + @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported") + def test_sensors_temperatures(self): + temps = psutil.sensors_temperatures() + for name, entries in temps.items(): + self.assertIsInstance(name, str) + for entry in entries: + self.assertIsInstance(entry.label, str) + if entry.current is not None: + self.assertGreaterEqual(entry.current, 0) + if entry.high is not None: + self.assertGreaterEqual(entry.high, 0) + if entry.critical is not None: + self.assertGreaterEqual(entry.critical, 0) + + @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported") + def test_sensors_temperatures_fahreneit(self): + d = {'coretemp': [('label', 50.0, 60.0, 70.0)]} + with mock.patch("psutil._psplatform.sensors_temperatures", + return_value=d) as m: + temps = psutil.sensors_temperatures( + fahrenheit=True)['coretemp'][0] + assert m.called + self.assertEqual(temps.current, 122.0) + self.assertEqual(temps.high, 140.0) + self.assertEqual(temps.critical, 158.0) + + @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported") + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_sensors_battery(self): + ret = psutil.sensors_battery() + self.assertGreaterEqual(ret.percent, 0) + self.assertLessEqual(ret.percent, 100) + if ret.secsleft not in (psutil.POWER_TIME_UNKNOWN, + psutil.POWER_TIME_UNLIMITED): + self.assertGreaterEqual(ret.secsleft, 0) + else: + if ret.secsleft == psutil.POWER_TIME_UNLIMITED: + self.assertTrue(ret.power_plugged) + self.assertIsInstance(ret.power_plugged, bool) + + @unittest.skipIf(not HAS_SENSORS_FANS, "not supported") + def 
test_sensors_fans(self): + fans = psutil.sensors_fans() + for name, entries in fans.items(): + self.assertIsInstance(name, str) + for entry in entries: + self.assertIsInstance(entry.label, str) + self.assertIsInstance(entry.current, (int, long)) + self.assertGreaterEqual(entry.current, 0) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_unicode.py b/server/www/packages/packages-windows/x86/psutil/tests/test_unicode.py new file mode 100644 index 0000000..c2a2f84 --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_unicode.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Notes about unicode handling in psutil +====================================== + +In psutil these are the APIs returning or dealing with a string +('not tested' means they are not tested to deal with non-ASCII strings): + +* Process.cmdline() +* Process.connections('unix') +* Process.cwd() +* Process.environ() +* Process.exe() +* Process.memory_maps() +* Process.name() +* Process.open_files() +* Process.username() (not tested) + +* disk_io_counters() (not tested) +* disk_partitions() (not tested) +* disk_usage(str) +* net_connections('unix') +* net_if_addrs() (not tested) +* net_if_stats() (not tested) +* net_io_counters() (not tested) +* sensors_fans() (not tested) +* sensors_temperatures() (not tested) +* users() (not tested) + +* WindowsService.binpath() (not tested) +* WindowsService.description() (not tested) +* WindowsService.display_name() (not tested) +* WindowsService.name() (not tested) +* WindowsService.status() (not tested) +* WindowsService.username() (not tested) + +In here we create a unicode path with a funky non-ASCII name and (where +possible) make psutil return it back (e.g. on name(), exe(), open_files(), +etc.) 
and make sure that: + +* psutil never crashes with UnicodeDecodeError +* the returned path matches + +For a detailed explanation of how psutil handles unicode see: +- https://github.com/giampaolo/psutil/issues/1040 +- http://psutil.readthedocs.io/#unicode +""" + +import os +import traceback +import warnings +from contextlib import closing + +from psutil import BSD +from psutil import OPENBSD +from psutil import OSX +from psutil import POSIX +from psutil import WINDOWS +from psutil._compat import PY3 +from psutil._compat import u +from psutil.tests import APPVEYOR +from psutil.tests import ASCII_FS +from psutil.tests import bind_unix_socket +from psutil.tests import chdir +from psutil.tests import copyload_shared_lib +from psutil.tests import create_exe +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_CONNECTIONS_UNIX +from psutil.tests import HAS_ENVIRON +from psutil.tests import HAS_MEMORY_MAPS +from psutil.tests import mock +from psutil.tests import reap_children +from psutil.tests import run_test_module_by_name +from psutil.tests import safe_mkdir +from psutil.tests import safe_rmpath as _safe_rmpath +from psutil.tests import skip_on_access_denied +from psutil.tests import TESTFILE_PREFIX +from psutil.tests import TESTFN +from psutil.tests import TESTFN_UNICODE +from psutil.tests import TRAVIS +from psutil.tests import unittest +from psutil.tests import unix_socket_path +import psutil +import psutil.tests + + +def safe_rmpath(path): + if APPVEYOR: + # TODO - this is quite random and I'm not sure why it happens, + # nor I can reproduce it locally: + # https://ci.appveyor.com/project/giampaolo/psutil/build/job/ + # jiq2cgd6stsbtn60 + # safe_rmpath() happens after reap_children() so this is weird + # Perhaps wait_procs() on Windows is broken? Maybe because + # of STILL_ACTIVE? + # https://github.com/giampaolo/psutil/blob/ + # 68c7a70728a31d8b8b58f4be6c4c0baa2f449eda/psutil/arch/ + # windows/process_info.c#L146 + try: + return _safe_rmpath(path) + except WindowsError: + traceback.print_exc() + else: + return _safe_rmpath(path) + + +def subprocess_supports_unicode(name): + """Return True if both the fs and the subprocess module can + deal with a unicode file name. + """ + if PY3: + return True + try: + safe_rmpath(name) + create_exe(name) + get_test_subprocess(cmd=[name]) + except UnicodeEncodeError: + return False + else: + return True + finally: + reap_children() + + +# An invalid unicode string. 
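+# A minimal sketch of the round-trip this relies on: Python 3's
+# "surrogateescape" error handler keeps undecodable bytes recoverable,
+# so the broken trailing bytes survive a decode/encode cycle, e.g.:
+#
+#   >>> s = b"f\xc0\x80".decode("utf8", "surrogateescape")
+#   >>> s
+#   'f\udcc0\udc80'
+#   >>> s.encode("utf8", "surrogateescape")
+#   b'f\xc0\x80'
+#
+# which is why INVALID_NAME below can be handed back to fs APIs unchanged.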
+if PY3: + INVALID_NAME = (TESTFN.encode('utf8') + b"f\xc0\x80").decode( + 'utf8', 'surrogateescape') +else: + INVALID_NAME = TESTFN + "f\xc0\x80" + + +# =================================================================== +# FS APIs +# =================================================================== + + +class _BaseFSAPIsTests(object): + funky_name = None + + @classmethod + def setUpClass(cls): + safe_rmpath(cls.funky_name) + create_exe(cls.funky_name) + + @classmethod + def tearDownClass(cls): + reap_children() + safe_rmpath(cls.funky_name) + + def tearDown(self): + reap_children() + + def expect_exact_path_match(self): + raise NotImplementedError("must be implemented in subclass") + + def test_proc_exe(self): + subp = get_test_subprocess(cmd=[self.funky_name]) + p = psutil.Process(subp.pid) + exe = p.exe() + self.assertIsInstance(exe, str) + if self.expect_exact_path_match(): + self.assertEqual(exe, self.funky_name) + + def test_proc_name(self): + subp = get_test_subprocess(cmd=[self.funky_name]) + if WINDOWS: + # On Windows name() is determined from exe() first, because + # it's faster; we want to overcome the internal optimization + # and test name() instead of exe(). + with mock.patch("psutil._psplatform.cext.proc_exe", + side_effect=psutil.AccessDenied(os.getpid())) as m: + name = psutil.Process(subp.pid).name() + assert m.called + else: + name = psutil.Process(subp.pid).name() + self.assertIsInstance(name, str) + if self.expect_exact_path_match(): + self.assertEqual(name, os.path.basename(self.funky_name)) + + def test_proc_cmdline(self): + subp = get_test_subprocess(cmd=[self.funky_name]) + p = psutil.Process(subp.pid) + cmdline = p.cmdline() + for part in cmdline: + self.assertIsInstance(part, str) + if self.expect_exact_path_match(): + self.assertEqual(cmdline, [self.funky_name]) + + def test_proc_cwd(self): + dname = self.funky_name + "2" + self.addCleanup(safe_rmpath, dname) + safe_mkdir(dname) + with chdir(dname): + p = psutil.Process() + cwd = p.cwd() + self.assertIsInstance(p.cwd(), str) + if self.expect_exact_path_match(): + self.assertEqual(cwd, dname) + + def test_proc_open_files(self): + p = psutil.Process() + start = set(p.open_files()) + with open(self.funky_name, 'rb'): + new = set(p.open_files()) + path = (new - start).pop().path + self.assertIsInstance(path, str) + if BSD and not path: + # XXX - see https://github.com/giampaolo/psutil/issues/595 + return self.skipTest("open_files on BSD is broken") + if self.expect_exact_path_match(): + self.assertEqual(os.path.normcase(path), + os.path.normcase(self.funky_name)) + + @unittest.skipIf(not POSIX, "POSIX only") + def test_proc_connections(self): + suffix = os.path.basename(self.funky_name) + with unix_socket_path(suffix=suffix) as name: + try: + sock = bind_unix_socket(name) + except UnicodeEncodeError: + if PY3: + raise + else: + raise unittest.SkipTest("not supported") + with closing(sock): + conn = psutil.Process().connections('unix')[0] + self.assertIsInstance(conn.laddr, str) + # AF_UNIX addr not set on OpenBSD + if not OPENBSD: + self.assertEqual(conn.laddr, name) + + @unittest.skipIf(not POSIX, "POSIX only") + @unittest.skipIf(not HAS_CONNECTIONS_UNIX, "can't list UNIX sockets") + @skip_on_access_denied() + def test_net_connections(self): + def find_sock(cons): + for conn in cons: + if os.path.basename(conn.laddr).startswith(TESTFILE_PREFIX): + return conn + raise ValueError("connection not found") + + suffix = os.path.basename(self.funky_name) + with unix_socket_path(suffix=suffix) as name: + try: + sock = 
bind_unix_socket(name) + except UnicodeEncodeError: + if PY3: + raise + else: + raise unittest.SkipTest("not supported") + with closing(sock): + cons = psutil.net_connections(kind='unix') + # AF_UNIX addr not set on OpenBSD + if not OPENBSD: + conn = find_sock(cons) + self.assertIsInstance(conn.laddr, str) + self.assertEqual(conn.laddr, name) + + def test_disk_usage(self): + dname = self.funky_name + "2" + self.addCleanup(safe_rmpath, dname) + safe_mkdir(dname) + psutil.disk_usage(dname) + + @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported") + @unittest.skipIf(not PY3, "ctypes does not support unicode on PY2") + def test_memory_maps(self): + # XXX: on Python 2, using ctypes.CDLL with a unicode path + # opens a message box which blocks the test run. + with copyload_shared_lib(dst_prefix=self.funky_name) as funky_path: + def normpath(p): + return os.path.realpath(os.path.normcase(p)) + libpaths = [normpath(x.path) + for x in psutil.Process().memory_maps()] + # ...just to have a clearer msg in case of failure + libpaths = [x for x in libpaths if TESTFILE_PREFIX in x] + self.assertIn(normpath(funky_path), libpaths) + for path in libpaths: + self.assertIsInstance(path, str) + + +@unittest.skipIf(OSX and TRAVIS, "unreliable on TRAVIS") # TODO +@unittest.skipIf(ASCII_FS, "ASCII fs") +@unittest.skipIf(not subprocess_supports_unicode(TESTFN_UNICODE), + "subprocess can't deal with unicode") +class TestFSAPIs(_BaseFSAPIsTests, unittest.TestCase): + """Test FS APIs with a funky, valid, UTF8 path name.""" + funky_name = TESTFN_UNICODE + + @classmethod + def expect_exact_path_match(cls): + # Do not expect psutil to correctly handle unicode paths on + # Python 2 if os.listdir() is not able either. + if PY3: + return True + else: + here = '.' if isinstance(cls.funky_name, str) else u('.') + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + return cls.funky_name in os.listdir(here) + + +@unittest.skipIf(OSX and TRAVIS, "unreliable on TRAVIS") # TODO +@unittest.skipIf(not subprocess_supports_unicode(INVALID_NAME), + "subprocess can't deal with invalid unicode") +class TestFSAPIsWithInvalidPath(_BaseFSAPIsTests, unittest.TestCase): + """Test FS APIs with a funky, invalid path name.""" + funky_name = INVALID_NAME + + @classmethod + def expect_exact_path_match(cls): + # Invalid unicode names are supposed to work on Python 2. + return True + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestWinProcessName(unittest.TestCase): + + def test_name_type(self): + # On Windows name() is determined from exe() first, because + # it's faster; we want to overcome the internal optimization + # and test name() instead of exe(). + with mock.patch("psutil._psplatform.cext.proc_exe", + side_effect=psutil.AccessDenied(os.getpid())) as m: + self.assertIsInstance(psutil.Process().name(), str) + assert m.called + + +# =================================================================== +# Non fs APIs +# =================================================================== + + +class TestNonFSAPIS(unittest.TestCase): + """Unicode tests for non fs-related APIs.""" + + def tearDown(self): + reap_children() + + @unittest.skipIf(not HAS_ENVIRON, "not supported") + def test_proc_environ(self): + # Note: differently from others, this test does not deal + # with fs paths. On Python 2 subprocess module is broken as + # it's not able to handle with non-ASCII env vars, so + # we use "è", which is part of the extended ASCII table + # (unicode point <= 255). 
+ env = os.environ.copy() + funky_str = TESTFN_UNICODE if PY3 else 'è' + env['FUNNY_ARG'] = funky_str + sproc = get_test_subprocess(env=env) + p = psutil.Process(sproc.pid) + env = p.environ() + for k, v in env.items(): + self.assertIsInstance(k, str) + self.assertIsInstance(v, str) + self.assertEqual(env['FUNNY_ARG'], funky_str) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/packages/packages-windows/x86/psutil/tests/test_windows.py b/server/www/packages/packages-windows/x86/psutil/tests/test_windows.py new file mode 100644 index 0000000..e4a719e --- /dev/null +++ b/server/www/packages/packages-windows/x86/psutil/tests/test_windows.py @@ -0,0 +1,838 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -* + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Windows specific tests.""" + +import datetime +import errno +import glob +import os +import platform +import re +import signal +import subprocess +import sys +import time +import warnings + +import psutil +from psutil import WINDOWS +from psutil._compat import callable +from psutil.tests import APPVEYOR +from psutil.tests import get_test_subprocess +from psutil.tests import HAS_BATTERY +from psutil.tests import mock +from psutil.tests import reap_children +from psutil.tests import retry_before_failing +from psutil.tests import run_test_module_by_name +from psutil.tests import sh +from psutil.tests import unittest + +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + import win32api # requires "pip install pypiwin32" + import win32con + import win32process + import wmi # requires "pip install wmi" / "make setup-dev-env" + except ImportError: + if os.name == 'nt': + raise + + +cext = psutil._psplatform.cext + +# are we a 64 bit process +IS_64_BIT = sys.maxsize > 2**32 + + +def wrap_exceptions(fun): + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except OSError as err: + from psutil._pswindows import ACCESS_DENIED_SET + if err.errno in ACCESS_DENIED_SET: + raise psutil.AccessDenied(None, None) + if err.errno == errno.ESRCH: + raise psutil.NoSuchProcess(None, None) + raise + return wrapper + + +# =================================================================== +# System APIs +# =================================================================== + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestSystemAPIs(unittest.TestCase): + + def test_nic_names(self): + out = sh('ipconfig /all') + nics = psutil.net_io_counters(pernic=True).keys() + for nic in nics: + if "pseudo-interface" in nic.replace(' ', '-').lower(): + continue + if nic not in out: + self.fail( + "%r nic wasn't found in 'ipconfig /all' output" % nic) + + @unittest.skipIf('NUMBER_OF_PROCESSORS' not in os.environ, + 'NUMBER_OF_PROCESSORS env var is not available') + def test_cpu_count(self): + num_cpus = int(os.environ['NUMBER_OF_PROCESSORS']) + self.assertEqual(num_cpus, psutil.cpu_count()) + + def test_cpu_count_2(self): + sys_value = win32api.GetSystemInfo()[5] + psutil_value = psutil.cpu_count() + self.assertEqual(sys_value, psutil_value) + + def test_cpu_freq(self): + w = wmi.WMI() + proc = w.Win32_Processor()[0] + self.assertEqual(proc.CurrentClockSpeed, psutil.cpu_freq().current) + self.assertEqual(proc.MaxClockSpeed, psutil.cpu_freq().max) + + def test_total_phymem(self): + w = wmi.WMI().Win32_ComputerSystem()[0] + 
self.assertEqual(int(w.TotalPhysicalMemory), + psutil.virtual_memory().total) + + # @unittest.skipIf(wmi is None, "wmi module is not installed") + # def test__UPTIME(self): + # # _UPTIME constant is not public but it is used internally + # # as value to return for pid 0 creation time. + # # WMI behaves the same. + # w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + # p = psutil.Process(0) + # wmic_create = str(w.CreationDate.split('.')[0]) + # psutil_create = time.strftime("%Y%m%d%H%M%S", + # time.localtime(p.create_time())) + + # Note: this test is not very reliable + @unittest.skipIf(APPVEYOR, "test not relieable on appveyor") + @retry_before_failing() + def test_pids(self): + # Note: this test might fail if the OS is starting/killing + # other processes in the meantime + w = wmi.WMI().Win32_Process() + wmi_pids = set([x.ProcessId for x in w]) + psutil_pids = set(psutil.pids()) + self.assertEqual(wmi_pids, psutil_pids) + + @retry_before_failing() + def test_disks(self): + ps_parts = psutil.disk_partitions(all=True) + wmi_parts = wmi.WMI().Win32_LogicalDisk() + for ps_part in ps_parts: + for wmi_part in wmi_parts: + if ps_part.device.replace('\\', '') == wmi_part.DeviceID: + if not ps_part.mountpoint: + # this is usually a CD-ROM with no disk inserted + break + try: + usage = psutil.disk_usage(ps_part.mountpoint) + except OSError as err: + if err.errno == errno.ENOENT: + # usually this is the floppy + break + else: + raise + self.assertEqual(usage.total, int(wmi_part.Size)) + wmi_free = int(wmi_part.FreeSpace) + self.assertEqual(usage.free, wmi_free) + # 10 MB tollerance + if abs(usage.free - wmi_free) > 10 * 1024 * 1024: + self.fail("psutil=%s, wmi=%s" % ( + usage.free, wmi_free)) + break + else: + self.fail("can't find partition %s" % repr(ps_part)) + + def test_disk_usage(self): + for disk in psutil.disk_partitions(): + sys_value = win32api.GetDiskFreeSpaceEx(disk.mountpoint) + psutil_value = psutil.disk_usage(disk.mountpoint) + self.assertAlmostEqual(sys_value[0], psutil_value.free, + delta=1024 * 1024) + self.assertAlmostEqual(sys_value[1], psutil_value.total, + delta=1024 * 1024) + self.assertEqual(psutil_value.used, + psutil_value.total - psutil_value.free) + + def test_disk_partitions(self): + sys_value = [ + x + '\\' for x in win32api.GetLogicalDriveStrings().split("\\\x00") + if x and not x.startswith('A:')] + psutil_value = [x.mountpoint for x in psutil.disk_partitions(all=True)] + self.assertEqual(sys_value, psutil_value) + + def test_net_if_stats(self): + ps_names = set(cext.net_if_stats()) + wmi_adapters = wmi.WMI().Win32_NetworkAdapter() + wmi_names = set() + for wmi_adapter in wmi_adapters: + wmi_names.add(wmi_adapter.Name) + wmi_names.add(wmi_adapter.NetConnectionID) + self.assertTrue(ps_names & wmi_names, + "no common entries in %s, %s" % (ps_names, wmi_names)) + + def test_boot_time(self): + wmi_os = wmi.WMI().Win32_OperatingSystem() + wmi_btime_str = wmi_os[0].LastBootUpTime.split('.')[0] + wmi_btime_dt = datetime.datetime.strptime( + wmi_btime_str, "%Y%m%d%H%M%S") + psutil_dt = datetime.datetime.fromtimestamp(psutil.boot_time()) + diff = abs((wmi_btime_dt - psutil_dt).total_seconds()) + # Wmic time is 2-3 secs lower for some reason; that's OK. 
+ self.assertLessEqual(diff, 3) + + def test_boot_time_fluctuation(self): + # https://github.com/giampaolo/psutil/issues/1007 + with mock.patch('psutil._pswindows.cext.boot_time', return_value=5): + self.assertEqual(psutil.boot_time(), 5) + with mock.patch('psutil._pswindows.cext.boot_time', return_value=4): + self.assertEqual(psutil.boot_time(), 5) + with mock.patch('psutil._pswindows.cext.boot_time', return_value=6): + self.assertEqual(psutil.boot_time(), 5) + with mock.patch('psutil._pswindows.cext.boot_time', return_value=333): + self.assertEqual(psutil.boot_time(), 333) + + +# =================================================================== +# sensors_battery() +# =================================================================== + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestSensorsBattery(unittest.TestCase): + + def test_has_battery(self): + if win32api.GetPwrCapabilities()['SystemBatteriesPresent']: + self.assertIsNotNone(psutil.sensors_battery()) + else: + self.assertIsNone(psutil.sensors_battery()) + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_percent(self): + w = wmi.WMI() + battery_wmi = w.query('select * from Win32_Battery')[0] + battery_psutil = psutil.sensors_battery() + self.assertAlmostEqual( + battery_psutil.percent, battery_wmi.EstimatedChargeRemaining, + delta=1) + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_power_plugged(self): + w = wmi.WMI() + battery_wmi = w.query('select * from Win32_Battery')[0] + battery_psutil = psutil.sensors_battery() + # Status codes: + # https://msdn.microsoft.com/en-us/library/aa394074(v=vs.85).aspx + self.assertEqual(battery_psutil.power_plugged, + battery_wmi.BatteryStatus == 2) + + def test_emulate_no_battery(self): + with mock.patch("psutil._pswindows.cext.sensors_battery", + return_value=(0, 128, 0, 0)) as m: + self.assertIsNone(psutil.sensors_battery()) + assert m.called + + def test_emulate_power_connected(self): + with mock.patch("psutil._pswindows.cext.sensors_battery", + return_value=(1, 0, 0, 0)) as m: + self.assertEqual(psutil.sensors_battery().secsleft, + psutil.POWER_TIME_UNLIMITED) + assert m.called + + def test_emulate_power_charging(self): + with mock.patch("psutil._pswindows.cext.sensors_battery", + return_value=(0, 8, 0, 0)) as m: + self.assertEqual(psutil.sensors_battery().secsleft, + psutil.POWER_TIME_UNLIMITED) + assert m.called + + def test_emulate_secs_left_unknown(self): + with mock.patch("psutil._pswindows.cext.sensors_battery", + return_value=(0, 0, 0, -1)) as m: + self.assertEqual(psutil.sensors_battery().secsleft, + psutil.POWER_TIME_UNKNOWN) + assert m.called + + +# =================================================================== +# Process APIs +# =================================================================== + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestProcess(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess().pid + + @classmethod + def tearDownClass(cls): + reap_children() + + def test_issue_24(self): + p = psutil.Process(0) + self.assertRaises(psutil.AccessDenied, p.kill) + + def test_special_pid(self): + p = psutil.Process(4) + self.assertEqual(p.name(), 'System') + # use __str__ to access all common Process properties to check + # that nothing strange happens + str(p) + p.username() + self.assertTrue(p.create_time() >= 0.0) + try: + rss, vms = p.memory_info()[:2] + except psutil.AccessDenied: + # expected on Windows Vista and Windows 7 + if not platform.uname()[1] in ('vista', 'win-7', 
'win7'): + raise + else: + self.assertTrue(rss > 0) + + def test_send_signal(self): + p = psutil.Process(self.pid) + self.assertRaises(ValueError, p.send_signal, signal.SIGINT) + + def test_exe(self): + for p in psutil.process_iter(): + try: + self.assertEqual(os.path.basename(p.exe()), p.name()) + except psutil.Error: + pass + + def test_num_handles_increment(self): + p = psutil.Process(os.getpid()) + before = p.num_handles() + handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + win32con.FALSE, os.getpid()) + after = p.num_handles() + self.assertEqual(after, before + 1) + win32api.CloseHandle(handle) + self.assertEqual(p.num_handles(), before) + + def test_handles_leak(self): + # Call all Process methods and make sure no handles are left + # open. This is here mainly to make sure functions using + # OpenProcess() always call CloseHandle(). + def call(p, attr): + attr = getattr(p, name, None) + if attr is not None and callable(attr): + attr() + else: + attr + + p = psutil.Process(self.pid) + failures = [] + for name in dir(psutil.Process): + if name.startswith('_') \ + or name in ('terminate', 'kill', 'suspend', 'resume', + 'nice', 'send_signal', 'wait', 'children', + 'as_dict', 'memory_info_ex'): + continue + else: + try: + call(p, name) + num1 = p.num_handles() + call(p, name) + num2 = p.num_handles() + except (psutil.NoSuchProcess, psutil.AccessDenied): + pass + else: + if num2 > num1: + fail = \ + "failure while processing Process.%s method " \ + "(before=%s, after=%s)" % (name, num1, num2) + failures.append(fail) + if failures: + self.fail('\n' + '\n'.join(failures)) + + def test_name_always_available(self): + # On Windows name() is never supposed to raise AccessDenied, + # see https://github.com/giampaolo/psutil/issues/627 + for p in psutil.process_iter(): + try: + p.name() + except psutil.NoSuchProcess: + pass + + @unittest.skipIf(not sys.version_info >= (2, 7), + "CTRL_* signals not supported") + def test_ctrl_signals(self): + p = psutil.Process(get_test_subprocess().pid) + p.send_signal(signal.CTRL_C_EVENT) + p.send_signal(signal.CTRL_BREAK_EVENT) + p.kill() + p.wait() + self.assertRaises(psutil.NoSuchProcess, + p.send_signal, signal.CTRL_C_EVENT) + self.assertRaises(psutil.NoSuchProcess, + p.send_signal, signal.CTRL_BREAK_EVENT) + + def test_compare_name_exe(self): + for p in psutil.process_iter(): + try: + a = os.path.basename(p.exe()) + b = p.name() + except (psutil.NoSuchProcess, psutil.AccessDenied): + pass + else: + self.assertEqual(a, b) + + def test_username(self): + self.assertEqual(psutil.Process().username(), + win32api.GetUserNameEx(win32con.NameSamCompatible)) + + def test_cmdline(self): + sys_value = re.sub(' +', ' ', win32api.GetCommandLine()).strip() + psutil_value = ' '.join(psutil.Process().cmdline()) + self.assertEqual(sys_value, psutil_value) + + # XXX - occasional failures + + # def test_cpu_times(self): + # handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + # win32con.FALSE, os.getpid()) + # self.addCleanup(win32api.CloseHandle, handle) + # sys_value = win32process.GetProcessTimes(handle) + # psutil_value = psutil.Process().cpu_times() + # self.assertAlmostEqual( + # psutil_value.user, sys_value['UserTime'] / 10000000.0, + # delta=0.2) + # self.assertAlmostEqual( + # psutil_value.user, sys_value['KernelTime'] / 10000000.0, + # delta=0.2) + + def test_nice(self): + handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + win32con.FALSE, os.getpid()) + self.addCleanup(win32api.CloseHandle, handle) + sys_value = 
win32process.GetPriorityClass(handle) + psutil_value = psutil.Process().nice() + self.assertEqual(psutil_value, sys_value) + + def test_memory_info(self): + handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + win32con.FALSE, self.pid) + self.addCleanup(win32api.CloseHandle, handle) + sys_value = win32process.GetProcessMemoryInfo(handle) + psutil_value = psutil.Process(self.pid).memory_info() + self.assertEqual( + sys_value['PeakWorkingSetSize'], psutil_value.peak_wset) + self.assertEqual( + sys_value['WorkingSetSize'], psutil_value.wset) + self.assertEqual( + sys_value['QuotaPeakPagedPoolUsage'], psutil_value.peak_paged_pool) + self.assertEqual( + sys_value['QuotaPagedPoolUsage'], psutil_value.paged_pool) + self.assertEqual( + sys_value['QuotaPeakNonPagedPoolUsage'], + psutil_value.peak_nonpaged_pool) + self.assertEqual( + sys_value['QuotaNonPagedPoolUsage'], psutil_value.nonpaged_pool) + self.assertEqual( + sys_value['PagefileUsage'], psutil_value.pagefile) + self.assertEqual( + sys_value['PeakPagefileUsage'], psutil_value.peak_pagefile) + + self.assertEqual(psutil_value.rss, psutil_value.wset) + self.assertEqual(psutil_value.vms, psutil_value.pagefile) + + def test_wait(self): + handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + win32con.FALSE, self.pid) + self.addCleanup(win32api.CloseHandle, handle) + p = psutil.Process(self.pid) + p.terminate() + psutil_value = p.wait() + sys_value = win32process.GetExitCodeProcess(handle) + self.assertEqual(psutil_value, sys_value) + + def test_cpu_affinity(self): + def from_bitmask(x): + return [i for i in range(64) if (1 << i) & x] + + handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + win32con.FALSE, self.pid) + self.addCleanup(win32api.CloseHandle, handle) + sys_value = from_bitmask( + win32process.GetProcessAffinityMask(handle)[0]) + psutil_value = psutil.Process(self.pid).cpu_affinity() + self.assertEqual(psutil_value, sys_value) + + def test_io_counters(self): + handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, + win32con.FALSE, os.getpid()) + self.addCleanup(win32api.CloseHandle, handle) + sys_value = win32process.GetProcessIoCounters(handle) + psutil_value = psutil.Process().io_counters() + self.assertEqual( + psutil_value.read_count, sys_value['ReadOperationCount']) + self.assertEqual( + psutil_value.write_count, sys_value['WriteOperationCount']) + self.assertEqual( + psutil_value.read_bytes, sys_value['ReadTransferCount']) + self.assertEqual( + psutil_value.write_bytes, sys_value['WriteTransferCount']) + self.assertEqual( + psutil_value.other_count, sys_value['OtherOperationCount']) + self.assertEqual( + psutil_value.other_bytes, sys_value['OtherTransferCount']) + + def test_num_handles(self): + import ctypes + import ctypes.wintypes + PROCESS_QUERY_INFORMATION = 0x400 + handle = ctypes.windll.kernel32.OpenProcess( + PROCESS_QUERY_INFORMATION, 0, os.getpid()) + self.addCleanup(ctypes.windll.kernel32.CloseHandle, handle) + hndcnt = ctypes.wintypes.DWORD() + ctypes.windll.kernel32.GetProcessHandleCount( + handle, ctypes.byref(hndcnt)) + sys_value = hndcnt.value + psutil_value = psutil.Process().num_handles() + ctypes.windll.kernel32.CloseHandle(handle) + self.assertEqual(psutil_value, sys_value + 1) + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestProcessWMI(unittest.TestCase): + """Compare Process API results with WMI.""" + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess().pid + + @classmethod + def tearDownClass(cls): + reap_children() + + 
def test_name(self): + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + self.assertEqual(p.name(), w.Caption) + + def test_exe(self): + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + # Note: wmi reports the exe as a lower case string. + # Being Windows paths case-insensitive we ignore that. + self.assertEqual(p.exe().lower(), w.ExecutablePath.lower()) + + def test_cmdline(self): + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + self.assertEqual(' '.join(p.cmdline()), + w.CommandLine.replace('"', '')) + + def test_username(self): + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + domain, _, username = w.GetOwner() + username = "%s\\%s" % (domain, username) + self.assertEqual(p.username(), username) + + def test_memory_rss(self): + time.sleep(0.1) + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + rss = p.memory_info().rss + self.assertEqual(rss, int(w.WorkingSetSize)) + + def test_memory_vms(self): + time.sleep(0.1) + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + vms = p.memory_info().vms + # http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx + # ...claims that PageFileUsage is represented in Kilo + # bytes but funnily enough on certain platforms bytes are + # returned instead. + wmi_usage = int(w.PageFileUsage) + if (vms != wmi_usage) and (vms != wmi_usage * 1024): + self.fail("wmi=%s, psutil=%s" % (wmi_usage, vms)) + + def test_create_time(self): + w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0] + p = psutil.Process(self.pid) + wmic_create = str(w.CreationDate.split('.')[0]) + psutil_create = time.strftime("%Y%m%d%H%M%S", + time.localtime(p.create_time())) + self.assertEqual(wmic_create, psutil_create) + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestDualProcessImplementation(unittest.TestCase): + """ + Certain APIs on Windows have 2 internal implementations, one + based on documented Windows APIs, another one based + NtQuerySystemInformation() which gets called as fallback in + case the first fails because of limited permission error. + Here we test that the two methods return the exact same value, + see: + https://github.com/giampaolo/psutil/issues/304 + """ + + @classmethod + def setUpClass(cls): + cls.pid = get_test_subprocess().pid + + @classmethod + def tearDownClass(cls): + reap_children() + # --- + # same tests as above but mimicks the AccessDenied failure of + # the first (fast) method failing with AD. 
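+    # A rough sketch of the pattern under test (the helper names here are
+    # illustrative, not psutil's real internals): each API tries the
+    # documented Win32 call first and only falls back to the
+    # NtQuerySystemInformation-based path when that call fails with a
+    # permission error, roughly:
+    #
+    #     try:
+    #         info = fast_win32_impl(pid)
+    #     except (AccessDenied, WindowsError):   # EPERM / access denied
+    #         info = nt_query_system_information_impl(pid)
+    #
+    # The mock.patch(side_effect=AccessDenied / OSError(EPERM)) calls below
+    # force the slow branch so both implementations can be compared.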
+ + def test_name(self): + name = psutil.Process(self.pid).name() + with mock.patch("psutil._psplatform.cext.proc_exe", + side_effect=psutil.AccessDenied(os.getpid())) as fun: + self.assertEqual(psutil.Process(self.pid).name(), name) + assert fun.called + + def test_memory_info(self): + mem_1 = psutil.Process(self.pid).memory_info() + with mock.patch("psutil._psplatform.cext.proc_memory_info", + side_effect=OSError(errno.EPERM, "msg")) as fun: + mem_2 = psutil.Process(self.pid).memory_info() + self.assertEqual(len(mem_1), len(mem_2)) + for i in range(len(mem_1)): + self.assertGreaterEqual(mem_1[i], 0) + self.assertGreaterEqual(mem_2[i], 0) + self.assertAlmostEqual(mem_1[i], mem_2[i], delta=512) + assert fun.called + + def test_create_time(self): + ctime = psutil.Process(self.pid).create_time() + with mock.patch("psutil._psplatform.cext.proc_create_time", + side_effect=OSError(errno.EPERM, "msg")) as fun: + self.assertEqual(psutil.Process(self.pid).create_time(), ctime) + assert fun.called + + def test_cpu_times(self): + cpu_times_1 = psutil.Process(self.pid).cpu_times() + with mock.patch("psutil._psplatform.cext.proc_cpu_times", + side_effect=OSError(errno.EPERM, "msg")) as fun: + cpu_times_2 = psutil.Process(self.pid).cpu_times() + assert fun.called + self.assertAlmostEqual( + cpu_times_1.user, cpu_times_2.user, delta=0.01) + self.assertAlmostEqual( + cpu_times_1.system, cpu_times_2.system, delta=0.01) + + def test_io_counters(self): + io_counters_1 = psutil.Process(self.pid).io_counters() + with mock.patch("psutil._psplatform.cext.proc_io_counters", + side_effect=OSError(errno.EPERM, "msg")) as fun: + io_counters_2 = psutil.Process(self.pid).io_counters() + for i in range(len(io_counters_1)): + self.assertAlmostEqual( + io_counters_1[i], io_counters_2[i], delta=5) + assert fun.called + + def test_num_handles(self): + num_handles = psutil.Process(self.pid).num_handles() + with mock.patch("psutil._psplatform.cext.proc_num_handles", + side_effect=OSError(errno.EPERM, "msg")) as fun: + self.assertEqual(psutil.Process(self.pid).num_handles(), + num_handles) + assert fun.called + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class RemoteProcessTestCase(unittest.TestCase): + """Certain functions require calling ReadProcessMemory. + This trivially works when called on the current process. + Check that this works on other processes, especially when they + have a different bitness. 
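+    Whether the current interpreter is 64 bit is detected with the usual
+    sys.maxsize > 2**32 check (the same expression behind IS_64_BIT above
+    and inside find_other_interpreter() below).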
+ """ + + @staticmethod + def find_other_interpreter(): + # find a python interpreter that is of the opposite bitness from us + code = "import sys; sys.stdout.write(str(sys.maxsize > 2**32))" + + # XXX: a different and probably more stable approach might be to access + # the registry but accessing 64 bit paths from a 32 bit process + for filename in glob.glob(r"C:\Python*\python.exe"): + proc = subprocess.Popen(args=[filename, "-c", code], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output, _ = proc.communicate() + if output == str(not IS_64_BIT): + return filename + + @classmethod + def setUpClass(cls): + other_python = cls.find_other_interpreter() + + if other_python is None: + raise unittest.SkipTest( + "could not find interpreter with opposite bitness") + + if IS_64_BIT: + cls.python64 = sys.executable + cls.python32 = other_python + else: + cls.python64 = other_python + cls.python32 = sys.executable + + test_args = ["-c", "import sys; sys.stdin.read()"] + + def setUp(self): + env = os.environ.copy() + env["THINK_OF_A_NUMBER"] = str(os.getpid()) + self.proc32 = get_test_subprocess([self.python32] + self.test_args, + env=env, + stdin=subprocess.PIPE) + self.proc64 = get_test_subprocess([self.python64] + self.test_args, + env=env, + stdin=subprocess.PIPE) + + def tearDown(self): + self.proc32.communicate() + self.proc64.communicate() + reap_children() + + @classmethod + def tearDownClass(cls): + reap_children() + + def test_cmdline_32(self): + p = psutil.Process(self.proc32.pid) + self.assertEqual(len(p.cmdline()), 3) + self.assertEqual(p.cmdline()[1:], self.test_args) + + def test_cmdline_64(self): + p = psutil.Process(self.proc64.pid) + self.assertEqual(len(p.cmdline()), 3) + self.assertEqual(p.cmdline()[1:], self.test_args) + + def test_cwd_32(self): + p = psutil.Process(self.proc32.pid) + self.assertEqual(p.cwd(), os.getcwd()) + + def test_cwd_64(self): + p = psutil.Process(self.proc64.pid) + self.assertEqual(p.cwd(), os.getcwd()) + + def test_environ_32(self): + p = psutil.Process(self.proc32.pid) + e = p.environ() + self.assertIn("THINK_OF_A_NUMBER", e) + self.assertEquals(e["THINK_OF_A_NUMBER"], str(os.getpid())) + + def test_environ_64(self): + p = psutil.Process(self.proc64.pid) + e = p.environ() + self.assertIn("THINK_OF_A_NUMBER", e) + self.assertEquals(e["THINK_OF_A_NUMBER"], str(os.getpid())) + + +# =================================================================== +# Windows services +# =================================================================== + + +@unittest.skipIf(not WINDOWS, "WINDOWS only") +class TestServices(unittest.TestCase): + + def test_win_service_iter(self): + valid_statuses = set([ + "running", + "paused", + "start", + "pause", + "continue", + "stop", + "stopped", + ]) + valid_start_types = set([ + "automatic", + "manual", + "disabled", + ]) + valid_statuses = set([ + "running", + "paused", + "start_pending", + "pause_pending", + "continue_pending", + "stop_pending", + "stopped" + ]) + for serv in psutil.win_service_iter(): + data = serv.as_dict() + self.assertIsInstance(data['name'], str) + self.assertNotEqual(data['name'].strip(), "") + self.assertIsInstance(data['display_name'], str) + self.assertIsInstance(data['username'], str) + self.assertIn(data['status'], valid_statuses) + if data['pid'] is not None: + psutil.Process(data['pid']) + self.assertIsInstance(data['binpath'], str) + self.assertIsInstance(data['username'], str) + self.assertIsInstance(data['start_type'], str) + self.assertIn(data['start_type'], valid_start_types) + 
self.assertIn(data['status'], valid_statuses) + self.assertIsInstance(data['description'], str) + pid = serv.pid() + if pid is not None: + p = psutil.Process(pid) + self.assertTrue(p.is_running()) + # win_service_get + s = psutil.win_service_get(serv.name()) + # test __eq__ + self.assertEqual(serv, s) + + def test_win_service_get(self): + name = next(psutil.win_service_iter()).name() + + with self.assertRaises(psutil.NoSuchProcess) as cm: + psutil.win_service_get(name + '???') + self.assertEqual(cm.exception.name, name + '???') + + # test NoSuchProcess + service = psutil.win_service_get(name) + exc = WindowsError( + psutil._psplatform.cext.ERROR_SERVICE_DOES_NOT_EXIST, "") + with mock.patch("psutil._psplatform.cext.winservice_query_status", + side_effect=exc): + self.assertRaises(psutil.NoSuchProcess, service.status) + with mock.patch("psutil._psplatform.cext.winservice_query_config", + side_effect=exc): + self.assertRaises(psutil.NoSuchProcess, service.username) + + # test AccessDenied + exc = WindowsError( + psutil._psplatform.cext.ERROR_ACCESS_DENIED, "") + with mock.patch("psutil._psplatform.cext.winservice_query_status", + side_effect=exc): + self.assertRaises(psutil.AccessDenied, service.status) + with mock.patch("psutil._psplatform.cext.winservice_query_config", + side_effect=exc): + self.assertRaises(psutil.AccessDenied, service.username) + + # test __str__ and __repr__ + self.assertIn(service.name(), str(service)) + self.assertIn(service.display_name(), str(service)) + self.assertIn(service.name(), repr(service)) + self.assertIn(service.display_name(), repr(service)) + + +if __name__ == '__main__': + run_test_module_by_name(__file__) diff --git a/server/www/teleport/app_bootstrap.py b/server/www/teleport/app_bootstrap.py index 324dca3..e9fd80c 100644 --- a/server/www/teleport/app_bootstrap.py +++ b/server/www/teleport/app_bootstrap.py @@ -8,8 +8,8 @@ sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'webroo def main(): from app.app_env import PATH_APP_ROOT, PATH_DATA - from app.base.webapp import get_web_app - _web_app = get_web_app() + from app.base.webapp import tp_web_app + _web_app = tp_web_app() if not _web_app.init(PATH_APP_ROOT, PATH_DATA): return 1 diff --git a/server/www/teleport/static/js/dashboard/dashboard.js b/server/www/teleport/static/js/dashboard/dashboard.js index 5b0a9db..02d9659 100644 --- a/server/www/teleport/static/js/dashboard/dashboard.js +++ b/server/www/teleport/static/js/dashboard/dashboard.js @@ -1,6 +1,7 @@ "use strict"; $app.on_init = function (cb_stack) { + $app.MAX_OVERLOAD_DATA = 20; $app.dom = { count_user: $('#count-user') , count_host: $('#count-host') @@ -13,6 +14,9 @@ $app.on_init = function (cb_stack) { // refresh overload info every 5m. 
$app.load_overload_info(); + $app.ws = null; + $app.init_ws(); + cb_stack.exec(); }; @@ -35,8 +39,163 @@ $app.load_basic_info = function () { } ); - setTimeout($app.load_basic_info, 60*1000); + setTimeout($app.load_basic_info, 60 * 1000); }; $app.load_overload_info = function () { + var i = 0; + // var bar_x = []; + // for (i = 0; i < $app.MAX_OVERLOAD_DATA; i++) { + // bar_x.push(i); + // } + + var now = Math.floor(Date.now() / 1000); + console.log('now', now); + + $app.bar_cpu_user = []; + $app.bar_cpu_sys = []; + var t = tp_local2utc(now - $app.MAX_OVERLOAD_DATA - 1); + console.log(t); + for (i = 0; i < $app.MAX_OVERLOAD_DATA; i++) { + var x = t + i; + console.log(x, t); + $app.bar_cpu_user.push([ + { + name: x.toString() + , value: [tp_format_datetime(tp_utc2local(x)), 0] + } + ]); + $app.bar_cpu_sys.push([ + { + name: x.toString() + , value: [tp_format_datetime(tp_utc2local(x)), 0] + } + ]); + } + //console.log('--', $app.bar_cpu_data); + + $app.bar_cpu = echarts.init(document.getElementById('bar-cpu')); + $app.bar_cpu.setOption({ + title: { + // show: false + text: 'CPU负载' + , top: 0 + , left: 50 + , textStyle: { + color: 'rgba(0,0,0,0.5)' + , fontSize: 14 + } + }, + grid: { + show: true + , left: 30 + , right: 20 + , top: 30 + , bottom: 20 + }, + tooltip: { + trigger: 'axis' + , formatter: function (params) { + console.log(params); + //params = params[0]; + var t = parseInt(params[0].name); + return tp_format_datetime(tp_utc2local(t), 'HH:mm:ss') + '
' + params[0].value[1] + '%, ' + params[1].value[1] + '%'; + } + , axisPointer: { + animation: false + } + }, + // legend: { + // // show: false + // }, + xAxis: { + type: 'time' + , boundaryGap: false + , axisLine: {show: false} + }, + // yAxis: {type: 'value', min: 'dataMin', axisLine: {show: false}, splitLine: {show: false}}, + yAxis: { + type: 'value' + , axisLine: { + show: false + } + , min: 0 + , max: 100 + , boundaryGap: [0, '50%'] + + }, + series: [ + { + name: 'cpu-sys' + , type: 'line' + , smooth: true + , symbol: 'none' + , stack: 'a' + , showSymbol: false + , data: $app.bar_cpu_sys + } + , { + name: 'cpu-user' + , type: 'line' + , smooth: true + , symbol: 'none' + , stack: 'a' + , showSymbol: false + , data: $app.bar_cpu_user + } + ] + }); +}; + +$app.init_ws = function () { + if ($app.ws !== null) + delete $app.ws; + + var _sid = Cookies.get('_sid'); + $app.ws = new WebSocket('ws://' + location.host + '/ws/' + _sid); + + $app.ws.onopen = function (e) { + // 订阅: + $app.ws.send('{"method": "subscribe", "params": ["sys_real_status"]}'); + }; + $app.ws.onclose = function (e) { + // console.log('[ws] ws-on-close', e); + setTimeout($app.init_ws, 5000); + }; + $app.ws.onmessage = function (e) { + var t = JSON.parse(e.data); + // console.log('[ws] ws-on-message', t); + + if (t.subscribe === 'sys_real_status') { + $app.bar_cpu_user.shift(); + $app.bar_cpu_user.push({ + 'name': t.data.t.toString(), + 'value': [tp_format_datetime(tp_utc2local(t.data.t)), t.data.c.u] + }); + $app.bar_cpu_sys.shift(); + $app.bar_cpu_sys.push({ + 'name': t.data.t.toString(), + 'value': [tp_format_datetime(tp_utc2local(t.data.t)), t.data.c.s] + }); + //console.log($app.bar_cpu_data); + console.log('--', t.data.t); + + $app.bar_cpu.setOption( + { + // xAxis: {data: 1}, + series: [ + { + name: 'cpu-user' + , data: $app.bar_cpu_user + } + , { + name: 'cpu-sys' + , data: $app.bar_cpu_sys + } + ] + } + ); + + } + }; }; diff --git a/server/www/teleport/view/dashboard/index.mako b/server/www/teleport/view/dashboard/index.mako index 7d8c324..41a344f 100644 --- a/server/www/teleport/view/dashboard/index.mako +++ b/server/www/teleport/view/dashboard/index.mako @@ -6,6 +6,7 @@ <%inherit file="../page_base.mako"/> <%block name="extend_js_file"> + @@ -69,8 +70,8 @@
-
CPU负载
-
+##
CPU负载
+
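The dashboard page above and the handlers added further below (app/controller/ws.py, app/base/status.py) exchange a small JSON protocol over ws://<host>/ws/<session-id>: the page subscribes to a channel by name, and the server pushes one sample per interval. The following is only a minimal sketch of the two message shapes, with made-up sample values; the field layout mirrors the val dict built in TPSysStatus.run() and the envelope matches TPWebSocketServer.send_message().

    import json

    # Client -> server: sent by dashboard.js in ws.onopen to subscribe
    # to the realtime system-status channel.
    subscribe_req = {"method": "subscribe", "params": ["sys_real_status"]}

    # Server -> client: one push per sampling interval. The numbers here
    # are placeholders; see TPSysStatus below for how they are collected.
    status_push = {
        "subscribe": "sys_real_status",
        "data": {
            "t": 1513026000,                    # UTC timestamp of the sample
            "c": {"u": 12.5, "s": 3.1},         # CPU user / system percent
            "m": {"u": 2048000, "t": 8192000},  # memory used / total, bytes
            "d": {"r": 0, "w": 0},              # disk bytes read / written since last sample
            "n": {"r": 0, "s": 0},              # net bytes received / sent since last sample
        },
    }

    # The server serializes with compact separators, exactly like send_message().
    print(json.dumps(subscribe_req, separators=(",", ":")))
    print(json.dumps(status_push, separators=(",", ":")))
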
diff --git a/server/www/teleport/webroot/app/app_env.py b/server/www/teleport/webroot/app/app_env.py index 2146b8b..1262f8a 100644 --- a/server/www/teleport/webroot/app/app_env.py +++ b/server/www/teleport/webroot/app/app_env.py @@ -28,7 +28,7 @@ for p in x: PATH_APP_ROOT = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..')) -sys.path.append(os.path.join(PATH_APP_ROOT, 'webroot')) +# sys.path.append(os.path.join(PATH_APP_ROOT, 'webroot')) # 检查操作系统,目前支持Win/Linux/MacOS PLATFORM = platform.system().lower() diff --git a/server/www/teleport/webroot/app/base/configs.py b/server/www/teleport/webroot/app/base/configs.py index 1036005..d4b09c8 100644 --- a/server/www/teleport/webroot/app/base/configs.py +++ b/server/www/teleport/webroot/app/base/configs.py @@ -8,7 +8,7 @@ from app.const import * from .logger import log from .utils import AttrDict, tp_convert_to_attr_dict, tp_make_dir -__all__ = ['get_cfg'] +__all__ = ['tp_cfg'] class BaseAppConfig(dict): @@ -541,7 +541,7 @@ class AppConfig(BaseAppConfig): return True -def get_cfg(): +def tp_cfg(): """ :rtype: app.base.configs.AppConfig """ diff --git a/server/www/teleport/webroot/app/base/controller.py b/server/www/teleport/webroot/app/base/controller.py index 9552782..c58367e 100644 --- a/server/www/teleport/webroot/app/base/controller.py +++ b/server/www/teleport/webroot/app/base/controller.py @@ -10,7 +10,7 @@ import mako.lookup import mako.template import tornado.web from app.base.logger import log -from app.base.session import session_manager +from app.base.session import tp_session from app.const import * from tornado.escape import json_encode @@ -109,15 +109,15 @@ class TPBaseHandler(tornado.web.RequestHandler): def set_session(self, name, value, expire=None): k = '{}-{}'.format(name, self._s_id) - session_manager().set(k, value, expire) + tp_session().set(k, value, expire) def get_session(self, name, _default=None): k = '{}-{}'.format(name, self._s_id) - return session_manager().get(k, _default) + return tp_session().get(k, _default) def del_session(self, name): k = '{}-{}'.format(name, self._s_id) - return session_manager().set(k, '', -1) + return tp_session().set(k, '', -1) def get_current_user(self): return self._user diff --git a/server/www/teleport/webroot/app/base/core_server.py b/server/www/teleport/webroot/app/base/core_server.py index f060ab1..567d9df 100644 --- a/server/www/teleport/webroot/app/base/core_server.py +++ b/server/www/teleport/webroot/app/base/core_server.py @@ -5,7 +5,7 @@ import urllib.parse import tornado.gen import tornado.httpclient -from .configs import get_cfg +from .configs import tp_cfg from app.const import * from app.base.logger import log @@ -19,7 +19,7 @@ def core_service_async_post_http(post_data): data = urllib.parse.quote(v).encode('utf-8') c = tornado.httpclient.AsyncHTTPClient() - r = yield c.fetch(get_cfg().common.core_server_rpc, body=data, method='POST') + r = yield c.fetch(tp_cfg().common.core_server_rpc, body=data, method='POST') # print('async_post_http return:', r.body.decode()) # return TPE_OK, json.loads(r.body.decode()) diff --git a/server/www/teleport/webroot/app/base/db.py b/server/www/teleport/webroot/app/base/db.py index 01c66cd..023d612 100644 --- a/server/www/teleport/webroot/app/base/db.py +++ b/server/www/teleport/webroot/app/base/db.py @@ -8,7 +8,7 @@ import threading import builtins from app.const import * -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.utils import AttrDict, tp_make_dir from 
app.base.logger import log from .database.create import DatabaseInit @@ -56,7 +56,7 @@ class TPDatabase: return self._table_prefix def init(self): - cfg = get_cfg() + cfg = tp_cfg() if 'sqlite' == cfg.database.type: if cfg.database.sqlite_file is None: cfg.set_default('database::sqlite-file', os.path.join(cfg.data_path, 'db', 'teleport.db')) @@ -105,7 +105,7 @@ class TPDatabase: sys_cfg[item[0]] = item[1] if len(sys_cfg) > 0: - get_cfg().update_sys(sys_cfg) + tp_cfg().update_sys(sys_cfg) def _init_sqlite(self, db_file): self.db_type = self.DB_TYPE_SQLITE diff --git a/server/www/teleport/webroot/app/base/mail.py b/server/www/teleport/webroot/app/base/mail.py index f056428..bc53171 100644 --- a/server/www/teleport/webroot/app/base/mail.py +++ b/server/www/teleport/webroot/app/base/mail.py @@ -9,7 +9,7 @@ from email.mime.text import MIMEText from email.utils import formataddr, parseaddr import tornado.gen -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.logger import log from app.const import * @@ -60,8 +60,8 @@ def tp_send_mail(recipient, message, subject=None, sender=None, cc=None, bcc=Non :type password: string | None :rtype: dict """ - sys_smtp = get_cfg().sys.smtp - sys_smtp_password = get_cfg().sys_smtp_password + sys_smtp = tp_cfg().sys.smtp + sys_smtp_password = tp_cfg().sys_smtp_password _subject = subject if subject is not None else '系统消息' _sender = sender if sender is not None else sys_smtp.sender diff --git a/server/www/teleport/webroot/app/base/session.py b/server/www/teleport/webroot/app/base/session.py index e5bc039..a5677fb 100644 --- a/server/www/teleport/webroot/app/base/session.py +++ b/server/www/teleport/webroot/app/base/session.py @@ -4,11 +4,12 @@ import datetime import threading from app.base.logger import log -from app.base.configs import get_cfg +from app.base.configs import tp_cfg class SessionManager(threading.Thread): - SESSION_EXPIRE = 3600 # 60*60 默认超时时间为1小时 + # SESSION_EXPIRE = 3600 # 60*60 默认超时时间为1小时 + _expire = 3600 def __init__(self): super().__init__(name='session-manager-thread') @@ -20,13 +21,19 @@ class SessionManager(threading.Thread): # session表,session_id为索引,每个项为一个字典,包括 v(value), t(last access), e(expire seconds) self._session_dict = dict() + self._expire = 0 self._lock = threading.RLock() self._stop_flag = False self._timer_cond = threading.Condition() + self.update_default_expire() + def init(self): return True + def update_default_expire(self): + self._expire = tp_cfg().sys.login.session_timeout * 60 + def stop(self): self._stop_flag = True self._timer_cond.acquire() @@ -63,8 +70,7 @@ class SessionManager(threading.Thread): """ if expire is None: - # expire = self.SESSION_EXPIRE - expire = get_cfg().sys.login.session_timeout * 60 + expire = tp_cfg().sys.login.session_timeout * 60 if expire < 0: with self._lock: @@ -110,7 +116,7 @@ class SessionManager(threading.Thread): return _default -def session_manager(): +def tp_session(): """ 取得Session管理器的唯一实例 diff --git a/server/www/teleport/webroot/app/base/status.py b/server/www/teleport/webroot/app/base/status.py new file mode 100644 index 0000000..167a36f --- /dev/null +++ b/server/www/teleport/webroot/app/base/status.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- + +import time +import datetime +import threading +import psutil +import json + +from app.base.logger import log +from app.base.utils import tp_timestamp_utc_now +from app.base.configs import tp_cfg +from app.controller.ws import tp_wss + + +class TPSysStatus(threading.Thread): + def __init__(self): + 
super().__init__(name='sys-status-thread') + + import builtins + if '__tp_sys_status__' in builtins.__dict__: + raise RuntimeError('TPSysStatus object exists, you can not create more than one instance.') + + # session表,session_id为索引,每个项为一个字典,包括 v(value), t(last access), e(expire seconds) + self._session_dict = dict() + + self._stop_flag = False + self._time_cnt = 0 + self._interval = 2 + + self._disk_read = 0 + self._disk_write = 0 + self._net_recv = 0 + self._net_sent = 0 + + def init(self): + psutil.cpu_times_percent() + net = psutil.net_io_counters(pernic=False) + self._net_recv = net.bytes_recv + self._net_sent = net.bytes_sent + disk = psutil.disk_io_counters(perdisk=False) + self._disk_read = disk.read_bytes + self._disk_write = disk.write_bytes + + return True + + def stop(self): + self._stop_flag = True + self.join() + log.v('{} stopped.\n'.format(self.name)) + + def run(self): + while not self._stop_flag: + # time.sleep(1) + # if self._stop_flag: + # break + # self._time_cnt += 1 + # if self._time_cnt < 5: + # continue + # + # self._time_cnt = 0 + + time.sleep(self._interval) + val = {'t': tp_timestamp_utc_now()} + + cpu = psutil.cpu_times_percent() + # print(int(cpu.user * 100), int(cpu.system * 100)) + val['c'] = {'u': cpu.user, 's': cpu.system} + # + mem = psutil.virtual_memory() + val['m'] = {'u': mem.used, 't': mem.total} + # print(mem.total, mem.used, int(mem.used * 100 / mem.total)) + + disk = psutil.disk_io_counters(perdisk=False) + # val['d'] = {'r': disk.read_byes, 'w': disk.write_bytes} + # print(disk.read_bytes, disk.write_bytes) + _read = disk.read_bytes - self._disk_read + _write = disk.write_bytes - self._disk_write + self._disk_read = disk.read_bytes + self._disk_write = disk.write_bytes + + if _read < 0: + _read = 0 + if _write < 0: + _write = 0 + val['d'] = {'r': _read, 'w': _write} + # print(int(_read / self._interval), int(_write / self._interval)) + + net = psutil.net_io_counters(pernic=False) + _recv = net.bytes_recv - self._net_recv + _sent = net.bytes_sent - self._net_sent + self._net_recv = net.bytes_recv + self._net_sent = net.bytes_sent + + # On some systems such as Linux, on a very busy or long-lived system, the numbers + # returned by the kernel may overflow and wrap (restart from zero) + if _recv < 0: + _recv = 0 + if _sent < 0: + _sent = 0 + val['n'] = {'r': _recv, 's': _sent} + # print(int(_recv / self._interval), int(_sent / self._interval)) + + # s = json.dumps(val, separators=(',', ':')) + + tp_wss().send_message('sys_real_status', val) + + # print(s) + + +def tp_sys_status(): + """ + 取得TPSysStatus管理器的唯一实例 + + :rtype : TPSysStatus + """ + + import builtins + if '__tp_sys_status__' not in builtins.__dict__: + builtins.__dict__['__tp_sys_status__'] = TPSysStatus() + return builtins.__dict__['__tp_sys_status__'] diff --git a/server/www/teleport/webroot/app/base/webapp.py b/server/www/teleport/webroot/app/base/webapp.py index d73d3f5..08a86c2 100644 --- a/server/www/teleport/webroot/app/base/webapp.py +++ b/server/www/teleport/webroot/app/base/webapp.py @@ -11,11 +11,12 @@ import tornado.netutil import tornado.process import tornado.web from app.const import * -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.db import get_db from app.base.logger import log -from app.base.session import session_manager -from app.base.cron import tp_corn +from app.base.session import tp_session +# from app.base.cron import tp_corn +from app.base.status import tp_sys_status class WebApp: @@ -27,7 +28,7 @@ class WebApp: def 
init(self, path_app_root, path_data): log.initialize() - cfg = get_cfg() + cfg = tp_cfg() cfg.app_path = path_app_root cfg.static_path = os.path.join(path_app_root, 'static') cfg.template_path = os.path.join(path_app_root, 'view') @@ -44,7 +45,7 @@ class WebApp: return True def _get_core_server_config(self): - cfg = get_cfg() + cfg = tp_cfg() try: req = {'method': 'get_config', 'param': []} req_data = json.dumps(req) @@ -71,16 +72,16 @@ class WebApp: log.e('can not initialize database interface.\n') return 0 - cfg = get_cfg() + cfg = tp_cfg() if _db.need_create or _db.need_upgrade: cfg.app_mode = APP_MODE_MAINTENANCE - get_cfg().update_sys(None) + tp_cfg().update_sys(None) else: cfg.app_mode = APP_MODE_NORMAL _db.load_system_config() - if not session_manager().init(): + if not tp_session().init(): log.e('can not initialize session manager.\n') return 0 @@ -130,25 +131,29 @@ class WebApp: return 0 # 启动session超时管理 - session_manager().start() + tp_session().start() def job(): log.v('---job--\n') - tp_corn().add_job('test', job, first_interval_seconds=None, interval_seconds=10) - tp_corn().start() + # tp_corn().add_job('test', job, first_interval_seconds=None, interval_seconds=10) + # tp_corn().init() + # tp_corn().start() + tp_sys_status().init() + tp_sys_status().start() try: tornado.ioloop.IOLoop.instance().start() except: log.e('\n') - tp_corn().stop() - session_manager().stop() + # tp_corn().stop() + tp_sys_status().stop() + tp_session().stop() return 0 -def get_web_app(): +def tp_web_app(): """ 取得WebApp的唯一实例 diff --git a/server/www/teleport/webroot/app/controller/__init__.py b/server/www/teleport/webroot/app/controller/__init__.py index 8cd61ac..1317c02 100644 --- a/server/www/teleport/webroot/app/controller/__init__.py +++ b/server/www/teleport/webroot/app/controller/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.controller import auth from . import account from . import audit @@ -13,6 +13,7 @@ from . import ops from . import rpc from . import system from . import user +from . 
import ws __all__ = ['controllers', 'fix_controller'] @@ -243,11 +244,15 @@ controllers = [ # - [json] 维护过程中页面与后台的通讯接口 (r'/maintenance/rpc', maintenance.RpcHandler), + # WebSocket for real-time information + # ws-client call 'http://ip:7190/ws/action/' + (r'/ws/(.*)', ws.WebSocketHandler), + (r'/.*', index.CatchAllHandler), ] def fix_controller(): - dbg_mode, _ = get_cfg().get_bool('common::debug-mode', False) + dbg_mode, _ = tp_cfg().get_bool('common::debug-mode', False) if dbg_mode: controllers.append((r'/exit/9E37CBAEE2294D9D9965112025CEE87F', index.ExitHandler)) diff --git a/server/www/teleport/webroot/app/controller/account.py b/server/www/teleport/webroot/app/controller/account.py index e95c488..8b70c64 100644 --- a/server/www/teleport/webroot/app/controller/account.py +++ b/server/www/teleport/webroot/app/controller/account.py @@ -12,7 +12,7 @@ from app.const import * from app.model import account from app.model import group -# cfg = get_cfg() +# cfg = tp_cfg() # 临时认证ID的基数,每次使用时均递减 tmp_auth_id_base = -1 diff --git a/server/www/teleport/webroot/app/controller/audit.py b/server/www/teleport/webroot/app/controller/audit.py index 445be9d..e072598 100644 --- a/server/www/teleport/webroot/app/controller/audit.py +++ b/server/www/teleport/webroot/app/controller/audit.py @@ -7,7 +7,7 @@ import os import shutil from app.const import * -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.logger import * from app.model import record # from app.model import user @@ -40,11 +40,11 @@ class RecordHandler(TPBaseHandler): if ret != TPE_OK: return - if not get_cfg().core.detected: + if not tp_cfg().core.detected: total_size = 0 free_size = 0 else: - total_size, free_size = get_free_space_bytes(get_cfg().core.replay_path) + total_size, free_size = get_free_space_bytes(tp_cfg().core.replay_path) param = { 'total_size': total_size, @@ -153,7 +153,7 @@ class ReplayHandler(TPBaseHandler): # class ReplayStaticFileHandler(tornado.web.StaticFileHandler): # def initialize(self, path, default_filename=None): # super().initialize(path, default_filename) -# self.root = get_cfg().core.replay_path +# self.root = tp_cfg().core.replay_path # # self.default_filename = default_filename # # @@ -185,7 +185,7 @@ class ComandLogHandler(TPBaseHandler): if protocol == 1: pass elif protocol == 2: - record_path = os.path.join(get_cfg().core.replay_path, 'ssh', '{:09d}'.format(int(record_id))) + record_path = os.path.join(tp_cfg().core.replay_path, 'ssh', '{:09d}'.format(int(record_id))) file_info = os.path.join(record_path, 'tp-ssh-cmd.txt') try: file = open(file_info, 'r') @@ -320,7 +320,7 @@ class DoGetFileHandler(TPBaseHandler): self.set_status(400) return self.write('invalid param, `type` should be `rdp`, `ssh` or `telnet`.') - file = os.path.join(get_cfg().core.replay_path, 'rdp', '{:09d}'.format(int(rid)), filename) + file = os.path.join(tp_cfg().core.replay_path, 'rdp', '{:09d}'.format(int(rid)), filename) if not os.path.exists(file): self.set_status(404) return self.write('file does not exists.') diff --git a/server/www/teleport/webroot/app/controller/auth.py b/server/www/teleport/webroot/app/controller/auth.py index 7ba482d..b2f1ba8 100644 --- a/server/www/teleport/webroot/app/controller/auth.py +++ b/server/www/teleport/webroot/app/controller/auth.py @@ -3,7 +3,7 @@ import json from urllib.parse import quote -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.controller import TPBaseHandler, TPBaseJsonHandler from app.const import * from 
app.logic.auth.captcha import tp_captcha_generate_image @@ -14,7 +14,7 @@ from app.model import user class LoginHandler(TPBaseHandler): def get(self): from app.base.db import get_db - if get_cfg().app_mode == APP_MODE_MAINTENANCE and get_db().need_create: + if tp_cfg().app_mode == APP_MODE_MAINTENANCE and get_db().need_create: _user = { 'id': 0, 'username': 'installer', @@ -42,7 +42,7 @@ class LoginHandler(TPBaseHandler): else: username = _user['username'] - default_auth_type = get_cfg().sys.login.auth + default_auth_type = tp_cfg().sys.login.auth param = { 'ref': _ref, 'username': username, @@ -53,7 +53,7 @@ class LoginHandler(TPBaseHandler): class DoLoginHandler(TPBaseJsonHandler): def post(self): - sys_cfg = get_cfg().sys + sys_cfg = tp_cfg().sys args = self.get_argument('args', None) if args is None: diff --git a/server/www/teleport/webroot/app/controller/host.py b/server/www/teleport/webroot/app/controller/host.py index 4073296..e36edca 100644 --- a/server/www/teleport/webroot/app/controller/host.py +++ b/server/www/teleport/webroot/app/controller/host.py @@ -9,7 +9,7 @@ import ipaddress import tornado.gen import tornado.httpclient -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.const import * from app.model import host from app.model import account @@ -171,7 +171,7 @@ class DoImportHandler(TPBaseHandler): csv_filename = '' try: - upload_path = os.path.join(get_cfg().data_path, 'tmp') # 文件的暂存路径 + upload_path = os.path.join(tp_cfg().data_path, 'tmp') # 文件的暂存路径 if not os.path.exists(upload_path): os.mkdir(upload_path) file_metas = self.request.files['csvfile'] # 提取表单中‘name’为‘file’的文件元数据 diff --git a/server/www/teleport/webroot/app/controller/maintenance.py b/server/www/teleport/webroot/app/controller/maintenance.py index ec27db1..44538e1 100644 --- a/server/www/teleport/webroot/app/controller/maintenance.py +++ b/server/www/teleport/webroot/app/controller/maintenance.py @@ -4,11 +4,11 @@ import json import threading from app.const import * -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.controller import TPBaseHandler, TPBaseJsonHandler from app.base.db import get_db -cfg = get_cfg() +cfg = tp_cfg() class IndexHandler(TPBaseHandler): diff --git a/server/www/teleport/webroot/app/controller/ops.py b/server/www/teleport/webroot/app/controller/ops.py index 5063b34..fec6086 100644 --- a/server/www/teleport/webroot/app/controller/ops.py +++ b/server/www/teleport/webroot/app/controller/ops.py @@ -7,10 +7,10 @@ import time import tornado.gen import tornado.httpclient from app.base.logger import log -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.controller import TPBaseHandler, TPBaseJsonHandler from app.base.core_server import * -from app.base.session import session_manager +from app.base.session import tp_session from app.const import * from app.model import account from app.model import host @@ -218,7 +218,7 @@ class DoGetSessionIDHandler(TPBaseJsonHandler): conn_id = tmp_conn_id_base log.v(conn_info) - session_manager().set('tmp-conn-info-{}'.format(conn_id), conn_info, 10) + tp_session().set('tmp-conn-info-{}'.format(conn_id), conn_info, 10) req = {'method': 'request_session', 'param': {'conn_id': conn_id}} _yr = core_service_async_post_http(req) @@ -236,11 +236,11 @@ class DoGetSessionIDHandler(TPBaseJsonHandler): data['host_ip'] = host_info['ip'] if conn_info['protocol_type'] == TP_PROTOCOL_TYPE_RDP: - data['teleport_port'] = get_cfg().core.rdp.port + 
data['teleport_port'] = tp_cfg().core.rdp.port elif conn_info['protocol_type'] == TP_PROTOCOL_TYPE_SSH: - data['teleport_port'] = get_cfg().core.ssh.port + data['teleport_port'] = tp_cfg().core.ssh.port elif conn_info['protocol_type'] == TP_PROTOCOL_TYPE_TELNET: - data['teleport_port'] = get_cfg().core.telnet.port + data['teleport_port'] = tp_cfg().core.telnet.port return self.write_json(0, data=data) diff --git a/server/www/teleport/webroot/app/controller/rpc.py b/server/www/teleport/webroot/app/controller/rpc.py index 7f7ab89..aece3cb 100644 --- a/server/www/teleport/webroot/app/controller/rpc.py +++ b/server/www/teleport/webroot/app/controller/rpc.py @@ -5,8 +5,8 @@ import urllib.parse import tornado.gen from app.const import * -from app.base.configs import get_cfg -from app.base.session import session_manager +from app.base.configs import tp_cfg +from app.base.session import tp_session from app.base.core_server import core_service_async_post_http from app.model import record from app.base.logger import * @@ -62,7 +62,7 @@ class RpcHandler(TPBaseJsonHandler): return self.write_json(TPE_PARAM) conn_id = param['conn_id'] - x = session_manager().taken('tmp-conn-info-{}'.format(conn_id), None) + x = tp_session().taken('tmp-conn-info-{}'.format(conn_id), None) return self.write_json(0, data=x) def _session_begin(self, param): @@ -120,7 +120,7 @@ class RpcHandler(TPBaseJsonHandler): if 'rpc' not in param: return self.write_json(-1, 'invalid param.') - get_cfg().common.core_server_rpc = param['rpc'] + tp_cfg().common.core_server_rpc = param['rpc'] # 获取core服务的配置信息 req = {'method': 'get_config', 'param': []} @@ -130,7 +130,7 @@ class RpcHandler(TPBaseJsonHandler): return self.write_json(code, 'get config from core-service failed.') log.d('update base server config info.\n') - get_cfg().update_core(ret_data) + tp_cfg().update_core(ret_data) return self.write_json(0) diff --git a/server/www/teleport/webroot/app/controller/system.py b/server/www/teleport/webroot/app/controller/system.py index 4601057..77585a5 100644 --- a/server/www/teleport/webroot/app/controller/system.py +++ b/server/www/teleport/webroot/app/controller/system.py @@ -8,7 +8,7 @@ import app.model.system as system_model import tornado.gen from app.app_ver import TP_SERVER_VER from app.base import mail -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.controller import TPBaseHandler, TPBaseJsonHandler from app.base.logger import * from app.const import * @@ -16,6 +16,7 @@ from app.base.db import get_db from app.model import syslog from app.model import record from app.base.core_server import core_service_async_post_http +from app.base.session import tp_session class DoGetTimeHandler(TPBaseJsonHandler): @@ -31,7 +32,7 @@ class ConfigHandler(TPBaseHandler): if ret != TPE_OK: return - cfg = get_cfg() + cfg = tp_cfg() # core_detected = False req = {'method': 'get_config', 'param': []} @@ -42,11 +43,11 @@ class ConfigHandler(TPBaseHandler): else: cfg.update_core(ret_data) - if not get_cfg().core.detected: + if not tp_cfg().core.detected: total_size = 0 free_size = 0 else: - total_size, _, free_size = shutil.disk_usage(get_cfg().core.replay_path) + total_size, _, free_size = shutil.disk_usage(tp_cfg().core.replay_path) _db = get_db() db = {'type': _db.db_type} @@ -61,11 +62,11 @@ class ConfigHandler(TPBaseHandler): param = { 'total_size': total_size, 'free_size': free_size, - 'core_cfg': get_cfg().core, - 'sys_cfg': get_cfg().sys, + 'core_cfg': tp_cfg().core, + 'sys_cfg': tp_cfg().sys, 'web_cfg': { 
'version': TP_SERVER_VER, - 'core_server_rpc': get_cfg().common.core_server_rpc, + 'core_server_rpc': tp_cfg().common.core_server_rpc, 'db': db } } @@ -238,12 +239,12 @@ class DoSaveCfgHandler(TPBaseJsonHandler): err = system_model.save_config(self, '更新SMTP设置', 'smtp', _cfg) if err == TPE_OK: # 同时更新内存缓存 - get_cfg().sys.smtp.server = _server - get_cfg().sys.smtp.port = _port - get_cfg().sys.smtp.ssl = _ssl - get_cfg().sys.smtp.sender = _sender + tp_cfg().sys.smtp.server = _server + tp_cfg().sys.smtp.port = _port + tp_cfg().sys.smtp.ssl = _ssl + tp_cfg().sys.smtp.sender = _sender # 特殊处理,防止前端拿到密码 - get_cfg().sys_smtp_password = _password + tp_cfg().sys_smtp_password = _password else: return self.write_json(err) @@ -254,9 +255,9 @@ class DoSaveCfgHandler(TPBaseJsonHandler): _timeout = _cfg['timeout'] err = system_model.save_config(self, '更新密码策略设置', 'password', _cfg) if err == TPE_OK: - get_cfg().sys.password.allow_reset = _allow_reset - get_cfg().sys.password.force_strong = _force_strong - get_cfg().sys.password.timeout = _timeout + tp_cfg().sys.password.allow_reset = _allow_reset + tp_cfg().sys.password.force_strong = _force_strong + tp_cfg().sys.password.timeout = _timeout else: return self.write_json(err) @@ -268,10 +269,11 @@ class DoSaveCfgHandler(TPBaseJsonHandler): _auth = _cfg['auth'] err = system_model.save_config(self, '更新登录策略设置', 'login', _cfg) if err == TPE_OK: - get_cfg().sys.login.session_timeout = _session_timeout - get_cfg().sys.login.retry = _retry - get_cfg().sys.login.lock_timeout = _lock_timeout - get_cfg().sys.login.auth = _auth + tp_cfg().sys.login.session_timeout = _session_timeout + tp_cfg().sys.login.retry = _retry + tp_cfg().sys.login.lock_timeout = _lock_timeout + tp_cfg().sys.login.auth = _auth + tp_session().update_default_expire() else: return self.write_json(err) @@ -283,10 +285,10 @@ class DoSaveCfgHandler(TPBaseJsonHandler): _cleanup_minute = _cfg['cleanup_minute'] err = system_model.save_config(self, '更新存储策略设置', 'storage', _cfg) if err == TPE_OK: - get_cfg().sys.storage.keep_log = _keep_log - get_cfg().sys.storage.keep_record = _keep_record - get_cfg().sys.storage.cleanup_hour = _cleanup_hour - get_cfg().sys.storage.cleanup_minute = _cleanup_minute + tp_cfg().sys.storage.keep_log = _keep_log + tp_cfg().sys.storage.keep_record = _keep_record + tp_cfg().sys.storage.cleanup_hour = _cleanup_hour + tp_cfg().sys.storage.cleanup_minute = _cleanup_minute else: return self.write_json(err) diff --git a/server/www/teleport/webroot/app/controller/user.py b/server/www/teleport/webroot/app/controller/user.py index 8a26fa1..445eaf1 100644 --- a/server/www/teleport/webroot/app/controller/user.py +++ b/server/www/teleport/webroot/app/controller/user.py @@ -7,10 +7,10 @@ import time import tornado.gen from app.base import mail -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.controller import TPBaseHandler, TPBaseJsonHandler from app.base.logger import * -from app.base.session import session_manager +from app.base.session import tp_session from app.base.utils import tp_check_strong_password from app.base.utils import tp_timestamp_utc_now from app.logic.auth.oath import tp_oath_verify_code @@ -30,14 +30,14 @@ class UserListHandler(TPBaseHandler): return is_sys_smtp = False - if get_cfg().sys.loaded: - smtp = get_cfg().sys.smtp + if tp_cfg().sys.loaded: + smtp = tp_cfg().sys.smtp if len(smtp.server) > 0: is_sys_smtp = True param = { 'sys_smtp': is_sys_smtp, - 'sys_cfg': get_cfg().sys + 'sys_cfg': tp_cfg().sys } 
self.render('user/user-list.mako', page_param=json.dumps(param)) @@ -92,10 +92,10 @@ class ResetPasswordHandler(TPBaseHandler): _token = self.get_argument('token', None) if _token is None: # 如果尚未设置SMTP或者系统限制,不允许发送密码重置邮件 - if len(get_cfg().sys.smtp.server) == 0: + if len(tp_cfg().sys.smtp.server) == 0: param['mode'] = 2 # mode=2, show 'error' page param['code'] = TPE_NETWORK - elif not get_cfg().sys.password.allow_reset: + elif not tp_cfg().sys.password.allow_reset: param['mode'] = 2 # mode=2, show 'error' page param['code'] = TPE_PRIVILEGE else: @@ -110,7 +110,7 @@ class ResetPasswordHandler(TPBaseHandler): param['mode'] = 2 # mode=2, show 'error' page else: param['mode'] = 3 # mode=3, show 'set-new-password' page - param['force_strong'] = get_cfg().sys.password.force_strong + param['force_strong'] = tp_cfg().sys.password.force_strong self.render('user/reset-password.mako', page_param=json.dumps(param)) @@ -323,7 +323,7 @@ class DoImportHandler(TPBaseHandler): csv_filename = '' try: - upload_path = os.path.join(get_cfg().data_path, 'tmp') # 文件的暂存路径 + upload_path = os.path.join(tp_cfg().data_path, 'tmp') # 文件的暂存路径 if not os.path.exists(upload_path): os.mkdir(upload_path) file_metas = self.request.files['csvfile'] # 提取表单中‘name’为‘file’的文件元数据 @@ -675,7 +675,7 @@ class DoResetPasswordHandler(TPBaseJsonHandler): return self.write_json(TPE_PARAM) # 根据需要进行弱密码检测 - if get_cfg().sys.password.force_strong: + if tp_cfg().sys.password.force_strong: if not tp_check_strong_password(password): return self.write_json(TPE_FAILED, '密码强度太弱!强密码需要至少8个英文字符,必须包含大写字母、小写字母和数字。') @@ -721,10 +721,10 @@ class DoUpdateUsersHandler(TPBaseJsonHandler): return self.write_json(err) if action == 'lock' or action == 'remove': - v = session_manager().get_start_with('user-') + v = tp_session().get_start_with('user-') for k in v: if v[k]['v']['id'] in users: - session_manager().taken(k) + tp_session().taken(k) self.write_json(err) diff --git a/server/www/teleport/webroot/app/controller/ws.py b/server/www/teleport/webroot/app/controller/ws.py new file mode 100644 index 0000000..7ae7b8f --- /dev/null +++ b/server/www/teleport/webroot/app/controller/ws.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- + +import json +# import urllib.parse +import threading + +# import tornado.gen +from app.const import * +# from app.base.configs import tp_cfg +from app.base.session import tp_session +# from app.base.core_server import core_service_async_post_http +# from app.model import record +from app.base.logger import * +# from app.base.controller import TPBaseJsonHandler +import tornado.websocket + + +class TPWebSocketServer(object): + _clients = {} + _lock = threading.RLock() + + def __init__(self): + super().__init__() + + import builtins + if '__tp_websocket_server__' in builtins.__dict__: + raise RuntimeError('TPWebSocketServer object exists, you can not create more than one instance.') + + def have_callbacker(self, callbacker): + return callbacker in self._clients + + def register(self, callbacker): + # 记录客户端连接实例 + with self._lock: + if not self.have_callbacker(callbacker): + self._clients[callbacker] = {'subscribe': []} + + def unregister(self, callbacker): + with self._lock: + # 删除客户端连接实例 + try: + del self._clients[callbacker] + except: + # print('when unregister, not exists.') + pass + + def on_message(self, callbacker, message): + print('got message', message) + try: + req = json.loads(message) + except: + log.e('need json-format request.\n') + return + + if req['method'] == 'subscribe': + for p in req['params']: + if p not in 
self._clients[callbacker]['subscribe']: + self._clients[callbacker]['subscribe'].append(p) + + def send_message(self, subscribe, message): + msg = {'subscribe': subscribe, 'data': message} + s = json.dumps(msg, separators=(',', ':')) + with self._lock: + for c in self._clients: + + if subscribe in self._clients[c]['subscribe']: + c.write_message(s) + + # def response(self, _id, data): + # # print('send to client:', url, data) + # for callbacker in self.clients: + # if self.clients[callbacker].get_id() == _id: + # print('[ws] response', _id, data) + # callbacker.write_message(data) + # return + # print('## [ws] response no client.', _id) + + +def tp_wss(): + """ + 取得 TPWebSocketServer 管理器的唯一实例 + + :rtype : TPWebSocketServer + """ + + import builtins + if '__tp_websocket_server__' not in builtins.__dict__: + builtins.__dict__['__tp_websocket_server__'] = TPWebSocketServer() + return builtins.__dict__['__tp_websocket_server__'] + + +class WebSocketHandler(tornado.websocket.WebSocketHandler): + def check_origin(self, origin): # 针对websocket处理类重写同源检查的方法 + # print('check_origin: ', origin) + return True + + # 接受websocket链接,保存链接实例 + def open(self, sid): + # 处理新的连接 + k = '{}-{}'.format('user', sid) + _user = tp_session().get(k, None) + print(_user) + if _user is None: + ret = {'code': TPE_NEED_LOGIN, 'message': '需要登录'} + self.write_message(json.dumps(ret)) + return + + tp_wss().register(self) + + def on_close(self): + if not tp_wss().have_callbacker(self): + return + tp_wss().unregister(self) # 删除客户端连接 + + def on_message(self, message): + if not tp_wss().have_callbacker(self): + ret = {'code': TPE_NEED_LOGIN, 'message': '未曾成功连接'} + self.write_message(json.dumps(ret)) + return + tp_wss().on_message(self, message) + diff --git a/server/www/teleport/webroot/app/logic/auth/captcha.py b/server/www/teleport/webroot/app/logic/auth/captcha.py index 0a8f6f6..d4ebbd5 100644 --- a/server/www/teleport/webroot/app/logic/auth/captcha.py +++ b/server/www/teleport/webroot/app/logic/auth/captcha.py @@ -4,7 +4,7 @@ import io import os import random -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from wheezy.captcha.image import background from wheezy.captcha.image import captcha from wheezy.captcha.image import curve @@ -32,7 +32,7 @@ def tp_captcha_generate_image(h): noise(number=80, color='#eeeeee', level=3), smooth(), text(fonts=[ - os.path.join(get_cfg().res_path, 'fonts', '001.ttf') + os.path.join(tp_cfg().res_path, 'fonts', '001.ttf') ], # font_sizes=(28, 34, 36, 32), font_sizes=(h-4, h-2, h, h+1), @@ -61,7 +61,7 @@ def tp_captcha_generate_image(h): noise(number=40, color='#eeeeee', level=2), smooth(), text(fonts=[ - os.path.join(get_cfg().res_path, 'fonts', '001.ttf') + os.path.join(tp_cfg().res_path, 'fonts', '001.ttf') ], # font_sizes=(28, 34, 36, 32), font_sizes=(h-2, h-1, h, h+1), diff --git a/server/www/teleport/webroot/app/model/record.py b/server/www/teleport/webroot/app/model/record.py index 8a882e7..18bfb3f 100644 --- a/server/www/teleport/webroot/app/model/record.py +++ b/server/www/teleport/webroot/app/model/record.py @@ -7,7 +7,7 @@ import struct import base64 from app.const import * -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.db import get_db, SQL from app.base.logger import log from app.base.utils import tp_timestamp_utc_now @@ -71,10 +71,10 @@ def get_records(sql_filter, sql_order, sql_limit, sql_restrict, sql_exclude): def read_record_head(record_id): - if not get_cfg().core.detected: + if not tp_cfg().core.detected: 
return None, TPE_NO_CORE_SERVER - record_path = os.path.join(get_cfg().core.replay_path, 'ssh', '{:09d}'.format(int(record_id))) + record_path = os.path.join(tp_cfg().core.replay_path, 'ssh', '{:09d}'.format(int(record_id))) header_file_path = os.path.join(record_path, 'tp-ssh.tpr') if not os.path.exists(header_file_path): @@ -154,11 +154,11 @@ def read_record_head(record_id): def read_record_data(record_id, offset): - if not get_cfg().core.detected: + if not tp_cfg().core.detected: return None, TPE_NO_CORE_SERVER # read 1000 packages one time from offset. - record_path = os.path.join(get_cfg().core.replay_path, 'ssh', '{:09d}'.format(int(record_id))) + record_path = os.path.join(tp_cfg().core.replay_path, 'ssh', '{:09d}'.format(int(record_id))) file_data = os.path.join(record_path, 'tp-ssh.dat') if not os.path.exists(file_data): @@ -245,10 +245,10 @@ def delete_log(log_list): for item in log_list: log_id = int(item) try: - record_path = os.path.join(get_cfg().core.replay_path, 'ssh', '{:06d}'.format(log_id)) + record_path = os.path.join(tp_cfg().core.replay_path, 'ssh', '{:06d}'.format(log_id)) if os.path.exists(record_path): shutil.rmtree(record_path) - record_path = os.path.join(get_cfg().core.replay_path, 'rdp', '{:06d}'.format(log_id)) + record_path = os.path.join(tp_cfg().core.replay_path, 'rdp', '{:06d}'.format(log_id)) if os.path.exists(record_path): shutil.rmtree(record_path) except Exception: @@ -308,7 +308,7 @@ def session_end(record_id, ret_code): @tornado.gen.coroutine def cleanup_storage(handler): # storage config - sto = get_cfg().sys.storage + sto = tp_cfg().sys.storage db = get_db() _now = tp_timestamp_utc_now() @@ -341,7 +341,7 @@ def cleanup_storage(handler): msg.append('{} 条系统日志已清除!'.format(removed_log)) if sto.keep_record > 0: - core_cfg = get_cfg().core + core_cfg = tp_cfg().core if not core_cfg.detected: have_error = True msg.append('清除指定会话录像失败:未能检测到核心服务!') diff --git a/server/www/teleport/webroot/app/model/user.py b/server/www/teleport/webroot/app/model/user.py index 65e89c9..6c3becb 100644 --- a/server/www/teleport/webroot/app/model/user.py +++ b/server/www/teleport/webroot/app/model/user.py @@ -2,7 +2,7 @@ # import hashlib -from app.base.configs import get_cfg +from app.base.configs import tp_cfg from app.base.db import get_db, SQL from app.base.logger import log from app.base.utils import tp_timestamp_utc_now, tp_generate_random @@ -49,7 +49,7 @@ def get_by_username(username): def login(handler, username, password=None, oath_code=None): - sys_cfg = get_cfg().sys + sys_cfg = tp_cfg().sys err, user_info = get_by_username(username) if err != TPE_OK: @@ -456,7 +456,7 @@ def update_users_state(handler, user_ids, state): def update_fail_count(handler, user_info): db = get_db() - sys_cfg = get_cfg().sys + sys_cfg = tp_cfg().sys sql_list = [] is_locked = False fail_count = user_info.fail_count + 1
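
As a closing reference for the monitoring side of this patch, here is a standalone sketch of the delta-sampling approach used by TPSysStatus in app/base/status.py: psutil's disk and network counters are cumulative, so each tick reports the difference from the previous reading and clamps negative values (kernel counters can wrap on long-lived systems). The 2-second interval and the field names follow the patch; int(time.time()) and print() stand in for tp_timestamp_utc_now() and tp_wss().send_message('sys_real_status', val).

    import time
    import psutil

    INTERVAL = 2  # seconds; matches TPSysStatus._interval

    # Prime the CPU percentage calculation and take the first cumulative readings.
    psutil.cpu_times_percent()
    disk = psutil.disk_io_counters(perdisk=False)
    net = psutil.net_io_counters(pernic=False)
    last_dr, last_dw = disk.read_bytes, disk.write_bytes
    last_nr, last_ns = net.bytes_recv, net.bytes_sent

    for _ in range(5):  # a few ticks; the real thread loops until stopped
        time.sleep(INTERVAL)
        cpu = psutil.cpu_times_percent()   # percentages since the previous call
        mem = psutil.virtual_memory()
        disk = psutil.disk_io_counters(perdisk=False)
        net = psutil.net_io_counters(pernic=False)

        val = {
            't': int(time.time()),         # the patch uses tp_timestamp_utc_now()
            'c': {'u': cpu.user, 's': cpu.system},
            'm': {'u': mem.used, 't': mem.total},
            # cumulative counters -> per-interval deltas, clamped at 0 on wrap
            'd': {'r': max(0, disk.read_bytes - last_dr),
                  'w': max(0, disk.write_bytes - last_dw)},
            'n': {'r': max(0, net.bytes_recv - last_nr),
                  's': max(0, net.bytes_sent - last_ns)},
        }
        last_dr, last_dw = disk.read_bytes, disk.write_bytes
        last_nr, last_ns = net.bytes_recv, net.bytes_sent

        print(val)  # the patch hands this to tp_wss().send_message('sys_real_status', val)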