mirror of https://github.com/fail2ban/fail2ban
Merge branch 'master' into debian
commit 2ba9fee79f
@@ -0,0 +1,12 @@
+[codespell]
+# THANKS - names
+skip = .git,*.pdf,*.svg,venv,.codespellrc,THANKS,*test*.log,logs
+check-hidden = true
+# Ignore all acronyms etc as plenty e.g. in fail2ban/server/strptime.py
+# Try to identify incomplete words which are part of a regex, hence having [] at the beginning
+# Ignore all urls as something with :// in it
+# Ignore all lines with codespell-ignore in them for pragma annotation
+ignore-regex = (\b([A-Z][A-Z][A-Z]+|gir\.st)\b)|\[[a-zA-Z]+\][a-z]+\b|[a-z]+://\S+|.*codespell-ignore.*
+# some oddly named variables, some names, etc
+# wee -- comes in regex etc for weeks
+ignore-words-list = theis,timere,alls,wee,wight,ans,re-use
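The hunk above adds a codespell configuration (presumably the repository's `.codespellrc`; the file name is not shown in this excerpt). A minimal sketch of running the same check locally, assuming codespell has been installed from PyPI; it picks the `[codespell]` section up automatically from the current directory:

```sh
# Sketch: run the spell check locally, same settings the CI job below uses.
pip install codespell          # assumption: codespell not yet installed
cd fail2ban                    # repository root (path is an assumption)
codespell                      # reads skip/check-hidden/ignore-regex/ignore-words-list from .codespellrc
```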
@@ -0,0 +1,22 @@
+---
+name: Codespell
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+permissions:
+  contents: read
+
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Codespell
+        uses: codespell-project/actions-codespell@v2
@@ -22,15 +22,15 @@ jobs:
     runs-on: ubuntu-20.04
     strategy:
       matrix:
-        python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9, '3.10', '3.11.0-beta.3', pypy2, pypy3]
+        python-version: [3.7, 3.8, 3.9, '3.10', '3.11', '3.12', '3.13.0-alpha.2', pypy3.10]
       fail-fast: false
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
@@ -51,24 +51,32 @@ jobs:
       - name: Install dependencies
         run: |
-          if [[ "$F2B_PY" = 3 ]]; then python -m pip install --upgrade pip || echo "can't upgrade pip"; fi
-          if [[ "$F2B_PY" = 3 ]] && ! command -v 2to3x -v 2to3 > /dev/null; then
-            #pip install 2to3
-            sudo apt-get -y install 2to3
-          fi
+          #if [[ "$F2B_PY" = 3 ]]; then python -m pip install --upgrade pip || echo "can't upgrade pip"; fi
           #sudo apt-get -y install python${F2B_PY/2/}-pyinotify || echo 'inotify not available'
           python -m pip install pyinotify || echo 'inotify not available'
+          sudo apt-get -y install sqlite3 || echo 'sqlite3 not available'
           #sudo apt-get -y install python${F2B_PY/2/}-systemd || echo 'systemd not available'
           sudo apt-get -y install libsystemd-dev || echo 'systemd dependencies seems to be unavailable'
           python -m pip install systemd-python || echo 'systemd not available'
-          #readline if available as module:
+          # readline if available as module:
           python -c 'import readline' 2> /dev/null || python -m pip install readline || echo 'readline not available'
+          # asyncore/asynchat:
+          if dpkg --compare-versions "$F2B_PYV" ge 3.12; then
+            #sudo apt-get -y install python${F2B_PY/2/}-setuptools || echo 'setuptools not unavailable'
+            python -m pip install setuptools || echo "can't install setuptools"
+            # don't install async* modules, we need to cover bundled-in libraries:
+            #python -m pip install pyasynchat || echo "can't install pyasynchat";
+            #python -m pip install pyasyncore || echo "can't install pyasyncore";
+          fi
+          # aiosmtpd in test_smtp (for 3.10+, no need to test it everywhere):
+          if dpkg --compare-versions "$F2B_PYV" ge 3.10; then
+            #sudo apt-get -y install python${F2B_PY/2/}-aiosmtpd || echo 'aiosmtpd not available'
+            python -m pip install aiosmtpd || echo 'aiosmtpd not available'
+          fi

       - name: Before scripts
         run: |
           cd "$GITHUB_WORKSPACE"
-          # Manually execute 2to3 for now
-          if [[ "$F2B_PY" = 3 ]]; then echo "2to3 ..." && ./fail2ban-2to3; fi
           _debug() { echo -n "$1 "; err=$("${@:2}" 2>&1) && echo 'OK' || echo -e "FAIL\n$err"; }
           # (debug) output current preferred encoding:
           _debug 'Encodings:' python -c 'import locale, sys; from fail2ban.helpers import PREFER_ENC; print(PREFER_ENC, locale.getpreferredencoding(), (sys.stdout and sys.stdout.encoding))'
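The new install step gates the python-3.10+/3.12+ extras on `dpkg --compare-versions`; `$F2B_PYV` is set earlier in the workflow (not shown in this hunk). A minimal sketch of the same gate, with the version variable derived locally as an assumption:

```sh
#!/bin/bash
# Sketch: reproduce the version gates used in the install step above.
F2B_PYV=$(python3 -c 'import sys; print("%d.%d" % sys.version_info[:2])')  # assumed equivalent of the workflow's F2B_PYV
if dpkg --compare-versions "$F2B_PYV" ge 3.12; then
  # python 3.12+ dropped asyncore/asynchat; fail2ban falls back to its bundled copies
  python3 -m pip install setuptools || echo "can't install setuptools"
fi
if dpkg --compare-versions "$F2B_PYV" ge 3.10; then
  python3 -m pip install aiosmtpd || echo 'aiosmtpd not available'
fi
```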
@@ -80,14 +88,8 @@ jobs:
       - name: Test suite
         run: |
-          if [[ "$F2B_PY" = 2 ]]; then
-            python setup.py test
-          elif dpkg --compare-versions "$F2B_PYV" lt 3.10; then
+          #python setup.py test
           python bin/fail2ban-testcases --verbosity=2
-          else
-            echo "Skip systemd backend since systemd-python module must be fixed for python >= v.3.10 in GHA ..."
-            python bin/fail2ban-testcases --verbosity=2 -i "[sS]ystemd|[jJ]ournal"
-          fi

       #- name: Test suite (debug some systemd tests only)
       #  run: python bin/fail2ban-testcases --verbosity=2 "[sS]ystemd|[jJ]ournal"
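The same runner can be used outside CI; a sketch from a source checkout (the `-i` exclusion shown last is the form used by the branch removed above, kept here only as an illustration of skipping test groups):

```sh
# Sketch: run the fail2ban test suite from a source checkout.
cd fail2ban                                   # repository root (assumed path)
python bin/fail2ban-testcases --verbosity=2   # full run, as in the new workflow step
# skip systemd/journal tests, as the removed branch did on python >= 3.10:
python bin/fail2ban-testcases --verbosity=2 -i "[sS]ystemd|[jJ]ournal"
```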
@@ -10,3 +10,4 @@ htmlcov
 __pycache__
 .vagrant/
 .idea/
+.venv/
.project (17 lines removed)
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
-  <name>fail2ban-unstable</name>
-  <comment></comment>
-  <projects>
-  </projects>
-  <buildSpec>
-    <buildCommand>
-      <name>org.python.pydev.PyDevBuilder</name>
-      <arguments>
-      </arguments>
-    </buildCommand>
-  </buildSpec>
-  <natures>
-    <nature>org.python.pydev.pythonNature</nature>
-  </natures>
-</projectDescription>
.travis.yml (82 lines removed)
@@ -1,82 +0,0 @@
-# vim ft=yaml
-# travis-ci.org definition for Fail2Ban build
-# https://travis-ci.org/fail2ban/fail2ban/
-
-#os: linux
-
-language: python
-dist: xenial
-
-matrix:
-  fast_finish: true
-  include:
-    - python: 2.7
-    #- python: pypy
-    - python: 3.4
-    - python: 3.5
-    - python: 3.6
-    - python: 3.7
-    - python: 3.8
-    - python: 3.9-dev
-    - python: pypy3.5
-before_install:
-  - echo "running under $TRAVIS_PYTHON_VERSION"
-  - if [[ $TRAVIS_PYTHON_VERSION == 2* || $TRAVIS_PYTHON_VERSION == pypy* && $TRAVIS_PYTHON_VERSION != pypy3* ]]; then export F2B_PY=2; fi
-  - if [[ $TRAVIS_PYTHON_VERSION == 3* || $TRAVIS_PYTHON_VERSION == pypy3* ]]; then export F2B_PY=3; fi
-  - echo "Set F2B_PY=$F2B_PY"
-  - travis_retry sudo apt-get update -qq
-  # Set this so sudo executes the correct python binary
-  # Anything not using sudo will already have the correct environment
-  - export VENV_BIN="$VIRTUAL_ENV/bin" && echo "VENV_BIN set to $VENV_BIN"
-install:
-  # Install Python packages / dependencies
-  # coverage
-  - travis_retry pip install coverage
-  # coveralls (note coveralls doesn't support 2.6 now):
-  #- if [[ $TRAVIS_PYTHON_VERSION != 2.6* ]]; then F2B_COV=1; else F2B_COV=0; fi
-  - F2B_COV=1
-  - if [[ "$F2B_COV" = 1 ]]; then travis_retry pip install coveralls; fi
-  # codecov:
-  - travis_retry pip install codecov
-  # dnspython or dnspython3
-  - if [[ "$F2B_PY" = 2 ]]; then travis_retry pip install dnspython || echo 'not installed'; fi
-  - if [[ "$F2B_PY" = 3 ]]; then travis_retry pip install dnspython3 || echo 'not installed'; fi
-  # python systemd bindings:
-  - if [[ "$F2B_PY" = 2 ]]; then travis_retry sudo apt-get install -qq python-systemd || echo 'not installed'; fi
-  - if [[ "$F2B_PY" = 3 ]]; then travis_retry sudo apt-get install -qq python3-systemd || echo 'not installed'; fi
-  # gamin - install manually (not in PyPI) - travis-ci system Python is 2.7
-  - if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then (travis_retry sudo apt-get install -qq python-gamin && cp /usr/share/pyshared/gamin.py /usr/lib/pyshared/python2.7/_gamin.so $VIRTUAL_ENV/lib/python2.7/site-packages/) || echo 'not installed'; fi
-  # pyinotify
-  - travis_retry pip install pyinotify || echo 'not installed'
-  # Install helper tools
-  - sudo apt-get install shellcheck
-before_script:
-  # Manually execute 2to3 for now
-  - if [[ "$F2B_PY" = 3 ]]; then ./fail2ban-2to3; fi
-  # (debug) output current preferred encoding:
-  - python -c 'import locale, sys; from fail2ban.helpers import PREFER_ENC; print(PREFER_ENC, locale.getpreferredencoding(), (sys.stdout and sys.stdout.encoding))'
-script:
-  # Keep the legacy setup.py test approach of checking coverage for python2
-  - if [[ "$F2B_PY" = 2 ]]; then coverage run setup.py test; fi
-  # Coverage doesn't pick up setup.py test with python3, so run it directly (with same verbosity as from setup)
-  - if [[ "$F2B_PY" = 3 ]]; then coverage run bin/fail2ban-testcases --verbosity=2; fi
-  # Use $VENV_BIN (not python) or else sudo will always run the system's python (2.7)
-  - sudo $VENV_BIN/pip install .
-  # Doc files should get installed on Travis under Linux (some builds/python's seem to use another path segment)
-  - test -e /usr/share/doc/fail2ban/FILTERS && echo 'found' || echo 'not found'
-  # Test initd script
-  - shellcheck -s bash -e SC1090,SC1091 files/debian-initd
-after_success:
-  - if [[ "$F2B_COV" = 1 ]]; then coveralls; fi
-  - codecov
-
-# Might be worth looking into
-#notifications:
-#  email: true
-#  irc:
-#    channels: "irc.freenode.org#fail2ban"
-#    template:
-#      - "%{repository}@%{branch}: %{message} (%{build_url})"
-#    on_success: change
-#    on_failure: change
-#    skip_join: true
ChangeLog
@@ -7,6 +7,71 @@
 Fail2Ban: Changelog
 ===================

+ver. 1.1.0 (2024/04/25) - object-found--norad-59479-cospar-2024-069a--altitude-36267km
+-----------
+
+### Compatibility
+* the minimum supported python version is now 3.5, if you have previous python version
+  you can use the 0.11 or 1.0 version of fail2ban or upgrade python (or even build it from source).
+
+### Fixes
+* circumvent SEGFAULT in a python's socket module by getaddrinfo with disabled IPv6 (gh-3438)
+* avoid sporadic error in pyinotify backend if pending file deleted in other thread, e. g. by flushing logs (gh-3635)
+* `action.d/cloudflare-token.conf` - fixes gh-3479, url-encode args by unban
+* `action.d/*ipset*`: make `maxelem` ipset option configurable through banaction arguments (gh-3564)
+* `filter.d/apache-common.conf` - accepts remote besides client (gh-3622)
+* `filter.d/mysqld-auth.conf` - matches also if no suffix in message (mariadb 10.3 log format, gh-3603)
+* `filter.d/nginx-*.conf` - nginx error-log filters extended with support of journal format (gh-3646)
+* `filter.d/postfix.conf`:
+  - "rejected" rule extended to match "Access denied" too (gh-3474)
+  - avoid double counting ('lost connection after AUTH' together with message 'disconnect ...', gh-3505)
+  - add Sender address rejected: Malformed DNS server reply (gh-3590)
+  - add to postfix syslog daemon format (gh-3690)
+  - change journalmatch postfix, allow sub-units with postfix@-.service (gh-3692)
+* `filter.d/recidive.conf`: support for systemd-journal, conditional RE depending on logtype (for file or journal, gh-3693)
+* `filter.d/slapd.conf` - filter rewritten for single-line processing, matches errored result without `text=...` (gh-3604)
+
+### New Features and Enhancements
+* supports python 3.12 and 3.13 (gh-3487)
+* bundling async modules removed in python 3.12+ (fallback to local libraries pyasyncore/pyasynchat if import would miss them, gh-3487)
+* `fail2ban-client` extended (gh-2975):
+  - `fail2ban-client status --all [flavor]` - returns status of fail2ban and all jails in usual form
+  - `fail2ban-client stats` - returns statistic in form of table (jail, backend, found and banned counts)
+  - `fail2ban-client statistic` or `fail2ban-client statistics` - same as `fail2ban-client stats` (aliases for stats)
+  - `fail2ban-client status --all stats` - (undocumented, flavor "stats") returns statistic of all jails in form of python dict
+* `fail2ban-regex` extended to load settings from jail (by simple name it'd prefer jail to the filter now, gh-2655);
+  to load the settings from filter one could use:
+  ```diff
+  - fail2ban-regex ... sshd ; # jail
+  + fail2ban-regex ... sshd.conf ; # filter
+  # or:
+  + fail2ban-regex ... filter.d/sshd ; # filter
+  ```
+* better auto-detection for IPv6 support (`allowipv6 = auto` by default), trying to check sysctl net.ipv6.conf.all.disable_ipv6
+  (value read from `/proc/sys/net/ipv6/conf/all/disable_ipv6`) if available, otherwise seeks over local IPv6 from network interfaces
+  if available for platform and uses DNS to find local IPv6 as a fallback only
+* improve `ignoreself` by considering all local addresses from network interfaces additionally to IPs from hostnames (gh-3132)
+* `action.d/mikrotik.conf` - new action for mikrotik routerOS, adds and removes entries from address lists on the router (gh-2860)
+* `action.d/pf.conf` - pf action extended with support of `protocol=all` (gh-3503)
+* `action.d/smtp.py` - added optional support for TLS connections via the `ssl` arg.
+* `filter.d/dante.conf` - new filter for Dante SOCKS server (gh-2112)
+* `filter.d/exim.conf`, `filter.d/exim-spam.conf`:
+  - messages are prefiltered by `prefregex` now
+  - filter can bypass additional timestamp or pid that may be logged via systemd-journal or syslog-ng (gh-3060)
+  - rewrite host line regex for all varied exim's log_selector states (gh-3263, gh-3701, gh-3702)
+  - fixed "dropped: too many ..." regex, also matching unrecognized commands now (gh-3502)
+* `filter.d/named-refused.conf` - denied allows any reason in parenthesis as suffix (gh-3697)
+* `filter.d/nginx-forbidden.conf` - new filter to ban forbidden locations, e. g. using `deny` directive (gh-2226)
+* `filter.d/routeros-auth.conf` - new filter detecting failed login attempts in the log produced by MikroTik RouterOS
+* `filter.d/sshd.conf`:
+  - avoid double counting for "maximum authentication attempts exceeded" (gh-3502)
+  - message "Disconnecting ... Too many authentication failures" is not a failure anymore
+  - mode `ddos`/`aggressive` extended to match new messages caused by port scanner, wrong payload on ssh port (gh-3486):
+    * message authentication code incorrect [preauth]
+    * connection corrupted [preauth]
+    * timeout before authentication
+
+
 ver. 1.0.2 (2022/11/09) - finally-war-game-test-tape-not-a-nuclear-alarm
 -----------

@@ -53,7 +118,7 @@ ver. 1.0.1 (2022/09/27) - energy-equals-mass-times-the-speed-of-light-squared
 * [stability] solves race condition with uncontrolled growth of failure list (jail with too many matches,
   that did not cause ban), behavior changed to ban ASAP, gh-2945
 * fixes search for the best datepattern - e. g. if line is too short, boundaries check for previously known
-  unprecise pattern may fail on incomplete lines (logging break-off, no flush, etc), gh-3020
+  imprecise pattern may fail on incomplete lines (logging break-off, no flush, etc), gh-3020
 * [stability, performance] backend `systemd`:
   - fixes error "local variable 'line' referenced before assignment", introduced in 55d7d9e2, gh-3097
   - don't update database too often (every 10 ticks or ~ 10 seconds in production)

@@ -391,7 +456,7 @@ filter = flt[logtype=short]
 * `filter.d/znc-adminlog.conf`: new filter for ZNC (IRC bouncer); requires the adminlog module to be loaded

 ### Enhancements
-* introduced new options: `dbmaxmatches` (fail2ban.conf) and `maxmatches` (jail.conf) to contol
+* introduced new options: `dbmaxmatches` (fail2ban.conf) and `maxmatches` (jail.conf) to control
   how many matches per ticket fail2ban can hold in memory and store in database (gh-2402, gh-2118);
 * fail2ban.conf: introduced new section `[Thread]` and option `stacksize` to configure default size
   of the stack for threads running in fail2ban (gh-2356), it could be set in `fail2ban.local` to

@@ -501,7 +566,7 @@ ver. 0.10.3 (2018/04/04) - the-time-is-always-right-to-do-what-is-right
   - fixed root login refused regex (optional port before preauth, gh-2080);
   - avoid banning of legitimate users when pam_unix used in combination with other password method, so
     bypass pam_unix failures if accepted available for this user gh-2070;
-  - amend to gh-1263 with better handling of multiple attempts (failures for different user-names recognized immediatelly);
+  - amend to gh-1263 with better handling of multiple attempts (failures for different user-names recognized immediately);
   - mode `ddos` (and `aggressive`) extended to catch `Connection closed by ... [preauth]`, so in DDOS mode
     it counts failure on closing connection within preauth-stage (gh-2085);
 * `action.d/abuseipdb.conf`: fixed curl cypher errors and comment quote-issue (gh-2044, gh-2101);

@@ -831,7 +896,7 @@ ver. 0.10.0-alpha-1 (2016/07/14) - ipv6-support-etc
   sane environment in error case of `actioncheck`.
 * Reporting via abuseipdb.com:
   - Bans can now be reported to abuseipdb
-  - Catagories must be set in the config
+  - Categories must be set in the config
   - Relevant log lines included in report

 ### Enhancements

@@ -968,7 +1033,7 @@ releases.
   - Rewritten without end-anchor ($), because of potential vulnerability on very long URLs.
 * filter.d/apache-badbots.conf - extended to recognize Jorgee Vulnerability Scanner (gh-1882)
 * filter.d/asterisk.conf
-  - fixed failregex AMI Asterisk authentification failed (see gh-1302)
+  - fixed failregex AMI Asterisk authentication failed (see gh-1302)
   - removed invalid (vulnerable) regex blocking IPs using forign data (from header "from")
     thus not the IP-address that really originates the request (see gh-1927)
   - fixed failregex for the SQL-injection attempts with single-quotes in connect-string (see gh-2011)

@@ -1268,7 +1333,7 @@ ver. 0.9.3 (2015/08/01) - lets-all-stay-friends
 * `filter.d/roundcube-auth.conf`
   - Updated regex to work with 'errors' log (1.0.5 and 1.1.1)
   - Added regex to work with 'userlogins' log
-* `action.d/sendmail*.conf` - use LC_ALL (superseeding LC_TIME) to override
+* `action.d/sendmail*.conf` - use LC_ALL (superseding LC_TIME) to override
   locale on systems with customized LC_ALL
 * performance fix: minimizes connection overhead, close socket only at
   communication end (gh-1099)

@@ -1438,7 +1503,7 @@ ver. 0.9.1 (2014/10/29) - better, faster, stronger
 * Ignored IPs are no longer banned when being restored from persistent
   database
 * Manually unbanned IPs are now removed from persistent database, such they
-  wont be banned again when Fail2Ban is restarted
+  won't be banned again when Fail2Ban is restarted
 * Pass "bantime" parameter to the actions in default jail's action
   definition(s)
 * `filters.d/sieve.conf` - fixed typo in _daemon. Thanks Jisoo Park

@@ -1729,7 +1794,7 @@ those filters were used.
   all platforms to ensure permissions are the same before and after a ban.
   Closes gh-266. hostsdeny supports daemon_list now too.
 * `action.d/bsd-ipfw` - action option unused. Change blocktype to port unreach
-  instead of deny for consistancy.
+  instead of deny for consistency.
 * `filter.d/dovecot` - added to support different dovecot failure
   "..disallowed plaintext auth". Closes Debian bug #709324
 * `filter.d/roundcube-auth` - timezone offset can be positive or negative

@@ -1919,7 +1984,7 @@ fail2ban-users mailing list and IRC.
 ### New Features
 - Yaroslav Halchenko
   * [9ba27353] Add support for `jail.d/{confilefile}` and `fail2ban.d/{configfile}`
-    to provide additional flexibility to system adminstrators. Thanks to
+    to provide additional flexibility to system administrators. Thanks to
     beilber for the idea. Closes gh-114.
   * [3ce53e87] Add exim filter.
 - Erwan Ben Souiden

@@ -2070,7 +2135,7 @@ ver. 0.8.7 (2012/07/31) - stable
   * [47c03a2] files/nagios - spelling/grammar fixes
   * [b083038] updated Free Software Foundation's address
   * [9092a63] changed TLDs to invalid domains, in accordance with RFC 2606
-  * [642d9af,3282f86] reformated printing of jail's name to be consistent
+  * [642d9af,3282f86] reformatted printing of jail's name to be consistent
     with init's info messages
   * [3282f86] uniform use of capitalized Jail in the messages
 - Leonardo Chiquitto

@@ -2415,7 +2480,7 @@ ver. 0.6.1 (2006/03/16) - stable
 - Fixed crash when time format does not match data
 - Propagated patch from Debian to fix fail2ban search path addition to the path
   search list: now it is added first. Thanks to Nick Craig-Wood
-- Added SMTP authentification for mail notification. Thanks to Markus Hoffmann
+- Added SMTP authentication for mail notification. Thanks to Markus Hoffmann
 - Removed debug mode as it is confusing for people
 - Added parsing of timestamp in TAI64N format (#1275325). Thanks to Mark
   Edgington

@@ -2448,7 +2513,7 @@ ver. 0.5.5 (2005/10/26) - beta
   further adjusted by upstream author).
 * Added -f command line parameter for [findtime].
 * Added a cleanup of firewall rules on emergency shutdown when unknown
-  exception is catched.
+  exception is caught.
 * Fail2ban should not crash now if a wrong file name is specified in config.
 * reordered code a bit so that log targets are setup right after background
   and then only loglevel (verbose, debug) is processed, so the warning could
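The `fail2ban-client` extensions listed under 1.1.0 above can be exercised directly once the server is running; a sketch of the documented command forms (output shape illustrative, not captured from a real run):

```sh
# Sketch: the new status/statistics forms added in 1.1.0 (gh-2975).
fail2ban-client status --all        # status of fail2ban and of every jail
fail2ban-client stats               # per-jail table: backend, found and banned counts
fail2ban-client statistics          # alias for `stats`
fail2ban-client status --all stats  # same data returned as a python dict (flavor "stats")
```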
FILTERS
@@ -129,7 +129,7 @@ Date/Time
 ---------

 At the moment, Fail2Ban depends on log lines to have time stamps. That is why
-before starting to develop failregex, check if your log line format known to
+before starting to develop failregex, check if your log line format is known to
 Fail2Ban. Copy the time component from the log line and append an IP address to
 test with following command::

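Following the FILTERS advice above, a quick way to check whether a timestamp format is recognized is to feed a single synthetic line to fail2ban-regex; a sketch with made-up log content and an illustrative failregex:

```sh
# Sketch: verify that fail2ban recognizes the date/time format of a log line.
# Both the line and the regex below are illustrative only.
fail2ban-regex '2024-04-25 12:00:01 auth failure from 192.0.2.1' \
               'auth failure from <HOST>'
# the output reports which date template matched and whether the failregex hit
```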
MANIFEST
@@ -40,6 +40,7 @@ config/action.d/mail.conf
 config/action.d/mail-whois-common.conf
 config/action.d/mail-whois.conf
 config/action.d/mail-whois-lines.conf
+config/action.d/mikrotik.conf
 config/action.d/mynetwatchman.conf
 config/action.d/netscaler.conf
 config/action.d/nftables-allports.conf

@@ -90,6 +91,7 @@ config/filter.d/counter-strike.conf
 config/filter.d/courier-auth.conf
 config/filter.d/courier-smtp.conf
 config/filter.d/cyrus-imap.conf
+config/filter.d/dante.conf
 config/filter.d/directadmin.conf
 config/filter.d/domino-smtp.conf
 config/filter.d/dovecot.conf

@@ -121,6 +123,8 @@ config/filter.d/nagios.conf
 config/filter.d/named-refused.conf
 config/filter.d/nginx-bad-request.conf
 config/filter.d/nginx-botsearch.conf
+config/filter.d/nginx-error-common.conf
+config/filter.d/nginx-forbidden.conf
 config/filter.d/nginx-http-auth.conf
 config/filter.d/nginx-limit-req.conf
 config/filter.d/nsd.conf

@@ -138,6 +142,7 @@ config/filter.d/pure-ftpd.conf
 config/filter.d/qmail.conf
 config/filter.d/recidive.conf
 config/filter.d/roundcube-auth.conf
+config/filter.d/routeros-auth.conf
 config/filter.d/scanlogd.conf
 config/filter.d/screensharingd.conf
 config/filter.d/selinux-common.conf

@@ -175,7 +180,6 @@ CONTRIBUTING.md
 COPYING
 .coveragerc
 DEVELOP
-fail2ban-2to3
 fail2ban/client/actionreader.py
 fail2ban/client/beautifier.py
 fail2ban/client/configparserinc.py

@@ -191,6 +195,8 @@ fail2ban/client/filterreader.py
 fail2ban/client/__init__.py
 fail2ban/client/jailreader.py
 fail2ban/client/jailsreader.py
+fail2ban/compat/asynchat.py
+fail2ban/compat/asyncore.py
 fail2ban/exceptions.py
 fail2ban/helpers.py
 fail2ban/__init__.py

@@ -204,7 +210,6 @@ fail2ban/server/datedetector.py
 fail2ban/server/datetemplate.py
 fail2ban/server/failmanager.py
 fail2ban/server/failregex.py
-fail2ban/server/filtergamin.py
 fail2ban/server/filterpoll.py
 fail2ban/server/filter.py
 fail2ban/server/filterpyinotify.py

@@ -272,7 +277,7 @@ fail2ban/tests/files/config/apache-auth/noentry/.htaccess
 fail2ban/tests/files/config/apache-auth/README
 fail2ban/tests/files/database_v1.db
 fail2ban/tests/files/database_v2.db
-fail2ban/tests/files/filter.d/substition.conf
+fail2ban/tests/files/filter.d/substitution.conf
 fail2ban/tests/files/filter.d/testcase01.conf
 fail2ban/tests/files/filter.d/testcase02.conf
 fail2ban/tests/files/filter.d/testcase02.local

@@ -300,6 +305,7 @@ fail2ban/tests/files/logs/counter-strike
 fail2ban/tests/files/logs/courier-auth
 fail2ban/tests/files/logs/courier-smtp
 fail2ban/tests/files/logs/cyrus-imap
+fail2ban/tests/files/logs/dante
 fail2ban/tests/files/logs/directadmin
 fail2ban/tests/files/logs/domino-smtp
 fail2ban/tests/files/logs/dovecot

@@ -329,6 +335,7 @@ fail2ban/tests/files/logs/nagios
 fail2ban/tests/files/logs/named-refused
 fail2ban/tests/files/logs/nginx-bad-request
 fail2ban/tests/files/logs/nginx-botsearch
+fail2ban/tests/files/logs/nginx-forbidden
 fail2ban/tests/files/logs/nginx-http-auth
 fail2ban/tests/files/logs/nginx-limit-req
 fail2ban/tests/files/logs/nsd

@@ -346,6 +353,7 @@ fail2ban/tests/files/logs/pure-ftpd
 fail2ban/tests/files/logs/qmail
 fail2ban/tests/files/logs/recidive
 fail2ban/tests/files/logs/roundcube-auth
+fail2ban/tests/files/logs/routeros-auth
 fail2ban/tests/files/logs/scanlogd
 fail2ban/tests/files/logs/screensharingd
 fail2ban/tests/files/logs/selinux-ssh
README.md
@@ -2,7 +2,7 @@
  / _|__ _(_) |_ ) |__ __ _ _ _
 |  _/ _` | | |/ /| '_ \/ _` | ' \
 |_| \__,_|_|_/___|_.__/\__,_|_||_|
-v1.0.1.dev1            20??/??/??
+v1.1.0.dev1            20??/??/??

 ## Fail2Ban: ban hosts that cause multiple authentication errors

@@ -29,26 +29,27 @@ and the website: https://www.fail2ban.org
 Installation:
 -------------

-**It is possible that Fail2Ban is already packaged for your distribution. In
-this case, you should use that instead.**
+Fail2Ban is likely already packaged for your Linux distribution and [can installed with a simple command](https://github.com/fail2ban/fail2ban/wiki/How-to-install-fail2ban-packages).
+
+If your distribution is not listed, you can install from GitHub:

 Required:
-- [Python2 >= 2.7 or Python >= 3.2](https://www.python.org) or [PyPy](https://pypy.org)
-- python-setuptools, python-distutils or python3-setuptools for installation from source
+- [Python >= 3.5](https://www.python.org) or [PyPy3](https://pypy.org)
+- python-setuptools, python-distutils (or python3-setuptools) for installation from source

 Optional:
 - [pyinotify >= 0.8.3](https://github.com/seb-m/pyinotify), may require:
   * Linux >= 2.6.13
-- [gamin >= 0.0.21](http://www.gnome.org/~veillard/gamin)
 - [systemd >= 204](http://www.freedesktop.org/wiki/Software/systemd) and python bindings:
   * [python-systemd package](https://www.freedesktop.org/software/systemd/python-systemd/index.html)
 - [dnspython](http://www.dnspython.org/)
+- [pyasyncore](https://pypi.org/project/pyasyncore/) and [pyasynchat](https://pypi.org/project/pyasynchat/) (normally bundled-in within fail2ban, for python 3.12+ only)


 To install:

-    tar xvfj fail2ban-1.0.1.tar.bz2
-    cd fail2ban-1.0.1
+    tar xvfj fail2ban-master.tar.bz2
+    cd fail2ban-master
     sudo python setup.py install

 Alternatively, you can clone the source from GitHub to a directory of Your choice, and do the install from there. Pick the correct branch, for example, master or 0.11

@@ -90,11 +91,7 @@ fail2ban(1) and jail.conf(5) manpages for further references.
 Code status:
 ------------

-* travis-ci.org: [![tests status](https://secure.travis-ci.org/fail2ban/fail2ban.svg?branch=master)](https://travis-ci.org/fail2ban/fail2ban?branch=master) / [![tests status](https://secure.travis-ci.org/fail2ban/fail2ban.svg?branch=0.11)](https://travis-ci.org/fail2ban/fail2ban?branch=0.11) (0.11 branch) / [![tests status](https://secure.travis-ci.org/fail2ban/fail2ban.svg?branch=0.10)](https://travis-ci.org/fail2ban/fail2ban?branch=0.10) (0.10 branch)
-
-* coveralls.io: [![Coverage Status](https://coveralls.io/repos/fail2ban/fail2ban/badge.svg?branch=master)](https://coveralls.io/github/fail2ban/fail2ban?branch=master) / [![Coverage Status](https://coveralls.io/repos/fail2ban/fail2ban/badge.svg?branch=0.11)](https://coveralls.io/github/fail2ban/fail2ban?branch=0.11) (0.11 branch) / [![Coverage Status](https://coveralls.io/repos/fail2ban/fail2ban/badge.svg?branch=0.10)](https://coveralls.io/github/fail2ban/fail2ban?branch=0.10) / (0.10 branch)
-
-* codecov.io: [![codecov.io](https://codecov.io/gh/fail2ban/fail2ban/coverage.svg?branch=master)](https://codecov.io/gh/fail2ban/fail2ban/branch/master) / [![codecov.io](https://codecov.io/gh/fail2ban/fail2ban/coverage.svg?branch=0.11)](https://codecov.io/gh/fail2ban/fail2ban/branch/0.11) (0.11 branch) / [![codecov.io](https://codecov.io/gh/fail2ban/fail2ban/coverage.svg?branch=0.10)](https://codecov.io/gh/fail2ban/fail2ban/branch/0.10) (0.10 branch)
+* [![CI](https://github.com/fail2ban/fail2ban/actions/workflows/main.yml/badge.svg)](https://github.com/fail2ban/fail2ban/actions/workflows/main.yml)

 Contact:
 --------
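For the "clone the source from GitHub" path the README mentions above, a minimal sketch (branch name and use of sudo are assumptions):

```sh
# Sketch: install from a git checkout instead of a release tarball.
git clone https://github.com/fail2ban/fail2ban.git
cd fail2ban
git checkout master              # or a release branch such as 0.11
sudo python setup.py install     # same installer the README uses for the tarball
```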
RELEASE
@@ -13,7 +13,7 @@ Preparation
 * Check distribution patches and see if they can be included

   * https://apps.fedoraproject.org/packages/fail2ban/sources
-  * http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/net-analyzer/fail2ban/
+  * https://gitweb.gentoo.org/repo/gentoo.git/tree/net-analyzer/fail2ban
   * http://svnweb.freebsd.org/ports/head/security/py-fail2ban/
   * https://build.opensuse.org/package/show?package=fail2ban&project=openSUSE%3AFactory
   * http://sophie.zarb.org/sources/fail2ban (Mageia)

@@ -49,7 +49,7 @@ Preparation
   ad-hoc bash script to run in a clean clone:

-    find -type f | grep -v -e '\.git' -e '/doc/' -e '\.travis' -e MANIFEST | sed -e 's,^\./,,g' | while read f; do grep -ne "^$f\$" MANIFEST >/dev/null || echo "$f" ; done
+    find -type f | grep -v -e '\.git' -e '/doc/' -e MANIFEST | sed -e 's,^\./,,g' | while read f; do grep -ne "^$f\$" MANIFEST >/dev/null || echo "$f" ; done

   or an alternative for comparison with previous release

@@ -115,7 +115,7 @@ Pre Release
 * Arch Linux:

-  * https://www.archlinux.org/packages/community/any/fail2ban/
+  * https://www.archlinux.org/packages/extra/any/fail2ban/

 * Debian: Yaroslav Halchenko <debian@onerussian.com>

@@ -134,7 +134,7 @@ Pre Release
 * Gentoo: netmon@gentoo.org

-  * http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/net-analyzer/fail2ban/metadata.xml?view=markup
+  * https://gitweb.gentoo.org/repo/gentoo.git/tree/net-analyzer/fail2ban/metadata.xml
   * https://bugs.gentoo.org/buglist.cgi?quicksearch=fail2ban

 * openSUSE: Stephan Kulow <coolo@suse.com>
THANKS
@@ -22,6 +22,7 @@ Andrey G. Grozin
 Andy Fragen
 Arturo 'Buanzo' Busleiman
 Axel Thimm
+Balazs Mateffy
 Bas van den Dikkenberg
 Beau Raines
 Bill Heaton
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
 # vi: set ft=python sts=4 ts=4 sw=4 noet :

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
 # vi: set ft=python sts=4 ts=4 sw=4 noet :
 #

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
 # vi: set ft=python sts=4 ts=4 sw=4 noet :

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
 # vi: set ft=python sts=4 ts=4 sw=4 noet :
 """Script to run Fail2Ban tests battery
@@ -80,7 +80,7 @@ block = ip
 # Option:  blocktype
 # Notes.:  How to block the traffic. Use a action from man 5 ipfw
 #          Common values: deny, unreach port, reset
-#          ACTION defination at the top of man ipfw for allowed values.
+#          ACTION definition at the top of man ipfw for allowed values.
 # Values:  STRING
 #
 blocktype = unreach port
@@ -50,7 +50,8 @@ actionban = curl -s -X POST "<_cf_api_url>" \
 #          <time>  unix timestamp of the ban time
 # Values: CMD
 #
-actionunban = id=$(curl -s -X GET "<_cf_api_url>?mode=<cfmode>&notes=<notes>&configuration.target=<cftarget>&configuration.value=<ip>" \
+actionunban = id=$(curl -s -X GET "<_cf_api_url>" \
+              --data-urlencode "mode=<cfmode>" --data-urlencode "notes=<notes>" --data-urlencode "configuration.target=<cftarget>" --data-urlencode "configuration.value=<ip>" \
               <_cf_api_prms> \
               | awk -F"[,:}]" '{for(i=1;i<=NF;i++){if($i~/'id'\042/){print $(i+1)}}}' \
               | tr -d ' "' \

@@ -67,7 +68,7 @@ _cf_api_prms = -H "Authorization: Bearer <cftoken>" -H "Content-Type: applicatio
 # Declare your Cloudflare Authorization Bearer Token in the [DEFAULT] section of your jail.local file.

-# The Cloudflare <ZONE_ID> of hte domain you want to manage.
+# The Cloudflare <ZONE_ID> of the domain you want to manage.
 #
 # cfzone =

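The unban change above moves the query parameters into `--data-urlencode` so values such as the notes are URL-encoded (gh-3479). A standalone sketch of the same curl pattern against a placeholder endpoint; combining `-G` with `--data-urlencode` makes curl append the encoded pairs to the URL as a query string (the endpoint and values below are made up):

```sh
# Sketch: URL-encode query parameters with curl, as the updated action does.
curl -s -G "https://api.example.com/v1/rules" \
  --data-urlencode "mode=block" \
  --data-urlencode "notes=Fail2Ban ssh" \
  --data-urlencode "configuration.value=192.0.2.1"
```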
@@ -35,7 +35,7 @@ actionunban = <ipstype_<ipsettype>/actionunban>
 [ipstype_ipset]

-actionstart = ipset -exist create <ipmset> hash:ip timeout <default-ipsettime> <familyopt>
+actionstart = ipset -exist create <ipmset> hash:ip timeout <default-ipsettime> maxelem <maxelem> <familyopt>

 actionflush = ipset flush <ipmset>

@@ -47,7 +47,7 @@ actionunban = ipset -exist del <ipmset> <ip>
 [ipstype_firewalld]

-actionstart = firewall-cmd --direct --new-ipset=<ipmset> --type=hash:ip --option=timeout=<default-ipsettime> <firewalld_familyopt>
+actionstart = firewall-cmd --direct --new-ipset=<ipmset> --type=hash:ip --option=timeout=<default-ipsettime> --option=maxelem=<maxelem> <firewalld_familyopt>

 # TODO: there doesn't seem to be an explicit way to invoke the ipset flush function using firewall-cmd
 actionflush =

@@ -77,7 +77,13 @@ default-ipsettime = 0
 # Values:  [ NUM ]  Default: 0 (managed by fail2ban by unban)
 ipsettime = 0

-# expresion to caclulate timeout from bantime, example:
+# Option: maxelem
+# Notes: maximal number of elements which can be stored in the ipset
+#        You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
+
+# expression to calculate timeout from bantime, example:
 # banaction = %(known/banaction)s[ipsettime='<timeout-bantime>']
 timeout-bantime = $([ "<bantime>" -le 2147483 ] && echo "<bantime>" || echo 0)

@@ -118,4 +124,4 @@ firewalld_familyopt = --option=family=inet6
 # DEV NOTES:
 #
 # Author: Edgar Hoch, Daniel Black, Sergey Brester and Mihail Politaev
-# firewallcmd-new / iptables-ipset-proto6 combined for maximium goodness
+# firewallcmd-new / iptables-ipset-proto6 combined for maximum goodness
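Per the 1.1.0 changelog (gh-3564), the new `maxelem` default of 65536 can be raised per jail through banaction arguments, using the same bracketed-argument syntax this file already shows for `ipsettime`. A hedged jail.local sketch; the jail name, the value, and the `firewallcmd-ipset` action name are assumptions, since the file name is not shown in this excerpt:

```sh
# Sketch: raise the ipset size for a busy jail via banaction arguments.
cat >> /etc/fail2ban/jail.local <<'EOF'
[sshd]
banaction = firewallcmd-ipset[maxelem=262144]
EOF
```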
@@ -27,7 +27,7 @@ before = iptables.conf
 # Notes.: command executed on demand at the first ban (or at the start of Fail2Ban if actionstart_on_demand is set to false).
 # Values: CMD
 #
-actionstart = ipset --create f2b-<name> iphash
+actionstart = ipset --create f2b-<name> maxelem <maxelem> iphash
               <_ipt_add_rules>

@@ -61,6 +61,14 @@ actionban = ipset --test f2b-<name> <ip> || ipset --add f2b-<name> <ip>
 #
 actionunban = ipset --test f2b-<name> <ip> && ipset --del f2b-<name> <ip>

-# Several capabilities used internaly:
+# Several capabilities used internally:

 rule-jump = -m set --match-set f2b-<name> src -j <blocktype>

+[Init]
+
+# Option: maxelem
+# Notes: maximal number of elements which can be stored in the ipset
+#        You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
+
@@ -24,7 +24,7 @@ before = iptables.conf
 # Notes.: command executed on demand at the first ban (or at the start of Fail2Ban if actionstart_on_demand is set to false).
 # Values: CMD
 #
-actionstart = ipset -exist create <ipmset> hash:ip timeout <default-ipsettime> <familyopt>
+actionstart = ipset -exist create <ipmset> hash:ip timeout <default-ipsettime> maxelem <maxelem> <familyopt>
               <_ipt_add_rules>

 # Option: actionflush

@@ -59,7 +59,7 @@ actionban = ipset -exist add <ipmset> <ip> timeout <ipsettime>
 #
 actionunban = ipset -exist del <ipmset> <ip>

-# Several capabilities used internaly:
+# Several capabilities used internally:

 rule-jump = -m set --match-set <ipmset> src -j <blocktype>

@@ -76,7 +76,13 @@ default-ipsettime = 0
 # Values:  [ NUM ]  Default: 0 (managed by fail2ban by unban)
 ipsettime = 0

-# expresion to caclulate timeout from bantime, example:
+# Option: maxelem
+# Notes: maximal number of elements which can be stored in the ipset
+#        You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
+
+# expression to calculate timeout from bantime, example:
 # banaction = %(known/banaction)s[ipsettime='<timeout-bantime>']
 timeout-bantime = $([ "<bantime>" -le 2147483 ] && echo "<bantime>" || echo 0)

@@ -62,7 +62,7 @@ pre-rule =

 rule-jump = -j <_ipt_rule_target>

-# Several capabilities used internaly:
+# Several capabilities used internally:

 _ipt_for_proto-iter = for proto in $(echo '<protocol>' | sed 's/,/ /g'); do
 _ipt_for_proto-done = done
@@ -47,7 +47,7 @@
 #  BadBot       256   Bad bot that is not honoring robots.txt or just flooding with too many requests, etc
 #  Compromised  512   The ip has been taken over by malware or botnet
 #  Phishing     1024  The ip is involved in phishing or spoofing
-#  Iot          2048  The ip has targetted an iot (Internet of Things) device
+#  Iot          2048  The ip has targeted an iot (Internet of Things) device
 #  PortScan     4096  Port scan
 # See https://ipthreat.net/bulkreportformat for more information
 # ```
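The category numbers listed above are powers of two, which suggests they are meant to be combined as bit flags when one report covers several categories; a small sketch under that assumption:

```sh
# Sketch: combine ipthreat.net report categories shown above (assumed OR-able flags).
# Compromised (512) + PortScan (4096)
echo $(( 512 | 4096 ))   # prints 4608
```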
@ -0,0 +1,84 @@
|
||||||
|
# Fail2Ban configuration file
|
||||||
|
#
|
||||||
|
# Mikrotik routerOS action to add/remove address-list entries
|
||||||
|
#
|
||||||
|
# Author: Duncan Bellamy <dunk@denkimushi.com>
|
||||||
|
# based on forum.mikrotik.com post by pakjebakmeel
|
||||||
|
#
|
||||||
|
# in the instructions:
|
||||||
|
# (10.0.0.1 is ip of mikrotik router)
|
||||||
|
# (10.0.0.2 is ip of fail2ban machine)
|
||||||
|
#
|
||||||
|
# on fail2ban machine:
|
||||||
|
# sudo mkdir /var/lib/fail2ban/ssh
|
||||||
|
# sudo chmod 700 /var/lib/fail2ban/ssh
|
||||||
|
# sudo ssh-keygen -N "" -f /var/lib/fail2ban/ssh/fail2ban_id_rsa
|
||||||
|
# sudo scp /var/lib/fail2ban/ssh/fail2ban_id_rsa.pub admin@10.0.0.1:/
|
||||||
|
# ssh admin@10.0.0.1
|
||||||
|
#
|
||||||
|
# on mikrotik router:
|
||||||
|
# /user add name=miki-f2b group=write address=10.0.0.2 password=""
|
||||||
|
# /user ssh-keys import public-key-file=fail2ban_id_rsa.pub user=miki-f2b
|
||||||
|
# /quit
|
||||||
|
#
|
||||||
|
# on fail2ban machine:
|
||||||
|
# (check password login fails)
|
||||||
|
# ssh miki-f2b@10.0.0.1
|
||||||
|
# (check private key works)
|
||||||
|
# sudo ssh -i /var/lib/fail2ban/ssh/fail2ban_id_rsa miki-f2b@10.0.0.1
|
||||||
|
#
|
||||||
|
# Then create rules on mikrorik router that use address
|
||||||
|
# list(s) maintained by fail2ban eg in the forward chain
|
||||||
|
# drop from address list, or in the forward chain drop
|
||||||
|
# from address list to server
|
||||||
|
#
|
||||||
|
# example extract from jail.local overriding some defaults
|
||||||
|
# action = mikrotik[keyfile="%(mkeyfile)s", user="%(muser)s", host="%(mhost)s", list="%(mlist)s"]
|
||||||
|
#
|
||||||
|
# ignoreip = 127.0.0.1/8 192.168.0.0/24
|
||||||
|
|
||||||
|
# mkeyfile = /etc/fail2ban/ssh/mykey_id_rsa
|
||||||
|
# muser = myuser
|
||||||
|
# mhost = 192.168.0.1
|
||||||
|
# mlist = BAD LIST
|
||||||
|
|
||||||
|
[Definition]
|
||||||
|
|
||||||
|
actionstart =
|
||||||
|
|
||||||
|
actionstop = %(actionflush)s
|
||||||
|
|
||||||
|
actionflush = %(command)s "/ip firewall address-list remove [find list=\"%(list)s\" comment~\"%(startcomment)s-*\"]"
|
||||||
|
|
||||||
|
actioncheck =
|
||||||
|
|
||||||
|
actionban = %(command)s "/ip firewall address-list add list=\"%(list)s\" address=<ip> comment=%(comment)s"
|
||||||
|
|
||||||
|
actionunban = %(command)s "/ip firewall address-list remove [find list=\"%(list)s\" comment=%(comment)s]"
|
||||||
|
|
||||||
|
command = ssh -l %(user)s -p%(port)s -i %(keyfile)s %(host)s
|
||||||
|
|
||||||
|
# Option: user
|
||||||
|
# Notes.: username to use when connecting to routerOS
|
||||||
|
user =
|
||||||
|
# Option: port
|
||||||
|
# Notes.: port to use when connecting to routerOS
|
||||||
|
port = 22
|
||||||
|
# Option: keyfile
|
||||||
|
# Notes.: ssh private key to use for connecting to routerOS
|
||||||
|
keyfile =
|
||||||
|
# Option: host
|
||||||
|
# Notes.: hostname or ip of router
|
||||||
|
host =
|
||||||
|
# Option: list
|
||||||
|
# Notes.: name of "address-list" to use on router
|
||||||
|
list = Fail2Ban
|
||||||
|
# Option: startcomment
|
||||||
|
# Notes.: used as a prefix to all comments, and used to match for flushing rules
|
||||||
|
startcomment = f2b-<name>
|
||||||
|
# Option: comment
|
||||||
|
# Notes.: comment to use on routerOS (must be unique as used for ip address removal)
|
||||||
|
comment = %(startcomment)s-<ip>
|
||||||
|
|
||||||
|
[Init]
|
||||||
|
name="%(__name__)s"
|
|
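Pulling the commented hints above together, a minimal jail.local sketch for the new action might look like this (values mirror the setup instructions above and are illustrative only):

[sshd]
enabled   = true
banaction = mikrotik[keyfile="/var/lib/fail2ban/ssh/fail2ban_id_rsa", user="miki-f2b", host="10.0.0.1", list="Fail2Ban"]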
@ -5,7 +5,7 @@
|
||||||
# The script will add offender IPs to a dataset on netscaler, the dataset can then be used to block the IPs at a cs/vserver or global level
|
# The script will add offender IPs to a dataset on netscaler, the dataset can then be used to block the IPs at a cs/vserver or global level
|
||||||
# This dataset is then used to block IPs using responder policies on the netscaler.
|
# This dataset is then used to block IPs using responder policies on the netscaler.
|
||||||
#
|
#
|
||||||
# The script assumes using HTTPS with unsecure certificate to access the netscaler,
|
# The script assumes using HTTPS with insecure certificate to access the netscaler,
|
||||||
# if you have a valid certificate installed remove the -k from the curl lines, or if you want http change it accordingly (and remove the -k)
|
# if you have a valid certificate installed remove the -k from the curl lines, or if you want http change it accordingly (and remove the -k)
|
||||||
#
|
#
|
||||||
# This action depends on curl
|
# This action depends on curl
|
||||||
|
|
|
@ -44,7 +44,7 @@ match = <rule_match-<type>>
|
||||||
#
|
#
|
||||||
rule_stat = %(match)s <addr_family> saddr @<addr_set> <blocktype>
|
rule_stat = %(match)s <addr_family> saddr @<addr_set> <blocktype>
|
||||||
|
|
||||||
# optional interator over protocol's:
|
# optional iterator over protocol's:
|
||||||
_nft_for_proto-custom-iter =
|
_nft_for_proto-custom-iter =
|
||||||
_nft_for_proto-custom-done =
|
_nft_for_proto-custom-done =
|
||||||
_nft_for_proto-allports-iter =
|
_nft_for_proto-allports-iter =
|
||||||
|
|
|
@ -4,6 +4,7 @@
|
||||||
#
|
#
|
||||||
# Author: Nick Hilliard <nick@foobar.org>
|
# Author: Nick Hilliard <nick@foobar.org>
|
||||||
# Modified by: Alexander Koeppe making PF work seamless and with IPv4 and IPv6
|
# Modified by: Alexander Koeppe making PF work seamless and with IPv4 and IPv6
|
||||||
|
# Modified by: Balazs Mateffy adding allproto option so all traffic gets blocked from the malicious source
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
|
|
||||||
|
@ -26,9 +27,11 @@
|
||||||
# }
|
# }
|
||||||
# to your main pf ruleset, where "namei" are the names of the jails
|
# to your main pf ruleset, where "namei" are the names of the jails
|
||||||
# which invoke this action
|
# which invoke this action
|
||||||
|
# to block all protocols use the pf[protocol=all] option
|
||||||
actionstart = echo "table <<tablename>-<name>> persist counters" | <pfctl> -f-
|
actionstart = echo "table <<tablename>-<name>> persist counters" | <pfctl> -f-
|
||||||
port="<port>"; if [ "$port" != "" ] && case "$port" in \{*) false;; esac; then port="{$port}"; fi
|
port="<port>"; if [ "$port" != "" ] && case "$port" in \{*) false;; esac; then port="{$port}"; fi
|
||||||
echo "<block> proto <protocol> from <<tablename>-<name>> to <actiontype>" | <pfctl> -f-
|
protocol="<protocol>"; if [ "$protocol" != "all" ]; then protocol="proto $protocol"; else protocol=all; fi
|
||||||
|
echo "<block> $protocol from <<tablename>-<name>> to <actiontype>" | <pfctl> -f-
|
||||||
|
|
||||||
# Option: start_on_demand - to start action on demand
|
# Option: start_on_demand - to start action on demand
|
||||||
# Example: `action=pf[actionstart_on_demand=true]`
|
# Example: `action=pf[actionstart_on_demand=true]`
|
||||||
|
@ -98,6 +101,7 @@ tablename = f2b
|
||||||
#
|
#
|
||||||
# The action you want pf to take.
|
# The action you want pf to take.
|
||||||
# Probably, you want "block quick", but adjust as needed.
|
# Probably, you want "block quick", but adjust as needed.
|
||||||
|
# If you want to log all blocked packets use "block log quick"
|
||||||
block = block quick
|
block = block quick
|
||||||
|
|
||||||
# Option: protocol
|
# Option: protocol
|
||||||
|
|
|
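A hedged jail.local sketch using the new option (bracket parameters override the action defaults, as with other actions; the logging variant assumes the "block log quick" value noted above):

banaction = pf[protocol=all]
# or, to also log blocked packets:
banaction = pf[protocol=all, block="block log quick"]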
@ -51,7 +51,7 @@
|
||||||
# Values: CMD
|
# Values: CMD
|
||||||
#
|
#
|
||||||
actionstart = if ! ipset -quiet -name list f2b-<name> >/dev/null;
|
actionstart = if ! ipset -quiet -name list f2b-<name> >/dev/null;
|
||||||
then ipset -quiet -exist create f2b-<name> hash:ip timeout <default-ipsettime>;
|
then ipset -quiet -exist create f2b-<name> hash:ip timeout <default-ipsettime> maxelem <maxelem>;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Option: actionstop
|
# Option: actionstop
|
||||||
|
@ -88,6 +88,14 @@ default-ipsettime = 0
|
||||||
# Values: [ NUM ] Default: 0 (managed by fail2ban by unban)
|
# Values: [ NUM ] Default: 0 (managed by fail2ban by unban)
|
||||||
ipsettime = 0
|
ipsettime = 0
|
||||||
|
|
||||||
# expresion to caclulate timeout from bantime, example:
|
# expression to calculate timeout from bantime, example:
|
||||||
# banaction = %(known/banaction)s[ipsettime='<timeout-bantime>']
|
# banaction = %(known/banaction)s[ipsettime='<timeout-bantime>']
|
||||||
timeout-bantime = $([ "<bantime>" -le 2147483 ] && echo "<bantime>" || echo 0)
|
timeout-bantime = $([ "<bantime>" -le 2147483 ] && echo "<bantime>" || echo 0)
|
||||||
|
|
||||||
|
[Init]
|
||||||
|
|
||||||
|
# Option: maxelem
|
||||||
|
# Notes: maximal number of elements which can be stored in the ipset
|
||||||
|
# You may want to increase this for long-duration/high-volume jails
|
||||||
|
# Values: [ NUM ] Default: 65536
|
||||||
|
maxelem = 65536
|
||||||
|
|
|
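The maxelem option introduced above is applied when the set is created in actionstart, so a long-running or high-volume jail has to override it through its banaction. A hedged jail.local sketch (the action name and size are illustrative; use whichever ipset-based banaction the jail actually relies on):

[sshd]
banaction = iptables-ipset-proto6[maxelem=262144]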
@ -75,7 +75,7 @@ class SMTPAction(ActionBase):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self, jail, name, host="localhost", user=None, password=None,
|
self, jail, name, host="localhost", ssl=False, user=None, password=None,
|
||||||
sendername="Fail2Ban", sender="fail2ban", dest="root", matches=None):
|
sendername="Fail2Ban", sender="fail2ban", dest="root", matches=None):
|
||||||
"""Initialise action.
|
"""Initialise action.
|
||||||
|
|
||||||
|
@ -88,6 +88,8 @@ class SMTPAction(ActionBase):
|
||||||
host : str, optional
|
host : str, optional
|
||||||
SMTP host, of host:port format. Default host "localhost" and
|
SMTP host, of host:port format. Default host "localhost" and
|
||||||
port "25"
|
port "25"
|
||||||
|
ssl : bool, optional
|
||||||
|
Whether to use TLS for the SMTP connection or not. Default False.
|
||||||
user : str, optional
|
user : str, optional
|
||||||
Username used for authentication with SMTP server.
|
Username used for authentication with SMTP server.
|
||||||
password : str, optional
|
password : str, optional
|
||||||
|
@ -109,7 +111,7 @@ class SMTPAction(ActionBase):
|
||||||
super(SMTPAction, self).__init__(jail, name)
|
super(SMTPAction, self).__init__(jail, name)
|
||||||
|
|
||||||
self.host = host
|
self.host = host
|
||||||
#TODO: self.ssl = ssl
|
self.ssl = ssl
|
||||||
|
|
||||||
self.user = user
|
self.user = user
|
||||||
self.password =password
|
self.password =password
|
||||||
|
@ -155,10 +157,18 @@ class SMTPAction(ActionBase):
|
||||||
msg['To'] = self.toaddr
|
msg['To'] = self.toaddr
|
||||||
msg['Date'] = formatdate()
|
msg['Date'] = formatdate()
|
||||||
|
|
||||||
smtp = smtplib.SMTP()
|
smtp_host, smtp_port = self.host.split(':')
|
||||||
|
smtp = smtplib.SMTP(host=smtp_host, port=smtp_port)
|
||||||
try:
|
try:
|
||||||
|
r = smtp.connect(host=smtp_host, port=smtp_port)
|
||||||
self._logSys.debug("Connected to SMTP '%s', response: %i: %s",
|
self._logSys.debug("Connected to SMTP '%s', response: %i: %s",
|
||||||
self.host, *smtp.connect(self.host))
|
self.host, *r)
|
||||||
|
|
||||||
|
if self.ssl: # pragma: no cover
|
||||||
|
r = smtp.starttls()[0];
|
||||||
|
if r != 220: # pragma: no cover
|
||||||
|
raise Exception("Failed to starttls() on '%s': %s" % (self.host, r))
|
||||||
|
|
||||||
if self.user and self.password: # pragma: no cover (ATM no tests covering that)
|
if self.user and self.password: # pragma: no cover (ATM no tests covering that)
|
||||||
smtp.login(self.user, self.password)
|
smtp.login(self.user, self.password)
|
||||||
failed_recipients = smtp.sendmail(
|
failed_recipients = smtp.sendmail(
|
||||||
|
|
|
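Assuming the Python action is wired into a jail the usual way (by its .py file name), the new ssl flag could be exercised roughly as below; the host, credentials and the string-to-boolean handling of ssl are assumptions and are not shown in the diff:

action = smtp.py[host="mail.example.net:587", ssl=true, user="f2b", password="secret", dest="admin@example.net"]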
@ -64,7 +64,7 @@ ignoreregex =
|
||||||
# ^user .*: one-time-nonce mismatch - sending new nonce\s*$
|
# ^user .*: one-time-nonce mismatch - sending new nonce\s*$
|
||||||
# ^realm mismatch - got `(?:[^']*|.*?)' but no realm specified\s*$
|
# ^realm mismatch - got `(?:[^']*|.*?)' but no realm specified\s*$
|
||||||
#
|
#
|
||||||
# Because url/referer are foreign input, short form of regex used if long enough to idetify failure.
|
# Because url/referer are foreign input, short form of regex used if long enough to identify failure.
|
||||||
#
|
#
|
||||||
# Author: Cyril Jaquier
|
# Author: Cyril Jaquier
|
||||||
# Major edits by Daniel Black and Ben Rubson.
|
# Major edits by Daniel Black and Ben Rubson.
|
||||||
|
|
|
@ -29,7 +29,7 @@ apache-prefix = <apache-prefix-<logging>>
|
||||||
|
|
||||||
apache-pref-ignore =
|
apache-pref-ignore =
|
||||||
|
|
||||||
_apache_error_client = <apache-prefix>\[(:?error|<apache-pref-ignore>\S+:\S+)\]( \[pid \d+(:\S+ \d+)?\])? \[client <HOST>(:\d{1,5})?\]
|
_apache_error_client = <apache-prefix>\[(:?error|<apache-pref-ignore>\S+:\S+)\]( \[pid \d+(:\S+ \d+)?\])? \[(?:client|remote) <HOST>(:\d{1,5})?\]
|
||||||
|
|
||||||
datepattern = {^LN-BEG}
|
datepattern = {^LN-BEG}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,16 @@
|
||||||
|
# Fail2Ban filter for dante
|
||||||
|
#
|
||||||
|
# Make sure you have "log: error" set in your "client pass" directive
|
||||||
|
#
|
||||||
|
|
||||||
|
[INCLUDES]
|
||||||
|
before = common.conf
|
||||||
|
|
||||||
|
[Definition]
|
||||||
|
_daemon = danted
|
||||||
|
|
||||||
|
failregex = ^%(__prefix_line)sinfo: block\(\d\): tcp/accept \]: <ADDR>\.\d+ \S+: error after reading \d+ bytes? in \d+ seconds?: (?:could not access|system password authentication failed for|pam_authenticate\(\) for) user "<F-USER>[^"]+</F-USER>"
|
||||||
|
|
||||||
|
[Init]
|
||||||
|
journalmatch = _SYSTEMD_UNIT=danted.service
|
||||||
|
|
|
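The matching [dante] jail added to jail.conf further below supplies the port and logpath defaults, so enabling the new filter from jail.local reduces to:

[dante]
enabled = true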
@ -9,12 +9,43 @@ after = exim-common.local
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
host_info_pre = (?:H=([\w.-]+ )?(?:\(\S+\) )?)?
|
_fields_grp = (?: (?!H=)[A-Za-z]{1,4}(?:=\S+)?)*
|
||||||
host_info_suf = (?::\d+)?(?: I=\[\S+\](:\d+)?)?(?: U=\S+)?(?: P=e?smtp)?(?: F=(?:<>|[^@]+@\S+))?\s
|
host_info = %(_fields_grp)s (?:H=)?(?:[\w.-]+)? ?(?:\(\S+\))? ?\[<ADDR>\](?::\d+)?%(_fields_grp)s
|
||||||
host_info = %(host_info_pre)s\[<HOST>\]%(host_info_suf)s
|
pid = (?:\s?\[\d+\]|\s?[\w\.-]+ exim\[\d+\]:){0,2}
|
||||||
pid = (?: \[\d+\]| \w+ exim\[\d+\]:)?
|
|
||||||
|
|
||||||
# DEV Notes:
|
logtype = file
|
||||||
# From exim source code: ./src/receive.c:add_host_info_for_log
|
_add_pref = <lt_<logtype>/_add_pref>
|
||||||
#
|
|
||||||
# Author: Daniel Black
|
__prefix_line = %(pid)s%(_add_pref)s
|
||||||
|
|
||||||
|
[lt_journal]
|
||||||
|
_add_pref = (?: \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})?
|
||||||
|
|
||||||
|
[lt_file]
|
||||||
|
_add_pref =
|
||||||
|
|
||||||
|
# DEV Notes
|
||||||
|
# ------------
|
||||||
|
# Host string happens:
|
||||||
|
# H=[ip address]
|
||||||
|
# H=(helo_name) [ip address]
|
||||||
|
# H=host_name [ip address]
|
||||||
|
# H=host_name (helo_name) [ip address]
|
||||||
|
# flags H=host_name (helo_name) [ip address] flags
|
||||||
|
# where only [ip address] always visible, ignore ident
|
||||||
|
# From exim source code:
|
||||||
|
# src/src/host.c:host_and_ident()
|
||||||
|
# src/receive.c:add_host_info_for_log()
|
||||||
|
|
||||||
|
# Substitution of `_fields_grp` bypasses all flags but H
|
||||||
|
# Summary of Fields in Log Lines depending on log_selector
|
||||||
|
# https://www.exim.org/exim-html-current/doc/html/spec_html/ch-log_files.html
|
||||||
|
# at version exim-4.97.1
|
||||||
|
# ---
|
||||||
|
|
||||||
|
# Authors:
|
||||||
|
# Cyril Jaquier
|
||||||
|
# Daniel Black (rewrote with strong regexs)
|
||||||
|
# Sergey G. Brester aka sebres (optimization, rewrite to prefregex, reviews)
|
||||||
|
# Martin O'Neal (added additional regexs to detect authentication failures, protocol errors, and drops)
|
||||||
|
# Vladimir Varlamov (host line definition)
|
||||||
|
|
|
@ -26,11 +26,13 @@ before = exim-common.conf
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
failregex = ^%(pid)s \S+ F=(<>|\S+@\S+) %(host_info)srejected by local_scan\(\): .{0,256}$
|
prefregex = ^%(__prefix_line)s<F-CONTENT>.+</F-CONTENT>$
|
||||||
^%(pid)s %(host_info)sF=(<>|[^@]+@\S+) rejected RCPT [^@]+@\S+: .*dnsbl.*\s*$
|
|
||||||
^%(pid)s \S+ %(host_info)sF=(<>|[^@]+@\S+) rejected after DATA: This message contains a virus \(\S+\)\.\s*$
|
failregex = ^\s?\S+%(host_info)s rejected by local_scan\(\): .{0,256}$
|
||||||
^%(pid)s \S+ SA: Action: flagged as Spam but accepted: score=\d+\.\d+ required=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=\S+ \[<HOST>\]\) for <honeypot>$
|
^%(host_info)s rejected RCPT [^@]+@\S+: .*dnsbl.*\s*$
|
||||||
^%(pid)s \S+ SA: Action: silently tossed message: score=\d+\.\d+ required=\d+\.\d+ trigger=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=(\S+ )?\[<HOST>\]\) for \S+$
|
^\s?\S+%(host_info)s rejected after DATA: This message contains a virus \(\S+\)\.\s*$
|
||||||
|
^\s?\S+ SA: Action: flagged as Spam but accepted: score=\d+\.\d+ required=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=\S+ \[<HOST>\]\) for <honeypot>$
|
||||||
|
^\s?\S+ SA: Action: silently tossed message: score=\d+\.\d+ required=\d+\.\d+ trigger=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=(\S+ )?\[<HOST>\]\) for \S+$
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
|
@ -43,8 +45,6 @@ ignoreregex =
|
||||||
|
|
||||||
honeypot = trap@example.com
|
honeypot = trap@example.com
|
||||||
|
|
||||||
# DEV Notes:
|
# DEV Notes
|
||||||
# The %(host_info) defination contains a <HOST> match
|
# -----------
|
||||||
#
|
# The %(host_info) definition contains a <ADDR> match. No space before. See exim-common.conf
|
||||||
# Author: Cyril Jaquier
|
|
||||||
# Daniel Black (rewrote with strong regexs)
|
|
||||||
|
|
|
@ -13,21 +13,20 @@ before = exim-common.conf
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
# Fre-filter via "prefregex" is currently inactive because of too different failure syntax in exim-log (testing needed):
|
prefregex = ^%(__prefix_line)s<F-CONTENT>.+</F-CONTENT>$
|
||||||
#prefregex = ^%(pid)s <F-CONTENT>\b(?:\w+ authenticator failed|([\w\-]+ )?SMTP (?:(?:call|connection) from|protocol(?: synchronization)? error)|no MAIL in|(?:%(host_info_pre)s\[[^\]]+\]%(host_info_suf)s(?:sender verify fail|rejected RCPT|dropped|AUTH command))).+</F-CONTENT>$
|
|
||||||
|
|
||||||
failregex = ^%(pid)s %(host_info)ssender verify fail for <\S+>: (?:Unknown user|Unrouteable address|all relevant MX records point to non-existent hosts)\s*$
|
failregex = ^%(host_info)s sender verify fail for <\S+>: (?:Unknown user|Unrouteable address|all relevant MX records point to non-existent hosts)\s*$
|
||||||
^%(pid)s \w+ authenticator failed for (?:[^\[\( ]* )?(?:\(\S*\) )?\[<HOST>\](?::\d+)?(?: I=\[\S+\](:\d+)?)?: 535 Incorrect authentication data( \(set_id=.*\)|: \d+ Time\(s\))?\s*$
|
^\s?\w+ authenticator failed for%(host_info)s: 535 Incorrect authentication data(?: \(set_id=.*\)|: \d+ Time\(s\))?\s*$
|
||||||
^%(pid)s %(host_info)srejected RCPT [^@]+@\S+: (?:relay not permitted|Sender verify failed|Unknown user|Unrouteable address)\s*$
|
^%(host_info)s rejected RCPT [^@]+@\S+: (?:relay not permitted|Sender verify failed|Unknown user|Unrouteable address)\s*$
|
||||||
^%(pid)s SMTP protocol synchronization error \([^)]*\): rejected (?:connection from|"\S+") %(host_info)s(?:next )?input=".*"\s*$
|
^\s?SMTP protocol synchronization error \([^)]*\): rejected (?:connection from|"\S+")%(host_info)s (?:next )?input=".*"\s*$
|
||||||
^%(pid)s SMTP call from (?:[^\[\( ]* )?%(host_info)sdropped: too many (?:nonmail commands|syntax or protocol errors) \(last (?:command )?was "[^"]*"\)\s*$
|
^\s?SMTP call from%(host_info)s dropped: too many (?:(?:nonmail|unrecognized) commands|syntax or protocol errors)
|
||||||
^%(pid)s SMTP protocol error in "[^"]+(?:"+[^"]*(?="))*?" %(host_info)sAUTH command used when not advertised\s*$
|
^\s?SMTP protocol error in "[^"]+(?:"+[^"]*(?="))*?"%(host_info)s [A-Z]+ (?:command used when not advertised|authentication mechanism not supported)\s*$
|
||||||
^%(pid)s no MAIL in SMTP connection from (?:[^\[\( ]* )?(?:\(\S*\) )?%(host_info)sD=\d\S*s(?: C=\S*)?\s*$
|
^\s?no MAIL in SMTP connection from%(host_info)s
|
||||||
^%(pid)s (?:[\w\-]+ )?SMTP connection from (?:[^\[\( ]* )?(?:\(\S*\) )?%(host_info)sclosed by DROP in ACL\s*$
|
^\s?(?:[\w\-]+ )?SMTP connection from%(host_info)s closed by DROP in ACL\s*$
|
||||||
<mdre-<mode>>
|
<mdre-<mode>>
|
||||||
|
|
||||||
mdre-aggressive = ^%(pid)s no host name found for IP address <HOST>$
|
mdre-aggressive = ^\s?no host name found for IP address <ADDR>$
|
||||||
^%(pid)s no IP address found for host \S+ \(during SMTP connection from \[<HOST>\]\)$
|
^\s?no IP address found for host \S+ \(during SMTP connection from%(host_info)s\)$
|
||||||
|
|
||||||
mdre-normal =
|
mdre-normal =
|
||||||
|
|
||||||
|
@ -42,13 +41,10 @@ mode = normal
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
# DEV Notes:
|
# DEV Notes
|
||||||
# The %(host_info) defination contains a <HOST> match
|
# -----------
|
||||||
|
# The %(host_info) definition contains a <ADDR> match. No space before. See exim-common.conf
|
||||||
#
|
#
|
||||||
# SMTP protocol synchronization error \([^)]*\) <- This needs to be non-greedy
|
# SMTP protocol synchronization error \([^)]*\) <- This needs to be non-greedy
|
||||||
# to void capture beyond ")" to avoid a DoS Injection vulnerabilty as input= is
|
# to avoid capture beyond ")" and so prevent a DoS injection vulnerability, as input= is
|
||||||
# user injectable data.
|
# user injectable data.
|
||||||
#
|
|
||||||
# Author: Cyril Jaquier
|
|
||||||
# Daniel Black (rewrote with strong regexs)
|
|
||||||
# Martin O'Neal (added additional regexs to detect authentication failures, protocol errors, and drops)
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Fail2Ban filter for unsuccesfull MongoDB authentication attempts
|
# Fail2Ban filter for unsuccessful MongoDB authentication attempts
|
||||||
#
|
#
|
||||||
# Logfile /var/log/mongodb/mongodb.log
|
# Logfile /var/log/mongodb/mongodb.log
|
||||||
#
|
#
|
||||||
|
@ -23,7 +23,7 @@ maxlines = 10
|
||||||
#
|
#
|
||||||
# Regarding the multiline regex:
|
# Regarding the multiline regex:
|
||||||
#
|
#
|
||||||
# There can be a nunber of non-related lines between the first and second part
|
# There can be a number of non-related lines between the first and second part
|
||||||
# of this regex; maxlines of 10 is quite generous.
|
# of this regex; maxlines of 10 is quite generous.
|
||||||
#
|
#
|
||||||
# Note the capture __connid, includes the connection ID, used in second part of regex.
|
# Note the capture __connid, includes the connection ID, used in second part of regex.
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Fail2Ban filter for unsuccesful MySQL authentication attempts
|
# Fail2Ban filter for unsuccessful MySQL authentication attempts
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld]:
|
# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld]:
|
||||||
|
@ -17,7 +17,7 @@ before = common.conf
|
||||||
|
|
||||||
_daemon = mysqld
|
_daemon = mysqld
|
||||||
|
|
||||||
failregex = ^%(__prefix_line)s(?:(?:\d{6}|\d{4}-\d{2}-\d{2})[ T]\s?\d{1,2}:\d{2}:\d{2} )?(?:\d+ )?\[\w+\] (?:\[[^\]]+\] )*Access denied for user '<F-USER>[^']+</F-USER>'@'<HOST>' (to database '[^']*'|\(using password: (YES|NO)\))*\s*$
|
failregex = ^%(__prefix_line)s(?:(?:\d{6}|\d{4}-\d{2}-\d{2})[ T]\s?\d{1,2}:\d{2}:\d{2} )?(?:\d+ )?\[\w+\] (?:\[[^\]]+\] )*Access denied for user '<F-USER>[^']+</F-USER>'@'<HOST>'(?:\s+(?:to database '[^']*'|\(using password: (?:YES|NO)\)){1,2})?\s*$
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
|
|
|
@ -37,7 +37,7 @@ _category_re = (?:%(_category)s: )?
|
||||||
# this can be optional (for instance if we match named native log files)
|
# this can be optional (for instance if we match named native log files)
|
||||||
__line_prefix=\s*(?:\S+ %(__daemon_combs_re)s\s+)?%(_category_re)s
|
__line_prefix=\s*(?:\S+ %(__daemon_combs_re)s\s+)?%(_category_re)s
|
||||||
|
|
||||||
prefregex = ^%(__line_prefix)s(?:(?:error|info):\s*)?client(?: @\S*)? <HOST>#\S+(?: \([\S.]+\))?: <F-CONTENT>.+</F-CONTENT>\s(?:denied|\(NOTAUTH\))\s*$
|
prefregex = ^%(__line_prefix)s(?:(?:error|info):\s*)?client(?: @\S*)? <HOST>#\S+(?: \([\S.]+\))?: <F-CONTENT>.+</F-CONTENT>\s(?:denied(?: \([^\)]*\))?|\(NOTAUTH\))\s*$
|
||||||
|
|
||||||
failregex = ^(?:view (?:internal|external): )?query(?: \(cache\))?
|
failregex = ^(?:view (?:internal|external): )?query(?: \(cache\))?
|
||||||
^zone transfer
|
^zone transfer
|
||||||
|
|
|
@ -0,0 +1,32 @@
|
||||||
|
# Generic nginx error_log configuration items (to be used as interpolations) in other
|
||||||
|
# filters monitoring nginx error-logs
|
||||||
|
#
|
||||||
|
|
||||||
|
[DEFAULT]
|
||||||
|
|
||||||
|
# Type of log-file resp. log-format (file, short, journal):
|
||||||
|
logtype = file
|
||||||
|
|
||||||
|
# Daemon definition is to be specialized (if needed) in .conf file
|
||||||
|
_daemon = nginx
|
||||||
|
|
||||||
|
# Common line prefixes (beginnings) which could be used in filters
|
||||||
|
#
|
||||||
|
# [bsdverbose]? [hostname] [vserver tag] daemon_id spaces
|
||||||
|
#
|
||||||
|
# This can be optional (for instance if we match named native log files)
|
||||||
|
__prefix = <lt_<logtype>/__prefix>
|
||||||
|
|
||||||
|
__err_type = error
|
||||||
|
|
||||||
|
__prefix_line = %(__prefix)s\[%(__err_type)s\] \d+#\d+: \*\d+\s+
|
||||||
|
|
||||||
|
|
||||||
|
[lt_file]
|
||||||
|
__prefix = \s*
|
||||||
|
|
||||||
|
[lt_short]
|
||||||
|
__prefix = \s*(?:(?!\[)\S+ %(_daemon)s\[\d+\]: [^\[]*)?
|
||||||
|
|
||||||
|
[lt_journal]
|
||||||
|
__prefix = %(lt_short/__prefix)s
|
|
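A hedged sketch of how a filter that includes this common file could be switched to the journal log-format from jail.local (filter parameters in brackets, as with sshd[mode=...]; the jail name nginx-http-auth and the explicit logtype are shown only for illustration, since logtype normally follows the backend):

[nginx-http-auth]
backend = systemd
filter  = nginx-http-auth[logtype=journal]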
@ -0,0 +1,29 @@
|
||||||
|
# fail2ban filter configuration for nginx forbidden accesses
|
||||||
|
#
|
||||||
|
# If you have configured nginx to forbid some paths in your webserver, e.g.:
|
||||||
|
#
|
||||||
|
# location ~ /\. {
|
||||||
|
# deny all;
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# if a client tries to access https://yoursite/.user.ini then you will see
|
||||||
|
# in nginx error log:
|
||||||
|
#
|
||||||
|
# 2018/09/14 19:03:05 [error] 2035#2035: *9134 access forbidden by rule, client: 10.20.30.40, server: www.example.net, request: "GET /.user.ini HTTP/1.1", host: "www.example.net", referrer: "https://www.example.net"
|
||||||
|
#
|
||||||
|
# By carefully setting this filter we ban every IP that tries too many times to
|
||||||
|
# access forbidden resources.
|
||||||
|
#
|
||||||
|
# Author: Michele Bologna https://www.michelebologna.net/
|
||||||
|
|
||||||
|
[INCLUDES]
|
||||||
|
|
||||||
|
before = nginx-error-common.conf
|
||||||
|
|
||||||
|
[Definition]
|
||||||
|
failregex = ^%(__prefix_line)saccess forbidden by rule, client: <HOST>
|
||||||
|
ignoreregex =
|
||||||
|
|
||||||
|
datepattern = {^LN-BEG}
|
||||||
|
|
||||||
|
journalmatch = _SYSTEMD_UNIT=nginx.service + _COMM=nginx
|
|
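As with the other new filters, the [nginx-forbidden] jail added to jail.conf below brings the port and logpath defaults, so jail.local only needs:

[nginx-forbidden]
enabled = true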
@ -1,14 +1,23 @@
|
||||||
# fail2ban filter configuration for nginx
|
# fail2ban filter configuration for nginx
|
||||||
|
|
||||||
|
[INCLUDES]
|
||||||
|
|
||||||
|
before = nginx-error-common.conf
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
mode = normal
|
mode = normal
|
||||||
|
|
||||||
mdre-auth = ^\s*\[error\] \d+#\d+: \*\d+ user "(?:[^"]+|.*?)":? (?:password mismatch|was not found in "[^\"]*"), client: <HOST>, server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(?:, referrer: "\S+")?\s*$
|
__err_type = <_ertp-<mode>>
|
||||||
mdre-fallback = ^\s*\[crit\] \d+#\d+: \*\d+ SSL_do_handshake\(\) failed \(SSL: error:\S+(?: \S+){1,3} too (?:long|short)\)[^,]*, client: <HOST>
|
|
||||||
|
|
||||||
|
_ertp-auth = error
|
||||||
|
mdre-auth = ^%(__prefix_line)suser "(?:[^"]+|.*?)":? (?:password mismatch|was not found in "[^\"]*"), client: <HOST>, server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(?:, referrer: "\S+")?\s*$
|
||||||
|
_ertp-fallback = crit
|
||||||
|
mdre-fallback = ^%(__prefix_line)sSSL_do_handshake\(\) failed \(SSL: error:\S+(?: \S+){1,3} too (?:long|short)\)[^,]*, client: <HOST>
|
||||||
|
|
||||||
|
_ertp-normal = %(_ertp-auth)s
|
||||||
mdre-normal = %(mdre-auth)s
|
mdre-normal = %(mdre-auth)s
|
||||||
|
_ertp-aggressive = (?:%(_ertp-auth)s|%(_ertp-fallback)s)
|
||||||
mdre-aggressive = %(mdre-auth)s
|
mdre-aggressive = %(mdre-auth)s
|
||||||
%(mdre-fallback)s
|
%(mdre-fallback)s
|
||||||
|
|
||||||
|
|
|
@ -23,6 +23,10 @@
|
||||||
# ...
|
# ...
|
||||||
#
|
#
|
||||||
|
|
||||||
|
[INCLUDES]
|
||||||
|
|
||||||
|
before = nginx-error-common.conf
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
# Specify following expression to define exact zones, if you want to ban IPs limited
|
# Specify following expression to define exact zones, if you want to ban IPs limited
|
||||||
|
@ -33,13 +37,16 @@
|
||||||
#
|
#
|
||||||
ngx_limit_req_zones = [^"]+
|
ngx_limit_req_zones = [^"]+
|
||||||
|
|
||||||
|
# Depending on limit_req_log_level directive (may be: info | notice | warn | error):
|
||||||
|
__err_type = [a-z]+
|
||||||
|
|
||||||
# Use the following full expression if you want to limit matching to specified
|
# Use the following full expression if you want to limit matching to specified
|
||||||
# servers, requests, referrers etc. only:
|
# servers, requests, referrers etc. only:
|
||||||
#
|
#
|
||||||
# failregex = ^\s*\[[a-z]+\] \d+#\d+: \*\d+ limiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>, server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(, referrer: "\S+")?\s*$
|
# failregex = ^%(__prefix_line)slimiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>, server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(, referrer: "\S+")?\s*$
|
||||||
|
|
||||||
# In short, a much faster and more stable version of the regexp:
|
# In short, a much faster and more stable version of the regexp:
|
||||||
failregex = ^\s*\[[a-z]+\] \d+#\d+: \*\d+ limiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>,
|
failregex = ^%(__prefix_line)slimiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>,
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
|
|
|
@ -10,17 +10,17 @@ before = common.conf
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
_daemon = postfix(-\w+)?/\w+(?:/smtp[ds])?
|
_daemon = postfix(-\w+)?/[^/\[:\s]+(?:/smtp[ds])?
|
||||||
_port = (?::\d+)?
|
_port = (?::\d+)?
|
||||||
_pref = [A-Z]{4}
|
_pref = [A-Z]{4}
|
||||||
|
|
||||||
prefregex = ^%(__prefix_line)s<mdpr-<mode>> <F-CONTENT>.+</F-CONTENT>$
|
prefregex = ^%(__prefix_line)s<mdpr-<mode>> <F-CONTENT>.+</F-CONTENT>$
|
||||||
|
|
||||||
# Extended RE for normal mode to match reject by unknown users or undeliverable address, can be set to empty to avoid this:
|
# Extended RE for normal mode to match reject by unknown users or undeliverable address, can be set to empty to avoid this:
|
||||||
exre-user = |[Uu](?:ser unknown|ndeliverable address)
|
exre-user = |[Uu](?:ser unknown|ndeliverable address) ; pragma: codespell-ignore
|
||||||
|
|
||||||
mdpr-normal = (?:\w+: (?:milter-)?reject:|(?:improper command pipelining|too many errors) after \S+)
|
mdpr-normal = (?:\w+: (?:milter-)?reject:|(?:improper command pipelining|too many errors) after \S+)
|
||||||
mdre-normal=^%(_pref)s from [^[]*\[<HOST>\]%(_port)s: [45][50][04] [45]\.\d\.\d+ (?:(?:<[^>]*>)?: )?(?:(?:Helo command|(?:Sender|Recipient) address) rejected: )?(?:Service unavailable|(?:Client host|Command|Data command) rejected|Relay access denied|(?:Host|Domain) not found|need fully-qualified hostname|match%(exre-user)s)\b
|
mdre-normal=^%(_pref)s from [^[]*\[<HOST>\]%(_port)s: [45][50][04] [45]\.\d\.\d+ (?:(?:<[^>]*>)?: )?(?:(?:Helo command|(?:Sender|Recipient) address) rejected: )?(?:Service unavailable|Access denied|(?:Client host|Command|Data command) rejected|Relay access denied|Malformed DNS server reply|(?:Host|Domain) not found|need fully-qualified hostname|match%(exre-user)s)\b
|
||||||
^from [^[]*\[<HOST>\]%(_port)s:?
|
^from [^[]*\[<HOST>\]%(_port)s:?
|
||||||
|
|
||||||
mdpr-auth = warning:
|
mdpr-auth = warning:
|
||||||
|
@ -38,7 +38,7 @@ mdre-more = %(mdre-normal)s
|
||||||
|
|
||||||
# Includes some of the log messages described in
|
# Includes some of the log messages described in
|
||||||
# <http://www.postfix.org/POSTSCREEN_README.html>.
|
# <http://www.postfix.org/POSTSCREEN_README.html>.
|
||||||
mdpr-ddos = (?:lost connection after(?! DATA) [A-Z]+|disconnect(?= from \S+(?: \S+=\d+)* auth=0/(?:[1-9]|\d\d+))|(?:PREGREET \d+|HANGUP) after \S+|COMMAND (?:TIME|COUNT|LENGTH) LIMIT)
|
mdpr-ddos = (?:lost connection after (?!(?:DATA|AUTH)\b)[A-Z]+|disconnect(?= from \S+(?: \S+=\d+)* auth=0/(?:[1-9]|\d\d+))|(?:PREGREET \d+|HANGUP) after \S+|COMMAND (?:TIME|COUNT|LENGTH) LIMIT)
|
||||||
mdre-ddos = ^from [^[]*\[<HOST>\]%(_port)s:?
|
mdre-ddos = ^from [^[]*\[<HOST>\]%(_port)s:?
|
||||||
|
|
||||||
mdpr-extra = (?:%(mdpr-auth)s|%(mdpr-normal)s)
|
mdpr-extra = (?:%(mdpr-auth)s|%(mdpr-normal)s)
|
||||||
|
@ -76,6 +76,6 @@ ignoreregex =
|
||||||
|
|
||||||
[Init]
|
[Init]
|
||||||
|
|
||||||
journalmatch = _SYSTEMD_UNIT=postfix.service
|
journalmatch = _SYSTEMD_UNIT=postfix.service _SYSTEMD_UNIT=postfix@-.service
|
||||||
|
|
||||||
# Author: Cyril Jaquier
|
# Author: Cyril Jaquier
|
||||||
|
|
|
@ -19,7 +19,7 @@
|
||||||
# common.local
|
# common.local
|
||||||
before = common.conf
|
before = common.conf
|
||||||
|
|
||||||
[Definition]
|
[DEFAULT]
|
||||||
|
|
||||||
_daemon = (?:fail2ban(?:-server|\.actions)\s*)
|
_daemon = (?:fail2ban(?:-server|\.actions)\s*)
|
||||||
|
|
||||||
|
@ -29,10 +29,23 @@ _jailname = recidive
|
||||||
|
|
||||||
failregex = ^%(__prefix_line)s(?:\s*fail2ban\.actions\s*%(__pid_re)s?:\s+)?NOTICE\s+\[(?!%(_jailname)s\])(?:.*)\]\s+Ban\s+<HOST>\s*$
|
failregex = ^%(__prefix_line)s(?:\s*fail2ban\.actions\s*%(__pid_re)s?:\s+)?NOTICE\s+\[(?!%(_jailname)s\])(?:.*)\]\s+Ban\s+<HOST>\s*$
|
||||||
|
|
||||||
|
[lt_short]
|
||||||
|
_daemon = (?:fail2ban(?:-server|\.actions)?\s*)
|
||||||
|
failregex = ^%(__prefix_line)s(?:\s*fail2ban(?:\.actions)?\s*%(__pid_re)s?:\s+)?(?:NOTICE\s+)?\[(?!%(_jailname)s\])(?:.*)\]\s+Ban\s+<HOST>\s*$
|
||||||
|
|
||||||
|
[lt_journal]
|
||||||
|
_daemon = <lt_short/_daemon>
|
||||||
|
failregex = <lt_short/failregex>
|
||||||
|
|
||||||
|
[Definition]
|
||||||
|
|
||||||
|
_daemon = <lt_<logtype>/_daemon>
|
||||||
|
failregex = <lt_<logtype>/failregex>
|
||||||
|
|
||||||
datepattern = ^{DATE}
|
datepattern = ^{DATE}
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
journalmatch = _SYSTEMD_UNIT=fail2ban.service PRIORITY=5
|
journalmatch = _SYSTEMD_UNIT=fail2ban.service
|
||||||
|
|
||||||
# Author: Tom Hendrikx, modifications by Amir Caspi
|
# Author: Tom Hendrikx, modifications by Amir Caspi
|
||||||
|
|
|
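With the new lt_short/lt_journal variants, a jail reading bans from the journal instead of fail2ban.log could be sketched as follows (logtype is normally derived from the backend; it is spelled out here only to make the switch explicit):

[recidive]
backend = systemd
filter  = recidive[logtype=journal]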
@ -0,0 +1,10 @@
|
||||||
|
# Fail2Ban filter for failure attempts in MikroTik RouterOS
|
||||||
|
#
|
||||||
|
#
|
||||||
|
|
||||||
|
[Definition]
|
||||||
|
|
||||||
|
failregex = ^\s*\S+ system,error,critical login failure for user <F-USER>.*?</F-USER> from <ADDR> via \S+$
|
||||||
|
|
||||||
|
# Author: Vit Kabele <vit@kabele.me>
|
||||||
|
|
|
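The [routeros-auth] jail added to jail.conf below (port ssh,http,https, logpath /var/log/MikroTik/router.log) pairs with this filter, so enabling it is just:

[routeros-auth]
enabled = true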
@ -14,7 +14,7 @@
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
failregex = ^type=%(_type)s msg=audit\(:\d+\): (user )?pid=\d+ uid=%(_uid)s auid=%(_auid)s ses=\d+ subj=%(_subj)s msg='%(_msg)s'$
|
failregex = ^type=%(_type)s msg=audit\(:\d+\): (?:user )?pid=\d+ uid=%(_uid)s auid=%(_auid)s ses=\d+ subj=%(_subj)s msg='%(_msg)s'(?:\x1D|$)
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
|
|
|
@ -15,11 +15,13 @@ _subj = (?:unconfined_u|system_u):system_r:sshd_t:s0-s0:c0\.c1023
|
||||||
_exe =/usr/sbin/sshd
|
_exe =/usr/sbin/sshd
|
||||||
_terminal = ssh
|
_terminal = ssh
|
||||||
|
|
||||||
_msg = op=\S+ acct=(?P<_quote_acct>"?)\S+(?P=_quote_acct) exe="%(_exe)s" hostname=(\?|(\d+\.){3}\d+) addr=<HOST> terminal=%(_terminal)s res=failed
|
_anygrp = (?!acct=|exe=|addr=|terminal=|res=)\w+=(?:"[^"]+"|\S*)
|
||||||
|
|
||||||
|
_msg = (?:%(_anygrp)s )*acct=(?:"<F-USER>[^"]+</F-USER>"|<F-ALT_USER>\S+</F-ALT_USER>) exe="%(_exe)s" (?:%(_anygrp)s )*addr=<ADDR> terminal=%(_terminal)s res=failed
|
||||||
|
|
||||||
# DEV Notes:
|
# DEV Notes:
|
||||||
#
|
#
|
||||||
# Note: USER_LOGIN is ignored as this is the duplicate messsage
|
# Note: USER_LOGIN is ignored as this is the duplicate message
|
||||||
# ssh logs after 3 USER_AUTH failures.
|
# ssh logs after 3 USER_AUTH failures.
|
||||||
#
|
#
|
||||||
# Author: Daniel Black
|
# Author: Daniel Black
|
||||||
|
|
|
@ -13,13 +13,11 @@ before = common.conf
|
||||||
|
|
||||||
_daemon = slapd
|
_daemon = slapd
|
||||||
|
|
||||||
failregex = ^(?P<__prefix>%(__prefix_line)s)conn=(?P<_conn_>\d+) fd=\d+ ACCEPT from IP=<HOST>:\d{1,5} \(IP=\S+\)\s*<SKIPLINES>(?P=__prefix)conn=(?P=_conn_) op=\d+ RESULT(?:\s(?!err)\S+=\S*)* err=49 text=[\w\s]*$
|
prefregex = ^%(__prefix_line)sconn=<F-MLFID>\d+</F-MLFID>(?: (?:fd|op)=\d+){0,2} (?=ACCEPT|RESULT)<F-CONTENT>.+</F-CONTENT>$
|
||||||
|
|
||||||
|
failregex = ^<F-NOFAIL>ACCEPT</F-NOFAIL> from IP=<ADDR>:\d{1,5}\s+
|
||||||
|
^RESULT(?:\s(?!err)\S+=\S*)* err=49\b
|
||||||
|
|
||||||
ignoreregex =
|
ignoreregex =
|
||||||
|
|
||||||
[Init]
|
# Author: Andrii Melnyk, Sergey G. Brester
|
||||||
|
|
||||||
# "maxlines" is number of log lines to buffer for multi-line regex searches
|
|
||||||
maxlines = 20
|
|
||||||
|
|
||||||
# Author: Andrii Melnyk
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Fail2ban filter for SOGo authentcation
|
# Fail2ban filter for SOGo authentication
|
||||||
#
|
#
|
||||||
# Log file usually in /var/log/sogo/sogo.log
|
# Log file usually in /var/log/sogo/sogo.log
|
||||||
|
|
||||||
|
|
|
@ -24,8 +24,8 @@ __pref = (?:(?:error|fatal): (?:PAM: )?)?
|
||||||
#__suff = (?: port \d+)?(?: \[preauth\])?\s*
|
#__suff = (?: port \d+)?(?: \[preauth\])?\s*
|
||||||
__suff = (?: (?:port \d+|on \S+|\[preauth\])){0,3}\s*
|
__suff = (?: (?:port \d+|on \S+|\[preauth\])){0,3}\s*
|
||||||
__on_port_opt = (?: (?:port \d+|on \S+)){0,2}
|
__on_port_opt = (?: (?:port \d+|on \S+)){0,2}
|
||||||
# close by authenticating user:
|
# close by authenticating user (don't use <HOST> after %(__authng_user)s because of catch-all `.*?`):
|
||||||
__authng_user = (?: (?:invalid|authenticating) user <F-USER>\S+|.*?</F-USER>)?
|
__authng_user = (?: (?:by|from))?(?: (?:invalid|authenticating) user <F-USER>\S+|.*?</F-USER>)?(?: from)?
|
||||||
|
|
||||||
# for all possible (also future) forms of "no matching (cipher|mac|MAC|compression method|key exchange method|host key type) found",
|
# for all possible (also future) forms of "no matching (cipher|mac|MAC|compression method|key exchange method|host key type) found",
|
||||||
# see ssherr.c for all possible SSH_ERR_..._ALG_MATCH errors.
|
# see ssherr.c for all possible SSH_ERR_..._ALG_MATCH errors.
|
||||||
|
@ -38,21 +38,21 @@ __pam_auth = pam_[a-z]+
|
||||||
|
|
||||||
prefregex = ^<F-MLFID>%(__prefix_line)s</F-MLFID>%(__pref)s<F-CONTENT>.+</F-CONTENT>$
|
prefregex = ^<F-MLFID>%(__prefix_line)s</F-MLFID>%(__pref)s<F-CONTENT>.+</F-CONTENT>$
|
||||||
|
|
||||||
cmnfailre = ^[aA]uthentication (?:failure|error|failed) for <F-USER>.*</F-USER> from <HOST>( via \S+)?%(__suff)s$
|
cmnfailre = ^[aA]uthentication (?:failure|error|failed) for <F-USER>.*?</F-USER> (?:from )?<HOST>( via \S+)?%(__suff)s$
|
||||||
^User not known to the underlying authentication module for <F-USER>.*</F-USER> from <HOST>%(__suff)s$
|
^User not known to the underlying authentication module for <F-USER>.*?</F-USER> (?:from )?<HOST>%(__suff)s$
|
||||||
<cmnfailre-failed-pub-<publickey>>
|
<cmnfailre-failed-pub-<publickey>>
|
||||||
^Failed <cmnfailed> for (?P<cond_inv>invalid user )?<F-USER>(?P<cond_user>\S+)|(?(cond_inv)(?:(?! from ).)*?|[^:]+)</F-USER> from <HOST>%(__on_port_opt)s(?: ssh\d*)?(?(cond_user): |(?:(?:(?! from ).)*)$)
|
^Failed <cmnfailed> for (?P<cond_inv>invalid user )?<F-USER>(?P<cond_user>\S+)|(?(cond_inv)(?:(?! from ).)*?|[^:]+)</F-USER> from <HOST>%(__on_port_opt)s(?: ssh\d*)?(?(cond_user): |(?:(?:(?! from ).)*)$)
|
||||||
^<F-USER>ROOT</F-USER> LOGIN REFUSED FROM <HOST>
|
^<F-USER>ROOT</F-USER> LOGIN REFUSED FROM <HOST>
|
||||||
^[iI](?:llegal|nvalid) user <F-USER>.*?</F-USER> from <HOST>%(__suff)s$
|
^[iI](?:llegal|nvalid) user <F-USER>.*?</F-USER> (?:from )?<HOST>%(__suff)s$
|
||||||
^User <F-USER>\S+|.*?</F-USER> from <HOST> not allowed because not listed in AllowUsers%(__suff)s$
|
^User <F-USER>\S+|.*?</F-USER> (?:from )?<HOST> not allowed because not listed in AllowUsers%(__suff)s$
|
||||||
^User <F-USER>\S+|.*?</F-USER> from <HOST> not allowed because listed in DenyUsers%(__suff)s$
|
^User <F-USER>\S+|.*?</F-USER> (?:from )?<HOST> not allowed because listed in DenyUsers%(__suff)s$
|
||||||
^User <F-USER>\S+|.*?</F-USER> from <HOST> not allowed because not in any group%(__suff)s$
|
^User <F-USER>\S+|.*?</F-USER> (?:from )?<HOST> not allowed because not in any group%(__suff)s$
|
||||||
^refused connect from \S+ \(<HOST>\)
|
^refused connect from \S+ \(<HOST>\)
|
||||||
^Received <F-MLFFORGET>disconnect</F-MLFFORGET> from <HOST>%(__on_port_opt)s:\s*3: .*: Auth fail%(__suff)s$
|
^Received <F-MLFFORGET>disconnect</F-MLFFORGET> from <HOST>%(__on_port_opt)s:\s*3: .*: Auth fail%(__suff)s$
|
||||||
^User <F-USER>\S+|.*?</F-USER> from <HOST> not allowed because a group is listed in DenyGroups%(__suff)s$
|
^User <F-USER>\S+|.*?</F-USER> (?:from )?<HOST> not allowed because a group is listed in DenyGroups%(__suff)s$
|
||||||
^User <F-USER>\S+|.*?</F-USER> from <HOST> not allowed because none of user's groups are listed in AllowGroups%(__suff)s$
|
^User <F-USER>\S+|.*?</F-USER> (?:from )?<HOST> not allowed because none of user's groups are listed in AllowGroups%(__suff)s$
|
||||||
^<F-NOFAIL>%(__pam_auth)s\(sshd:auth\):\s+authentication failure;</F-NOFAIL>(?:\s+(?:(?:logname|e?uid|tty)=\S*)){0,4}\s+ruser=<F-ALT_USER>\S*</F-ALT_USER>\s+rhost=<HOST>(?:\s+user=<F-USER>\S*</F-USER>)?%(__suff)s$
|
^<F-NOFAIL>%(__pam_auth)s\(sshd:auth\):\s+authentication failure;</F-NOFAIL>(?:\s+(?:(?:logname|e?uid|tty)=\S*)){0,4}\s+ruser=<F-ALT_USER>\S*</F-ALT_USER>\s+rhost=<HOST>(?:\s+user=<F-USER>\S*</F-USER>)?%(__suff)s$
|
||||||
^maximum authentication attempts exceeded for <F-USER>.*</F-USER> from <HOST>%(__on_port_opt)s(?: ssh\d*)?%(__suff)s$
|
^maximum authentication attempts exceeded for (?:invalid user )?<F-USER>.*?</F-USER> (?:from )?<HOST>%(__on_port_opt)s(?: ssh\d*)?%(__suff)s$
|
||||||
^User <F-USER>\S+|.*?</F-USER> not allowed because account is locked%(__suff)s
|
^User <F-USER>\S+|.*?</F-USER> not allowed because account is locked%(__suff)s
|
||||||
^<F-MLFFORGET>Disconnecting</F-MLFFORGET>(?: from)?(?: (?:invalid|authenticating)) user <F-USER>\S+</F-USER> <HOST>%(__on_port_opt)s:\s*Change of username or service not allowed:\s*.*\[preauth\]\s*$
|
^<F-MLFFORGET>Disconnecting</F-MLFFORGET>(?: from)?(?: (?:invalid|authenticating)) user <F-USER>\S+</F-USER> <HOST>%(__on_port_opt)s:\s*Change of username or service not allowed:\s*.*\[preauth\]\s*$
|
||||||
^Disconnecting: Too many authentication failures(?: for <F-USER>\S+|.*?</F-USER>)?%(__suff)s$
|
^Disconnecting: Too many authentication failures(?: for <F-USER>\S+|.*?</F-USER>)?%(__suff)s$
|
||||||
|
@ -68,24 +68,25 @@ cmnfailed = <cmnfailed-<publickey>>
|
||||||
|
|
||||||
mdre-normal =
|
mdre-normal =
|
||||||
# used to differentiate "connection closed" with and without `[preauth]` (fail/nofail cases in ddos mode)
|
# used to differentiate "connection closed" with and without `[preauth]` (fail/nofail cases in ddos mode)
|
||||||
mdre-normal-other = ^<F-NOFAIL><F-MLFFORGET>(Connection (?:closed|reset)|Disconnected)</F-MLFFORGET></F-NOFAIL> (?:by|from)%(__authng_user)s <HOST>(?:%(__suff)s|\s*)$
|
mdre-normal-other = ^<F-NOFAIL><F-MLFFORGET>(?:Connection (?:closed|reset)|Disconnect(?:ed|ing))</F-MLFFORGET></F-NOFAIL>%(__authng_user)s <ADDR>%(__on_port_opt)s(?:: (?!Too many authentication failures)[^\[]+)?(?: \[preauth\])?\s*$
|
||||||
|
|
||||||
mdre-ddos = ^Did not receive identification string from <HOST>
|
mdre-ddos = ^(?:Did not receive identification string from|Timeout before authentication for) <HOST>
|
||||||
^kex_exchange_identification: (?:read: )?(?:[Cc]lient sent invalid protocol identifier|[Cc]onnection (?:closed by remote host|reset by peer))
|
^kex_exchange_identification: (?:read: )?(?:[Cc]lient sent invalid protocol identifier|[Cc]onnection (?:closed by remote host|reset by peer))
|
||||||
^Bad protocol version identification '.*' from <HOST>
|
^Bad protocol version identification '(?:[^']|.*?)' (?:from )?<HOST>%(__suff)s$
|
||||||
^<F-NOFAIL>SSH: Server;Ltype:</F-NOFAIL> (?:Authname|Version|Kex);Remote: <HOST>-\d+;[A-Z]\w+:
|
^<F-NOFAIL>SSH: Server;Ltype:</F-NOFAIL> (?:Authname|Version|Kex);Remote: <HOST>-\d+;[A-Z]\w+:
|
||||||
^Read from socket failed: Connection <F-MLFFORGET>reset</F-MLFFORGET> by peer
|
^Read from socket failed: Connection <F-MLFFORGET>reset</F-MLFFORGET> by peer
|
||||||
^banner exchange: Connection from <HOST><__on_port_opt>: invalid format
|
^(?:banner exchange|ssh_dispatch_run_fatal): Connection from <HOST><__on_port_opt>: (?:invalid format|(?:message authentication code incorrect|[Cc]onnection corrupted) \[preauth\])
|
||||||
|
|
||||||
# same as mdre-normal-other, but as failure (without <F-NOFAIL> with [preauth] and with <F-NOFAIL> on no preauth phase as helper to identify address):
|
# same as mdre-normal-other, but as failure (without <F-NOFAIL> with [preauth] and with <F-NOFAIL> on no preauth phase as helper to identify address):
|
||||||
mdre-ddos-other = ^<F-MLFFORGET>(Connection (?:closed|reset)|Disconnected)</F-MLFFORGET> (?:by|from)%(__authng_user)s <HOST>%(__on_port_opt)s\s+\[preauth\]\s*$
|
mdre-ddos-other = ^<F-MLFFORGET>(?:Connection (?:closed|reset)|Disconnect(?:ed|ing))</F-MLFFORGET>%(__authng_user)s <ADDR>%(__on_port_opt)s(?:: (?!Too many authentication failures)[^\[]+)?\s+\[preauth\]\s*$
|
||||||
^<F-NOFAIL><F-MLFFORGET>(Connection (?:closed|reset)|Disconnected)</F-MLFFORGET></F-NOFAIL> (?:by|from)%(__authng_user)s <HOST>(?:%(__on_port_opt)s|\s*)$
|
^<F-NOFAIL><F-MLFFORGET>(?:Connection (?:closed|reset)|Disconnect(?:ed|ing))</F-MLFFORGET></F-NOFAIL>%(__authng_user)s <ADDR>(?:%(__on_port_opt)s(?:: (?!Too many authentication failures)[^\[]+)?|\s*)$
|
||||||
|
|
||||||
mdre-extra = ^Received <F-MLFFORGET>disconnect</F-MLFFORGET> from <HOST>%(__on_port_opt)s:\s*14: No(?: supported)? authentication methods available
|
mdre-extra = ^Received <F-MLFFORGET>disconnect</F-MLFFORGET> from <HOST>%(__on_port_opt)s:\s*14: No(?: supported)? authentication methods available
|
||||||
^Unable to negotiate with <HOST>%(__on_port_opt)s: no matching <__alg_match> found.
|
^Unable to negotiate with <HOST>%(__on_port_opt)s: no matching <__alg_match> found.
|
||||||
^Unable to negotiate a <__alg_match>
|
^Unable to negotiate a <__alg_match>
|
||||||
^no matching <__alg_match> found:
|
^no matching <__alg_match> found:
|
||||||
# part of mdre-ddos-other, but user name is supplied (invalid/authenticating) on [preauth] phase only:
|
# part of mdre-ddos-other, but user name is supplied (invalid/authenticating) on [preauth] phase only:
|
||||||
mdre-extra-other = ^<F-MLFFORGET>Disconnected</F-MLFFORGET>(?: from)?(?: (?:invalid|authenticating)) user <F-USER>\S+|.*?</F-USER> <HOST>%(__on_port_opt)s \[preauth\]\s*$
|
mdre-extra-other = ^<F-MLFFORGET>Disconnected</F-MLFFORGET>(?: from)?(?: (?:invalid|authenticating)) user <F-USER>\S+|.*?</F-USER> (?:from )?<HOST>%(__on_port_opt)s \[preauth\]\s*$
|
||||||
|
|
||||||
mdre-aggressive = %(mdre-ddos)s
|
mdre-aggressive = %(mdre-ddos)s
|
||||||
%(mdre-extra)s
|
%(mdre-extra)s
|
||||||
|
|
|
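The mdre-ddos/mdre-extra patterns above are only active in the corresponding filter modes; assuming the sshd jail passes mode through to the filter as in stock jail.conf, a jail.local override selecting the combined mode would be:

[sshd]
mode = aggressive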
@ -5,7 +5,7 @@
|
||||||
#
|
#
|
||||||
# To use 'traefik-auth' filter you have to configure your Traefik instance to write
|
# To use 'traefik-auth' filter you have to configure your Traefik instance to write
|
||||||
# the access logs as describe in https://docs.traefik.io/configuration/logs/#access-logs
|
# the access logs as describe in https://docs.traefik.io/configuration/logs/#access-logs
|
||||||
# into a log file on host and specifiy users for Basic Authentication
|
# into a log file on host and specify users for Basic Authentication
|
||||||
# https://docs.traefik.io/configuration/entrypoints/#basic-authentication
|
# https://docs.traefik.io/configuration/entrypoints/#basic-authentication
|
||||||
#
|
#
|
||||||
# Example:
|
# Example:
|
||||||
|
@ -51,7 +51,7 @@
|
||||||
|
|
||||||
[Definition]
|
[Definition]
|
||||||
|
|
||||||
# Parameter "method" can be used to specifiy request method
|
# Parameter "method" can be used to specify request method
|
||||||
req-method = \S+
|
req-method = \S+
|
||||||
# Usage example (for jail.local):
|
# Usage example (for jail.local):
|
||||||
# filter = traefik-auth[req-method="GET|POST|HEAD"]
|
# filter = traefik-auth[req-method="GET|POST|HEAD"]
|
||||||
|
|
|
@ -97,7 +97,9 @@ before = paths-debian.conf
|
||||||
# ignorecommand = /path/to/command <ip>
|
# ignorecommand = /path/to/command <ip>
|
||||||
ignorecommand =
|
ignorecommand =
|
||||||
|
|
||||||
# "bantime" is the number of seconds that a host is banned.
|
# "bantime" is the amount of time that a host is banned, integer in seconds or
|
||||||
|
# time abbreviation format (m - minutes, h - hours, d - days, w - weeks, mo - months, y - years).
|
||||||
|
# This is considered the initial time if bantime.increment gets enabled.
|
||||||
bantime = 10m
|
bantime = 10m
|
||||||
|
|
||||||
# A host is banned if it has generated "maxretry" during the last "findtime"
|
# A host is banned if it has generated "maxretry" during the last "findtime"
|
||||||
|
@ -111,19 +113,17 @@ maxretry = 5
|
||||||
maxmatches = %(maxretry)s
|
maxmatches = %(maxretry)s
|
||||||
|
|
||||||
# "backend" specifies the backend used to get files modification.
|
# "backend" specifies the backend used to get files modification.
|
||||||
# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
|
# Available options are "pyinotify", "polling", "systemd" and "auto".
|
||||||
# This option can be overridden in each jail as well.
|
# This option can be overridden in each jail as well.
|
||||||
#
|
#
|
||||||
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
|
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
|
||||||
# If pyinotify is not installed, Fail2ban will use auto.
|
# If pyinotify is not installed, Fail2ban will use auto.
|
||||||
# gamin: requires Gamin (a file alteration monitor) to be installed.
|
|
||||||
# If Gamin is not installed, Fail2ban will use auto.
|
|
||||||
# polling: uses a polling algorithm which does not require external libraries.
|
# polling: uses a polling algorithm which does not require external libraries.
|
||||||
# systemd: uses systemd python library to access the systemd journal.
|
# systemd: uses systemd python library to access the systemd journal.
|
||||||
# Specifying "logpath" is not valid for this backend.
|
# Specifying "logpath" is not valid for this backend.
|
||||||
# See "journalmatch" in the jails associated filter config
|
# See "journalmatch" in the jails associated filter config
|
||||||
# auto: will try to use the following backends, in order:
|
# auto: will try to use the following backends, in order:
|
||||||
# pyinotify, gamin, polling.
|
# pyinotify, polling.
|
||||||
#
|
#
|
||||||
# Note: if systemd backend is chosen as the default but you enable a jail
|
# Note: if systemd backend is chosen as the default but you enable a jail
|
||||||
# for which logs are present only in its own log files, specify some other
|
# for which logs are present only in its own log files, specify some other
|
||||||
|
@ -395,6 +395,10 @@ logpath = %(nginx_error_log)s
|
||||||
port = http,https
|
port = http,https
|
||||||
logpath = %(nginx_access_log)s
|
logpath = %(nginx_access_log)s
|
||||||
|
|
||||||
|
[nginx-forbidden]
|
||||||
|
port = http,https
|
||||||
|
logpath = %(nginx_error_log)s
|
||||||
|
|
||||||
# Ban attackers that try to use PHP's URL-fopen() functionality
|
# Ban attackers that try to use PHP's URL-fopen() functionality
|
||||||
# through GET/POST variables. - Experimental, with more than a year
|
# through GET/POST variables. - Experimental, with more than a year
|
||||||
# of usage in production environments.
|
# of usage in production environments.
|
||||||
|
@ -958,6 +962,9 @@ port = http,https
|
||||||
logpath = %(syslog_authpriv)s
|
logpath = %(syslog_authpriv)s
|
||||||
backend = %(syslog_backend)s
|
backend = %(syslog_backend)s
|
||||||
|
|
||||||
|
[routeros-auth]
|
||||||
|
port = ssh,http,https
|
||||||
|
logpath = /var/log/MikroTik/router.log
|
||||||
|
|
||||||
[zoneminder]
|
[zoneminder]
|
||||||
# Zoneminder HTTP/HTTPS web interface auth
|
# Zoneminder HTTP/HTTPS web interface auth
|
||||||
|
@ -978,3 +985,8 @@ banaction = %(banaction_allports)s
|
||||||
[monitorix]
|
[monitorix]
|
||||||
port = 8080
|
port = 8080
|
||||||
logpath = /var/log/monitorix-httpd
|
logpath = /var/log/monitorix-httpd
|
||||||
|
|
||||||
|
[dante]
|
||||||
|
port = 1080
|
||||||
|
logpath = %(syslog_daemon)s
|
||||||
|
|
||||||
|
|
|
@ -67,7 +67,7 @@ proftpd_backend = %(default_backend)s
|
||||||
pureftpd_log = %(syslog_ftp)s
|
pureftpd_log = %(syslog_ftp)s
|
||||||
pureftpd_backend = %(default_backend)s
|
pureftpd_backend = %(default_backend)s
|
||||||
|
|
||||||
# ftp, daemon and then local7 are tried at configure time however it is overwriteable at configure time
|
# ftp, daemon and then local7 are tried at configure time however it is overwritable at configure time
|
||||||
#
|
#
|
||||||
wuftpd_log = %(syslog_ftp)s
|
wuftpd_log = %(syslog_ftp)s
|
||||||
wuftpd_backend = %(default_backend)s
|
wuftpd_backend = %(default_backend)s
|
||||||
|
|
|
@ -1,7 +0,0 @@
|
||||||
fail2ban.server.filtergamin module
|
|
||||||
==================================
|
|
||||||
|
|
||||||
.. automodule:: fail2ban.server.filtergamin
|
|
||||||
:members:
|
|
||||||
:undoc-members:
|
|
||||||
:show-inheritance:
|
|
|
@ -13,7 +13,6 @@ fail2ban.server package
|
||||||
fail2ban.server.failmanager
|
fail2ban.server.failmanager
|
||||||
fail2ban.server.failregex
|
fail2ban.server.failregex
|
||||||
fail2ban.server.filter
|
fail2ban.server.filter
|
||||||
fail2ban.server.filtergamin
|
|
||||||
fail2ban.server.filterpoll
|
fail2ban.server.filterpoll
|
||||||
fail2ban.server.filterpyinotify
|
fail2ban.server.filterpyinotify
|
||||||
fail2ban.server.filtersystemd
|
fail2ban.server.filtersystemd
|
||||||
|
|
|
@ -1,14 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# This script carries out conversion of fail2ban to python3
|
|
||||||
# A backup of any converted files are created with ".bak"
|
|
||||||
# extension
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
if 2to3 -w --no-diffs bin/* fail2ban;then
|
|
||||||
echo "Success!" >&2
|
|
||||||
exit 0
|
|
||||||
else
|
|
||||||
echo "Fail!" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
|
@ -89,11 +89,11 @@ class ActionReader(DefinitionInitConfigReader):
|
||||||
stream = list()
|
stream = list()
|
||||||
stream.append(head + ["addaction", self._name])
|
stream.append(head + ["addaction", self._name])
|
||||||
multi = []
|
multi = []
|
||||||
for opt, optval in opts.iteritems():
|
for opt, optval in opts.items():
|
||||||
if opt in self._configOpts and not opt.startswith('known/'):
|
if opt in self._configOpts and not opt.startswith('known/'):
|
||||||
multi.append([opt, optval])
|
multi.append([opt, optval])
|
||||||
if self._initOpts:
|
if self._initOpts:
|
||||||
for opt, optval in self._initOpts.iteritems():
|
for opt, optval in self._initOpts.items():
|
||||||
if opt not in self._configOpts and not opt.startswith('known/'):
|
if opt not in self._configOpts and not opt.startswith('known/'):
|
||||||
multi.append([opt, optval])
|
multi.append([opt, optval])
|
||||||
if len(multi) > 1:
|
if len(multi) > 1:
|
||||||
|
|
|
@ -71,24 +71,78 @@ class Beautifier:
         elif inC[0] == "echo":
             msg = ' '.join(msg)
         elif inC[0:1] == ['status']:
-            if len(inC) > 1:
-                # Display information
-                msg = ["Status for the jail: %s" % inC[1]]
+            def jail_stat(response, pref=""):
+                # Display jail information
                 for n, res1 in enumerate(response):
-                    prefix1 = "`-" if n == len(response) - 1 else "|-"
+                    prefix1 = pref + ("`-" if n == len(response) - 1 else "|-")
                     msg.append("%s %s" % (prefix1, res1[0]))
-                    prefix1 = "   " if n == len(response) - 1 else "|  "
+                    prefix1 = pref + ("   " if n == len(response) - 1 else "|  ")
                     for m, res2 in enumerate(res1[1]):
                         prefix2 = prefix1 + ("`-" if m == len(res1[1]) - 1 else "|-")
                         val = " ".join(map(str, res2[1])) if isinstance(res2[1], list) else res2[1]
                         msg.append("%s %s:\t%s" % (prefix2, res2[0], val))
+            if len(inC) > 1 and inC[1] != "--all":
+                msg = ["Status for the jail: %s" % inC[1]]
+                jail_stat(response)
             else:
+                jstat = None
+                if len(inC) > 1: # --all
+                    jstat = response[-1]
+                    response = response[:-1]
                 msg = ["Status"]
                 for n, res1 in enumerate(response):
-                    prefix1 = "`-" if n == len(response) - 1 else "|-"
+                    prefix1 = "`-" if not jstat and n == len(response) - 1 else "|-"
                     val = " ".join(map(str, res1[1])) if isinstance(res1[1], list) else res1[1]
                     msg.append("%s %s:\t%s" % (prefix1, res1[0], val))
+                if jstat:
+                    msg.append("`- Status for the jails:")
+                    i = 0
+                    for n, j in jstat.items():
+                        i += 1
+                        prefix1 = "`-" if i == len(jstat) else "|-"
+                        msg.append("   %s Jail: %s" % (prefix1, n))
+                        jail_stat(j, "      " if i == len(jstat) else "   |  ")
             msg = "\n".join(msg)
+        elif inC[0:1] == ['stats'] or inC[0:1] == ['statistics']:
+            def _statstable(response):
+                tophead = ["Jail", "Backend", "Filter", "Actions"]
+                headers = ["", "", "cur", "tot", "cur", "tot"]
+                minlens = [8, 8, 3, 3, 3, 3]
+                ralign = [0, 0, 1, 1, 1, 1]
+                rows = [[n, r[0], *r[1], *r[2]] for n, r in response.items()]
+                lens = []
+                for i in range(len(rows[0])):
+                    col = (len(str(s[i])) for s in rows)
+                    lens.append(max(minlens[i], max(col)))
+                rfmt = []
+                hfmt = []
+                for i in range(len(rows[0])):
+                    f = "%%%ds" if ralign[i] else "%%-%ds"
+                    rfmt.append(f % lens[i])
+                    hfmt.append(f % lens[i])
+                rfmt = [rfmt[0], rfmt[1], "%s \u2502 %s" % (rfmt[2], rfmt[3]), "%s \u2502 %s" % (rfmt[4], rfmt[5])]
+                hfmt = [hfmt[0], hfmt[1], "%s \u2502 %s" % (hfmt[2], hfmt[3]), "%s \u2502 %s" % (hfmt[4], hfmt[5])]
+                tlens = [lens[0], lens[1], 3 + lens[2] + lens[3], 3 + lens[4] + lens[5]]
+                tfmt = [hfmt[0], hfmt[1], "%%-%ds" % (tlens[2],), "%%-%ds" % (tlens[3],)]
+                tsep = tfmt[0:2]
+                rfmt = " \u2551 ".join(rfmt)
+                hfmt = " \u2551 ".join(hfmt)
+                tfmt = " \u2551 ".join(tfmt)
+                tsep = " \u2551 ".join(tsep)
+                separator = ((tsep % tuple(tophead[0:2])) + " \u255F\u2500" +
+                    ("\u2500\u256B\u2500".join(['\u2500' * n for n in tlens[2:]])) + '\u2500')
+                ret = []
+                ret.append(tfmt % tuple(["", ""]+tophead[2:]))
+                ret.append(separator)
+                ret.append(hfmt % tuple(headers))
+                separator = "\u2550\u256C\u2550".join(['\u2550' * n for n in tlens]) + '\u2550'
+                ret.append(separator)
+                for row in rows:
+                    ret.append(rfmt % tuple(row))
+                separator = "\u2550\u2569\u2550".join(['\u2550' * n for n in tlens]) + '\u2550'
+                ret.append(separator)
+                return ret
+            msg = "\n".join(_statstable(response))
         elif len(inC) < 2:
             pass # to few cmd args for below
         elif inC[1] == "syslogsocket":
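For orientation, the new `stats` table above derives every column width from the data before rendering with box-drawing characters. A minimal standalone sketch of that sizing idea (illustrative only, not fail2ban code; the sample jail values are invented):

    rows = [["sshd", "systemd", 2, 7], ["nginx-http-auth", "polling", 0, 3]]
    heads = ["Jail", "Backend", "cur", "tot"]
    # width of each column = longest cell in it (header included)
    lens = [max(len(str(v)) for v in col) for col in zip(heads, *rows)]
    fmt = " \u2551 ".join("%%-%ds" % n for n in lens)            # one format per column
    sep = "\u2550\u256C\u2550".join("\u2550" * n for n in lens)  # double-line separator
    print(fmt % tuple(heads))
    print(sep)
    for row in rows:
        print(fmt % tuple(row))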
@ -29,15 +29,13 @@ import re
 import sys
 from ..helpers import getLogger
 
-if sys.version_info >= (3,): # pragma: 2.x no cover
-    # SafeConfigParser deprecated from Python 3.2 (renamed to ConfigParser)
-    from configparser import ConfigParser as SafeConfigParser, BasicInterpolation, \
-        InterpolationMissingOptionError, NoOptionError, NoSectionError
+# SafeConfigParser deprecated from Python 3.2 (renamed to ConfigParser)
+from configparser import ConfigParser as SafeConfigParser, BasicInterpolation, \
+    InterpolationMissingOptionError, NoOptionError, NoSectionError
 
 # And interpolation of __name__ was simply removed, thus we need to
 # decorate default interpolator to handle it
 class BasicInterpolationWithName(BasicInterpolation):
     """Decorator to bring __name__ interpolation back.
 
     Original handling of __name__ was removed because of
@ -61,17 +59,6 @@ if sys.version_info >= (3,): # pragma: 2.x no cover
             return super(BasicInterpolationWithName, self)._interpolate_some(
                 parser, option, accum, rest, section, map, *args, **kwargs)
 
-else: # pragma: 3.x no cover
-    from ConfigParser import SafeConfigParser, \
-        InterpolationMissingOptionError, NoOptionError, NoSectionError
-
-    # Interpolate missing known/option as option from default section
-    SafeConfigParser._cp_interpolate_some = SafeConfigParser._interpolate_some
-    def _interpolate_some(self, option, accum, rest, section, map, *args, **kwargs):
-        # try to wrap section options like %(section/option)s:
-        self._map_section_options(section, option, rest, map)
-        return self._cp_interpolate_some(option, accum, rest, section, map, *args, **kwargs)
-    SafeConfigParser._interpolate_some = _interpolate_some
-
 
 def _expandConfFilesWithLocal(filenames):
     """Expands config files with local extension.
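The removed Python 2 branch above patched `SafeConfigParser._interpolate_some` directly; on Python 3 the same effect comes from subclassing `BasicInterpolation` (as `BasicInterpolationWithName` does). A rough sketch of that decoration approach using only the documented `before_get` hook, not the fail2ban implementation:

    from configparser import ConfigParser, BasicInterpolation

    class InterpolationWithName(BasicInterpolation):
        def before_get(self, parser, section, option, value, defaults):
            # re-expose the section name under the removed __name__ key
            defaults = dict(defaults, __name__=section)
            return super().before_get(parser, section, option, value, defaults)

    cfg = ConfigParser(interpolation=InterpolationWithName())
    cfg.read_string("[sshd]\nfailregex = ^%(__name__)s error\n")
    print(cfg.get("sshd", "failregex"))   # -> ^sshd error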
@ -129,7 +116,6 @@ after = 1.conf
 
     CONDITIONAL_RE = re.compile(r"^(\w+)(\?.+)$")
 
-    if sys.version_info >= (3,2):
     # overload constructor only for fancy new Python3's
     def __init__(self, share_config=None, *args, **kwargs):
         kwargs = kwargs.copy()
@ -139,11 +125,6 @@ after = 1.conf
             *args, **kwargs)
         self._cfg_share = share_config
 
-    else:
-        def __init__(self, share_config=None, *args, **kwargs):
-            SafeConfigParser.__init__(self, *args, **kwargs)
-            self._cfg_share = share_config
-
     def get_ex(self, section, option, raw=False, vars={}):
         """Get an option value for a given section.
 
@ -327,7 +308,7 @@ after = 1.conf
             # mix it with defaults:
             return set(opts.keys()) | set(self._defaults)
         # only own option names:
-        return opts.keys()
+        return list(opts.keys())
 
     def read(self, filenames, get_includes=True):
         if not isinstance(filenames, list):
@ -356,7 +337,7 @@ after = 1.conf
                 ret += i
                 # merge defaults and all sections to self:
                 alld.update(cfg.get_defaults())
-                for n, s in cfg.get_sections().iteritems():
+                for n, s in cfg.get_sections().items():
                     # conditional sections
                     cond = SafeConfigParserWithIncludes.CONDITIONAL_RE.match(n)
                     if cond:
@ -366,14 +347,14 @@ after = 1.conf
                         del(s['__name__'])
                     except KeyError:
                         pass
-                    for k in s.keys():
+                    for k in list(s.keys()):
                         v = s.pop(k)
                         s[k + cond] = v
                 s2 = alls.get(n)
                 if isinstance(s2, dict):
                     # save previous known values, for possible using in local interpolations later:
                     self.merge_section('KNOWN/'+n,
-                        dict(filter(lambda i: i[0] in s, s2.iteritems())), '')
+                        dict([i for i in iter(s2.items()) if i[0] in s]), '')
                     # merge section
                     s2.update(s)
                 else:
@ -385,10 +366,7 @@ after = 1.conf
         if logSys.getEffectiveLevel() <= logLevel:
             logSys.log(logLevel, " Reading file: %s", fileNamesFull[0])
         # read file(s) :
-        if sys.version_info >= (3,2): # pragma: no cover
-            return SafeConfigParser.read(self, fileNamesFull, encoding='utf-8')
-        else:
-            return SafeConfigParser.read(self, fileNamesFull)
+        return SafeConfigParser.read(self, fileNamesFull, encoding='utf-8')
 
     def merge_section(self, section, options, pref=None):
         alls = self.get_sections()
@ -400,7 +378,7 @@ after = 1.conf
             sec.update(options)
             return
         sk = {}
-        for k, v in options.iteritems():
+        for k, v in options.items():
             if not k.startswith(pref) and k != '__name__':
                 sk[pref+k] = v
         sec.update(sk)
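The reader keeps layering `.conf` files with optional `.local` overrides (see `_expandConfFilesWithLocal`); roughly, later-read files win, as in this plain-configparser sketch (file names in the comments are invented examples):

    from configparser import ConfigParser

    cfg = ConfigParser()
    cfg.read_string("[Definition]\nmaxlines = 1\n")    # as if from sshd.conf
    cfg.read_string("[Definition]\nmaxlines = 10\n")   # as if from sshd.local
    print(cfg.get("Definition", "maxlines"))           # -> 10, the .local value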
@ -26,7 +26,7 @@ __license__ = "GPL"
 
 import glob
 import os
-from ConfigParser import NoOptionError, NoSectionError
+from configparser import NoOptionError, NoSectionError
 
 from .configparserinc import sys, SafeConfigParserWithIncludes, logLevel
 from ..helpers import getLogger, _as_bool, _merge_dicts, substituteRecursiveTags
@ -98,7 +98,7 @@ class ConfigReader():
     def read(self, name, once=True):
         """ Overloads a default (not shared) read of config reader.
 
-        To prevent mutiple reads of config files with it includes, reads into
+        To prevent multiple reads of config files with it includes, reads into
         the config reader, if it was not yet cached/shared by 'name'.
         """
         # already shared ?
@ -183,7 +183,7 @@ class ConfigReader():
 class ConfigReaderUnshared(SafeConfigParserWithIncludes):
     """Unshared config reader (previously ConfigReader).
 
-    Do not use this class (internal not shared/cached represenation).
+    Do not use this class (internal not shared/cached representation).
     Use ConfigReader instead.
     """
 
@ -221,7 +221,7 @@ class ConfigReaderUnshared(SafeConfigParserWithIncludes):
             config_files += sorted(glob.glob('%s/*.local' % config_dir))
 
         # choose only existing ones
-        config_files = filter(os.path.exists, config_files)
+        config_files = list(filter(os.path.exists, config_files))
 
         if len(config_files):
             # at least one config exists and accessible
@ -277,7 +277,7 @@ class ConfigReaderUnshared(SafeConfigParserWithIncludes):
         # TODO: validate error handling here.
         except NoOptionError:
             if not optvalue is None:
-                logSys.warning("'%s' not defined in '%s'. Using default one: %r"
+                logSys.debug("'%s' not defined in '%s'. Using default one: %r"
                     % (optname, sec, optvalue))
                 values[optname] = optvalue
             # elif logSys.getEffectiveLevel() <= logLevel:
@ -47,7 +47,7 @@ class CSocket:
 
     def send(self, msg, nonblocking=False, timeout=None):
         # Convert every list member to string
-        obj = dumps(map(CSocket.convert, msg), HIGHEST_PROTOCOL)
+        obj = dumps(list(map(CSocket.convert, msg)), HIGHEST_PROTOCOL)
         self.__csock.send(obj)
         self.__csock.send(CSPROTO.END)
         return self.receive(self.__csock, nonblocking, timeout)
@ -72,7 +72,7 @@ class CSocket:
     @staticmethod
     def convert(m):
         """Convert every "unexpected" member of message to string"""
-        if isinstance(m, (basestring, bool, int, float, list, dict, set)):
+        if isinstance(m, (str, bool, int, float, list, dict, set)):
             return m
         else: # pragma: no cover
             return str(m)
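The `list(...)` wrapper added above matters because `map()` is a lazy iterator on Python 3 and cannot be pickled for the client/server socket protocol. A quick standalone illustration (standard library only):

    from pickle import dumps, HIGHEST_PROTOCOL

    msg = ["status", "sshd"]
    dumps(list(map(str, msg)), HIGHEST_PROTOCOL)   # fine: a plain list
    # dumps(map(str, msg), HIGHEST_PROTOCOL)       # TypeError: cannot pickle 'map' object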
@ -45,7 +45,7 @@ def _thread_name():
     return threading.current_thread().__class__.__name__
 
 def input_command(): # pragma: no cover
-    return raw_input(PROMPT)
+    return input(PROMPT)
 
 ##
 #
@ -456,7 +456,7 @@ class Fail2banClient(Fail2banCmdLine, Thread):
             return False
         finally:
             self._alive = False
-            for s, sh in _prev_signals.iteritems():
+            for s, sh in _prev_signals.items():
                 signal.signal(s, sh)
 
 
@ -27,7 +27,7 @@ import sys
 
 from ..version import version, normVersion
 from ..protocol import printFormatted
-from ..helpers import getLogger, str2LogLevel, getVerbosityFormat, BrokenPipeError
+from ..helpers import getLogger, str2LogLevel, getVerbosityFormat
 
 # Gets the instance of the logger.
 logSys = getLogger("fail2ban")
@ -40,10 +40,10 @@ import os
 import shlex
 import sys
 import time
-import urllib
+import urllib.request, urllib.parse, urllib.error
 from optparse import OptionParser, Option
 
-from ConfigParser import NoOptionError, NoSectionError, MissingSectionHeaderError
+from configparser import NoOptionError, NoSectionError, MissingSectionHeaderError
 
 try: # pragma: no cover
     from ..server.filtersystemd import FilterSystemd
@ -51,7 +51,7 @@ except ImportError:
     FilterSystemd = None
 
 from ..version import version, normVersion
-from .filterreader import FilterReader
+from .jailreader import FilterReader, JailReader, NoJailError
 from ..server.filter import Filter, FileContainer, MyTime
 from ..server.failregex import Regex, RegexException
 
@ -67,9 +67,9 @@ def debuggexURL(sample, regex, multiline=False, useDns="yes"):
         'flavor': 'python'
     }
     if multiline: args['flags'] = 'm'
-    return 'https://www.debuggex.com/?' + urllib.urlencode(args)
+    return 'https://www.debuggex.com/?' + urllib.parse.urlencode(args)
 
-def output(args): # pragma: no cover (overriden in test-cases)
+def output(args): # pragma: no cover (overridden in test-cases)
     print(args)
 
 def shortstr(s, l=53):
@ -246,7 +246,7 @@ class Fail2banRegex(object):
 
     def __init__(self, opts):
         # set local protected members from given options:
-        self.__dict__.update(dict(('_'+o,v) for o,v in opts.__dict__.iteritems()))
+        self.__dict__.update(dict(('_'+o,v) for o,v in opts.__dict__.items()))
         self._opts = opts
         self._maxlines_set = False # so we allow to override maxlines in cmdline
         self._datepattern_set = False
@ -280,7 +280,7 @@ class Fail2banRegex(object):
         self._filter.setUseDns(opts.usedns)
         self._filter.returnRawHost = opts.raw
         self._filter.checkAllRegex = opts.checkAllRegex and not opts.out
-        # ignore pending (without ID/IP), added to matches if it hits later (if ID/IP can be retreved)
+        # ignore pending (without ID/IP), added to matches if it hits later (if ID/IP can be retrieved)
         self._filter.ignorePending = bool(opts.out)
         # callback to increment ignored RE's by index (during process):
         self._filter.onIgnoreRegex = self._onIgnoreRegex
@ -312,12 +312,18 @@ class Fail2banRegex(object):
     def _dumpRealOptions(self, reader, fltOpt):
         realopts = {}
         combopts = reader.getCombined()
+        if isinstance(reader, FilterReader):
+            _get_opt = lambda k: reader.get('Definition', k)
+        elif reader.filter: # JailReader for jail with filter:
+            _get_opt = lambda k: reader.filter.get('Definition', k)
+        else: # JailReader for jail without filter:
+            _get_opt = lambda k: None
         # output all options that are specified in filter-argument as well as some special (mostly interested):
-        for k in ['logtype', 'datepattern'] + fltOpt.keys():
+        for k in ['logtype', 'datepattern'] + list(fltOpt.keys()):
             # combined options win, but they contain only a sub-set in filter expected keys,
             # so get the rest from definition section:
             try:
-                realopts[k] = combopts[k] if k in combopts else reader.get('Definition', k)
+                realopts[k] = combopts[k] if k in combopts else _get_opt(k)
             except NoOptionError: # pragma: no cover
                 pass
         self.output("Real filter options : %r" % realopts)
@ -330,16 +336,26 @@ class Fail2banRegex(object):
             fltName = value
             fltFile = None
             fltOpt = {}
+            jail = None
             if regextype == 'fail':
                 if re.search(r'(?ms)^/{0,3}[\w/_\-.]+(?:\[.*\])?$', value):
                     try:
                         fltName, fltOpt = extractOptions(value)
+                        if not re.search(r'(?ms)(?:/|\.(?:conf|local)$)', fltName): # name of jail?
+                            try:
+                                jail = JailReader(fltName, force_enable=True,
+                                    share_config=self.share_config, basedir=basedir)
+                                jail.read()
+                            except NoJailError:
+                                jail = None
                         if "." in fltName[~5:]:
                             tryNames = (fltName,)
                         else:
                             tryNames = (fltName, fltName + '.conf', fltName + '.local')
                         for fltFile in tryNames:
-                            if not "/" in fltFile:
+                            if os.path.dirname(fltFile) == 'filter.d':
+                                fltFile = os.path.join(basedir, fltFile)
+                            elif not "/" in fltFile:
                                 if os.path.basename(basedir) == 'filter.d':
                                     fltFile = os.path.join(basedir, fltFile)
                                 else:
@ -354,8 +370,25 @@ class Fail2banRegex(object):
                     output(" while parsing: %s" % (value,))
                     if self._verbose: raise(e)
                     return False
 
+            readercommands = None
+            # if it is jail:
+            if jail:
+                self.output( "Use %11s jail : %s" % ('', fltName) )
+                if fltOpt:
+                    self.output( "Use jail/flt options : %r" % fltOpt )
+                if not fltOpt: fltOpt = {}
+                fltOpt['backend'] = self._backend
+                ret = jail.getOptions(addOpts=fltOpt)
+                if not ret:
+                    output('ERROR: Failed to get jail for %r' % (value,))
+                    return False
+                # show real options if expected:
+                if self._verbose > 1 or logSys.getEffectiveLevel()<=logging.DEBUG:
+                    self._dumpRealOptions(jail, fltOpt)
+                readercommands = jail.convert(allow_no_files=True)
             # if it is filter file:
-            if fltFile is not None:
+            elif fltFile is not None:
                 if (basedir == self._opts.config
                     or os.path.basename(basedir) == 'filter.d'
                     or ("." not in fltName[~5:] and "/" not in fltName)
@ -364,16 +397,17 @@ class Fail2banRegex(object):
                     if os.path.basename(basedir) == 'filter.d':
                         basedir = os.path.dirname(basedir)
                     fltName = os.path.splitext(os.path.basename(fltName))[0]
-                    self.output( "Use %11s filter file : %s, basedir: %s" % (regex, fltName, basedir) )
+                    self.output( "Use %11s file : %s, basedir: %s" % ('filter', fltName, basedir) )
                 else:
                     ## foreign file - readexplicit this file and includes if possible:
-                    self.output( "Use %11s file : %s" % (regex, fltName) )
+                    self.output( "Use %11s file : %s" % ('filter', fltName) )
                     basedir = None
                     if not os.path.isabs(fltName): # avoid join with "filter.d" inside FilterReader
                         fltName = os.path.abspath(fltName)
                 if fltOpt:
                     self.output( "Use filter options : %r" % fltOpt )
-                reader = FilterReader(fltName, 'fail2ban-regex-jail', fltOpt, share_config=self.share_config, basedir=basedir)
+                reader = FilterReader(fltName, 'fail2ban-regex-jail', fltOpt,
+                    share_config=self.share_config, basedir=basedir)
                 ret = None
                 try:
                     if basedir is not None:
@ -398,6 +432,7 @@ class Fail2banRegex(object):
                 # to stream:
                 readercommands = reader.convert()
 
+            if readercommands:
                 regex_values = {}
                 for opt in readercommands:
                     if opt[0] == 'multi-set':
@ -440,7 +475,7 @@ class Fail2banRegex(object):
             self.output( "Use %11s line : %s" % (regex, shortstr(value)) )
             regex_values = {regextype: [RegexStat(value)]}
 
-        for regextype, regex_values in regex_values.iteritems():
+        for regextype, regex_values in regex_values.items():
             regex = regextype + 'regex'
             setattr(self, "_" + regex, regex_values)
             for regex in regex_values:
@ -476,7 +511,7 @@ class Fail2banRegex(object):
                 ret.append(match)
             else:
                 is_ignored = True
-        if self._opts.out: # (formated) output - don't need stats:
+        if self._opts.out: # (formatted) output - don't need stats:
             return None, ret, None
         # prefregex stats:
         if self._filter.prefRegex:
@ -532,13 +567,13 @@ class Fail2banRegex(object):
             def _out(ret):
                 for r in ret:
                     for r in r[3].get('matches'):
-                        if not isinstance(r, basestring):
+                        if not isinstance(r, str):
                             r = ''.join(r for r in r)
                         output(r)
         elif ofmt == 'row':
             def _out(ret):
                 for r in ret:
-                    output('[%r,\t%r,\t%r],' % (r[1],r[2],dict((k,v) for k, v in r[3].iteritems() if k != 'matches')))
+                    output('[%r,\t%r,\t%r],' % (r[1],r[2],dict((k,v) for k, v in r[3].items() if k != 'matches')))
         elif '<' not in ofmt:
             def _out(ret):
                 for r in ret:
@ -573,7 +608,7 @@ class Fail2banRegex(object):
                 # wrap multiline tag (msg) interpolations to single line:
                 for r, v in rows:
                     for r in r[3].get('matches'):
-                        if not isinstance(r, basestring):
+                        if not isinstance(r, str):
                             r = ''.join(r for r in r)
                         r = v.replace("\x00msg\x00", r)
                         output(r)
@ -595,7 +630,7 @@ class Fail2banRegex(object):
                 continue
             line_datetimestripped, ret, is_ignored = self.testRegex(line)
 
-            if self._opts.out: # (formated) output:
+            if self._opts.out: # (formatted) output:
                 if len(ret) > 0 and not is_ignored: out(ret)
                 continue
 
@ -639,9 +674,9 @@ class Fail2banRegex(object):
                 ans = [[]]
                 for arg in [l, regexlist]:
                     ans = [ x + [y] for x in ans for y in arg ]
-                b = map(lambda a: a[0] + ' | ' + a[1].getFailRegex() + ' | ' +
+                b = [a[0] + ' | ' + a[1].getFailRegex() + ' | ' +
                     debuggexURL(self.encode_line(a[0]), a[1].getFailRegex(),
-                        multiline, self._opts.usedns), ans)
+                        multiline, self._opts.usedns) for a in ans]
                 pprint_list([x.rstrip() for x in b], header)
             else:
                 output( "%s too many to print. Use --print-all-%s " \
@ -789,7 +824,15 @@ class Fail2banRegex(object):
     return True
 
 
+def _loc_except_hook(exctype, value, traceback):
+    if (exctype != BrokenPipeError and exctype != IOError or value.errno != 32):
+        return sys.__excepthook__(exctype, value, traceback)
+    # pipe seems to be closed (head / tail / etc), thus simply exit:
+    sys.exit(0)
+
 def exec_command_line(*args):
+    sys.excepthook = _loc_except_hook; # stop on closed/broken pipe
+
     logging.exitOnIOError = True
     parser = get_opt_parser()
     (opts, args) = parser.parse_args(*args)
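The `_loc_except_hook` added above keeps `fail2ban-regex ... | head` from ending with a traceback once the reading side of the pipe closes. A minimal standalone version of the same idea:

    import sys

    def quiet_broken_pipe(exctype, value, tb):
        if exctype is BrokenPipeError:      # stdout reader (head, tail, ...) went away
            sys.exit(0)
        sys.__excepthook__(exctype, value, tb)

    sys.excepthook = quiet_broken_pipe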
@ -45,7 +45,7 @@ class Fail2banServer(Fail2banCmdLine):
 
     @staticmethod
     def startServerDirect(conf, daemon=True, setServer=None):
-        logSys.debug(" direct starting of server in %s, deamon: %s", os.getpid(), daemon)
+        logSys.debug(" direct starting of server in %s, daemon: %s", os.getpid(), daemon)
         from ..server.server import Server
         server = None
         try:
@ -120,7 +120,7 @@ class Fail2banServer(Fail2banCmdLine):
         if frk: # pragma: no cover
             os.execv(exe, args)
         else:
-            # use P_WAIT instead of P_NOWAIT (to prevent defunct-zomby process), it startet as daemon, so parent exit fast after fork):
+            # use P_WAIT instead of P_NOWAIT (to prevent defunct-zomby process), it started as daemon, so parent exit fast after fork):
             ret = os.spawnv(os.P_WAIT, exe, args)
             if ret != 0: # pragma: no cover
                 raise OSError(ret, "Unknown error by executing server %r with %r" % (args[1], exe))
@ -71,7 +71,7 @@ class FilterReader(DefinitionInitConfigReader):
     @staticmethod
     def _fillStream(stream, opts, jailName):
         prio0idx = 0
-        for opt, value in opts.iteritems():
+        for opt, value in opts.items():
             # Do not send a command if the value is not set (empty).
             if value is None: continue
             if opt in ("failregex", "ignoreregex"):
@ -29,16 +29,19 @@ import json
 import os.path
 import re
 
-from .configreader import ConfigReaderUnshared, ConfigReader
+from .configreader import ConfigReaderUnshared, ConfigReader, NoSectionError
 from .filterreader import FilterReader
 from .actionreader import ActionReader
 from ..version import version
-from ..helpers import getLogger, extractOptions, splitWithOptions, splitwords
+from ..helpers import _merge_dicts, getLogger, extractOptions, splitWithOptions, splitwords
 
 # Gets the instance of the logger.
 logSys = getLogger(__name__)
 
 
+class NoJailError(ValueError):
+    pass
+
 class JailReader(ConfigReader):
 
     def __init__(self, name, force_enable=False, **kwargs):
@ -64,7 +67,7 @@ class JailReader(ConfigReader):
         # Before returning -- verify that requested section
         # exists at all
         if not (self.__name in self.sections()):
-            raise ValueError("Jail %r was not found among available"
+            raise NoJailError("Jail %r was not found among available"
                 % self.__name)
         return out
 
@ -117,9 +120,9 @@ class JailReader(ConfigReader):
     }
     _configOpts.update(FilterReader._configOpts)
 
-    _ignoreOpts = set(['action', 'filter', 'enabled'] + FilterReader._configOpts.keys())
+    _ignoreOpts = set(['action', 'filter', 'enabled', 'backend'] + list(FilterReader._configOpts.keys()))
 
-    def getOptions(self):
+    def getOptions(self, addOpts=None):
 
         basedir = self.getBaseDir()
 
@ -136,6 +139,8 @@ class JailReader(ConfigReader):
                 shouldExist=True)
             if not self.__opts: # pragma: no cover
                 raise JailDefError("Init jail options failed")
+            if addOpts:
+                self.__opts = _merge_dicts(self.__opts, addOpts)
 
             if not self.isEnabled():
                 return True
@ -147,6 +152,8 @@ class JailReader(ConfigReader):
                     filterName, filterOpt = extractOptions(flt)
                 except ValueError as e:
                     raise JailDefError("Invalid filter definition %r: %s" % (flt, e))
+                if addOpts:
+                    filterOpt = _merge_dicts(filterOpt, addOpts)
                 self.__filter = FilterReader(
                     filterName, self.__name, filterOpt,
                     share_config=self.share_config, basedir=basedir)
@ -219,6 +226,15 @@ class JailReader(ConfigReader):
             return False
         return True
 
+    @property
+    def filter(self):
+        return self.__filter
+
+    def getCombined(self):
+        if not self.__filter:
+            return self.__opts
+        return _merge_dicts(self.__opts, self.__filter.getCombined())
+
     def convert(self, allow_no_files=False):
         """Convert read before __opts to the commands stream
 
@ -235,14 +251,15 @@ class JailReader(ConfigReader):
         if e:
             stream.extend([['config-error', "Jail '%s' skipped, because of wrong configuration: %s" % (self.__name, e)]])
             return stream
-        # fill jail with filter options, using filter (only not overriden in jail):
+        # fill jail with filter options, using filter (only not overridden in jail):
         if self.__filter:
             stream.extend(self.__filter.convert())
         # and using options from jail:
         FilterReader._fillStream(stream, self.__opts, self.__name)
-        for opt, value in self.__opts.iteritems():
+        backend = self.__opts.get('backend', 'auto')
+        for opt, value in self.__opts.items():
             if opt == "logpath":
-                if self.__opts.get('backend', '').startswith("systemd"): continue
+                if backend.startswith("systemd"): continue
                 found_files = 0
                 for path in value.split("\n"):
                     path = path.rsplit(" ", 1)
@ -260,8 +277,6 @@ class JailReader(ConfigReader):
                     if not allow_no_files:
                         raise ValueError(msg)
                     logSys.warning(msg)
-            elif opt == "backend":
-                backend = value
             elif opt == "ignoreip":
                 stream.append(["set", self.__name, "addignoreip"] + splitwords(value))
             elif opt not in JailReader._ignoreOpts:
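`getOptions(addOpts=...)` and `getCombined()` above rely on the `_merge_dicts` helper; the semantics assumed here are a shallow merge in which keys from the second mapping override the first, roughly:

    def merge_dicts(x, y):
        # sketch of the assumed helper behaviour, not the fail2ban implementation
        r = dict(x)
        if y:
            r.update(y)
        return r

    print(merge_dicts({"maxlines": 1, "backend": "auto"}, {"backend": "systemd"}))
    # -> {'maxlines': 1, 'backend': 'systemd'}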
@ -0,0 +1,310 @@
|
||||||
|
# -*- Mode: Python; tab-width: 4 -*-
|
||||||
|
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
|
||||||
|
# Author: Sam Rushing <rushing@nightmare.com>
|
||||||
|
|
||||||
|
# ======================================================================
|
||||||
|
# Copyright 1996 by Sam Rushing
|
||||||
|
#
|
||||||
|
# All Rights Reserved
|
||||||
|
#
|
||||||
|
# Permission to use, copy, modify, and distribute this software and
|
||||||
|
# its documentation for any purpose and without fee is hereby
|
||||||
|
# granted, provided that the above copyright notice appear in all
|
||||||
|
# copies and that both that copyright notice and this permission
|
||||||
|
# notice appear in supporting documentation, and that the name of Sam
|
||||||
|
# Rushing not be used in advertising or publicity pertaining to
|
||||||
|
# distribution of the software without specific, written prior
|
||||||
|
# permission.
|
||||||
|
#
|
||||||
|
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||||
|
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
||||||
|
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||||
|
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||||
|
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||||
|
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||||
|
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
# ======================================================================
|
||||||
|
|
||||||
|
r"""A class supporting chat-style (command/response) protocols.
|
||||||
|
|
||||||
|
This class adds support for 'chat' style protocols - where one side
|
||||||
|
sends a 'command', and the other sends a response (examples would be
|
||||||
|
the common internet protocols - smtp, nntp, ftp, etc..).
|
||||||
|
|
||||||
|
The handle_read() method looks at the input stream for the current
|
||||||
|
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
|
||||||
|
for multi-line output), calling self.found_terminator() on its
|
||||||
|
receipt.
|
||||||
|
|
||||||
|
for example:
|
||||||
|
Say you build an async nntp client using this class. At the start
|
||||||
|
of the connection, you'll have self.terminator set to '\r\n', in
|
||||||
|
order to process the single-line greeting. Just before issuing a
|
||||||
|
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
|
||||||
|
command will be accumulated (using your own 'collect_incoming_data'
|
||||||
|
method) up to the terminator, and then control will be returned to
|
||||||
|
you - by calling your self.found_terminator() method.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import asyncore
|
||||||
|
except ImportError:
|
||||||
|
from . import asyncore
|
||||||
|
from collections import deque
|
||||||
|
|
||||||
|
|
||||||
|
class async_chat(asyncore.dispatcher):
|
||||||
|
"""This is an abstract class. You must derive from this class, and add
|
||||||
|
the two methods collect_incoming_data() and found_terminator()"""
|
||||||
|
|
||||||
|
# these are overridable defaults
|
||||||
|
|
||||||
|
ac_in_buffer_size = 65536
|
||||||
|
ac_out_buffer_size = 65536
|
||||||
|
|
||||||
|
# we don't want to enable the use of encoding by default, because that is a
|
||||||
|
# sign of an application bug that we don't want to pass silently
|
||||||
|
|
||||||
|
use_encoding = 0
|
||||||
|
encoding = 'latin-1'
|
||||||
|
|
||||||
|
def __init__(self, sock=None, map=None):
|
||||||
|
# for string terminator matching
|
||||||
|
self.ac_in_buffer = b''
|
||||||
|
|
||||||
|
# we use a list here rather than io.BytesIO for a few reasons...
|
||||||
|
# del lst[:] is faster than bio.truncate(0)
|
||||||
|
# lst = [] is faster than bio.truncate(0)
|
||||||
|
self.incoming = []
|
||||||
|
|
||||||
|
# we toss the use of the "simple producer" and replace it with
|
||||||
|
# a pure deque, which the original fifo was a wrapping of
|
||||||
|
self.producer_fifo = deque()
|
||||||
|
asyncore.dispatcher.__init__(self, sock, map)
|
||||||
|
|
||||||
|
def collect_incoming_data(self, data):
|
||||||
|
raise NotImplementedError("must be implemented in subclass")
|
||||||
|
|
||||||
|
def _collect_incoming_data(self, data):
|
||||||
|
self.incoming.append(data)
|
||||||
|
|
||||||
|
def _get_data(self):
|
||||||
|
d = b''.join(self.incoming)
|
||||||
|
del self.incoming[:]
|
||||||
|
return d
|
||||||
|
|
||||||
|
def found_terminator(self):
|
||||||
|
raise NotImplementedError("must be implemented in subclass")
|
||||||
|
|
||||||
|
def set_terminator(self, term):
|
||||||
|
"""Set the input delimiter.
|
||||||
|
|
||||||
|
Can be a fixed string of any length, an integer, or None.
|
||||||
|
"""
|
||||||
|
if isinstance(term, str) and self.use_encoding:
|
||||||
|
term = bytes(term, self.encoding)
|
||||||
|
elif isinstance(term, int) and term < 0:
|
||||||
|
raise ValueError('the number of received bytes must be positive')
|
||||||
|
self.terminator = term
|
||||||
|
|
||||||
|
def get_terminator(self):
|
||||||
|
return self.terminator
|
||||||
|
|
||||||
|
# grab some more data from the socket,
|
||||||
|
# throw it to the collector method,
|
||||||
|
# check for the terminator,
|
||||||
|
# if found, transition to the next state.
|
||||||
|
|
||||||
|
def handle_read(self):
|
||||||
|
|
||||||
|
try:
|
||||||
|
data = self.recv(self.ac_in_buffer_size)
|
||||||
|
except BlockingIOError:
|
||||||
|
return
|
||||||
|
except OSError:
|
||||||
|
self.handle_error()
|
||||||
|
return
|
||||||
|
|
||||||
|
if isinstance(data, str) and self.use_encoding:
|
||||||
|
data = bytes(str, self.encoding)
|
||||||
|
self.ac_in_buffer = self.ac_in_buffer + data
|
||||||
|
|
||||||
|
# Continue to search for self.terminator in self.ac_in_buffer,
|
||||||
|
# while calling self.collect_incoming_data. The while loop
|
||||||
|
# is necessary because we might read several data+terminator
|
||||||
|
# combos with a single recv(4096).
|
||||||
|
|
||||||
|
while self.ac_in_buffer:
|
||||||
|
lb = len(self.ac_in_buffer)
|
||||||
|
terminator = self.get_terminator()
|
||||||
|
if not terminator:
|
||||||
|
# no terminator, collect it all
|
||||||
|
self.collect_incoming_data(self.ac_in_buffer)
|
||||||
|
self.ac_in_buffer = b''
|
||||||
|
elif isinstance(terminator, int):
|
||||||
|
# numeric terminator
|
||||||
|
n = terminator
|
||||||
|
if lb < n:
|
||||||
|
self.collect_incoming_data(self.ac_in_buffer)
|
||||||
|
self.ac_in_buffer = b''
|
||||||
|
self.terminator = self.terminator - lb
|
||||||
|
else:
|
||||||
|
self.collect_incoming_data(self.ac_in_buffer[:n])
|
||||||
|
self.ac_in_buffer = self.ac_in_buffer[n:]
|
||||||
|
self.terminator = 0
|
||||||
|
self.found_terminator()
|
||||||
|
else:
|
||||||
|
# 3 cases:
|
||||||
|
# 1) end of buffer matches terminator exactly:
|
||||||
|
# collect data, transition
|
||||||
|
# 2) end of buffer matches some prefix:
|
||||||
|
# collect data to the prefix
|
||||||
|
# 3) end of buffer does not match any prefix:
|
||||||
|
# collect data
|
||||||
|
terminator_len = len(terminator)
|
||||||
|
index = self.ac_in_buffer.find(terminator)
|
||||||
|
if index != -1:
|
||||||
|
# we found the terminator
|
||||||
|
if index > 0:
|
||||||
|
# don't bother reporting the empty string
|
||||||
|
# (source of subtle bugs)
|
||||||
|
self.collect_incoming_data(self.ac_in_buffer[:index])
|
||||||
|
self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
|
||||||
|
# This does the Right Thing if the terminator
|
||||||
|
# is changed here.
|
||||||
|
self.found_terminator()
|
||||||
|
else:
|
||||||
|
# check for a prefix of the terminator
|
||||||
|
index = find_prefix_at_end(self.ac_in_buffer, terminator)
|
||||||
|
if index:
|
||||||
|
if index != lb:
|
||||||
|
# we found a prefix, collect up to the prefix
|
||||||
|
self.collect_incoming_data(self.ac_in_buffer[:-index])
|
||||||
|
self.ac_in_buffer = self.ac_in_buffer[-index:]
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
# no prefix, collect it all
|
||||||
|
self.collect_incoming_data(self.ac_in_buffer)
|
||||||
|
self.ac_in_buffer = b''
|
||||||
|
|
||||||
|
def handle_write(self):
|
||||||
|
self.initiate_send()
|
||||||
|
|
||||||
|
def handle_close(self):
|
||||||
|
self.close()
|
||||||
|
|
||||||
|
def push(self, data):
|
||||||
|
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||||
|
raise TypeError('data argument must be byte-ish (%r)',
|
||||||
|
type(data))
|
||||||
|
sabs = self.ac_out_buffer_size
|
||||||
|
if len(data) > sabs:
|
||||||
|
for i in range(0, len(data), sabs):
|
||||||
|
self.producer_fifo.append(data[i:i+sabs])
|
||||||
|
else:
|
||||||
|
self.producer_fifo.append(data)
|
||||||
|
self.initiate_send()
|
||||||
|
|
||||||
|
def push_with_producer(self, producer):
|
||||||
|
self.producer_fifo.append(producer)
|
||||||
|
self.initiate_send()
|
||||||
|
|
||||||
|
def readable(self):
|
||||||
|
"predicate for inclusion in the readable for select()"
|
||||||
|
# cannot use the old predicate, it violates the claim of the
|
||||||
|
# set_terminator method.
|
||||||
|
|
||||||
|
# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
|
||||||
|
return 1
|
||||||
|
|
||||||
|
def writable(self):
|
||||||
|
"predicate for inclusion in the writable for select()"
|
||||||
|
return self.producer_fifo or (not self.connected)
|
||||||
|
|
||||||
|
def close_when_done(self):
|
||||||
|
"automatically close this channel once the outgoing queue is empty"
|
||||||
|
self.producer_fifo.append(None)
|
||||||
|
|
||||||
|
def initiate_send(self):
|
||||||
|
while self.producer_fifo and self.connected:
|
||||||
|
first = self.producer_fifo[0]
|
||||||
|
# handle empty string/buffer or None entry
|
||||||
|
if not first:
|
||||||
|
del self.producer_fifo[0]
|
||||||
|
if first is None:
|
||||||
|
self.handle_close()
|
||||||
|
return
|
||||||
|
|
||||||
|
# handle classic producer behavior
|
||||||
|
obs = self.ac_out_buffer_size
|
||||||
|
try:
|
||||||
|
data = first[:obs]
|
||||||
|
except TypeError:
|
||||||
|
data = first.more()
|
||||||
|
if data:
|
||||||
|
self.producer_fifo.appendleft(data)
|
||||||
|
else:
|
||||||
|
del self.producer_fifo[0]
|
||||||
|
continue
|
||||||
|
|
||||||
|
if isinstance(data, str) and self.use_encoding:
|
||||||
|
data = bytes(data, self.encoding)
|
||||||
|
|
||||||
|
# send the data
|
||||||
|
try:
|
||||||
|
num_sent = self.send(data)
|
||||||
|
except OSError:
|
||||||
|
self.handle_error()
|
||||||
|
return
|
||||||
|
|
||||||
|
if num_sent:
|
||||||
|
if num_sent < len(data) or obs < len(first):
|
||||||
|
self.producer_fifo[0] = first[num_sent:]
|
||||||
|
else:
|
||||||
|
del self.producer_fifo[0]
|
||||||
|
# we tried to send some actual data
|
||||||
|
return
|
||||||
|
|
||||||
|
def discard_buffers(self):
|
||||||
|
# Emergencies only!
|
||||||
|
self.ac_in_buffer = b''
|
||||||
|
del self.incoming[:]
|
||||||
|
self.producer_fifo.clear()
|
||||||
|
|
||||||
|
|
||||||
|
class simple_producer:
|
||||||
|
|
||||||
|
def __init__(self, data, buffer_size=512):
|
||||||
|
self.data = data
|
||||||
|
self.buffer_size = buffer_size
|
||||||
|
|
||||||
|
def more(self):
|
||||||
|
if len(self.data) > self.buffer_size:
|
||||||
|
result = self.data[:self.buffer_size]
|
||||||
|
self.data = self.data[self.buffer_size:]
|
||||||
|
return result
|
||||||
|
else:
|
||||||
|
result = self.data
|
||||||
|
self.data = b''
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# Given 'haystack', see if any prefix of 'needle' is at its end. This
|
||||||
|
# assumes an exact match has already been checked. Return the number of
|
||||||
|
# characters matched.
|
||||||
|
# for example:
|
||||||
|
# f_p_a_e("qwerty\r", "\r\n") => 1
|
||||||
|
# f_p_a_e("qwertydkjf", "\r\n") => 0
|
||||||
|
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>
|
||||||
|
|
||||||
|
# this could maybe be made faster with a computed regex?
|
||||||
|
# [answer: no; circa Python-2.0, Jan 2001]
|
||||||
|
# new python: 28961/s
|
||||||
|
# old python: 18307/s
|
||||||
|
# re: 12820/s
|
||||||
|
# regex: 14035/s
|
||||||
|
|
||||||
|
def find_prefix_at_end(haystack, needle):
|
||||||
|
l = len(needle) - 1
|
||||||
|
while l and not haystack.endswith(needle[:l]):
|
||||||
|
l -= 1
|
||||||
|
return l
|
|
@ -0,0 +1,642 @@
|
||||||
|
# -*- Mode: Python -*-
|
||||||
|
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
|
||||||
|
# Author: Sam Rushing <rushing@nightmare.com>
|
||||||
|
|
||||||
|
# ======================================================================
|
||||||
|
# Copyright 1996 by Sam Rushing
|
||||||
|
#
|
||||||
|
# All Rights Reserved
|
||||||
|
#
|
||||||
|
# Permission to use, copy, modify, and distribute this software and
|
||||||
|
# its documentation for any purpose and without fee is hereby
|
||||||
|
# granted, provided that the above copyright notice appear in all
|
||||||
|
# copies and that both that copyright notice and this permission
|
||||||
|
# notice appear in supporting documentation, and that the name of Sam
|
||||||
|
# Rushing not be used in advertising or publicity pertaining to
|
||||||
|
# distribution of the software without specific, written prior
|
||||||
|
# permission.
|
||||||
|
#
|
||||||
|
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||||
|
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
||||||
|
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||||
|
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||||
|
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||||
|
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||||
|
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
# ======================================================================
|
||||||
|
|
||||||
|
"""Basic infrastructure for asynchronous socket service clients and servers.
|
||||||
|
|
||||||
|
There are only two ways to have a program on a single processor do "more
|
||||||
|
than one thing at a time". Multi-threaded programming is the simplest and
|
||||||
|
most popular way to do it, but there is another very different technique,
|
||||||
|
that lets you have nearly all the advantages of multi-threading, without
|
||||||
|
actually using multiple threads. it's really only practical if your program
|
||||||
|
is largely I/O bound. If your program is CPU bound, then pre-emptive
|
||||||
|
scheduled threads are probably what you really need. Network servers are
|
||||||
|
rarely CPU-bound, however.
|
||||||
|
|
||||||
|
If your operating system supports the select() system call in its I/O
|
||||||
|
library (and nearly all do), then you can use it to juggle multiple
|
||||||
|
communication channels at once; doing other work while your I/O is taking
|
||||||
|
place in the "background." Although this strategy can seem strange and
|
||||||
|
complex, especially at first, it is in many ways easier to understand and
|
||||||
|
control than multi-threaded programming. The module documented here solves
|
||||||
|
many of the difficult problems for you, making the task of building
|
||||||
|
sophisticated high-performance network servers and clients a snap.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import select
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
import os
|
||||||
|
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
|
||||||
|
ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
|
||||||
|
errorcode
|
||||||
|
|
||||||
|
_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
|
||||||
|
EBADF})
|
||||||
|
|
||||||
|
try:
|
||||||
|
socket_map
|
||||||
|
except NameError:
    socket_map = {}


def _strerror(err):
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        if err in errorcode:
            return errorcode[err]
        return "Unknown error %s" % err


class ExitNow(Exception):
    pass


_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)


def read(obj):
    try:
        obj.handle_read_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()


def write(obj):
    try:
        obj.handle_write_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()


def _exception(obj):
    try:
        obj.handle_expt_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()


def readwrite(obj, flags):
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except OSError as e:
        if e.errno not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()


def poll(timeout=0.0, map=None):
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []
        for fd, obj in list(map.items()):
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        if [] == r == w == e:
            time.sleep(timeout)
            return

        r, w, e = select.select(r, w, e, timeout)

        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)

        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)


def poll2(timeout=0.0, map=None):
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        for fd, obj in list(map.items()):
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
            if flags:
                pollster.register(fd, flags)

        r = pollster.poll(timeout)
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)


poll3 = poll2  # Alias for backward compatibility


def loop(timeout=30.0, use_poll=False, map=None, count=None):
    if map is None:
        map = socket_map

    if use_poll and hasattr(select, 'poll'):
        poll_fun = poll2
    else:
        poll_fun = poll

    if count is None:
        while map:
            poll_fun(timeout, map)

    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count = count - 1


class dispatcher:

    debug = False
    connected = False
    accepting = False
    connecting = False
    closing = False
    addr = None
    ignore_log_types = frozenset({'warning'})

    def __init__(self, sock=None, map=None):
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(False)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except OSError as err:
                if err.errno in (ENOTCONN, EINVAL):
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        status = [self.__class__.__module__+"."+self.__class__.__qualname__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    def add_channel(self, map=None):
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(False)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        self.socket = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except OSError:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        return True

    def writable(self):
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        self.accepting = True
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        self.connected = False
        self.connecting = True
        err = self.socket.connect_ex(address)
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
        or err == EINVAL and os.name == 'nt':
            self.addr = address
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise OSError(err, errorcode[err])

    def accept(self):
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except OSError as why:
            if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        try:
            result = self.socket.send(data)
            return result
        except OSError as why:
            if why.errno == EWOULDBLOCK:
                return 0
            elif why.errno in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except OSError as why:
            # winsock sometimes raises ENOTCONN
            if why.errno in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        self.connected = False
        self.accepting = False
        self.connecting = False
        self.del_channel()
        if self.socket is not None:
            try:
                self.socket.close()
            except OSError as why:
                if why.errno not in (ENOTCONN, EBADF):
                    raise

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, type='info'):
        if type not in self.ignore_log_types:
            print('%s: %s' % (type, message))

    def handle_read_event(self):
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            if self.connecting:
                self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise OSError(err, _strerror(err))
        self.handle_connect()
        self.connected = True
        self.connecting = False

    def handle_write_event(self):
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return

        if not self.connected:
            if self.connecting:
                self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        # handle_expt_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_expt()

    def handle_error(self):
        nil, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()

    def handle_expt(self):
        self.log_info('unhandled incoming priority event', 'warning')

    def handle_read(self):
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        self.log_info('unhandled connect event', 'warning')

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            self.handle_accepted(*pair)

    def handle_accepted(self, sock, addr):
        sock.close()
        self.log_info('unhandled accepted event', 'warning')

    def handle_close(self):
        self.log_info('unhandled close event', 'warning')
        self.close()


# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------

class dispatcher_with_send(dispatcher):

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = b''

    def initiate_send(self):
        num_sent = 0
        num_sent = dispatcher.send(self, self.out_buffer[:65536])
        self.out_buffer = self.out_buffer[num_sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()


# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------

def compact_traceback():
    t, v, tb = sys.exc_info()
    tbinfo = []
    if not tb:  # Must have a traceback
        raise AssertionError("traceback does not exist")
    while tb:
        tbinfo.append((
            tb.tb_frame.f_code.co_filename,
            tb.tb_frame.f_code.co_name,
            str(tb.tb_lineno)
            ))
        tb = tb.tb_next

    # just to be safe
    del tb

    file, function, line = tbinfo[-1]
    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
    return (file, function, line), t, v, info


def close_all(map=None, ignore_all=False):
    if map is None:
        map = socket_map
    for x in list(map.values()):
        try:
            x.close()
        except OSError as x:
            if x.errno == EBADF:
                pass
            elif not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except:
            if not ignore_all:
                raise
    map.clear()


# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...

if os.name == 'posix':
    class file_wrapper:
        # Here we override just enough to make a file
        # look like a socket for the purposes of asyncore.
        # The passed fd is automatically os.dup()'d

        def __init__(self, fd):
            self.fd = os.dup(fd)

        def __del__(self):
            if self.fd >= 0:
                warnings.warn("unclosed file %r" % self, ResourceWarning,
                              source=self)
            self.close()

        def recv(self, *args):
            return os.read(self.fd, *args)

        def send(self, *args):
            return os.write(self.fd, *args)

        def getsockopt(self, level, optname, buflen=None):
            if (level == socket.SOL_SOCKET and
                optname == socket.SO_ERROR and
                not buflen):
                return 0
            raise NotImplementedError("Only asyncore specific behaviour "
                                      "implemented.")

        read = recv
        write = send

        def close(self):
            if self.fd < 0:
                return
            fd = self.fd
            self.fd = -1
            os.close(fd)

        def fileno(self):
            return self.fd

    class file_dispatcher(dispatcher):

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            os.set_blocking(fd, False)

        def set_file(self, fd):
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
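For orientation only, here is a minimal sketch of how this vendored dispatcher API is typically driven. The EchoClient class, the host/port and the payload are illustrative stand-ins and not part of this commit; the import path assumes the module is reachable as fail2ban.compat.asyncore, the same fallback path the server code further below uses.

# Minimal sketch (not part of the commit): a trivial client built on the
# vendored dispatcher, assuming it is importable as fail2ban.compat.asyncore.
import socket
from fail2ban.compat import asyncore

class EchoClient(asyncore.dispatcher):
    def __init__(self, host, port, payload):
        asyncore.dispatcher.__init__(self)
        self.buffer = payload
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, port))

    def handle_connect(self):
        pass  # connection established; nothing else to do for this sketch

    def writable(self):
        # only ask for write events while data remains to be sent
        return len(self.buffer) > 0

    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]

    def handle_read(self):
        print(self.recv(4096))

    def handle_close(self):
        self.close()

if __name__ == '__main__':
    EchoClient('127.0.0.1', 7, b'hello\n')
    asyncore.loop(timeout=1)

asyncore.loop() keeps polling every channel registered in socket_map until the map is empty, which is why handle_close() must end in close() so the channel is removed and the loop can terminate.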
@@ -31,6 +31,7 @@ import traceback
 from threading import Lock
 
 from .server.mytime import MyTime
+import importlib
 
 try:
 	import ctypes
@@ -47,30 +48,6 @@ if PREFER_ENC.startswith('ANSI_'): # pragma: no cover
 elif all((os.getenv(v) in (None, "") for v in ('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG'))):
 	PREFER_ENC = 'UTF-8';
 
-# py-2.x: try to minimize influence of sporadic conversion errors on python 2.x,
-# caused by implicit converting of string/unicode (e. g. `str(u"\uFFFD")` produces an error
-# if default encoding is 'ascii');
-if sys.version_info < (3,): # pragma: 3.x no cover
-	# correct default (global system) encoding (mostly UTF-8):
-	def __resetDefaultEncoding(encoding):
-		global PREFER_ENC
-		ode = sys.getdefaultencoding().upper()
-		if ode == 'ASCII' and ode != PREFER_ENC.upper():
-			# setdefaultencoding is normally deleted after site initialized, so hack-in using load of sys-module:
-			_sys = sys
-			if not hasattr(_sys, "setdefaultencoding"):
-				try:
-					from imp import load_dynamic as __ldm
-					_sys = __ldm('_sys', 'sys')
-				except ImportError: # pragma: no cover - only if load_dynamic fails
-					reload(sys)
-					_sys = sys
-			if hasattr(_sys, "setdefaultencoding"):
-				_sys.setdefaultencoding(encoding)
-	# override to PREFER_ENC:
-	__resetDefaultEncoding(PREFER_ENC)
-	del __resetDefaultEncoding
-
 # todo: rewrite explicit (and implicit) str-conversions via encode/decode with IO-encoding (sys.stdout.encoding),
 # e. g. inside tags-replacement by command-actions, etc.
@@ -84,8 +61,7 @@ if sys.version_info < (3,): # pragma: 3.x no cover
 # [True, True, False]; # -- python2
 # [True, False, True]; # -- python3
 #
-if sys.version_info >= (3,): # pragma: 2.x no cover
-	def uni_decode(x, enc=PREFER_ENC, errors='strict'):
-		try:
-			if isinstance(x, bytes):
-				return x.decode(enc, errors)
+def uni_decode(x, enc=PREFER_ENC, errors='strict'):
+	try:
+		if isinstance(x, bytes):
+			return x.decode(enc, errors)
@@ -94,31 +70,15 @@ if sys.version_info >= (3,): # pragma: 2.x no cover
 		if errors != 'strict':
 			raise
 		return x.decode(enc, 'replace')
 def uni_string(x):
 	if not isinstance(x, bytes):
 		return str(x)
 	return x.decode(PREFER_ENC, 'replace')
-else: # pragma: 3.x no cover
-	def uni_decode(x, enc=PREFER_ENC, errors='strict'):
-		try:
-			if isinstance(x, unicode):
-				return x.encode(enc, errors)
-			return x
-		except (UnicodeDecodeError, UnicodeEncodeError): # pragma: no cover - unsure if reachable
-			if errors != 'strict':
-				raise
-			return x.encode(enc, 'replace')
-	if sys.getdefaultencoding().upper() != 'UTF-8': # pragma: no cover - utf-8 is default encoding now
-		def uni_string(x):
-			if not isinstance(x, unicode):
-				return str(x)
-			return x.encode(PREFER_ENC, 'replace')
-	else:
-		uni_string = str
+def uni_bytes(x):
+	return bytes(x, 'UTF-8')
 
 
 def _as_bool(val):
-	return bool(val) if not isinstance(val, basestring) \
+	return bool(val) if not isinstance(val, str) \
 		else val.lower() in ('1', 'on', 'true', 'yes')
@@ -223,11 +183,6 @@ def __stopOnIOError(logSys=None, logHndlr=None): # pragma: no cover
 		pass
 	sys.exit(0)
 
-try:
-	BrokenPipeError = BrokenPipeError
-except NameError: # pragma: 3.x no cover
-	BrokenPipeError = IOError
-
 __origLog = logging.Logger._log
 def __safeLog(self, level, msg, args, **kwargs):
 	"""Safe log inject to avoid possible errors by unsafe log-handlers,
@@ -327,38 +282,19 @@ def splitwords(s):
 	"""
 	if not s:
 		return []
-	return filter(bool, map(lambda v: v.strip(), re.split('[ ,\n]+', s)))
+	return list(filter(bool, [v.strip() for v in re.split(r'[\s,]+', s)]))
 
-if sys.version_info >= (3,5):
-	eval(compile(r'''if 1:
-	def _merge_dicts(x, y):
-		"""Helper to merge dicts.
-		"""
-		if y:
-			return {**x, **y}
-		return x
-
-	def _merge_copy_dicts(x, y):
-		"""Helper to merge dicts to guarantee a copy result (r is never x).
-		"""
-		return {**x, **y}
-	''', __file__, 'exec'))
-else:
-	def _merge_dicts(x, y):
-		"""Helper to merge dicts.
-		"""
-		r = x
-		if y:
-			r = x.copy()
-			r.update(y)
-		return r
-	def _merge_copy_dicts(x, y):
-		"""Helper to merge dicts to guarantee a copy result (r is never x).
-		"""
-		r = x.copy()
-		if y:
-			r.update(y)
-		return r
+def _merge_dicts(x, y):
+	"""Helper to merge dicts.
+	"""
+	if y:
+		return {**x, **y}
+	return x
+
+def _merge_copy_dicts(x, y):
+	"""Helper to merge dicts to guarantee a copy result (r is never x).
+	"""
+	return {**x, **y}
 
 #
 # Following function used for parse options from parameter (e.g. `name[p1=0, p2="..."][p3='...']`).
@@ -444,7 +380,7 @@ def substituteRecursiveTags(inptags, conditional='',
 	while True:
 		repFlag = False
 		# substitute each value:
-		for tag in tags.iterkeys():
+		for tag in tags.keys():
 			# ignore escaped or already done (or in ignore list):
 			if tag in ignore or tag in done: continue
 			# ignore replacing callable items from calling map - should be converted on demand only (by get):
@@ -484,7 +420,7 @@ def substituteRecursiveTags(inptags, conditional='',
 					m = tre_search(value, m.end())
 					continue
 				# if calling map - be sure we've string:
-				if not isinstance(repl, basestring): repl = uni_string(repl)
+				if not isinstance(repl, str): repl = uni_string(repl)
 				value = value.replace('<%s>' % rtag, repl)
 				#logSys.log(5, 'value now: %s' % value)
 				# increment reference count:
@@ -517,10 +453,7 @@ if _libcap:
 		Side effect: name can be silently truncated to 15 bytes (16 bytes with NTS zero)
 		"""
 		try:
-			if sys.version_info >= (3,): # pragma: 2.x no cover
-				name = name.encode()
-			else: # pragma: 3.x no cover
-				name = bytes(name)
+			name = name.encode()
 			_libcap.prctl(15, name) # PR_SET_NAME = 15
 		except: # pragma: no cover
 			pass
@@ -58,6 +58,8 @@ protocol = [
 ["banned", "return jails with banned IPs as dictionary"],
 ["banned <IP> ... <IP>]", "return list(s) of jails where given IP(s) are banned"],
 ["status", "gets the current status of the server"],
+["status --all [FLAVOR]", "gets the current status of all jails, with optional flavor or extended info"],
+["stat[istic]s", "gets the current statistics of all jails as table"],
 ["ping", "tests if the server is alive"],
 ["echo", "for internal usage, returns back and outputs a given string"],
 ["help", "return this output"],
@@ -114,9 +114,9 @@ class CallingMap(MutableMapping, object):
 	def _asdict(self, calculated=False, checker=None):
 		d = dict(self.data, **self.storage)
 		if not calculated:
-			return dict((n,v) for n,v in d.iteritems() \
+			return dict((n,v) for n,v in d.items() \
 				if not callable(v) or n in self.CM_REPR_ITEMS)
-		for n,v in d.items():
+		for n,v in list(d.items()):
 			if callable(v):
 				try:
 					# calculate:
@@ -182,7 +182,7 @@ class CallingMap(MutableMapping, object):
 		return self.__class__(_merge_copy_dicts(self.data, self.storage))
 
 
-class ActionBase(object):
+class ActionBase(object, metaclass=ABCMeta):
 	"""An abstract base class for actions in Fail2Ban.
 
 	Action Base is a base definition of what methods need to be in
@@ -212,7 +206,6 @@ class ActionBase(object):
 	Any additional arguments specified in `jail.conf` or passed
 	via `fail2ban-client` will be passed as keyword arguments.
 	"""
-	__metaclass__ = ABCMeta
 
 	@classmethod
 	def __subclasshook__(cls, C):
@@ -423,7 +422,7 @@ class CommandAction(ActionBase):
 			if not callable(family): # pragma: no cover
 				return self.__substCache.get(key, {}).get(family)
 			# family as expression - use it to filter values:
-			return [v for f, v in self.__substCache.get(key, {}).iteritems() if family(f)]
+			return [v for f, v in self.__substCache.get(key, {}).items() if family(f)]
 		cmd = args[0]
 		if cmd: # set:
 			try:
@@ -435,7 +434,7 @@ class CommandAction(ActionBase):
 			try:
 				famd = self.__substCache[key]
 				cmd = famd.pop(family)
-				for family, v in famd.items():
+				for family, v in list(famd.items()):
 					if v == cmd:
 						del famd[family]
 			except KeyError: # pragma: no cover
@@ -451,7 +450,7 @@ class CommandAction(ActionBase):
 		res = True
 		err = 'Script error'
 		if not family: # all started:
-			family = [famoper for (famoper,v) in self.__started.iteritems() if v]
+			family = [famoper for (famoper,v) in self.__started.items() if v]
 		for famoper in family:
 			try:
 				cmd = self._getOperation(tag, famoper)
@@ -631,7 +630,7 @@ class CommandAction(ActionBase):
 		and executes the resulting command.
 		"""
 		# collect started families, may be started on demand (conditional):
-		family = [f for (f,v) in self.__started.iteritems() if v & 3 == 3]; # started and contains items
+		family = [f for (f,v) in self.__started.items() if v & 3 == 3]; # started and contains items
 		# if nothing contains items:
 		if not family: return True
 		# flush:
@@ -656,7 +655,7 @@ class CommandAction(ActionBase):
 		"""
 		# collect started families, if started on demand (conditional):
 		if family is None:
-			family = [f for (f,v) in self.__started.iteritems() if v]
+			family = [f for (f,v) in self.__started.items() if v]
 			# if no started (on demand) actions:
 			if not family: return True
 		self.__started = {}
@@ -690,7 +689,7 @@ class CommandAction(ActionBase):
 		ret = True
 		# for each started family:
 		if self.actioncheck:
-			for (family, started) in self.__started.items():
+			for (family, started) in list(self.__started.items()):
 				if started and not self._invariantCheck(family, beforeRepair):
 					# reset started flag and command of executed operation:
 					self.__started[family] = 0
@@ -156,11 +156,11 @@ class Actions(JailThread, Mapping):
 		else:
 			if hasattr(self, '_reload_actions'):
 				# reload actions after all parameters set via stream:
-				for name, initOpts in self._reload_actions.iteritems():
+				for name, initOpts in self._reload_actions.items():
 					if name in self._actions:
 						self._actions[name].reload(**(initOpts if initOpts else {}))
 				# remove obsolete actions (untouched by reload process):
-				delacts = OrderedDict((name, action) for name, action in self._actions.iteritems()
+				delacts = OrderedDict((name, action) for name, action in self._actions.items()
 					if name not in self._reload_actions)
 				if len(delacts):
 					# unban all tickets using removed actions only:
@@ -217,7 +217,7 @@ class Actions(JailThread, Mapping):
 			return lst
 		if len(ids) == 1:
 			return 1 if ids[0] in lst else 0
-		return map(lambda ip: 1 if ip in lst else 0, ids)
+		return [1 if ip in lst else 0 for ip in ids]
 
 	def getBanList(self, withTime=False):
 		"""Returns the list of banned IP addresses.
@@ -288,7 +288,7 @@ class Actions(JailThread, Mapping):
 		if not isinstance(ip, IPAddr):
 			ipa = IPAddr(ip)
 			if not ipa.isSingle: # subnet (mask/cidr) or raw (may be dns/hostname):
-				ips = filter(ipa.contains, self.banManager.getBanList())
+				ips = list(filter(ipa.contains, self.banManager.getBanList()))
 				if ips:
 					return self.removeBannedIP(ips, db, ifexists)
 		# not found:
@@ -305,7 +305,7 @@ class Actions(JailThread, Mapping):
 		"""
 		if actions is None:
 			actions = self._actions
-		for name, action in reversed(actions.items()):
+		for name, action in reversed(list(actions.items())):
 			try:
 				action.stop()
 			except Exception as e:
@@ -328,7 +328,7 @@ class Actions(JailThread, Mapping):
 		True when the thread exits nicely.
 		"""
 		cnt = 0
-		for name, action in self._actions.iteritems():
+		for name, action in self._actions.items():
 			try:
 				action.start()
 			except Exception as e:
@@ -505,7 +505,7 @@ class Actions(JailThread, Mapping):
 			Observers.Main.add('banFound', bTicket, self._jail, btime)
 		logSys.notice("[%s] %sBan %s", self._jail.name, ('' if not bTicket.restored else 'Restore '), ip)
 		# do actions :
-		for name, action in self._actions.iteritems():
+		for name, action in self._actions.items():
 			try:
 				if bTicket.restored and getattr(action, 'norestored', False):
 					continue
@@ -543,13 +543,13 @@ class Actions(JailThread, Mapping):
 				# avoid too often checks:
 				if not rebanacts and MyTime.time() > self.__lastConsistencyCheckTM + 3:
 					self.__lastConsistencyCheckTM = MyTime.time()
-					for action in self._actions.itervalues():
+					for action in self._actions.values():
 						if hasattr(action, 'consistencyCheck'):
 							action.consistencyCheck()
 				# check epoch in order to reban it:
 				if bTicket.banEpoch < self.banEpoch:
 					if not rebanacts: rebanacts = dict(
-						(name, action) for name, action in self._actions.iteritems()
+						(name, action) for name, action in self._actions.items()
 							if action.banEpoch > bTicket.banEpoch)
 					cnt += self.__reBan(bTicket, actions=rebanacts)
 				else: # pragma: no cover - unexpected: ticket is not banned for some reasons - reban using all actions:
@@ -576,8 +576,8 @@ class Actions(JailThread, Mapping):
 		ip = ticket.getID()
 		aInfo = self._getActionInfo(ticket)
 		if log:
-			logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % actions.keys()[0] if len(actions) == 1 else ''))
-		for name, action in actions.iteritems():
+			logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % list(actions.keys())[0] if len(actions) == 1 else ''))
+		for name, action in actions.items():
 			try:
 				logSys.debug("[%s] action %r: reban %s", self._jail.name, name, ip)
 				if not aInfo.immutable: aInfo.reset()
@@ -601,7 +601,7 @@ class Actions(JailThread, Mapping):
 		if not self.banManager._inBanList(ticket): return
 		# do actions :
 		aInfo = None
-		for name, action in self._actions.iteritems():
+		for name, action in self._actions.items():
 			try:
 				if ticket.restored and getattr(action, 'norestored', False):
 					continue
@@ -650,7 +650,7 @@ class Actions(JailThread, Mapping):
 		cnt = 0
 		# first we'll execute flush for actions supporting this operation:
 		unbactions = {}
-		for name, action in (actions if actions is not None else self._actions).iteritems():
+		for name, action in (actions if actions is not None else self._actions).items():
 			try:
 				if hasattr(action, 'flush') and (not isinstance(action, CommandAction) or action.actionflush):
 					logSys.notice("[%s] Flush ticket(s) with %s", self._jail.name, name)
@@ -670,7 +670,7 @@ class Actions(JailThread, Mapping):
 						action.consistencyCheck(_beforeRepair)
 					continue
 			# fallback to single unbans:
-			logSys.debug(" Unban tickets each individualy")
+			logSys.debug(" Unban tickets each individually")
 			unbactions[name] = action
 		actions = unbactions
 		# flush the database also:
@@ -705,7 +705,7 @@ class Actions(JailThread, Mapping):
 		aInfo = self._getActionInfo(ticket)
 		if log:
 			logSys.notice("[%s] Unban %s", self._jail.name, ip)
-		for name, action in unbactions.iteritems():
+		for name, action in unbactions.items():
 			try:
 				logSys.debug("[%s] action %r: unban %s", self._jail.name, name, ip)
 				if not aInfo.immutable: aInfo.reset()
@@ -721,9 +721,11 @@ class Actions(JailThread, Mapping):
 		"""Status of current and total ban counts and current banned IP list.
 		"""
 		# TODO: Allow this list to be printed as 'status' output
-		supported_flavors = ["short", "basic", "cymru"]
+		supported_flavors = ["short", "basic", "stats", "cymru"]
 		if flavor is None or flavor not in supported_flavors:
 			logSys.warning("Unsupported extended jail status flavor %r. Supported: %s" % (flavor, supported_flavors))
+		if flavor == "stats":
+			return (self.banManager.size(), self.banManager.getBanTotal())
 		# Always print this information (basic)
 		if flavor != "short":
 			banned = self.banManager.getBanList()
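Most of the hunks above mechanically replace the Python 2 iteritems()/itervalues()/iterkeys() calls with items()/values()/keys(); where the dictionary is mutated while being walked, the view is additionally materialized with list(...). A small illustrative snippet (not taken from the commit) of why that wrapper matters on Python 3:

# Illustrative only: in Python 3, .items() returns a live view, so deleting
# keys while iterating over it raises RuntimeError; materializing the view
# into a list first makes the deletion safe.
famd = {'inet4': 'cmd-a', 'inet6': 'cmd-a'}
cmd = famd.pop('inet4')
for family, v in list(famd.items()):  # without list(...) this could raise RuntimeError
    if v == cmd:
        del famd[family]
print(famd)  # -> {}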
@@ -25,8 +25,14 @@ __copyright__ = "Copyright (c) 2004 Cyril Jaquier"
 __license__ = "GPL"
 
 from pickle import dumps, loads, HIGHEST_PROTOCOL
-import asynchat
-import asyncore
+try:
+	import asynchat
+except ImportError:
+	from ..compat import asynchat
+try:
+	import asyncore
+except ImportError:
+	from ..compat import asyncore
 import errno
 import fcntl
 import os
@@ -178,7 +184,7 @@ def loop(active, timeout=None, use_poll=False, err_count=None):
 			elif err_count['listen'] > 100: # pragma: no cover - normally unreachable
 				if (
 					e.args[0] == errno.EMFILE # [Errno 24] Too many open files
-					or sum(err_count.itervalues()) > 1000
+					or sum(err_count.values()) > 1000
 				):
 					logSys.critical("Too many errors - critical count reached %r", err_count)
 					break
@@ -220,7 +226,7 @@ class AsyncServer(asyncore.dispatcher):
 		elif self.__errCount['accept'] > 100:
 			if (
 				(isinstance(e, socket.error) and e.args[0] == errno.EMFILE) # [Errno 24] Too many open files
-				or sum(self.__errCount.itervalues()) > 1000
+				or sum(self.__errCount.values()) > 1000
 			):
 				logSys.critical("Too many errors - critical count reached %r", self.__errCount)
 				self.stop()
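The first hunk above replaces the bare stdlib imports with a guarded form. A short sketch of the same pattern in isolation (the version note is background, not part of the change itself):

# Prefer the stdlib module where it still exists; asyncore/asynchat were
# removed from the standard library in Python 3.12, so fall back to the
# copy vendored under fail2ban.compat in that case.
try:
    import asyncore
except ImportError:
    from fail2ban.compat import asyncore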
@@ -103,7 +103,7 @@ class BanManager:
 			return list(self.__banList.keys())
 		with self.__lock:
 			lst = []
-			for ticket in self.__banList.itervalues():
+			for ticket in self.__banList.values():
 				eob = ticket.getEndOfBanTime(self.__banTime)
 				lst.append((ticket,eob))
 		lst.sort(key=lambda t: t[1])
@@ -161,7 +161,7 @@ class BanManager:
 			return return_dict
 		# get ips in lock:
 		with self.__lock:
-			banIPs = [banData.getIP() for banData in self.__banList.values()]
+			banIPs = [banData.getIP() for banData in list(self.__banList.values())]
 		# get cymru info:
 		try:
 			for ip in banIPs:
@@ -333,7 +333,7 @@ class BanManager:
 			# Gets the list of ticket to remove (thereby correct next unban time).
 			unBanList = {}
 			nextUnbanTime = BanTicket.MAX_TIME
-			for fid,ticket in self.__banList.iteritems():
+			for fid,ticket in self.__banList.items():
 				# current time greater as end of ban - timed out:
 				eob = ticket.getEndOfBanTime(self.__banTime)
 				if time > eob:
@@ -349,15 +349,15 @@ class BanManager:
 			if len(unBanList):
 				if len(unBanList) / 2.0 <= len(self.__banList) / 3.0:
 					# few as 2/3 should be removed - remove particular items:
-					for fid in unBanList.iterkeys():
+					for fid in unBanList.keys():
 						del self.__banList[fid]
 				else:
 					# create new dictionary without items to be deleted:
-					self.__banList = dict((fid,ticket) for fid,ticket in self.__banList.iteritems() \
+					self.__banList = dict((fid,ticket) for fid,ticket in self.__banList.items() \
 						if fid not in unBanList)
 
 		# return list of tickets:
-		return unBanList.values()
+		return list(unBanList.values())
 
 	##
 	# Flush the ban list.
@@ -367,7 +367,7 @@ class BanManager:
 
 	def flushBanList(self):
 		with self.__lock:
-			uBList = self.__banList.values()
+			uBList = list(self.__banList.values())
 			self.__banList = dict()
 			return uBList
 
@@ -45,8 +45,7 @@ def _json_default(x):
 		x = list(x)
 	return uni_string(x)
 
-if sys.version_info >= (3,): # pragma: 2.x no cover
-	def _json_dumps_safe(x):
+def _json_dumps_safe(x):
 	try:
 		x = json.dumps(x, ensure_ascii=False, default=_json_default).encode(
 			PREFER_ENC, 'replace')
@@ -56,37 +55,7 @@ if sys.version_info >= (3,): # pragma: 2.x no cover
 		x = '{}'
 	return x
 
 def _json_loads_safe(x):
-		try:
-			x = json.loads(x.decode(PREFER_ENC, 'replace'))
-		except Exception as e:
-			# converter handler should be exception-safe
-			logSys.error('json loads failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
-			x = {}
-		return x
-else: # pragma: 3.x no cover
-	def _normalize(x):
-		if isinstance(x, dict):
-			return dict((_normalize(k), _normalize(v)) for k, v in x.iteritems())
-		elif isinstance(x, (list, set)):
-			return [_normalize(element) for element in x]
-		elif isinstance(x, unicode):
-			# in 2.x default text_factory is unicode - so return proper unicode here:
-			return x.encode(PREFER_ENC, 'replace').decode(PREFER_ENC)
-		elif isinstance(x, basestring):
-			return x.decode(PREFER_ENC, 'replace')
-		return x
-
-	def _json_dumps_safe(x):
-		try:
-			x = json.dumps(_normalize(x), ensure_ascii=False, default=_json_default)
-		except Exception as e:
-			# adapter handler should be exception-safe
-			logSys.error('json dumps failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
-			x = '{}'
-		return x
-
-	def _json_loads_safe(x):
 	try:
 		x = json.loads(x.decode(PREFER_ENC, 'replace'))
 	except Exception as e:
@@ -135,7 +104,7 @@ class Fail2BanDb(object):
 	sqlite3.OperationalError
 		Error connecting/creating a SQLite3 database.
 	RuntimeError
-		If exisiting database fails to update to new schema.
+		If existing database fails to update to new schema.
 
 	Attributes
 	----------
@@ -525,7 +494,7 @@ class Fail2BanDb(object):
 		Parameters
 		----------
 		jail : Jail
-			If specified, will only reutrn logs belonging to the jail.
+			If specified, will only return logs belonging to the jail.
 
 		Returns
 		-------
@@ -227,8 +227,10 @@ class DateEpoch(DateTemplate):
 		self.name = "LongEpoch" if not pattern else pattern
 		epochRE = r"\d{10,11}(?:\d{3}(?:\.\d{1,6}|\d{3})?)?"
 		if pattern:
-			# pattern should capture/cut out the whole match:
-			regex = "(" + RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern) + ")"
+			# pattern should find the whole pattern, but cut out grouped match (or whole match if no groups specified):
+			regex = RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern)
+			if not RE_GROUPED.search(pattern):
+				regex = "(" + regex + ")"
 			self._grpIdx = 2
 			self.setRegex(regex)
 		elif not lineBeginOnly:
@@ -355,7 +357,7 @@ class DatePatternRegex(DateTemplate):
 
 
 class DateTai64n(DateTemplate):
-	"""A date template which matches TAI64N formate timestamps.
+	"""A date template which matches TAI64N format timestamps.
 
 	Attributes
 	----------
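The DateEpoch hunk above changes how a custom epoch pattern is wrapped: the extra capturing group is only added when the supplied pattern does not already contain one. A rough, self-contained illustration of that decision (the helper name, the {EPOCH} placeholder handling and the group-detection regex below are simplified stand-ins, not the fail2ban implementation):

# Hypothetical sketch of "wrap only if the user pattern has no group":
import re

RE_GROUPED = re.compile(r'(?<!\\)\((?!\?:)')  # rough check for an unescaped capturing group
epochRE = r"\d{10,11}(?:\d{3}(?:\.\d{1,6}|\d{3})?)?"

def build_epoch_regex(pattern):
    # substitute the placeholder with the (already grouped) epoch expression
    regex = pattern.replace("{EPOCH}", "(%s)" % epochRE)
    if not RE_GROUPED.search(pattern):
        # no explicit group in the user pattern - capture the whole match instead
        regex = "(" + regex + ")"
    return regex

print(build_epoch_regex(r"^\[{EPOCH}\]"))            # wrapped in an extra outer group
print(build_epoch_regex(r"data=(\d+) ts={EPOCH}"))   # user already grouped - no extra wrap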
@@ -55,7 +55,7 @@ class FailManager:
 	def getFailCount(self):
 		# may be slow on large list of failures, should be used for test purposes only...
 		with self.__lock:
-			return len(self.__failList), sum([f.getRetry() for f in self.__failList.values()])
+			return len(self.__failList), sum([f.getRetry() for f in list(self.__failList.values())])
 
 	def setMaxRetry(self, value):
 		self.__maxRetry = value
@@ -116,7 +116,7 @@ class FailManager:
 				# in case of having many active failures, it should be ran only
 				# if debug level is "low" enough
 				failures_summary = ', '.join(['%s:%d' % (k, v.getRetry())
-					for k,v in self.__failList.iteritems()])
+					for k,v in self.__failList.items()])
 				logSys.log(logLevel, "Total # of detected failures: %d. Current failures from %d IPs (IP:count): %s"
 					% (self.__failTotal, len(self.__failList), failures_summary))
 
@@ -129,7 +129,7 @@ class FailManager:
 	def cleanup(self, time):
 		time -= self.__maxTime
 		with self.__lock:
-			todelete = [fid for fid,item in self.__failList.iteritems() \
+			todelete = [fid for fid,item in self.__failList.items() \
 				if item.getTime() <= time]
 			if len(todelete) == len(self.__failList):
 				# remove all:
@@ -143,7 +143,7 @@ class FailManager:
 					del self.__failList[fid]
 			else:
 				# create new dictionary without items to be deleted:
-				self.__failList = dict((fid,item) for fid,item in self.__failList.iteritems() \
+				self.__failList = dict((fid,item) for fid,item in self.__failList.items() \
 					if item.getTime() > time)
 		self.__bgSvc.service()
 
@@ -22,7 +22,6 @@ __copyright__ = "Copyright (c) 2004 Cyril Jaquier"
 __license__ = "GPL"
 
 import re
-import sre_constants
 import sys
 
 from .ipdns import IPAddr
@@ -143,9 +142,7 @@ class Regex:
 			self._regex = regex
 			self._altValues = []
 			self._tupleValues = []
-			for k in filter(
-				lambda k: len(k) > len(COMPLNAME_PRE[0]), self._regexObj.groupindex
-			):
+			for k in [k for k in self._regexObj.groupindex if len(k) > len(COMPLNAME_PRE[0])]:
 				n = COMPLNAME_CRE.match(k)
 				if n:
 					g, n = n.group(1), mapTag2Opt(n.group(2))
@@ -157,7 +154,7 @@ class Regex:
 			self._tupleValues.sort()
 			self._altValues = self._altValues if len(self._altValues) else None
 			self._tupleValues = self._tupleValues if len(self._tupleValues) else None
-		except sre_constants.error as e:
+		except re.error as e:
 			raise RegexException("Unable to compile regular expression '%s':\n%s" %
 				(regex, e))
 		# set fetch handler depending on presence of alternate (or tuple) tags:
@@ -235,7 +232,7 @@ class Regex:
 	#
 	@staticmethod
 	def _tupleLinesBuf(tupleLines):
-		return "\n".join(map(lambda v: "".join(v[::2]), tupleLines)) + "\n"
+		return "\n".join(["".join(v[::2]) for v in tupleLines]) + "\n"
 
 	##
 	# Searches the regular expression.
@@ -243,11 +240,11 @@ class Regex:
 	# Sets an internal cache (match object) in order to avoid searching for
 	# the pattern again. This method must be called before calling any other
 	# method of this object.
-	# @param a list of tupples. The tupples are ( prematch, datematch, postdatematch )
+	# @param a list of tuples. The tuples are ( prematch, datematch, postdatematch )
 
 	def search(self, tupleLines, orgLines=None):
 		buf = tupleLines
-		if not isinstance(tupleLines, basestring):
+		if not isinstance(tupleLines, str):
 			buf = Regex._tupleLinesBuf(tupleLines)
 		self._matchCache = self._regexObj.search(buf)
 		if self._matchCache:
@ -307,7 +307,7 @@ class Filter(JailThread):
|
||||||
dd = DateDetector()
|
dd = DateDetector()
|
||||||
dd.default_tz = self.__logtimezone
|
dd.default_tz = self.__logtimezone
|
||||||
if not isinstance(pattern, (list, tuple)):
|
if not isinstance(pattern, (list, tuple)):
|
||||||
pattern = filter(bool, map(str.strip, re.split('\n+', pattern)))
|
pattern = list(filter(bool, list(map(str.strip, re.split('\n+', pattern)))))
|
||||||
for pattern in pattern:
|
for pattern in pattern:
|
||||||
dd.appendTemplate(pattern)
|
dd.appendTemplate(pattern)
|
||||||
self.dateDetector = dd
|
self.dateDetector = dd
|
||||||
|
@ -635,7 +635,7 @@ class Filter(JailThread):
|
||||||
e = m.end(1)
|
e = m.end(1)
|
||||||
m = line[s:e]
|
m = line[s:e]
|
||||||
tupleLine = (line[:s], m, line[e:])
|
tupleLine = (line[:s], m, line[e:])
|
||||||
if m: # found and not empty - retrive date:
|
if m: # found and not empty - retrieve date:
|
||||||
date = self.dateDetector.getTime(m, timeMatch)
|
date = self.dateDetector.getTime(m, timeMatch)
|
||||||
if date is not None:
|
if date is not None:
|
||||||
# Lets get the time part
|
# Lets get the time part
|
||||||
|
@ -666,7 +666,7 @@ class Filter(JailThread):
|
||||||
if self.checkFindTime and date is not None:
|
if self.checkFindTime and date is not None:
|
||||||
# if in operation (modifications have been really found):
|
# if in operation (modifications have been really found):
|
||||||
if self.inOperation:
|
if self.inOperation:
|
||||||
# if weird date - we'd simulate now for timeing issue (too large deviation from now):
|
# if weird date - we'd simulate now for timing issue (too large deviation from now):
|
||||||
delta = int(date - MyTime.time())
|
delta = int(date - MyTime.time())
|
||||||
if abs(delta) > 60:
|
if abs(delta) > 60:
|
||||||
# log timing issue as warning once per day:
|
# log timing issue as warning once per day:
|
||||||
|
@ -800,7 +800,7 @@ class Filter(JailThread):
|
||||||
if (nfflgs & 4) == 0 and not mlfidGroups.get('mlfpending', 0):
|
if (nfflgs & 4) == 0 and not mlfidGroups.get('mlfpending', 0):
|
||||||
mlfidGroups.pop("matches", None)
|
mlfidGroups.pop("matches", None)
|
||||||
# overwrite multi-line failure with all values, available in fail:
|
# overwrite multi-line failure with all values, available in fail:
|
||||||
mlfidGroups.update(((k,v) for k,v in fail.iteritems() if v is not None))
|
mlfidGroups.update(((k,v) for k,v in fail.items() if v is not None))
|
||||||
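A minimal sketch (not part of the diff) of the dict-merge idiom above under Python 3; the failure data is hypothetical.
# sketch, not part of the diff; dict contents are hypothetical
fail = {'user': 'root', 'ip4': '192.0.2.1', 'ip-rev': None}
mlfidGroups = {'user': None, 'mlfpending': 1}
# dict.iteritems() no longer exists in Python 3; items() returns a view that
# iterates lazily just the same:
mlfidGroups.update(((k, v) for k, v in fail.items() if v is not None))
# mlfidGroups == {'user': 'root', 'mlfpending': 1, 'ip4': '192.0.2.1'}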
# new merged failure data:
|
# new merged failure data:
|
||||||
fail = mlfidGroups
|
fail = mlfidGroups
|
||||||
# if forget (disconnect/reset) - remove cached entry:
|
# if forget (disconnect/reset) - remove cached entry:
|
||||||
|
@ -944,7 +944,7 @@ class Filter(JailThread):
|
||||||
ip = fid
|
ip = fid
|
||||||
raw = True
|
raw = True
|
||||||
# if mlfid case (not failure):
|
# if mlfid case (not failure):
|
||||||
if ip is None:
|
if fid is None and ip is None:
|
||||||
if ll <= 7: logSys.log(7, "No failure-id by mlfid %r in regex %s: %s",
|
if ll <= 7: logSys.log(7, "No failure-id by mlfid %r in regex %s: %s",
|
||||||
mlfid, failRegexIndex, fail.get('mlfforget', "waiting for identifier"))
|
mlfid, failRegexIndex, fail.get('mlfforget', "waiting for identifier"))
|
||||||
fail['mlfpending'] = 1; # mark failure is pending
|
fail['mlfpending'] = 1; # mark failure is pending
|
||||||
|
@ -978,6 +978,8 @@ class Filter(JailThread):
|
||||||
def status(self, flavor="basic"):
|
def status(self, flavor="basic"):
|
||||||
"""Status of failures detected by filter.
|
"""Status of failures detected by filter.
|
||||||
"""
|
"""
|
||||||
|
if flavor == "stats":
|
||||||
|
return (self.failManager.size(), self.failManager.getFailTotal())
|
||||||
ret = [("Currently failed", self.failManager.size()),
|
ret = [("Currently failed", self.failManager.size()),
|
||||||
("Total failed", self.failManager.getFailTotal())]
|
("Total failed", self.failManager.getFailTotal())]
|
||||||
return ret
|
return ret
|
||||||
|
@ -1045,7 +1047,7 @@ class FileFilter(Filter):
|
||||||
# @return log paths
|
# @return log paths
|
||||||
|
|
||||||
def getLogPaths(self):
|
def getLogPaths(self):
|
||||||
return self.__logs.keys()
|
return list(self.__logs.keys())
|
||||||
|
|
||||||
##
|
##
|
||||||
# Get the log containers
|
# Get the log containers
|
||||||
|
@ -1053,7 +1055,7 @@ class FileFilter(Filter):
|
||||||
# @return log containers
|
# @return log containers
|
||||||
|
|
||||||
def getLogs(self):
|
def getLogs(self):
|
||||||
return self.__logs.values()
|
return list(self.__logs.values())
|
||||||
|
|
||||||
##
|
##
|
||||||
# Get the count of log containers
|
# Get the count of log containers
|
||||||
|
@ -1079,7 +1081,7 @@ class FileFilter(Filter):
|
||||||
|
|
||||||
def setLogEncoding(self, encoding):
|
def setLogEncoding(self, encoding):
|
||||||
encoding = super(FileFilter, self).setLogEncoding(encoding)
|
encoding = super(FileFilter, self).setLogEncoding(encoding)
|
||||||
for log in self.__logs.itervalues():
|
for log in self.__logs.values():
|
||||||
log.setEncoding(encoding)
|
log.setEncoding(encoding)
|
||||||
|
|
||||||
def getLog(self, path):
|
def getLog(self, path):
|
||||||
|
@ -1255,7 +1257,9 @@ class FileFilter(Filter):
|
||||||
"""Status of Filter plus files being monitored.
|
"""Status of Filter plus files being monitored.
|
||||||
"""
|
"""
|
||||||
ret = super(FileFilter, self).status(flavor=flavor)
|
ret = super(FileFilter, self).status(flavor=flavor)
|
||||||
path = self.__logs.keys()
|
if flavor == "stats":
|
||||||
|
return ret
|
||||||
|
path = list(self.__logs.keys())
|
||||||
ret.append(("File list", path))
|
ret.append(("File list", path))
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
@ -1277,7 +1281,7 @@ class FileFilter(Filter):
|
||||||
if self._pendDBUpdates and self.jail.database:
|
if self._pendDBUpdates and self.jail.database:
|
||||||
self._updateDBPending()
|
self._updateDBPending()
|
||||||
# stop files monitoring:
|
# stop files monitoring:
|
||||||
for path in self.__logs.keys():
|
for path in list(self.__logs.keys()):
|
||||||
self.delLogPath(path)
|
self.delLogPath(path)
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
|
@ -1530,7 +1534,7 @@ class FileContainer:
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
return self
|
return self
|
||||||
def next(self):
|
def __next__(self):
|
||||||
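A short sketch (not part of the diff) of the Python 3 iterator protocol that motivates the rename; the container below is hypothetical, not the FileContainer itself.
class LineContainer:
	# sketch, not part of the diff
	def __init__(self, lines):
		self._lines = list(lines)
		self._pos = 0
	def __iter__(self):
		return self
	def __next__(self):            # Python 2 looked for next() instead
		if self._pos >= len(self._lines):
			raise StopIteration
		line = self._lines[self._pos]
		self._pos += 1
		return line

print(list(LineContainer(["a", "b"])))   # ['a', 'b']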
line = self.readline()
|
line = self.readline()
|
||||||
if line is None:
|
if line is None:
|
||||||
self.close()
|
self.close()
|
||||||
|
|
|
@ -1,136 +0,0 @@
|
||||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
|
||||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
|
||||||
|
|
||||||
# This file is part of Fail2Ban.
|
|
||||||
#
|
|
||||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation; either version 2 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Fail2Ban is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Fail2Ban; if not, write to the Free Software
|
|
||||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
||||||
|
|
||||||
# Author: Cyril Jaquier, Yaroslav Halchenko
|
|
||||||
|
|
||||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
|
||||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2012 Yaroslav Halchenko"
|
|
||||||
__license__ = "GPL"
|
|
||||||
|
|
||||||
import fcntl
|
|
||||||
import time
|
|
||||||
|
|
||||||
import gamin
|
|
||||||
|
|
||||||
from .failmanager import FailManagerEmpty
|
|
||||||
from .filter import FileFilter
|
|
||||||
from .mytime import MyTime
|
|
||||||
from .utils import Utils
|
|
||||||
from ..helpers import getLogger
|
|
||||||
|
|
||||||
# Gets the instance of the logger.
|
|
||||||
logSys = getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
##
|
|
||||||
# Log reader class.
|
|
||||||
#
|
|
||||||
# This class reads a log file and detects login failures or anything else
|
|
||||||
# that matches a given regular expression. This class is instanciated by
|
|
||||||
# a Jail object.
|
|
||||||
|
|
||||||
class FilterGamin(FileFilter):
|
|
||||||
|
|
||||||
##
|
|
||||||
# Constructor.
|
|
||||||
#
|
|
||||||
# Initialize the filter object with default values.
|
|
||||||
# @param jail the jail object
|
|
||||||
|
|
||||||
def __init__(self, jail):
|
|
||||||
FileFilter.__init__(self, jail)
|
|
||||||
# Gamin monitor
|
|
||||||
self.monitor = gamin.WatchMonitor()
|
|
||||||
fd = self.monitor.get_fd()
|
|
||||||
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
|
|
||||||
fcntl.fcntl(fd, fcntl.F_SETFD, flags|fcntl.FD_CLOEXEC)
|
|
||||||
logSys.debug("Created FilterGamin")
|
|
||||||
|
|
||||||
def callback(self, path, event):
|
|
||||||
logSys.log(4, "Got event: " + repr(event) + " for " + path)
|
|
||||||
if event in (gamin.GAMCreated, gamin.GAMChanged, gamin.GAMExists):
|
|
||||||
logSys.debug("File changed: " + path)
|
|
||||||
|
|
||||||
self.ticks += 1
|
|
||||||
self.getFailures(path)
|
|
||||||
|
|
||||||
##
|
|
||||||
# Add a log file path
|
|
||||||
#
|
|
||||||
# @param path log file path
|
|
||||||
|
|
||||||
def _addLogPath(self, path):
|
|
||||||
self.monitor.watch_file(path, self.callback)
|
|
||||||
|
|
||||||
##
|
|
||||||
# Delete a log path
|
|
||||||
#
|
|
||||||
# @param path the log file to delete
|
|
||||||
|
|
||||||
def _delLogPath(self, path):
|
|
||||||
self.monitor.stop_watch(path)
|
|
||||||
|
|
||||||
def _handleEvents(self):
|
|
||||||
ret = False
|
|
||||||
mon = self.monitor
|
|
||||||
while mon and mon.event_pending() > 0:
|
|
||||||
mon.handle_events()
|
|
||||||
mon = self.monitor
|
|
||||||
ret = True
|
|
||||||
return ret
|
|
||||||
|
|
||||||
##
|
|
||||||
# Main loop.
|
|
||||||
#
|
|
||||||
# This function is the main loop of the thread. It checks if the
|
|
||||||
# file has been modified and looks for failures.
|
|
||||||
# @return True when the thread exits nicely
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
# Gamin needs a loop to collect and dispatch events
|
|
||||||
while self.active:
|
|
||||||
if self.idle:
|
|
||||||
# wait a little bit here for not idle, to prevent hi-load:
|
|
||||||
if not Utils.wait_for(lambda: not self.active or not self.idle,
|
|
||||||
self.sleeptime * 10, self.sleeptime
|
|
||||||
):
|
|
||||||
self.ticks += 1
|
|
||||||
continue
|
|
||||||
Utils.wait_for(lambda: not self.active or self._handleEvents(),
|
|
||||||
self.sleeptime)
|
|
||||||
self.ticks += 1
|
|
||||||
if self.ticks % 10 == 0:
|
|
||||||
self.performSvc()
|
|
||||||
|
|
||||||
logSys.debug("[%s] filter terminated", self.jailName)
|
|
||||||
return True
|
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
super(FilterGamin, self).stop()
|
|
||||||
self.__cleanup()
|
|
||||||
|
|
||||||
##
|
|
||||||
# Desallocates the resources used by Gamin.
|
|
||||||
|
|
||||||
def __cleanup(self):
|
|
||||||
if not self.monitor:
|
|
||||||
return
|
|
||||||
for filename in self.getLogPaths():
|
|
||||||
self.monitor.stop_watch(filename)
|
|
||||||
self.monitor = None
|
|
|
@ -173,4 +173,4 @@ class FilterPoll(FileFilter):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def getPendingPaths(self):
|
def getPendingPaths(self):
|
||||||
return self.__file404Cnt.keys()
|
return list(self.__file404Cnt.keys())
|
||||||
|
|
|
@ -155,7 +155,7 @@ class FilterPyinotify(FileFilter):
|
||||||
except KeyError: pass
|
except KeyError: pass
|
||||||
|
|
||||||
def getPendingPaths(self):
|
def getPendingPaths(self):
|
||||||
return self.__pending.keys()
|
return list(self.__pending.keys())
|
||||||
|
|
||||||
def _checkPending(self):
|
def _checkPending(self):
|
||||||
if not self.__pending:
|
if not self.__pending:
|
||||||
|
@ -173,7 +173,9 @@ class FilterPyinotify(FileFilter):
|
||||||
if not chkpath(path): # not found - prolong for next time
|
if not chkpath(path): # not found - prolong for next time
|
||||||
if retardTM < 60: retardTM *= 2
|
if retardTM < 60: retardTM *= 2
|
||||||
if minTime > retardTM: minTime = retardTM
|
if minTime > retardTM: minTime = retardTM
|
||||||
|
try:
|
||||||
self.__pending[path][0] = retardTM
|
self.__pending[path][0] = retardTM
|
||||||
|
except KeyError: pass
|
||||||
continue
|
continue
|
||||||
logSys.log(logging.MSG, "Log presence detected for %s %s",
|
logSys.log(logging.MSG, "Log presence detected for %s %s",
|
||||||
"directory" if isDir else "file", path)
|
"directory" if isDir else "file", path)
|
||||||
|
@ -181,7 +183,7 @@ class FilterPyinotify(FileFilter):
|
||||||
self.__pendingChkTime = time.time()
|
self.__pendingChkTime = time.time()
|
||||||
self.__pendingMinTime = minTime
|
self.__pendingMinTime = minTime
|
||||||
# process now because we've missed it in monitoring:
|
# process now because we've missed it in monitoring:
|
||||||
for path, isDir in found.iteritems():
|
for path, isDir in found.items():
|
||||||
self._delPending(path)
|
self._delPending(path)
|
||||||
# refresh monitoring of this:
|
# refresh monitoring of this:
|
||||||
if isDir is not None:
|
if isDir is not None:
|
||||||
|
|
|
@ -253,7 +253,7 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
|
||||||
return ((logline[:0], date[0] + ' ', logline.replace('\n', '\\n')), date[1])
|
return ((logline[:0], date[0] + ' ', logline.replace('\n', '\\n')), date[1])
|
||||||
|
|
||||||
def seekToTime(self, date):
|
def seekToTime(self, date):
|
||||||
if isinstance(date, (int, long)):
|
if isinstance(date, int):
|
||||||
date = float(date)
|
date = float(date)
|
||||||
self.__journal.seek_realtime(date)
|
self.__journal.seek_realtime(date)
|
||||||
|
|
||||||
|
@ -344,7 +344,7 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
|
||||||
except OSError:
|
except OSError:
|
||||||
pass
|
pass
|
||||||
if self.idle:
|
if self.idle:
|
||||||
# because journal.wait will returns immediatelly if we have records in journal,
|
# because journal.wait will return immediately if we have records in journal,
|
||||||
# just wait a little bit here for not idle, to prevent hi-load:
|
# just wait a little bit here for not idle, to prevent hi-load:
|
||||||
if not Utils.wait_for(lambda: not self.active or not self.idle,
|
if not Utils.wait_for(lambda: not self.active or not self.idle,
|
||||||
self.sleeptime * 10, self.sleeptime
|
self.sleeptime * 10, self.sleeptime
|
||||||
|
@ -429,12 +429,14 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
|
||||||
|
|
||||||
def status(self, flavor="basic"):
|
def status(self, flavor="basic"):
|
||||||
ret = super(FilterSystemd, self).status(flavor=flavor)
|
ret = super(FilterSystemd, self).status(flavor=flavor)
|
||||||
|
if flavor == "stats":
|
||||||
|
return ret
|
||||||
ret.append(("Journal matches",
|
ret.append(("Journal matches",
|
||||||
[" + ".join(" ".join(match) for match in self.__matches)]))
|
[" + ".join(" ".join(match) for match in self.__matches)]))
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def _updateDBPending(self):
|
def _updateDBPending(self):
|
||||||
"""Apply pending updates (jornal position) to database.
|
"""Apply pending updates (journal position) to database.
|
||||||
"""
|
"""
|
||||||
db = self.jail.database
|
db = self.jail.database
|
||||||
while True:
|
while True:
|
||||||
|
|
|
@ -92,14 +92,14 @@ class DNSUtils:
|
||||||
# retrieve ips
|
# retrieve ips
|
||||||
ips = set()
|
ips = set()
|
||||||
saveerr = None
|
saveerr = None
|
||||||
for fam, ipfam in ((socket.AF_INET, IPAddr.FAM_IPv4), (socket.AF_INET6, IPAddr.FAM_IPv6)):
|
for fam in ((socket.AF_INET,socket.AF_INET6) if DNSUtils.IPv6IsAllowed() else (socket.AF_INET,)):
|
||||||
try:
|
try:
|
||||||
for result in socket.getaddrinfo(dns, None, fam, 0, socket.IPPROTO_TCP):
|
for result in socket.getaddrinfo(dns, None, fam, 0, socket.IPPROTO_TCP):
|
||||||
# if getaddrinfo returns something unexpected:
|
# if getaddrinfo returns something unexpected:
|
||||||
if len(result) < 4 or not len(result[4]): continue
|
if len(result) < 4 or not len(result[4]): continue
|
||||||
# get ip from `(2, 1, 6, '', ('127.0.0.1', 0))`, be sure we have an ip-string
|
# get ip from `(2, 1, 6, '', ('127.0.0.1', 0))`, be sure we have an ip-string
|
||||||
# (some python versions resp. host configurations cause an integer to be returned there):
|
# (some python versions resp. host configurations cause an integer to be returned there):
|
||||||
ip = IPAddr(str(result[4][0]), ipfam)
|
ip = IPAddr(str(result[4][0]), IPAddr._AF2FAM(fam))
|
||||||
if ip.isValid:
|
if ip.isValid:
|
||||||
ips.add(ip)
|
ips.add(ip)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
@ -154,8 +154,9 @@ class DNSUtils:
|
||||||
# try find cached own hostnames (this tuple-key cannot be used elsewhere):
|
# try find cached own hostnames (this tuple-key cannot be used elsewhere):
|
||||||
key = ('self','hostname', fqdn)
|
key = ('self','hostname', fqdn)
|
||||||
name = DNSUtils.CACHE_ipToName.get(key)
|
name = DNSUtils.CACHE_ipToName.get(key)
|
||||||
|
if name is not None:
|
||||||
|
return name
|
||||||
# get it using different ways (hostname, fully-qualified or vice versa):
|
# get it using different ways (hostname, fully-qualified or vice versa):
|
||||||
if name is None:
|
|
||||||
name = ''
|
name = ''
|
||||||
for hostname in (
|
for hostname in (
|
||||||
(getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
|
(getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
|
||||||
|
@ -177,8 +178,9 @@ class DNSUtils:
|
||||||
"""Get own host names of self"""
|
"""Get own host names of self"""
|
||||||
# try find cached own hostnames:
|
# try find cached own hostnames:
|
||||||
names = DNSUtils.CACHE_ipToName.get(DNSUtils._getSelfNames_key)
|
names = DNSUtils.CACHE_ipToName.get(DNSUtils._getSelfNames_key)
|
||||||
|
if names is not None:
|
||||||
|
return names
|
||||||
# get it using different ways (a set with names of localhost, hostname, fully qualified):
|
# get it using different ways (a set with names of localhost, hostname, fully qualified):
|
||||||
if names is None:
|
|
||||||
names = set([
|
names = set([
|
||||||
'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
|
'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
|
||||||
]) - set(['']) # getHostname can return ''
|
]) - set(['']) # getHostname can return ''
|
||||||
|
@ -186,6 +188,25 @@ class DNSUtils:
|
||||||
DNSUtils.CACHE_ipToName.set(DNSUtils._getSelfNames_key, names)
|
DNSUtils.CACHE_ipToName.set(DNSUtils._getSelfNames_key, names)
|
||||||
return names
|
return names
|
||||||
|
|
||||||
|
# key to find cached network interfaces IPs (this tuple-key cannot be used elsewhere):
|
||||||
|
_getNetIntrfIPs_key = ('netintrf','ips')
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getNetIntrfIPs():
|
||||||
|
"""Get own IP addresses of self"""
|
||||||
|
# to find cached own IPs:
|
||||||
|
ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getNetIntrfIPs_key)
|
||||||
|
if ips is not None:
|
||||||
|
return ips
|
||||||
|
# try to obtain from network interfaces if possible (implemented for this platform):
|
||||||
|
try:
|
||||||
|
ips = IPAddrSet([a for ni, a in DNSUtils._NetworkInterfacesAddrs()])
|
||||||
|
except:
|
||||||
|
ips = IPAddrSet()
|
||||||
|
# cache and return :
|
||||||
|
DNSUtils.CACHE_nameToIp.set(DNSUtils._getNetIntrfIPs_key, ips)
|
||||||
|
return ips
|
||||||
|
|
||||||
# key to find cached own IPs (this tuple-key cannot be used elsewhere):
|
# key to find cached own IPs (this tuple-key cannot be used elsewhere):
|
||||||
_getSelfIPs_key = ('self','ips')
|
_getSelfIPs_key = ('self','ips')
|
||||||
|
|
||||||
|
@ -194,12 +215,14 @@ class DNSUtils:
|
||||||
"""Get own IP addresses of self"""
|
"""Get own IP addresses of self"""
|
||||||
# to find cached own IPs:
|
# to find cached own IPs:
|
||||||
ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getSelfIPs_key)
|
ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getSelfIPs_key)
|
||||||
# get it using different ways (a set with IPs of localhost, hostname, fully qualified):
|
if ips is not None:
|
||||||
if ips is None:
|
return ips
|
||||||
ips = set()
|
# firstly try to obtain from network interfaces if possible (implemented for this platform):
|
||||||
|
ips = IPAddrSet(DNSUtils.getNetIntrfIPs())
|
||||||
|
# extend it using different ways (a set with IPs of localhost, hostname, fully qualified):
|
||||||
for hostname in DNSUtils.getSelfNames():
|
for hostname in DNSUtils.getSelfNames():
|
||||||
try:
|
try:
|
||||||
ips |= set(DNSUtils.textToIp(hostname, 'yes'))
|
ips |= IPAddrSet(DNSUtils.dnsToIp(hostname))
|
||||||
except Exception as e: # pragma: no cover
|
except Exception as e: # pragma: no cover
|
||||||
logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
|
logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
|
||||||
# cache and return :
|
# cache and return :
|
||||||
|
@ -208,6 +231,38 @@ class DNSUtils:
|
||||||
|
|
||||||
_IPv6IsAllowed = None
|
_IPv6IsAllowed = None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _IPv6IsSupportedBySystem():
|
||||||
|
if not socket.has_ipv6:
|
||||||
|
return False
|
||||||
|
# try to check sysctl net.ipv6.conf.all.disable_ipv6:
|
||||||
|
try:
|
||||||
|
with open('/proc/sys/net/ipv6/conf/all/disable_ipv6', 'rb') as f:
|
||||||
|
# if 1 - disabled, 0 - enabled
|
||||||
|
return not int(f.read())
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
s = None
|
||||||
|
try:
|
||||||
|
# try to create INET6 socket:
|
||||||
|
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
|
||||||
|
# bind it to free port for any interface supporting IPv6:
|
||||||
|
s.bind(("", 0));
|
||||||
|
return True
|
||||||
|
except Exception as e: # pragma: no cover
|
||||||
|
if hasattr(e, 'errno'):
|
||||||
|
import errno
|
||||||
|
# negative (-9 'Address family not supported', etc) or not available/supported:
|
||||||
|
if e.errno < 0 or e.errno in (errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT):
|
||||||
|
return False
|
||||||
|
# in use:
|
||||||
|
if e.errno in (errno.EADDRINUSE, errno.EACCES): # normally unreachable (free port and root)
|
||||||
|
return True
|
||||||
|
finally:
|
||||||
|
if s: s.close()
|
||||||
|
# unable to detect:
|
||||||
|
return None
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def setIPv6IsAllowed(value):
|
def setIPv6IsAllowed(value):
|
||||||
DNSUtils._IPv6IsAllowed = value
|
DNSUtils._IPv6IsAllowed = value
|
||||||
|
@ -224,7 +279,17 @@ class DNSUtils:
|
||||||
v = DNSUtils.CACHE_nameToIp.get(DNSUtils._IPv6IsAllowed_key)
|
v = DNSUtils.CACHE_nameToIp.get(DNSUtils._IPv6IsAllowed_key)
|
||||||
if v is not None:
|
if v is not None:
|
||||||
return v
|
return v
|
||||||
v = any((':' in ip.ntoa) for ip in DNSUtils.getSelfIPs())
|
v = DNSUtils._IPv6IsSupportedBySystem()
|
||||||
|
if v is None:
|
||||||
|
# detect by IPs of host:
|
||||||
|
ips = DNSUtils.getNetIntrfIPs()
|
||||||
|
if not ips:
|
||||||
|
DNSUtils._IPv6IsAllowed = True; # avoid self recursion from getSelfIPs -> dnsToIp -> IPv6IsAllowed
|
||||||
|
try:
|
||||||
|
ips = DNSUtils.getSelfIPs()
|
||||||
|
finally:
|
||||||
|
DNSUtils._IPv6IsAllowed = None
|
||||||
|
v = any((':' in ip.ntoa) for ip in ips)
|
||||||
DNSUtils.CACHE_nameToIp.set(DNSUtils._IPv6IsAllowed_key, v)
|
DNSUtils.CACHE_nameToIp.set(DNSUtils._IPv6IsAllowed_key, v)
|
||||||
return v
|
return v
|
||||||
|
|
||||||
|
@ -239,9 +304,11 @@ class IPAddr(object):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
IP_4_RE = r"""(?:\d{1,3}\.){3}\d{1,3}"""
|
IP_4_RE = r"""(?:\d{1,3}\.){3}\d{1,3}"""
|
||||||
IP_6_RE = r"""(?:[0-9a-fA-F]{1,4}::?|::){1,7}(?:[0-9a-fA-F]{1,4}|(?<=:):)"""
|
IP_6_RE = r"""(?:[0-9a-fA-F]{1,4}::?|:){1,7}(?:[0-9a-fA-F]{1,4}|(?<=:):)"""
|
||||||
IP_4_6_CRE = re.compile(
|
IP_4_6_CRE = re.compile(
|
||||||
r"""^(?:(?P<IPv4>%s)|\[?(?P<IPv6>%s)\]?)$""" % (IP_4_RE, IP_6_RE))
|
r"""^(?:(?P<IPv4>%s)|\[?(?P<IPv6>%s)\]?)$""" % (IP_4_RE, IP_6_RE))
|
||||||
|
IP_W_CIDR_CRE = re.compile(
|
||||||
|
r"""^(%s|%s)/(?:(\d+)|(%s|%s))$""" % (IP_4_RE, IP_6_RE, IP_4_RE, IP_6_RE))
|
||||||
# An IPv4 compatible IPv6 to be reused (see below)
|
# An IPv4 compatible IPv6 to be reused (see below)
|
||||||
IP6_4COMPAT = None
|
IP6_4COMPAT = None
|
||||||
|
|
||||||
|
@ -255,6 +322,9 @@ class IPAddr(object):
|
||||||
CIDR_UNSPEC = -1
|
CIDR_UNSPEC = -1
|
||||||
FAM_IPv4 = CIDR_RAW - socket.AF_INET
|
FAM_IPv4 = CIDR_RAW - socket.AF_INET
|
||||||
FAM_IPv6 = CIDR_RAW - socket.AF_INET6
|
FAM_IPv6 = CIDR_RAW - socket.AF_INET6
|
||||||
|
@staticmethod
|
||||||
|
def _AF2FAM(v):
|
||||||
|
return IPAddr.CIDR_RAW - v
|
||||||
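A tiny sketch (not part of the diff) showing what the new helper yields, which is why the dnsToIp() change earlier can pass the socket address family straight through; it assumes fail2ban's IPAddr class from this module is importable.
import socket
# sketch, not part of the diff (assumes IPAddr from this module is importable):
assert IPAddr._AF2FAM(socket.AF_INET)  == IPAddr.FAM_IPv4
assert IPAddr._AF2FAM(socket.AF_INET6) == IPAddr.FAM_IPv6
# both hold because FAM_IPv4/FAM_IPv6 are defined as CIDR_RAW - AF_INET/AF_INET6 above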
|
|
||||||
def __new__(cls, ipstr, cidr=CIDR_UNSPEC):
|
def __new__(cls, ipstr, cidr=CIDR_UNSPEC):
|
||||||
if cidr == IPAddr.CIDR_UNSPEC and isinstance(ipstr, (tuple, list)):
|
if cidr == IPAddr.CIDR_UNSPEC and isinstance(ipstr, (tuple, list)):
|
||||||
|
@ -292,13 +362,17 @@ class IPAddr(object):
|
||||||
# test mask:
|
# test mask:
|
||||||
if "/" not in ipstr:
|
if "/" not in ipstr:
|
||||||
return ipstr, IPAddr.CIDR_UNSPEC
|
return ipstr, IPAddr.CIDR_UNSPEC
|
||||||
s = ipstr.split('/', 1)
|
s = IPAddr.IP_W_CIDR_CRE.match(ipstr)
|
||||||
# IP address without CIDR mask
|
if s is None:
|
||||||
if len(s) > 2:
|
return ipstr, IPAddr.CIDR_UNSPEC
|
||||||
raise ValueError("invalid ipstr %r, too many plen representation" % (ipstr,))
|
s = list(s.groups())
|
||||||
if "." in s[1] or ":" in s[1]: # 255.255.255.0 resp. ffff:: style mask
|
if s[2]: # 255.255.255.0 resp. ffff:: style mask
|
||||||
s[1] = IPAddr.masktoplen(s[1])
|
s[1] = IPAddr.masktoplen(s[2])
|
||||||
s[1] = long(s[1])
|
del s[2]
|
||||||
|
try:
|
||||||
|
s[1] = int(s[1])
|
||||||
|
except ValueError:
|
||||||
|
return ipstr, IPAddr.CIDR_UNSPEC
|
||||||
return s
|
return s
|
||||||
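A sketch (not part of the diff) of the intended behaviour of the regex-based mask split above; the inputs are hypothetical documentation addresses.
# sketch, not part of the diff (inputs are hypothetical):
str(IPAddr("192.0.2.0/24"))               # '192.0.2.0/24'   numeric plen taken as-is
str(IPAddr("192.0.2.0/255.255.255.0"))    # '192.0.2.0/24'   dotted mask -> masktoplen
str(IPAddr("2001:db8::/ffff:ffff::"))     # '2001:db8::/32'  IPv6-style mask -> masktoplen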
|
|
||||||
def __init(self, ipstr, cidr=CIDR_UNSPEC):
|
def __init(self, ipstr, cidr=CIDR_UNSPEC):
|
||||||
|
@ -332,7 +406,7 @@ class IPAddr(object):
|
||||||
|
|
||||||
# mask out host portion if prefix length is supplied
|
# mask out host portion if prefix length is supplied
|
||||||
if cidr is not None and cidr >= 0:
|
if cidr is not None and cidr >= 0:
|
||||||
mask = ~(0xFFFFFFFFL >> cidr)
|
mask = ~(0xFFFFFFFF >> cidr)
|
||||||
self._addr &= mask
|
self._addr &= mask
|
||||||
self._plen = cidr
|
self._plen = cidr
|
||||||
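A quick sketch (not part of the diff) of the host-part masking once the Python 2 long suffix is dropped; the address is a hypothetical documentation value.
# sketch, not part of the diff
cidr = 24
mask = ~(0xFFFFFFFF >> cidr)        # ...FFFFFF00 as an unbounded Python 3 int
addr = 0xC0000201 & mask            # 192.0.2.1 masked to its /24 network
hex(addr & 0xFFFFFFFF)              # '0xc0000200'  i.e. 192.0.2.0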
|
|
||||||
|
@ -344,13 +418,13 @@ class IPAddr(object):
|
||||||
|
|
||||||
# mask out host portion if prefix length is supplied
|
# mask out host portion if prefix length is supplied
|
||||||
if cidr is not None and cidr >= 0:
|
if cidr is not None and cidr >= 0:
|
||||||
mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFL >> cidr)
|
mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> cidr)
|
||||||
self._addr &= mask
|
self._addr &= mask
|
||||||
self._plen = cidr
|
self._plen = cidr
|
||||||
|
|
||||||
# if IPv6 address is a IPv4-compatible, make instance a IPv4
|
# if IPv6 address is a IPv4-compatible, make instance a IPv4
|
||||||
elif self.isInNet(IPAddr.IP6_4COMPAT):
|
elif self.isInNet(IPAddr.IP6_4COMPAT):
|
||||||
self._addr = lo & 0xFFFFFFFFL
|
self._addr = lo & 0xFFFFFFFF
|
||||||
self._family = socket.AF_INET
|
self._family = socket.AF_INET
|
||||||
self._plen = 32
|
self._plen = 32
|
||||||
else:
|
else:
|
||||||
|
@ -360,7 +434,7 @@ class IPAddr(object):
|
||||||
return repr(self.ntoa)
|
return repr(self.ntoa)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return self.ntoa if isinstance(self.ntoa, basestring) else str(self.ntoa)
|
return self.ntoa if isinstance(self.ntoa, str) else str(self.ntoa)
|
||||||
|
|
||||||
def __reduce__(self):
|
def __reduce__(self):
|
||||||
"""IPAddr pickle-handler, that simply wraps IPAddr to the str
|
"""IPAddr pickle-handler, that simply wraps IPAddr to the str
|
||||||
|
@ -474,7 +548,7 @@ class IPAddr(object):
|
||||||
elif self.isIPv6:
|
elif self.isIPv6:
|
||||||
# convert network to host byte order
|
# convert network to host byte order
|
||||||
hi = self._addr >> 64
|
hi = self._addr >> 64
|
||||||
lo = self._addr & 0xFFFFFFFFFFFFFFFFL
|
lo = self._addr & 0xFFFFFFFFFFFFFFFF
|
||||||
binary = struct.pack("!QQ", hi, lo)
|
binary = struct.pack("!QQ", hi, lo)
|
||||||
if self._plen and self._plen < 128:
|
if self._plen and self._plen < 128:
|
||||||
add = "/%d" % self._plen
|
add = "/%d" % self._plen
|
||||||
|
@ -532,9 +606,9 @@ class IPAddr(object):
|
||||||
if self.family != net.family:
|
if self.family != net.family:
|
||||||
return False
|
return False
|
||||||
if self.isIPv4:
|
if self.isIPv4:
|
||||||
mask = ~(0xFFFFFFFFL >> net.plen)
|
mask = ~(0xFFFFFFFF >> net.plen)
|
||||||
elif self.isIPv6:
|
elif self.isIPv6:
|
||||||
mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFL >> net.plen)
|
mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> net.plen)
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
@ -545,13 +619,16 @@ class IPAddr(object):
|
||||||
"""
|
"""
|
||||||
return isinstance(ip, IPAddr) and (ip == self or ip.isInNet(self))
|
return isinstance(ip, IPAddr) and (ip == self or ip.isInNet(self))
|
||||||
|
|
||||||
|
def __contains__(self, ip):
|
||||||
|
return self.contains(ip)
|
||||||
|
|
||||||
# Pre-calculated map: addr to maskplen
|
# Pre-calculated map: addr to maskplen
|
||||||
def __getMaskMap():
|
def __getMaskMap():
|
||||||
m6 = (1 << 128)-1
|
m6 = (1 << 128)-1
|
||||||
m4 = (1 << 32)-1
|
m4 = (1 << 32)-1
|
||||||
mmap = {m6: 128, m4: 32, 0: 0}
|
mmap = {m6: 128, m4: 32, 0: 0}
|
||||||
m = 0
|
m = 0
|
||||||
for i in xrange(0, 128):
|
for i in range(0, 128):
|
||||||
m |= 1 << i
|
m |= 1 << i
|
||||||
if i < 32:
|
if i < 32:
|
||||||
mmap[m ^ m4] = 32-1-i
|
mmap[m ^ m4] = 32-1-i
|
||||||
|
@ -587,10 +664,142 @@ class IPAddr(object):
|
||||||
if not match:
|
if not match:
|
||||||
return None
|
return None
|
||||||
ipstr = match.group('IPv4')
|
ipstr = match.group('IPv4')
|
||||||
if ipstr != '':
|
if ipstr is not None and ipstr != '':
|
||||||
return ipstr
|
return ipstr
|
||||||
return match.group('IPv6')
|
return match.group('IPv6')
|
||||||
|
|
||||||
|
|
||||||
# An IPv4 compatible IPv6 to be reused
|
# An IPv4 compatible IPv6 to be reused
|
||||||
IPAddr.IP6_4COMPAT = IPAddr("::ffff:0:0", 96)
|
IPAddr.IP6_4COMPAT = IPAddr("::ffff:0:0", 96)
|
||||||
|
|
||||||
|
|
||||||
|
class IPAddrSet(set):
|
||||||
|
|
||||||
|
hasSubNet = False
|
||||||
|
|
||||||
|
def __init__(self, ips=[]):
|
||||||
|
ips2 = set()
|
||||||
|
for ip in ips:
|
||||||
|
if not isinstance(ip, IPAddr): ip = IPAddr(ip)
|
||||||
|
ips2.add(ip)
|
||||||
|
self.hasSubNet |= not ip.isSingle
|
||||||
|
set.__init__(self, ips2)
|
||||||
|
|
||||||
|
def add(self, ip):
|
||||||
|
if not isinstance(ip, IPAddr): ip = IPAddr(ip)
|
||||||
|
self.hasSubNet |= not ip.isSingle
|
||||||
|
set.add(self, ip)
|
||||||
|
|
||||||
|
def __contains__(self, ip):
|
||||||
|
if not isinstance(ip, IPAddr): ip = IPAddr(ip)
|
||||||
|
# IP can be found directly or IP is in each subnet:
|
||||||
|
return set.__contains__(self, ip) or (self.hasSubNet and any(n.contains(ip) for n in self))
|
||||||
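A usage sketch (not part of the diff) for the new IPAddrSet, assuming IPAddr/IPAddrSet from this module are importable; the addresses are hypothetical documentation ranges.
# sketch, not part of the diff
ips = IPAddrSet(["192.0.2.1", "2001:db8::/32"])
"192.0.2.1" in ips       # True  - exact member
"2001:db8::25" in ips    # True  - hasSubNet is set, so subnets are scanned too
"198.51.100.7" in ips    # False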
|
|
||||||
|
|
||||||
|
def _NetworkInterfacesAddrs(withMask=False):
|
||||||
|
|
||||||
|
# Closure implementing lazy load modules and libc and define _NetworkInterfacesAddrs on demand:
|
||||||
|
# Currently tested on Linux only (TODO: implement for MacOS, Solaris, etc)
|
||||||
|
try:
|
||||||
|
from ctypes import (
|
||||||
|
Structure, Union, POINTER,
|
||||||
|
pointer, get_errno, cast,
|
||||||
|
c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint16, c_uint32
|
||||||
|
)
|
||||||
|
import ctypes.util
|
||||||
|
import ctypes
|
||||||
|
|
||||||
|
class struct_sockaddr(Structure):
|
||||||
|
_fields_ = [
|
||||||
|
('sa_family', c_ushort),
|
||||||
|
('sa_data', c_byte * 14),]
|
||||||
|
|
||||||
|
class struct_sockaddr_in(Structure):
|
||||||
|
_fields_ = [
|
||||||
|
('sin_family', c_ushort),
|
||||||
|
('sin_port', c_uint16),
|
||||||
|
('sin_addr', c_byte * 4)]
|
||||||
|
|
||||||
|
class struct_sockaddr_in6(Structure):
|
||||||
|
_fields_ = [
|
||||||
|
('sin6_family', c_ushort),
|
||||||
|
('sin6_port', c_uint16),
|
||||||
|
('sin6_flowinfo', c_uint32),
|
||||||
|
('sin6_addr', c_byte * 16),
|
||||||
|
('sin6_scope_id', c_uint32)]
|
||||||
|
|
||||||
|
class union_ifa_ifu(Union):
|
||||||
|
_fields_ = [
|
||||||
|
('ifu_broadaddr', POINTER(struct_sockaddr)),
|
||||||
|
('ifu_dstaddr', POINTER(struct_sockaddr)),]
|
||||||
|
|
||||||
|
class struct_ifaddrs(Structure):
|
||||||
|
pass
|
||||||
|
struct_ifaddrs._fields_ = [
|
||||||
|
('ifa_next', POINTER(struct_ifaddrs)),
|
||||||
|
('ifa_name', c_char_p),
|
||||||
|
('ifa_flags', c_uint),
|
||||||
|
('ifa_addr', POINTER(struct_sockaddr)),
|
||||||
|
('ifa_netmask', POINTER(struct_sockaddr)),
|
||||||
|
('ifa_ifu', union_ifa_ifu),
|
||||||
|
('ifa_data', c_void_p),]
|
||||||
|
|
||||||
|
libc = ctypes.CDLL(ctypes.util.find_library('c') or "")
|
||||||
|
if not libc.getifaddrs: # pragma: no cover
|
||||||
|
raise NotImplementedError('libc.getifaddrs is not available')
|
||||||
|
|
||||||
|
def ifap_iter(ifap):
|
||||||
|
ifa = ifap.contents
|
||||||
|
while True:
|
||||||
|
yield ifa
|
||||||
|
if not ifa.ifa_next:
|
||||||
|
break
|
||||||
|
ifa = ifa.ifa_next.contents
|
||||||
|
|
||||||
|
def getfamaddr(ifa, withMask=False):
|
||||||
|
sa = ifa.ifa_addr.contents
|
||||||
|
fam = sa.sa_family
|
||||||
|
if fam == socket.AF_INET:
|
||||||
|
sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
|
||||||
|
addr = socket.inet_ntop(fam, sa.sin_addr)
|
||||||
|
if withMask:
|
||||||
|
nm = ifa.ifa_netmask.contents
|
||||||
|
if nm is not None and nm.sa_family == socket.AF_INET:
|
||||||
|
nm = cast(pointer(nm), POINTER(struct_sockaddr_in)).contents
|
||||||
|
addr += '/'+socket.inet_ntop(fam, nm.sin_addr)
|
||||||
|
return IPAddr(addr)
|
||||||
|
elif fam == socket.AF_INET6:
|
||||||
|
sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
|
||||||
|
addr = socket.inet_ntop(fam, sa.sin6_addr)
|
||||||
|
if withMask:
|
||||||
|
nm = ifa.ifa_netmask.contents
|
||||||
|
if nm is not None and nm.sa_family == socket.AF_INET6:
|
||||||
|
nm = cast(pointer(nm), POINTER(struct_sockaddr_in6)).contents
|
||||||
|
addr += '/'+socket.inet_ntop(fam, nm.sin6_addr)
|
||||||
|
return IPAddr(addr)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _NetworkInterfacesAddrs(withMask=False):
|
||||||
|
ifap = POINTER(struct_ifaddrs)()
|
||||||
|
result = libc.getifaddrs(pointer(ifap))
|
||||||
|
if result != 0:
|
||||||
|
raise OSError(get_errno())
|
||||||
|
del result
|
||||||
|
try:
|
||||||
|
for ifa in ifap_iter(ifap):
|
||||||
|
name = ifa.ifa_name.decode("UTF-8")
|
||||||
|
addr = getfamaddr(ifa, withMask)
|
||||||
|
if addr:
|
||||||
|
yield name, addr
|
||||||
|
finally:
|
||||||
|
libc.freeifaddrs(ifap)
|
||||||
|
|
||||||
|
except Exception as e: # pragma: no cover
|
||||||
|
_init_error = NotImplementedError(e)
|
||||||
|
def _NetworkInterfacesAddrs():
|
||||||
|
raise _init_error
|
||||||
|
|
||||||
|
DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
|
||||||
|
return _NetworkInterfacesAddrs(withMask)
|
||||||
|
|
||||||
|
DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
|
||||||
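A sketch (not part of the diff) of how the lazy closure above is meant to be consumed; the output naturally depends on the host.
# sketch, not part of the diff; requires Linux with libc getifaddrs()
for name, addr in DNSUtils._NetworkInterfacesAddrs(withMask=True):
	print(name, addr)        # e.g. "lo 127.0.0.1/8", "eth0 192.0.2.10/24"
# The first call rebinds DNSUtils._NetworkInterfacesAddrs to the real ctypes
# implementation; on platforms where that setup fails, every later call raises
# NotImplementedError, which getNetIntrfIPs() catches and turns into an empty
# IPAddrSet.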
|
|
|
@ -26,7 +26,7 @@ __license__ = "GPL"
|
||||||
import logging
|
import logging
|
||||||
import math
|
import math
|
||||||
import random
|
import random
|
||||||
import Queue
|
import queue
|
||||||
|
|
||||||
from .actions import Actions
|
from .actions import Actions
|
||||||
from ..helpers import getLogger, _as_bool, extractOptions, MyTime
|
from ..helpers import getLogger, _as_bool, extractOptions, MyTime
|
||||||
|
@ -66,7 +66,7 @@ class Jail(object):
|
||||||
#Known backends. Each backend should have corresponding __initBackend method
|
#Known backends. Each backend should have corresponding __initBackend method
|
||||||
# yoh: stored in a list instead of a tuple since only
|
# yoh: stored in a list instead of a tuple since only
|
||||||
# list had .index until 2.6
|
# list had .index until 2.6
|
||||||
_BACKENDS = ['pyinotify', 'gamin', 'polling', 'systemd']
|
_BACKENDS = ['pyinotify', 'polling', 'systemd']
|
||||||
|
|
||||||
def __init__(self, name, backend = "auto", db=None):
|
def __init__(self, name, backend = "auto", db=None):
|
||||||
self.__db = db
|
self.__db = db
|
||||||
|
@ -76,13 +76,14 @@ class Jail(object):
|
||||||
"might not function correctly. Please shorten"
|
"might not function correctly. Please shorten"
|
||||||
% name)
|
% name)
|
||||||
self.__name = name
|
self.__name = name
|
||||||
self.__queue = Queue.Queue()
|
self.__queue = queue.Queue()
|
||||||
self.__filter = None
|
self.__filter = None
|
||||||
# Extra parameters for increase ban time
|
# Extra parameters for increase ban time
|
||||||
self._banExtra = {};
|
self._banExtra = {};
|
||||||
logSys.info("Creating new jail '%s'" % self.name)
|
logSys.info("Creating new jail '%s'" % self.name)
|
||||||
|
self._realBackend = None
|
||||||
if backend is not None:
|
if backend is not None:
|
||||||
self._setBackend(backend)
|
self._realBackend = self._setBackend(backend)
|
||||||
self.backend = backend
|
self.backend = backend
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
@ -113,7 +114,7 @@ class Jail(object):
|
||||||
else:
|
else:
|
||||||
logSys.info("Initiated %r backend" % b)
|
logSys.info("Initiated %r backend" % b)
|
||||||
self.__actions = Actions(self)
|
self.__actions = Actions(self)
|
||||||
return # we are done
|
return b # we are done
|
||||||
except ImportError as e: # pragma: no cover
|
except ImportError as e: # pragma: no cover
|
||||||
# Log debug if auto, but error if specific
|
# Log debug if auto, but error if specific
|
||||||
logSys.log(
|
logSys.log(
|
||||||
|
@ -127,25 +128,19 @@ class Jail(object):
|
||||||
"Failed to initialize any backend for Jail %r" % self.name)
|
"Failed to initialize any backend for Jail %r" % self.name)
|
||||||
|
|
||||||
def _initPolling(self, **kwargs):
|
def _initPolling(self, **kwargs):
|
||||||
from filterpoll import FilterPoll
|
from .filterpoll import FilterPoll
|
||||||
logSys.info("Jail '%s' uses poller %r" % (self.name, kwargs))
|
logSys.info("Jail '%s' uses poller %r" % (self.name, kwargs))
|
||||||
self.__filter = FilterPoll(self, **kwargs)
|
self.__filter = FilterPoll(self, **kwargs)
|
||||||
|
|
||||||
def _initGamin(self, **kwargs):
|
|
||||||
# Try to import gamin
|
|
||||||
from filtergamin import FilterGamin
|
|
||||||
logSys.info("Jail '%s' uses Gamin %r" % (self.name, kwargs))
|
|
||||||
self.__filter = FilterGamin(self, **kwargs)
|
|
||||||
|
|
||||||
def _initPyinotify(self, **kwargs):
|
def _initPyinotify(self, **kwargs):
|
||||||
# Try to import pyinotify
|
# Try to import pyinotify
|
||||||
from filterpyinotify import FilterPyinotify
|
from .filterpyinotify import FilterPyinotify
|
||||||
logSys.info("Jail '%s' uses pyinotify %r" % (self.name, kwargs))
|
logSys.info("Jail '%s' uses pyinotify %r" % (self.name, kwargs))
|
||||||
self.__filter = FilterPyinotify(self, **kwargs)
|
self.__filter = FilterPyinotify(self, **kwargs)
|
||||||
|
|
||||||
def _initSystemd(self, **kwargs): # pragma: systemd no cover
|
def _initSystemd(self, **kwargs): # pragma: systemd no cover
|
||||||
# Try to import systemd
|
# Try to import systemd
|
||||||
from filtersystemd import FilterSystemd
|
from .filtersystemd import FilterSystemd
|
||||||
logSys.info("Jail '%s' uses systemd %r" % (self.name, kwargs))
|
logSys.info("Jail '%s' uses systemd %r" % (self.name, kwargs))
|
||||||
self.__filter = FilterSystemd(self, **kwargs)
|
self.__filter = FilterSystemd(self, **kwargs)
|
||||||
|
|
||||||
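A comment-only sketch (not part of the diff) of the import rule behind the dotted changes above, using a hypothetical package layout.
# sketch, not part of the diff - hypothetical layout:
#   server/
#     __init__.py
#     jail.py          # this module
#     filterpoll.py
# Python 2 resolved "from filterpoll import FilterPoll" relative to the
# package implicitly; Python 3 requires the explicit leading dot:
#   from .filterpoll import FilterPoll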
|
@ -191,9 +186,14 @@ class Jail(object):
|
||||||
def status(self, flavor="basic"):
|
def status(self, flavor="basic"):
|
||||||
"""The status of the jail.
|
"""The status of the jail.
|
||||||
"""
|
"""
|
||||||
|
fstat = self.filter.status(flavor=flavor)
|
||||||
|
astat = self.actions.status(flavor=flavor)
|
||||||
|
if flavor == "stats":
|
||||||
|
backend = type(self.filter).__name__.replace('Filter', '').lower()
|
||||||
|
return [self._realBackend or self.backend, fstat, astat]
|
||||||
return [
|
return [
|
||||||
("Filter", self.filter.status(flavor=flavor)),
|
("Filter", fstat),
|
||||||
("Actions", self.actions.status(flavor=flavor)),
|
("Actions", astat),
|
||||||
]
|
]
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@ -219,7 +219,7 @@ class Jail(object):
|
||||||
try:
|
try:
|
||||||
ticket = self.__queue.get(False)
|
ticket = self.__queue.get(False)
|
||||||
return ticket
|
return ticket
|
||||||
except Queue.Empty:
|
except queue.Empty:
|
||||||
return False
|
return False
|
||||||
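A minimal sketch (not part of the diff) of the renamed standard-library module used by the jail's ticket queue.
# sketch, not part of the diff: Python 3 renamed Queue to queue,
# and the non-blocking get still signals emptiness with queue.Empty
import queue

q = queue.Queue()
try:
	ticket = q.get(False)
except queue.Empty:
	ticket = False
print(ticket)    # False - nothing queued yet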
|
|
||||||
def setBanTimeExtra(self, opt, value):
|
def setBanTimeExtra(self, opt, value):
|
||||||
|
@ -294,10 +294,10 @@ class Jail(object):
|
||||||
correctBanTime=correctBanTime, maxmatches=self.filter.failManager.maxMatches
|
correctBanTime=correctBanTime, maxmatches=self.filter.failManager.maxMatches
|
||||||
):
|
):
|
||||||
try:
|
try:
|
||||||
#logSys.debug('restored ticket: %s', ticket)
|
|
||||||
if self.filter.inIgnoreIPList(ticket.getID(), log_ignore=True): continue
|
|
||||||
# mark ticket as restored from database - so it is not put into db again:
|
# mark ticket as restored from database - so it is not put into db again:
|
||||||
ticket.restored = True
|
ticket.restored = True
|
||||||
|
#logSys.debug('restored ticket: %s', ticket)
|
||||||
|
if self.filter._inIgnoreIPList(ticket.getID(), ticket): continue
|
||||||
# correct start time / ban time (by the same end of ban):
|
# correct start time / ban time (by the same end of ban):
|
||||||
btm = ticket.getBanTime(forbantime)
|
btm = ticket.getBanTime(forbantime)
|
||||||
diftm = MyTime.time() - ticket.getTime()
|
diftm = MyTime.time() - ticket.getTime()
|
||||||
|
|
|
@ -67,7 +67,6 @@ class Jails(Mapping):
|
||||||
"""
|
"""
|
||||||
with self.__lock:
|
with self.__lock:
|
||||||
if name in self._jails:
|
if name in self._jails:
|
||||||
if noduplicates:
|
|
||||||
raise DuplicateJailException(name)
|
raise DuplicateJailException(name)
|
||||||
else:
|
else:
|
||||||
self._jails[name] = Jail(name, backend, db)
|
self._jails[name] = Jail(name, backend, db)
|
||||||
|
|
|
@ -78,14 +78,9 @@ class JailThread(Thread):
|
||||||
print(e)
|
print(e)
|
||||||
self.run = run_with_except_hook
|
self.run = run_with_except_hook
|
||||||
|
|
||||||
if sys.version_info >= (3,): # pragma: 2.x no cover
|
|
||||||
def _bootstrap(self):
|
def _bootstrap(self):
|
||||||
prctl_set_th_name(self.name)
|
prctl_set_th_name(self.name)
|
||||||
return super(JailThread, self)._bootstrap();
|
return super(JailThread, self)._bootstrap();
|
||||||
else: # pragma: 3.x no cover
|
|
||||||
def __bootstrap(self):
|
|
||||||
prctl_set_th_name(self.name)
|
|
||||||
return Thread._Thread__bootstrap(self)
|
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def status(self, flavor="basic"): # pragma: no cover - abstract
|
def status(self, flavor="basic"): # pragma: no cover - abstract
|
||||||
|
@ -125,9 +120,6 @@ class JailThread(Thread):
|
||||||
if self.active is not None:
|
if self.active is not None:
|
||||||
super(JailThread, self).join()
|
super(JailThread, self).join()
|
||||||
|
|
||||||
## python 2.x replace binding of private __bootstrap method:
|
|
||||||
if sys.version_info < (3,): # pragma: 3.x no cover
|
|
||||||
JailThread._Thread__bootstrap = JailThread._JailThread__bootstrap
|
|
||||||
## python 3.9, restore isAlive method:
|
## python 3.9, restore isAlive method:
|
||||||
elif not hasattr(JailThread, 'isAlive'): # pragma: 2.x no cover
|
if not hasattr(JailThread, 'isAlive'):
|
||||||
JailThread.isAlive = JailThread.is_alive
|
JailThread.isAlive = JailThread.is_alive
|
||||||
|
|
|
@ -165,7 +165,7 @@ class MyTime:
|
||||||
|
|
||||||
@returns number (calculated seconds from expression "val")
|
@returns number (calculated seconds from expression "val")
|
||||||
"""
|
"""
|
||||||
if isinstance(val, (int, long, float, complex)):
|
if isinstance(val, (int, float, complex)):
|
||||||
return val
|
return val
|
||||||
# replace together standing abbreviations, example '1d12h' -> '1d 12h':
|
# replace together standing abbreviations, example '1d12h' -> '1d 12h':
|
||||||
val = MyTime._str2sec_prep.sub(r" \1", val)
|
val = MyTime._str2sec_prep.sub(r" \1", val)
|
||||||
|
|
|
@ -161,7 +161,7 @@ class ObserverThread(JailThread):
|
||||||
self.pulse_notify()
|
self.pulse_notify()
|
||||||
|
|
||||||
def add_wn(self, *event):
|
def add_wn(self, *event):
|
||||||
"""Add a event to queue withouth notifying thread to wake up.
|
"""Add a event to queue without notifying thread to wake up.
|
||||||
"""
|
"""
|
||||||
## lock and add new event to queue:
|
## lock and add new event to queue:
|
||||||
with self._queue_lock:
|
with self._queue_lock:
|
||||||
|
@ -465,7 +465,7 @@ class ObserverThread(JailThread):
|
||||||
return banTime
|
return banTime
|
||||||
|
|
||||||
def banFound(self, ticket, jail, btime):
|
def banFound(self, ticket, jail, btime):
|
||||||
""" Notify observer a ban occured for ip
|
""" Notify observer a ban occurred for ip
|
||||||
|
|
||||||
Observer will check ip was known (bad) and possibly increase/prolong a ban time
|
Observer will check ip was known (bad) and possibly increase/prolong a ban time
|
||||||
Secondly, we will update the bans and bips (bad IPs) in the database
|
Secondly, we will update the bans and bips (bad IPs) in the database
|
||||||
|
@ -507,7 +507,7 @@ class ObserverThread(JailThread):
|
||||||
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||||
|
|
||||||
def prolongBan(self, ticket, jail):
|
def prolongBan(self, ticket, jail):
|
||||||
""" Notify observer a ban occured for ip
|
""" Notify observer a ban occurred for ip
|
||||||
|
|
||||||
Observer will check ip was known (bad) and possibly increase/prolong a ban time
|
Observer will check ip was known (bad) and possibly increase/prolong a ban time
|
||||||
Secondly, we will update the bans and bips (bad IPs) in the database
|
Secondly, we will update the bans and bips (bad IPs) in the database
|
||||||
|
@ -521,7 +521,7 @@ class ObserverThread(JailThread):
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||||
|
|
||||||
# Global observer initial created in server (could be later rewriten via singleton)
|
# Global observer initial created in server (could be later rewritten via singleton)
|
||||||
class _Observers:
|
class _Observers:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.Main = None
|
self.Main = None
|
||||||
|
|
|
@ -58,11 +58,6 @@ except ImportError: # pragma: no cover
|
||||||
def _thread_name():
|
def _thread_name():
|
||||||
return threading.current_thread().__class__.__name__
|
return threading.current_thread().__class__.__name__
|
||||||
|
|
||||||
try:
|
|
||||||
FileExistsError
|
|
||||||
except NameError: # pragma: 3.x no cover
|
|
||||||
FileExistsError = OSError
|
|
||||||
|
|
||||||
def _make_file_path(name):
|
def _make_file_path(name):
|
||||||
"""Creates path of file (last level only) on demand"""
|
"""Creates path of file (last level only) on demand"""
|
||||||
name = os.path.dirname(name)
|
name = os.path.dirname(name)
|
||||||
|
@ -209,7 +204,7 @@ class Server:
|
||||||
|
|
||||||
# Restore default signal handlers:
|
# Restore default signal handlers:
|
||||||
if _thread_name() == '_MainThread':
|
if _thread_name() == '_MainThread':
|
||||||
for s, sh in self.__prev_signals.iteritems():
|
for s, sh in self.__prev_signals.items():
|
||||||
signal.signal(s, sh)
|
signal.signal(s, sh)
|
||||||
|
|
||||||
# Give observer a small chance to complete its work before exit
|
# Give observer a small chance to complete its work before exit
|
||||||
|
@ -227,7 +222,7 @@ class Server:
|
||||||
obsMain.stop()
|
obsMain.stop()
|
||||||
|
|
||||||
# Explicit close database (server can leave in a thread,
|
# Explicit close database (server can leave in a thread,
|
||||||
# so delayed GC can prevent commiting changes)
|
# so delayed GC can prevent committing changes)
|
||||||
if self.__db:
|
if self.__db:
|
||||||
self.__db.close()
|
self.__db.close()
|
||||||
self.__db = None
|
self.__db = None
|
||||||
|
@ -287,10 +282,10 @@ class Server:
|
||||||
logSys.info("Stopping all jails")
|
logSys.info("Stopping all jails")
|
||||||
with self.__lock:
|
with self.__lock:
|
||||||
# 1st stop all jails (signal and stop actions/filter thread):
|
# 1st stop all jails (signal and stop actions/filter thread):
|
||||||
for name in self.__jails.keys():
|
for name in list(self.__jails.keys()):
|
||||||
self.delJail(name, stop=True, join=False)
|
self.delJail(name, stop=True, join=False)
|
||||||
# 2nd wait for end and delete jails:
|
# 2nd wait for end and delete jails:
|
||||||
for name in self.__jails.keys():
|
for name in list(self.__jails.keys()):
|
||||||
self.delJail(name, stop=False, join=True)
|
self.delJail(name, stop=False, join=True)
|
||||||
|
|
||||||
def clearCaches(self):
|
def clearCaches(self):
|
||||||
|
@ -328,7 +323,7 @@ class Server:
|
||||||
if "--restart" in opts:
|
if "--restart" in opts:
|
||||||
self.stopAllJail()
|
self.stopAllJail()
|
||||||
# first set all affected jail(s) to idle and reset filter regex and other lists/dicts:
|
# first set all affected jail(s) to idle and reset filter regex and other lists/dicts:
|
||||||
for jn, jail in self.__jails.iteritems():
|
for jn, jail in self.__jails.items():
|
||||||
if name == '--all' or jn == name:
|
if name == '--all' or jn == name:
|
||||||
jail.idle = True
|
jail.idle = True
|
||||||
self.__reload_state[jn] = jail
|
self.__reload_state[jn] = jail
|
||||||
|
@ -339,7 +334,7 @@ class Server:
|
||||||
# end reload, all affected (or new) jails have already all new parameters (via stream) and (re)started:
|
# end reload, all affected (or new) jails have already all new parameters (via stream) and (re)started:
|
||||||
with self.__lock:
|
with self.__lock:
|
||||||
deljails = []
|
deljails = []
|
||||||
for jn, jail in self.__jails.iteritems():
|
for jn, jail in self.__jails.items():
|
||||||
# still in reload state:
|
# still in reload state:
|
||||||
if jn in self.__reload_state:
|
if jn in self.__reload_state:
|
||||||
# remove jails that are not reloaded (untouched, so not in new configuration)
|
# remove jails that are not reloaded (untouched, so not in new configuration)
|
||||||
|
@ -539,7 +534,7 @@ class Server:
|
||||||
jails = [self.__jails[name]]
|
jails = [self.__jails[name]]
|
||||||
else:
|
else:
|
||||||
# in all jails:
|
# in all jails:
|
||||||
jails = self.__jails.values()
|
jails = list(self.__jails.values())
|
||||||
# unban given or all (if value is None):
|
# unban given or all (if value is None):
|
||||||
cnt = 0
|
cnt = 0
|
||||||
ifexists |= (name is None)
|
ifexists |= (name is None)
|
||||||
|
@ -553,7 +548,7 @@ class Server:
|
||||||
jails = [self.__jails[name]]
|
jails = [self.__jails[name]]
|
||||||
else:
|
else:
|
||||||
# in all jails:
|
# in all jails:
|
||||||
jails = self.__jails.values()
|
jails = list(self.__jails.values())
|
||||||
# check banned ids:
|
# check banned ids:
|
||||||
res = []
|
res = []
|
||||||
if name is None and ids:
|
if name is None and ids:
|
||||||
|
@ -603,20 +598,29 @@ class Server:
|
||||||
def isAlive(self, jailnum=None):
|
def isAlive(self, jailnum=None):
|
||||||
if jailnum is not None and len(self.__jails) != jailnum:
|
if jailnum is not None and len(self.__jails) != jailnum:
|
||||||
return 0
|
return 0
|
||||||
for jail in self.__jails.values():
|
for jail in list(self.__jails.values()):
|
||||||
if not jail.isAlive():
|
if not jail.isAlive():
|
||||||
return 0
|
return 0
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
# Status
|
# Status
|
||||||
def status(self):
|
def status(self, name="", flavor="basic"):
|
||||||
try:
|
try:
|
||||||
self.__lock.acquire()
|
self.__lock.acquire()
|
||||||
jails = list(self.__jails)
|
jails = sorted(self.__jails.items())
|
||||||
jails.sort()
|
if flavor != "stats":
|
||||||
jailList = ", ".join(jails)
|
jailList = [n for n, j in jails]
|
||||||
ret = [("Number of jail", len(self.__jails)),
|
ret = [
|
||||||
("Jail list", jailList)]
|
("Number of jail", len(jailList)),
|
||||||
|
("Jail list", ", ".join(jailList))
|
||||||
|
]
|
||||||
|
if name == '--all':
|
||||||
|
jstat = dict(jails)
|
||||||
|
for n, j in jails:
|
||||||
|
jstat[n] = j.status(flavor=flavor)
|
||||||
|
if flavor == "stats":
|
||||||
|
return jstat
|
||||||
|
ret.append(jstat)
|
||||||
return ret
|
return ret
|
||||||
finally:
|
finally:
|
||||||
self.__lock.release()
|
self.__lock.release()
|
||||||
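A sketch (not part of the diff) of the shapes the reworked Server.status() can now return; jail names, counts, and the actions-stats values are hypothetical.
# sketch, not part of the diff; return values are hypothetical
server.status()
# [('Number of jail', 2), ('Jail list', 'postfix, sshd')]
server.status('--all', flavor='stats')
# {'postfix': ['systemd', (0, 12), 1], 'sshd': ['pyinotify', (2, 37), 3]}
# i.e. per jail: backend, (currently failed, total failed), actions stats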
|
@ -725,14 +729,8 @@ class Server:
|
||||||
# Remove the handler.
|
# Remove the handler.
|
||||||
logger.removeHandler(handler)
|
logger.removeHandler(handler)
|
||||||
# And try to close -- it might be closed already
|
# And try to close -- it might be closed already
|
||||||
try:
|
|
||||||
handler.flush()
|
handler.flush()
|
||||||
handler.close()
|
handler.close()
|
||||||
except (ValueError, KeyError): # pragma: no cover
|
|
||||||
# Is known to be thrown after logging was shutdown once
|
|
||||||
# with older Pythons -- seems to be safe to ignore there
|
|
||||||
if sys.version_info < (3,) or sys.version_info >= (3, 2):
|
|
||||||
raise
|
|
||||||
# detailed format by deep log levels (as DEBUG=10):
|
# detailed format by deep log levels (as DEBUG=10):
|
||||||
if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
|
if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
|
||||||
if self.__verbose is None:
|
if self.__verbose is None:
|
||||||
|
@ -818,7 +816,7 @@ class Server:
|
||||||
return DNSUtils.setIPv6IsAllowed(value)
|
return DNSUtils.setIPv6IsAllowed(value)
|
||||||
|
|
||||||
def setThreadOptions(self, value):
|
def setThreadOptions(self, value):
|
||||||
for o, v in value.iteritems():
|
for o, v in value.items():
|
||||||
if o == 'stacksize':
|
if o == 'stacksize':
|
||||||
threading.stack_size(int(v)*1024)
|
threading.stack_size(int(v)*1024)
|
||||||
else: # pragma: no cover
|
else: # pragma: no cover
|
||||||
|
@ -936,32 +934,16 @@ class Server:
|
||||||
# the default value (configurable).
|
# the default value (configurable).
|
||||||
try:
|
try:
|
||||||
fdlist = self.__get_fdlist()
|
fdlist = self.__get_fdlist()
|
||||||
maxfd = -1
|
for fd in fdlist:
|
||||||
|
try:
|
||||||
|
os.close(fd)
|
||||||
|
except OSError: # ERROR (ignore)
|
||||||
|
pass
|
||||||
except:
|
except:
|
||||||
try:
|
try:
|
||||||
maxfd = os.sysconf("SC_OPEN_MAX")
|
maxfd = os.sysconf("SC_OPEN_MAX")
|
||||||
except (AttributeError, ValueError):
|
except (AttributeError, ValueError):
|
||||||
maxfd = 256 # default maximum
|
maxfd = 256 # default maximum
|
||||||
fdlist = xrange(maxfd+1)
|
|
||||||
|
|
||||||
# urandom should not be closed in Python 3.4.0. Fixed in 3.4.1
|
|
||||||
# http://bugs.python.org/issue21207
|
|
||||||
if sys.version_info[0:3] == (3, 4, 0): # pragma: no cover
|
|
||||||
urandom_fd = os.open("/dev/urandom", os.O_RDONLY)
|
|
||||||
for fd in fdlist:
|
|
||||||
try:
|
|
||||||
if not os.path.sameopenfile(urandom_fd, fd):
|
|
||||||
os.close(fd)
|
|
||||||
except OSError: # ERROR (ignore)
|
|
||||||
pass
|
|
||||||
os.close(urandom_fd)
|
|
||||||
elif maxfd == -1:
|
|
||||||
for fd in fdlist:
|
|
||||||
try:
|
|
||||||
os.close(fd)
|
|
||||||
except OSError: # ERROR (ignore)
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
os.closerange(0, maxfd)
|
os.closerange(0, maxfd)
|
||||||
|
|
||||||
# Redirect the standard file descriptors to /dev/null.
|
# Redirect the standard file descriptors to /dev/null.
|
||||||
|
|
|
@@ -60,7 +60,7 @@ timeRE['H'] = r"(?P<H>[0-1]?\d|2[0-3])"
 timeRE['M'] = r"(?P<M>[0-5]?\d)"
 timeRE['S'] = r"(?P<S>[0-5]?\d|6[0-1])"

-# Extend build-in TimeRE with some exact patterns
+# Extend built-in TimeRE with some exact patterns
 # exact two-digit patterns:
 timeRE['Exd'] = r"(?P<d>[1-2]\d|0[1-9]|3[0-1])"
 timeRE['Exm'] = r"(?P<m>0[1-9]|1[0-2])"
@@ -99,7 +99,7 @@ def _updateTimeRE():
 				if len(exprset) > 1 else "".join(exprset)
 		exprset = set( cent(now[0].year + i) for i in (-1, distance) )
 		if len(now) > 1 and now[1]:
-			exprset |= set( cent(now[1].year + i) for i in xrange(-1, now[0].year-now[1].year+1, distance) )
+			exprset |= set( cent(now[1].year + i) for i in range(-1, now[0].year-now[1].year+1, distance) )
 		return grp(sorted(list(exprset)))

 	# more precise year patterns, within same century of last year and
@@ -116,7 +116,7 @@ def _updateTimeRE():
 _updateTimeRE()

 def getTimePatternRE():
-	keys = timeRE.keys()
+	keys = list(timeRE.keys())
 	patt = (r"%%(%%|%s|[%s])" % (
 		"|".join([k for k in keys if len(k) > 1]),
 		"".join([k for k in keys if len(k) == 1]),
@@ -171,7 +171,7 @@ def zone2offset(tz, dt):
 	"""
 	if isinstance(tz, int):
 		return tz
-	if isinstance(tz, basestring):
+	if isinstance(tz, str):
 		return validateTimeZone(tz)
 	tz, tzo = tz
 	if tzo is None or tzo == '': # without offset
@@ -208,7 +208,7 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
 	year = month = day = tzoffset = \
 		weekday = julian = week_of_year = None
 	hour = minute = second = fraction = 0
-	for key, val in found_dict.iteritems():
+	for key, val in found_dict.items():
 		if val is None: continue
 		# Directives not explicitly handled below:
 		#   c, x, X
@@ -307,7 +307,7 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
 			day = now.day
 			assume_today = True

-	# Actully create date
+	# Actually create date
 	date_result = datetime.datetime(
 		year, month, day, hour, minute, second, fraction)
 	# Correct timezone if not supplied in the log linge
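Aside (not part of the diff): a self-contained sketch of how exact two-digit groups like the 'Exd'/'Exm' patterns above combine into a concrete regex; the log line and the surrounding year pattern are invented for illustration.

import re
import datetime

exd = r"(?P<d>[1-2]\d|0[1-9]|3[0-1])"  # exact two-digit day, as defined above
exm = r"(?P<m>0[1-9]|1[0-2])"          # exact two-digit month, as defined above

line = "2024-05-07 12:34:56 sshd[123]: Failed password"
m = re.search(r"(?P<Y>\d{4})-%s-%s" % (exm, exd), line)
if m:
	# prints 2024-05-07
	print(datetime.date(int(m.group("Y")), int(m.group("m")), int(m.group("d"))))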
@@ -55,7 +55,7 @@ class Ticket(object):
 		self._time = time if time is not None else MyTime.time()
 		self._data = {'matches': matches or [], 'failures': 0}
 		if data is not None:
-			for k,v in data.iteritems():
+			for k,v in data.items():
 				if v is not None:
 					self._data[k] = v
 		if ticket:
@@ -88,7 +88,7 @@ class Ticket(object):

 	def setID(self, value):
 		# guarantee using IPAddr instead of unicode, str for the IP
-		if isinstance(value, basestring):
+		if isinstance(value, str):
 			value = IPAddr(value)
 		self._id = value

@@ -180,7 +180,7 @@ class Ticket(object):
 		if len(args) == 1:
 			# todo: if support >= 2.7 only:
 			# self._data = {k:v for k,v in args[0].iteritems() if v is not None}
-			self._data = dict([(k,v) for k,v in args[0].iteritems() if v is not None])
+			self._data = dict([(k,v) for k,v in args[0].items() if v is not None])
 		# add k,v list or dict (merge):
 		elif len(args) == 2:
 			self._data.update((args,))
@@ -191,7 +191,7 @@ class Ticket(object):
 		# filter (delete) None values:
 		# todo: if support >= 2.7 only:
 		# self._data = {k:v for k,v in self._data.iteritems() if v is not None}
-		self._data = dict([(k,v) for k,v in self._data.iteritems() if v is not None])
+		self._data = dict([(k,v) for k,v in self._data.items() if v is not None])

 	def getData(self, key=None, default=None):
 		# return whole data dict:
@@ -200,17 +200,17 @@ class Ticket(object):
 		# return default if not exists:
 		if not self._data:
 			return default
-		if not isinstance(key,(str,unicode,type(None),int,float,bool,complex)):
+		if not isinstance(key,(str,type(None),int,float,bool,complex)):
 			# return filtered by lambda/function:
 			if callable(key):
 				# todo: if support >= 2.7 only:
 				# return {k:v for k,v in self._data.iteritems() if key(k)}
-				return dict([(k,v) for k,v in self._data.iteritems() if key(k)])
+				return dict([(k,v) for k,v in self._data.items() if key(k)])
 			# return filtered by keys:
 			if hasattr(key, '__iter__'):
 				# todo: if support >= 2.7 only:
 				# return {k:v for k,v in self._data.iteritems() if k in key}
-				return dict([(k,v) for k,v in self._data.iteritems() if k in key])
+				return dict([(k,v) for k,v in self._data.items() if k in key])
 		# return single value of data:
 		return self._data.get(key, default)

@@ -257,7 +257,7 @@ class FailTicket(Ticket):
 		as estimation from rate by previous known interval (if it exceeds the findTime)
 		"""
 		if time > self._time:
-			# expand current interval and attemps count (considering maxTime):
+			# expand current interval and attempts count (considering maxTime):
 			if self._firstTime < time - maxTime:
 				# adjust retry calculated as estimation from rate by previous known interval:
 				self._retry = int(round(self._retry / float(time - self._firstTime) * maxTime))
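Aside (not part of the diff): the recurring change in this file is the Python 3 dict protocol, where iteritems() no longer exists and items() returns a view. A tiny sketch of the None-filtering idiom the todo comments above refer to, with invented sample data.

data = {'matches': [], 'failures': 0, 'ip': None}

# Python 2 spelling (removed):  dict((k, v) for k, v in data.iteritems() if v is not None)
# Python 3 spelling used above, or the comprehension from the todo comments:
filtered = {k: v for k, v in data.items() if v is not None}
print(filtered)  # {'matches': [], 'failures': 0}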
@@ -144,6 +144,8 @@ class Transmitter:
 			return self.__commandGet(command[1:])
 		elif name == "status":
 			return self.status(command[1:])
+		elif name in ("stats", "statistic", "statistics"):
+			return self.__server.status("--all", "stats")
 		elif name == "version":
 			return version.version
 		elif name == "config-error":
@@ -488,7 +490,7 @@ class Transmitter:
 			opt = command[1][len("bantime."):]
 			return self.__server.getBanTimeExtra(name, opt)
 		elif command[1] == "actions":
-			return self.__server.getActions(name).keys()
+			return list(self.__server.getActions(name).keys())
 		elif command[1] == "action":
 			actionname = command[2]
 			actionvalue = command[3]
@@ -512,11 +514,10 @@ class Transmitter:
 	def status(self, command):
 		if len(command) == 0:
 			return self.__server.status()
-		elif len(command) == 1:
-			name = command[0]
-			return self.__server.statusJail(name)
-		elif len(command) == 2:
+		elif len(command) >= 1 and len(command) <= 2:
 			name = command[0]
-			flavor = command[1]
+			flavor = command[1] if len(command) == 2 else "basic"
+			if name == "--all":
+				return self.__server.status("--all", flavor)
 			return self.__server.statusJail(name, flavor=flavor)
 		raise Exception("Invalid command (no status)")
@@ -32,10 +32,7 @@ import time
 from ..helpers import getLogger, _merge_dicts, uni_decode
 from collections import OrderedDict

-if sys.version_info >= (3, 3):
-	import importlib.machinery
-else:
-	import imp
+import importlib.machinery

 # Gets the instance of the logger.
 logSys = getLogger(__name__)
@@ -53,7 +50,7 @@ _RETCODE_HINTS = {

 # Dictionary to lookup signal name from number
 signame = dict((num, name)
-	for name, num in signal.__dict__.iteritems() if name.startswith("SIG"))
+	for name, num in signal.__dict__.items() if name.startswith("SIG"))

 class Utils():
 	"""Utilities provide diverse static methods like executes OS shell commands, etc.
@@ -140,7 +137,7 @@ class Utils():
 		if not isinstance(realCmd, list):
 			realCmd = [realCmd]
 		i = len(realCmd)-1
-		for k, v in varsDict.iteritems():
+		for k, v in varsDict.items():
 			varsStat += "%s=$%s " % (k, i)
 			realCmd.append(v)
 			i += 1
@@ -355,10 +352,6 @@ class Utils():
 	def load_python_module(pythonModule):
 		pythonModuleName = os.path.splitext(
 			os.path.basename(pythonModule))[0]
-		if sys.version_info >= (3, 3):
-			mod = importlib.machinery.SourceFileLoader(
-				pythonModuleName, pythonModule).load_module()
-		else:
-			mod = imp.load_source(
-				pythonModuleName, pythonModule)
+		mod = importlib.machinery.SourceFileLoader(
+			pythonModuleName, pythonModule).load_module()
 		return mod
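Aside (not part of the diff): load_python_module() now always goes through importlib.machinery.SourceFileLoader(...).load_module(). Since load_module() is deprecated in current CPython, here is a hedged sketch of the documented spec-based alternative, shown only for comparison and not as what this commit does.

import importlib.util
import os

def load_python_module(path):
	# derive the module name from the file name, as the code above does
	name = os.path.splitext(os.path.basename(path))[0]
	spec = importlib.util.spec_from_file_location(name, path)
	mod = importlib.util.module_from_spec(spec)
	spec.loader.exec_module(mod)
	return mod

# e.g. load_python_module("config/action.d/smtp.py").Action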
@@ -18,72 +18,23 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

 import os
-import smtpd
 import threading
 import unittest
 import re
 import sys
-if sys.version_info >= (3, 3):
-	import importlib
-else:
-	import imp
+import importlib

 from ..dummyjail import DummyJail

 from ..utils import CONFIG_DIR, asyncserver, Utils, uni_decode

-class TestSMTPServer(smtpd.SMTPServer):
+class _SMTPActionTestCase():

-	def __init__(self, *args):
-		smtpd.SMTPServer.__init__(self, *args)
+	def _reset_smtpd(self):
+		for a in ('mailfrom', 'org_data', 'data'):
+			if hasattr(self.smtpd, a): delattr(self.smtpd, a)
 		self.ready = False

-	def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
-		self.peer = peer
-		self.mailfrom = mailfrom
-		self.rcpttos = rcpttos
-		self.org_data = data
-		# replace new line (with tab or space) for possible mime translations (word wrap),
-		self.data = re.sub(r"\n[\t ]", " ", uni_decode(data))
-		self.ready = True
-
-
-class SMTPActionTest(unittest.TestCase):
-
-	def setUp(self):
-		"""Call before every test case."""
-		unittest.F2B.SkipIfCfgMissing(action='smtp.py')
-		super(SMTPActionTest, self).setUp()
-		self.jail = DummyJail()
-		pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
-		pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
-		if sys.version_info >= (3, 3):
-			customActionModule = importlib.machinery.SourceFileLoader(
-				pythonModuleName, pythonModule).load_module()
-		else:
-			customActionModule = imp.load_source(
-				pythonModuleName, pythonModule)
-
-		self.smtpd = TestSMTPServer(("localhost", 0), None)
-		port = self.smtpd.socket.getsockname()[1]
-
-		self.action = customActionModule.Action(
-			self.jail, "test", host="localhost:%i" % port)
-
-		## because of bug in loop (see loop in asyncserver.py) use it's loop instead of asyncore.loop:
-		self._active = True
-		self._loop_thread = threading.Thread(
-			target=asyncserver.loop, kwargs={'active': lambda: self._active})
-		self._loop_thread.daemon = True
-		self._loop_thread.start()
-
-	def tearDown(self):
-		"""Call after every test case."""
-		self.smtpd.close()
-		self._active = False
-		self._loop_thread.join()
-		super(SMTPActionTest, self).tearDown()

 	def _exec_and_wait(self, doaction, timeout=3, short=False):
 		if short: timeout /= 25
 		self.smtpd.ready = False
@@ -94,6 +45,7 @@ class SMTPActionTest(unittest.TestCase):
 		self._exec_and_wait(self.action.start)
 		self.assertEqual(self.smtpd.mailfrom, "fail2ban")
 		self.assertEqual(self.smtpd.rcpttos, ["root"])
+		self.action.ssl = False # ensure it works without TLS as a sanity check
 		self.assertTrue(
 			"Subject: [Fail2Ban] %s: started" % self.jail.name
 			in self.smtpd.data)
@@ -160,3 +112,201 @@ class SMTPActionTest(unittest.TestCase):
 		self.assertTrue("From: %s <%s>" %
 			(self.action.fromname, self.action.fromaddr) in self.smtpd.data)
 		self.assertEqual(set(self.smtpd.rcpttos), set(["test@example.com", "test2@example.com"]))
+
+try:
+	import smtpd
+
+	class TestSMTPServer(smtpd.SMTPServer):
+
+		def __init__(self, *args):
+			smtpd.SMTPServer.__init__(self, *args)
+			self.ready = False
+
+		def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
+			self.peer = peer
+			self.mailfrom = mailfrom
+			self.rcpttos = rcpttos
+			self.org_data = data
+			# replace new line (with tab or space) for possible mime translations (word wrap),
+			self.data = re.sub(r"\n[\t ]", " ", uni_decode(data))
+			self.ready = True
+
+
+	class SMTPActionTest(unittest.TestCase, _SMTPActionTestCase):
+
+		def setUpClass():
+			"""Call before tests."""
+			unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+
+			cls = SMTPActionTest
+			cls.smtpd = TestSMTPServer(("localhost", 0), None)
+			cls.port = cls.smtpd.socket.getsockname()[1]
+
+			## because of bug in loop (see loop in asyncserver.py) use it's loop instead of asyncore.loop:
+			cls._active = True
+			cls._loop_thread = threading.Thread(
+				target=asyncserver.loop, kwargs={'active': lambda: cls._active})
+			cls._loop_thread.daemon = True
+			cls._loop_thread.start()
+
+		def tearDownClass():
+			"""Call after tests."""
+			cls = SMTPActionTest
+			cls.smtpd.close()
+			cls._active = False
+			cls._loop_thread.join()
+
+		def setUp(self):
+			"""Call before every test case."""
+			unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+			super(SMTPActionTest, self).setUp()
+			self.jail = DummyJail()
+			pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
+			pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
+			customActionModule = importlib.machinery.SourceFileLoader(
+				pythonModuleName, pythonModule).load_module()
+
+			self.action = customActionModule.Action(
+				self.jail, "test", host="localhost:%i" % self.port)
+
+		def tearDown(self):
+			"""Call after every test case."""
+			self._reset_smtpd()
+			super(SMTPActionTest, self).tearDown()
+
+except ImportError as e:
+	print("I: Skipping smtp tests: %s" % e)
+
+
+try:
+	import asyncio
+	from aiosmtpd.controller import Controller
+	import socket
+	import ssl
+	import tempfile
+
+	class TestSMTPHandler:
+		def __init__(self, *args):
+			self.ready = False
+
+		async def handle_DATA(self, server, session, envelope):
+			self.peer = session.peer
+			self.mailfrom = envelope.mail_from
+			self.rcpttos = envelope.rcpt_tos
+			self.org_data = envelope.content.decode()
+			# normalize CRLF -> LF:
+			self.data = re.sub(r"\r\n", "\n", uni_decode(self.org_data))
+			self.ready = True
+			return '250 OK'
+
+		async def handle_exception(self, error):
+			print(error)
+			return '542 Internal server error'
+
+
+	class AIOSMTPActionTest(unittest.TestCase, _SMTPActionTestCase):
+
+		@classmethod
+		def create_temp_self_signed_cert(cls):
+			"""
+			Create a self signed SSL certificate in temporary files for host
+			'localhost'
+
+			Returns a tuple containing the certificate file name and the key
+			file name.
+
+			The cert (ECC:256, 100years) created with:
+			openssl req -x509 -out /tmp/f2b-localhost.crt -keyout /tmp/f2b-localhost.key -days 36500 -newkey ec:<(openssl ecparam -name prime256v1) -nodes -sha256 \
+				-subj '/CN=localhost' -extensions EXT -config <( \
+				printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth" \
+				)
+			cat /tmp/f2b-localhost.*
+			rm /tmp/f2b-localhost.*
+			"""
+			if hasattr(cls, 'crtfiles'): return cls.crtfiles
+			cls.crtfiles = crtfiles = (tempfile.mktemp(".crt", "f2b_cert_"), tempfile.mktemp(".key", "f2b_cert_"))
+			with open(crtfiles[0], 'w') as f:
+				f.write(
+					'-----BEGIN CERTIFICATE-----\n'
+					'MIIBhDCCASugAwIBAgIUCuW168kD3G7XrpFwGHwE6vGfoJkwCgYIKoZIzj0EAwIw\n'
+					'FDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTIzMTIzMDE3NDUzNFoYDzIxMjMxMjA2\n'
+					'MTc0NTM0WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjO\n'
+					'PQMBBwNCAARDa8BO/UE4axzvnOQ/pCc/ZTp351X1TqIfjEFaMoZOItz1/MW3ZCuS\n'
+					'2vuby3rMn0WZ59RWVotBqA6lcMVcgDq3o1kwVzAUBgNVHREEDTALgglsb2NhbGhv\n'
+					'c3QwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBS8\n'
+					'kH1Ucuq+wlex5DxxHDe1kKGdcjAKBggqhkjOPQQDAgNHADBEAiBmv05+BvXWMzLg\n'
+					'TtF4McoQNrU/0TTKhV8o+mgd+47tMAIgaaSNRnfjGIfJMbXg7Bh53qOIu5+lnm1b\n'
+					'ySygMgFmePs=\n'
+					'-----END CERTIFICATE-----\n'
+				)
+			with open(crtfiles[1], 'w') as f:
+				f.write(
+					'-----BEGIN PRIVATE KEY-----\n'
+					'MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgoBGcojKPZMYut7aP\n'
+					'JGe2GW+2lVV0zJpgCsZ7816a9uqhRANCAARDa8BO/UE4axzvnOQ/pCc/ZTp351X1\n'
+					'TqIfjEFaMoZOItz1/MW3ZCuS2vuby3rMn0WZ59RWVotBqA6lcMVcgDq3\n'
+					'-----END PRIVATE KEY-----\n'
+				)
+			# return file names
+			return crtfiles
+
+		@classmethod
+		def _del_cert(cls):
+			if hasattr(cls, 'crtfiles') and cls.crtfiles:
+				for f in cls.crtfiles:
+					try:
+						os.unlink(f)
+					except FileNotFoundError: pass
+				cls.crtfiles = None
+
+		@staticmethod
+		def _free_port():
+			with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+				s.bind(('localhost', 0))
+				return s.getsockname()[1]
+
+		def setUpClass():
+			"""Call before tests."""
+			unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+
+			cert_file, cert_key = AIOSMTPActionTest.create_temp_self_signed_cert()
+			ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+			ssl_context.load_cert_chain(cert_file, cert_key)
+
+			cls = AIOSMTPActionTest
+			cls.port = cls._free_port()
+			cls.smtpd = TestSMTPHandler()
+			cls.controller = Controller(cls.smtpd, hostname='localhost', server_hostname='localhost', port=cls.port,
+				server_kwargs={'tls_context': ssl_context, 'require_starttls': False})
+			# Run the event loop in a separate thread.
+			cls.controller.start()
+
+		def tearDownClass():
+			"""Call after tests."""
+			cls = AIOSMTPActionTest
+			cls.controller.stop()
+			cls._del_cert()
+
+		def setUp(self):
+			"""Call before every test case."""
+			unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+			super(AIOSMTPActionTest, self).setUp()
+			self.jail = DummyJail()
+			pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
+			pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
+			customActionModule = importlib.machinery.SourceFileLoader(
+				pythonModuleName, pythonModule).load_module()
+
+			self.action = customActionModule.Action(
+				self.jail, "test", host="localhost:%i" % self.port)
+
+			self.action.ssl = True
+
+		def tearDown(self):
+			"""Call after every test case."""
+			self._reset_smtpd()
+			super(AIOSMTPActionTest, self).tearDown()
+
+except ImportError as e:
+	print("I: Skipping SSL smtp tests: %s" % e)
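Aside (not part of the diff): the new AIOSMTPActionTest depends on the third-party aiosmtpd package instead of the smtpd module removed from the standard library. A minimal standalone sketch of the same pattern, a local SMTP sink plus one client message; the port number and the message content are arbitrary.

import smtplib
from aiosmtpd.controller import Controller  # third-party, as used by the test above

class Sink:
	async def handle_DATA(self, server, session, envelope):
		print("mail from", envelope.mail_from, "to", envelope.rcpt_tos)
		return '250 OK'

controller = Controller(Sink(), hostname='localhost', port=8025)
controller.start()  # runs an asyncio loop in a background thread
try:
	with smtplib.SMTP('localhost', 8025) as client:
		client.sendmail("fail2ban", ["root"], "Subject: test\r\n\r\nhello")
finally:
	controller.stop()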
@@ -70,7 +70,7 @@ class CommandActionTest(LogCaptureTestCase):
 			lambda: substituteRecursiveTags({'A': '<B>', 'B': '<A>'}))
 		self.assertRaises(ValueError,
 			lambda: substituteRecursiveTags({'A': '<B>', 'B': '<C>', 'C': '<A>'}))
-		# Unresolveable substition
+		# Unresolveable substitution
 		self.assertRaises(ValueError,
 			lambda: substituteRecursiveTags({'A': 'to=<B> fromip=<IP>', 'C': '<B>', 'B': '<C>', 'D': ''}))
 		self.assertRaises(ValueError,
@@ -242,14 +242,14 @@ class CommandActionTest(LogCaptureTestCase):
 		setattr(self.__action, 'ab', "<ac>")
 		setattr(self.__action, 'x?family=inet6', "")
 		# produce self-referencing properties except:
-		self.assertRaisesRegexp(ValueError, r"properties contain self referencing definitions",
+		self.assertRaisesRegex(ValueError, r"properties contain self referencing definitions",
 			lambda: self.__action.replaceTag("<a><b>",
 				self.__action._properties, conditional="family=inet4")
 		)
-		# remore self-referencing in props:
+		# remote self-referencing in props:
 		delattr(self.__action, 'ac')
 		# produce self-referencing query except:
-		self.assertRaisesRegexp(ValueError, r"possible self referencing definitions in query",
+		self.assertRaisesRegex(ValueError, r"possible self referencing definitions in query",
 			lambda: self.__action.replaceTag("<x"*30+">"*30,
 				self.__action._properties, conditional="family=inet6")
 		)
@@ -276,7 +276,7 @@ class CommandActionTest(LogCaptureTestCase):
 				conditional="family=inet6", cache=cache),
 			"Text 890-567 text 567 '567'")
 		self.assertTrue(len(cache) >= 3)
-		# set one parameter - internal properties and cache should be reseted:
+		# set one parameter - internal properties and cache should be reset:
 		setattr(self.__action, 'xyz', "000-<abc>")
 		self.assertEqual(len(cache), 0)
 		# test againg, should have 000 instead of 890:
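Aside (not part of the diff): the assertRaisesRegexp -> assertRaisesRegex renames above are needed because the deprecated unittest aliases were removed in Python 3.12. A self-contained sketch of the surviving spelling:

import unittest

class RenameDemo(unittest.TestCase):
	def test_raises(self):
		# assertRaisesRegexp no longer exists on Python 3.12+
		with self.assertRaisesRegex(ValueError, r"invalid literal"):
			int("not-a-number")

if __name__ == "__main__":
	unittest.main()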
@@ -177,7 +177,7 @@ class StatusExtendedCymruInfo(unittest.TestCase):
 		super(StatusExtendedCymruInfo, self).setUp()
 		unittest.F2B.SkipIfNoNetwork()
 		setUpMyTime()
-		self.__ban_ip = iter(DNSUtils.dnsToIp("resolver1.opendns.com")).next()
+		self.__ban_ip = next(iter(DNSUtils.dnsToIp("resolver1.opendns.com")))
 		self.__asn = "36692"
 		self.__country = "US"
 		self.__rir = "arin"
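Aside (not part of the diff): the change above replaces the Python 2 iterator method .next() with the built-in next(). A one-line sketch with a stand-in value; the DNS lookup itself is not reproduced here.

ips = {"208.67.222.222"}   # stand-in for DNSUtils.dnsToIp("resolver1.opendns.com")
first = next(iter(ips))    # Python 3 iterators expose __next__, not .next()
print(first)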
@@ -70,8 +70,8 @@ class BeautifierTest(unittest.TestCase):

 	def testStatus(self):
 		self.b.setInputCmd(["status"])
-		response = (("Number of jails", 0), ("Jail list", ["ssh", "exim4"]))
-		output = "Status\n|- Number of jails:\t0\n`- Jail list:\tssh exim4"
+		response = (("Number of jails", 2), ("Jail list", ", ".join(["ssh", "exim4"])))
+		output = "Status\n|- Number of jails:\t2\n`- Jail list:\tssh, exim4"
 		self.assertEqual(self.b.beautify(response), output)

 		self.b.setInputCmd(["status", "ssh"])
@@ -105,6 +105,90 @@ class BeautifierTest(unittest.TestCase):
 		output += " `- Banned IP list: 192.168.0.1 10.2.2.1 2001:db8::1"
 		self.assertEqual(self.b.beautify(response), output)

+		self.b.setInputCmd(["status", "--all"])
+		response = (("Number of jails", 2), ("Jail list", ", ".join(["ssh", "exim4"])), {
+			"ssh": (
+				("Filter", [
+					("Currently failed", 0),
+					("Total failed", 0),
+					("File list", "/var/log/auth.log")
+					]
+				),
+				("Actions", [
+					("Currently banned", 3),
+					("Total banned", 3),
+					("Banned IP list", [
+						IPAddr("192.168.0.1"),
+						IPAddr("::ffff:10.2.2.1"),
+						IPAddr("2001:db8::1")
+						]
+					)
+					]
+				)
+			),
+			"exim4": (
+				("Filter", [
+					("Currently failed", 3),
+					("Total failed", 6),
+					("File list", "/var/log/exim4/mainlog")
+					]
+				),
+				("Actions", [
+					("Currently banned", 0),
+					("Total banned", 0),
+					("Banned IP list", []
+					)
+					]
+				)
+			)
+		})
+		output = (
+			"Status\n"
+			+ "|- Number of jails:\t2\n"
+			+ "|- Jail list:\tssh, exim4\n"
+			+ "`- Status for the jails:\n"
+			+ " |- Jail: ssh\n"
+			+ " | |- Filter\n"
+			+ " | | |- Currently failed: 0\n"
+			+ " | | |- Total failed: 0\n"
+			+ " | | `- File list: /var/log/auth.log\n"
+			+ " | `- Actions\n"
+			+ " | |- Currently banned: 3\n"
+			+ " | |- Total banned: 3\n"
+			+ " | `- Banned IP list: 192.168.0.1 10.2.2.1 2001:db8::1\n"
+			+ " `- Jail: exim4\n"
+			+ " |- Filter\n"
+			+ " | |- Currently failed: 3\n"
+			+ " | |- Total failed: 6\n"
+			+ " | `- File list: /var/log/exim4/mainlog\n"
+			+ " `- Actions\n"
+			+ " |- Currently banned: 0\n"
+			+ " |- Total banned: 0\n"
+			+ " `- Banned IP list: "
+		)
+		self.assertEqual(self.b.beautify(response), output)
+
+	def testStatusStats(self):
+		self.b.setInputCmd(["stats"])
+		response = {
+			"ssh": ["systemd", (3, 6), (12, 24)],
+			"exim4": ["pyinotify", (6, 12), (20, 20)],
+			"jail-with-long-name": ["polling", (0, 0), (0, 0)]
+		}
+		output = (""
+			+ " ? ? Filter ? Actions \n"
+			+ "Jail ? Backend ????????????????????????\n"
+			+ " ? ? cur ? tot ? cur ? tot\n"
+			+ "????????????????????????????????????????????????????????\n"
+			+ "ssh ? systemd ? 3 ? 6 ? 12 ? 24\n"
+			+ "exim4 ? pyinotify ? 6 ? 12 ? 20 ? 20\n"
+			+ "jail-with-long-name ? polling ? 0 ? 0 ? 0 ? 0\n"
+			+ "????????????????????????????????????????????????????????"
+		)
+		response = self.b.beautify(response).encode('ascii', 'replace').decode('ascii')
+		self.assertEqual(response, output)
+
 	def testFlushLogs(self):
 		self.b.setInputCmd(["flushlogs"])
 		self.assertEqual(self.b.beautify("rolled over"), "logs: rolled over")
@@ -61,6 +61,7 @@ class ConfigReaderTest(unittest.TestCase):
 	def tearDown(self):
 		"""Call after every test case."""
 		shutil.rmtree(self.d)
+		super(ConfigReaderTest, self).tearDown()

 	def _write(self, fname, value=None, content=None):
 		# verify if we don't need to create .d directory
@@ -337,7 +338,7 @@ class JailReaderTest(LogCaptureTestCase):
 		self.assertTrue(jail.getOptions())
 		self.assertTrue(jail.isEnabled())
 		stream = jail.convert()
-		# check filter options are overriden with values specified directly in jail:
+		# check filter options are overridden with values specified directly in jail:
 		# prefregex:
 		self.assertEqual([['set', 'sshd-override-flt-opts', 'prefregex', '^Test']],
 			[o for o in stream if len(o) > 2 and o[2] == 'prefregex'])
@@ -419,7 +420,7 @@ class JailReaderTest(LogCaptureTestCase):
 			# And multiple groups (`][` instead of `,`)
 			result = extractOptions(option.replace(',', ']['))
 			expected2 = (expected[0],
-				dict((k, v.replace(',', '][')) for k, v in expected[1].iteritems())
+				dict((k, v.replace(',', '][')) for k, v in expected[1].items())
 			)
 			self.assertEqual(expected2, result)

@@ -565,7 +566,7 @@ class FilterReaderTest(LogCaptureTestCase):

 	def testFilterReaderSubstitionDefault(self):
 		output = [['set', 'jailname', 'addfailregex', 'to=sweet@example.com fromip=<IP>']]
-		filterReader = FilterReader('substition', "jailname", {},
+		filterReader = FilterReader('substitution', "jailname", {},
 			share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
 		filterReader.read()
 		filterReader.getOptions(None)
@@ -585,7 +586,7 @@ class FilterReaderTest(LogCaptureTestCase):

 	def testFilterReaderSubstitionSet(self):
 		output = [['set', 'jailname', 'addfailregex', 'to=sour@example.com fromip=<IP>']]
-		filterReader = FilterReader('substition', "jailname", {'honeypot': 'sour@example.com'},
+		filterReader = FilterReader('substitution', "jailname", {'honeypot': 'sour@example.com'},
 			share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
 		filterReader.read()
 		filterReader.getOptions(None)
@@ -595,8 +596,8 @@ class FilterReaderTest(LogCaptureTestCase):
 	def testFilterReaderSubstitionKnown(self):
 		output = [['set', 'jailname', 'addfailregex', '^to=test,sweet@example.com,test2,sweet@example.com fromip=<IP>$']]
 		filterName, filterOpt = extractOptions(
-			'substition[failregex="^<known/failregex>$", honeypot="<sweet>,<known/honeypot>", sweet="test,<known/honeypot>,test2"]')
-		filterReader = FilterReader('substition', "jailname", filterOpt,
+			'substitution[failregex="^<known/failregex>$", honeypot="<sweet>,<known/honeypot>", sweet="test,<known/honeypot>,test2"]')
+		filterReader = FilterReader('substitution', "jailname", filterOpt,
 			share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
 		filterReader.read()
 		filterReader.getOptions(None)
@@ -606,8 +607,8 @@ class FilterReaderTest(LogCaptureTestCase):
 	def testFilterReaderSubstitionSection(self):
 		output = [['set', 'jailname', 'addfailregex', '^\\s*to=fail2ban@localhost fromip=<IP>\\s*$']]
 		filterName, filterOpt = extractOptions(
-			'substition[failregex="^\\s*<Definition/failregex>\\s*$", honeypot="<default/honeypot>"]')
-		filterReader = FilterReader('substition', "jailname", filterOpt,
+			'substitution[failregex="^\\s*<Definition/failregex>\\s*$", honeypot="<default/honeypot>"]')
+		filterReader = FilterReader('substitution', "jailname", filterOpt,
 			share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
 		filterReader.read()
 		filterReader.getOptions(None)
@@ -616,13 +617,13 @@ class FilterReaderTest(LogCaptureTestCase):

 	def testFilterReaderSubstitionFail(self):
 		# directly subst the same var :
-		filterReader = FilterReader('substition', "jailname", {'honeypot': '<honeypot>'},
+		filterReader = FilterReader('substitution', "jailname", {'honeypot': '<honeypot>'},
 			share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
 		filterReader.read()
 		filterReader.getOptions(None)
 		self.assertRaises(ValueError, FilterReader.convert, filterReader)
 		# cross subst the same var :
-		filterReader = FilterReader('substition', "jailname", {'honeypot': '<sweet>', 'sweet': '<honeypot>'},
+		filterReader = FilterReader('substitution', "jailname", {'honeypot': '<sweet>', 'sweet': '<honeypot>'},
 			share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
 		filterReader.read()
 		filterReader.getOptions(None)
@@ -1018,7 +1019,7 @@ filter = testfilter1
 		self.assertEqual(add_actions[-1][-1], "{}")

 	def testLogPathFileFilterBackend(self):
-		self.assertRaisesRegexp(ValueError, r"Have not found any log file for .* jail",
+		self.assertRaisesRegex(ValueError, r"Have not found any log file for .* jail",
 			self._testLogPath, backend='polling')

 	def testLogPathSystemdBackend(self):