diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 00000000..a5dd775e
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,12 @@
+[codespell]
+# THANKS - names
+skip = .git,*.pdf,*.svg,venv,.codespellrc,THANKS,*test*.log,logs
+check-hidden = true
+# Ignore all acronyms etc as plenty e.g. in fail2ban/server/strptime.py
+# Try to identify incomplete words which are part of a regex, hence having [] at the beginning
+# Ignore all urls as something with :// in it
+# Ignore all lines with codespell-ignore in them for pragma annotation
+ignore-regex = (\b([A-Z][A-Z][A-Z]+|gir\.st)\b)|\[[a-zA-Z]+\][a-z]+\b|[a-z]+://\S+|.*codespell-ignore.*
+# some oddly named variables, some names, etc
+# wee -- comes in regex etc for weeks
+ignore-words-list = theis,timere,alls,wee,wight,ans,re-use
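Note: codespell feeds `ignore-regex` to Python's `re` module and excludes any span the pattern matches from spell checking. A rough standalone illustration of what the pattern above lets through (the sample strings are made up):

```python
import re

# Same pattern as ignore-regex above: acronyms, regex fragments such as "[Dd]efinit",
# URLs, and anything on a line carrying a codespell-ignore pragma are excluded.
IGNORE = re.compile(
    r"(\b([A-Z][A-Z][A-Z]+|gir\.st)\b)|\[[a-zA-Z]+\][a-z]+\b|[a-z]+://\S+|.*codespell-ignore.*"
)

samples = [
    "DNAT",                         # acronym -> excluded
    "[Dd]efinit",                   # regex fragment -> excluded
    "see https://example.org/doc",  # URL -> excluded
    "teh quick fox",                # plain typo -> still checked
]
for s in samples:
    print(s, "->", "excluded" if IGNORE.search(s) else "checked")
```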
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 00000000..7373affc
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,22 @@
+---
+name: Codespell
+
+on:
+ push:
+ branches: [master]
+ pull_request:
+ branches: [master]
+
+permissions:
+ contents: read
+
+jobs:
+ codespell:
+ name: Check for spelling errors
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Codespell
+ uses: codespell-project/actions-codespell@v2
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 39c85231..30e38f7d 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -22,15 +22,15 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
- python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9, '3.10', '3.11.0-beta.3', pypy2, pypy3]
+ python-version: [3.7, 3.8, 3.9, '3.10', '3.11', '3.12', '3.13.0-alpha.2', pypy3.10]
fail-fast: false
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Set up Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
@@ -51,24 +51,32 @@ jobs:
- name: Install dependencies
run: |
- if [[ "$F2B_PY" = 3 ]]; then python -m pip install --upgrade pip || echo "can't upgrade pip"; fi
- if [[ "$F2B_PY" = 3 ]] && ! command -v 2to3x -v 2to3 > /dev/null; then
- #pip install 2to3
- sudo apt-get -y install 2to3
- fi
+ #if [[ "$F2B_PY" = 3 ]]; then python -m pip install --upgrade pip || echo "can't upgrade pip"; fi
#sudo apt-get -y install python${F2B_PY/2/}-pyinotify || echo 'inotify not available'
python -m pip install pyinotify || echo 'inotify not available'
+ sudo apt-get -y install sqlite3 || echo 'sqlite3 not available'
#sudo apt-get -y install python${F2B_PY/2/}-systemd || echo 'systemd not available'
sudo apt-get -y install libsystemd-dev || echo 'systemd dependencies seems to be unavailable'
python -m pip install systemd-python || echo 'systemd not available'
- #readline if available as module:
+ # readline if available as module:
python -c 'import readline' 2> /dev/null || python -m pip install readline || echo 'readline not available'
+ # asyncore/asynchat:
+ if dpkg --compare-versions "$F2B_PYV" ge 3.12; then
+ #sudo apt-get -y install python${F2B_PY/2/}-setuptools || echo 'setuptools not available'
+ python -m pip install setuptools || echo "can't install setuptools"
+ # don't install async* modules, we need to cover bundled-in libraries:
+ #python -m pip install pyasynchat || echo "can't install pyasynchat";
+ #python -m pip install pyasyncore || echo "can't install pyasyncore";
+ fi
+ # aiosmtpd in test_smtp (for 3.10+, no need to test it everywhere):
+ if dpkg --compare-versions "$F2B_PYV" ge 3.10; then
+ #sudo apt-get -y install python${F2B_PY/2/}-aiosmtpd || echo 'aiosmtpd not available'
+ python -m pip install aiosmtpd || echo 'aiosmtpd not available'
+ fi
- name: Before scripts
run: |
cd "$GITHUB_WORKSPACE"
- # Manually execute 2to3 for now
- if [[ "$F2B_PY" = 3 ]]; then echo "2to3 ..." && ./fail2ban-2to3; fi
_debug() { echo -n "$1 "; err=$("${@:2}" 2>&1) && echo 'OK' || echo -e "FAIL\n$err"; }
# (debug) output current preferred encoding:
_debug 'Encodings:' python -c 'import locale, sys; from fail2ban.helpers import PREFER_ENC; print(PREFER_ENC, locale.getpreferredencoding(), (sys.stdout and sys.stdout.encoding))'
@@ -80,14 +88,8 @@ jobs:
- name: Test suite
run: |
- if [[ "$F2B_PY" = 2 ]]; then
- python setup.py test
- elif dpkg --compare-versions "$F2B_PYV" lt 3.10; then
- python bin/fail2ban-testcases --verbosity=2
- else
- echo "Skip systemd backend since systemd-python module must be fixed for python >= v.3.10 in GHA ..."
- python bin/fail2ban-testcases --verbosity=2 -i "[sS]ystemd|[jJ]ournal"
- fi
+ #python setup.py test
+ python bin/fail2ban-testcases --verbosity=2
#- name: Test suite (debug some systemd tests only)
#run: python bin/fail2ban-testcases --verbosity=2 "[sS]ystemd|[jJ]ournal"
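The dependency step above intentionally leaves `pyasyncore`/`pyasynchat` uninstalled on 3.12+ so the bundled copies under `fail2ban/compat/` (added to the MANIFEST below) are exercised by the test suite. A minimal sketch of that kind of import fallback, assuming the `fail2ban.compat` module path; the server's actual wiring may differ:

```python
# Minimal sketch of the asyncore/asynchat fallback for Python 3.12+, where both
# modules were removed from the standard library. Assumes the bundled copies live
# in fail2ban.compat (per MANIFEST); the real server code may do this differently.
try:
    import asynchat  # stdlib on <= 3.11, or the pyasynchat backport if installed
except ImportError:
    from fail2ban.compat import asynchat  # bundled fallback
```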
diff --git a/.gitignore b/.gitignore
index 780ecfb5..5f1b8924 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ htmlcov
__pycache__
.vagrant/
.idea/
+.venv/
diff --git a/.project b/.project
deleted file mode 100644
index 6d59cd24..00000000
--- a/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
- fail2ban-unstable
-
-
-
-
-
- org.python.pydev.PyDevBuilder
-
-
-
-
-
- org.python.pydev.pythonNature
-
-
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 502af5be..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# vim ft=yaml
-# travis-ci.org definition for Fail2Ban build
-# https://travis-ci.org/fail2ban/fail2ban/
-
-#os: linux
-
-language: python
-dist: xenial
-
-matrix:
- fast_finish: true
- include:
- - python: 2.7
- #- python: pypy
- - python: 3.4
- - python: 3.5
- - python: 3.6
- - python: 3.7
- - python: 3.8
- - python: 3.9-dev
- - python: pypy3.5
-before_install:
- - echo "running under $TRAVIS_PYTHON_VERSION"
- - if [[ $TRAVIS_PYTHON_VERSION == 2* || $TRAVIS_PYTHON_VERSION == pypy* && $TRAVIS_PYTHON_VERSION != pypy3* ]]; then export F2B_PY=2; fi
- - if [[ $TRAVIS_PYTHON_VERSION == 3* || $TRAVIS_PYTHON_VERSION == pypy3* ]]; then export F2B_PY=3; fi
- - echo "Set F2B_PY=$F2B_PY"
- - travis_retry sudo apt-get update -qq
- # Set this so sudo executes the correct python binary
- # Anything not using sudo will already have the correct environment
- - export VENV_BIN="$VIRTUAL_ENV/bin" && echo "VENV_BIN set to $VENV_BIN"
-install:
- # Install Python packages / dependencies
- # coverage
- - travis_retry pip install coverage
- # coveralls (note coveralls doesn't support 2.6 now):
- #- if [[ $TRAVIS_PYTHON_VERSION != 2.6* ]]; then F2B_COV=1; else F2B_COV=0; fi
- - F2B_COV=1
- - if [[ "$F2B_COV" = 1 ]]; then travis_retry pip install coveralls; fi
- # codecov:
- - travis_retry pip install codecov
- # dnspython or dnspython3
- - if [[ "$F2B_PY" = 2 ]]; then travis_retry pip install dnspython || echo 'not installed'; fi
- - if [[ "$F2B_PY" = 3 ]]; then travis_retry pip install dnspython3 || echo 'not installed'; fi
- # python systemd bindings:
- - if [[ "$F2B_PY" = 2 ]]; then travis_retry sudo apt-get install -qq python-systemd || echo 'not installed'; fi
- - if [[ "$F2B_PY" = 3 ]]; then travis_retry sudo apt-get install -qq python3-systemd || echo 'not installed'; fi
- # gamin - install manually (not in PyPI) - travis-ci system Python is 2.7
- - if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then (travis_retry sudo apt-get install -qq python-gamin && cp /usr/share/pyshared/gamin.py /usr/lib/pyshared/python2.7/_gamin.so $VIRTUAL_ENV/lib/python2.7/site-packages/) || echo 'not installed'; fi
- # pyinotify
- - travis_retry pip install pyinotify || echo 'not installed'
- # Install helper tools
- - sudo apt-get install shellcheck
-before_script:
- # Manually execute 2to3 for now
- - if [[ "$F2B_PY" = 3 ]]; then ./fail2ban-2to3; fi
- # (debug) output current preferred encoding:
- - python -c 'import locale, sys; from fail2ban.helpers import PREFER_ENC; print(PREFER_ENC, locale.getpreferredencoding(), (sys.stdout and sys.stdout.encoding))'
-script:
- # Keep the legacy setup.py test approach of checking coverage for python2
- - if [[ "$F2B_PY" = 2 ]]; then coverage run setup.py test; fi
- # Coverage doesn't pick up setup.py test with python3, so run it directly (with same verbosity as from setup)
- - if [[ "$F2B_PY" = 3 ]]; then coverage run bin/fail2ban-testcases --verbosity=2; fi
- # Use $VENV_BIN (not python) or else sudo will always run the system's python (2.7)
- - sudo $VENV_BIN/pip install .
- # Doc files should get installed on Travis under Linux (some builds/python's seem to use another path segment)
- - test -e /usr/share/doc/fail2ban/FILTERS && echo 'found' || echo 'not found'
- # Test initd script
- - shellcheck -s bash -e SC1090,SC1091 files/debian-initd
-after_success:
- - if [[ "$F2B_COV" = 1 ]]; then coveralls; fi
- - codecov
-
-# Might be worth looking into
-#notifications:
-# email: true
-# irc:
-# channels: "irc.freenode.org#fail2ban"
-# template:
-# - "%{repository}@%{branch}: %{message} (%{build_url})"
-# on_success: change
-# on_failure: change
-# skip_join: true
diff --git a/ChangeLog b/ChangeLog
index 8c3be67d..c9ba7d5a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -7,6 +7,71 @@
Fail2Ban: Changelog
===================
+ver. 1.1.0 (2024/04/25) - object-found--norad-59479-cospar-2024-069a--altitude-36267km
+-----------
+
+### Compatibility
+* the minimum supported python version is now 3.5; if you have an older python version
+ you can use the 0.11 or 1.0 version of fail2ban or upgrade python (or even build it from source).
+
+### Fixes
+* circumvent SEGFAULT in a python's socket module by getaddrinfo with disabled IPv6 (gh-3438)
+* avoid sporadic error in pyinotify backend if pending file deleted in other thread, e. g. by flushing logs (gh-3635)
+* `action.d/cloudflare-token.conf` - fixes gh-3479, url-encode args by unban
+* `action.d/*ipset*`: make `maxelem` ipset option configurable through banaction arguments (gh-3564)
+* `filter.d/apache-common.conf` - accepts `remote` in addition to `client` (gh-3622)
+* `filter.d/mysqld-auth.conf` - matches also if no suffix in message (mariadb 10.3 log format, gh-3603)
+* `filter.d/nginx-*.conf` - nginx error-log filters extended with support of journal format (gh-3646)
+* `filter.d/postfix.conf`:
+ - "rejected" rule extended to match "Access denied" too (gh-3474)
+ - avoid double counting ('lost connection after AUTH' together with message 'disconnect ...', gh-3505)
+ - add Sender address rejected: Malformed DNS server reply (gh-3590)
+ - add to postfix syslog daemon format (gh-3690)
+ - change journalmatch postfix, allow sub-units with postfix@-.service (gh-3692)
+* `filter.d/recidive.conf`: support for systemd-journal, conditional RE depending on logtype (for file or journal, gh-3693)
+* `filter.d/slapd.conf` - filter rewritten for single-line processing, matches errored result without `text=...` (gh-3604)
+
+### New Features and Enhancements
+* supports python 3.12 and 3.13 (gh-3487)
+* bundles the async modules removed in python 3.12+ (falls back to the local pyasyncore/pyasynchat copies if the imports are missing, gh-3487)
+* `fail2ban-client` extended (gh-2975):
+ - `fail2ban-client status --all [flavor]` - returns status of fail2ban and all jails in usual form
+ - `fail2ban-client stats` - returns statistic in form of table (jail, backend, found and banned counts)
+ - `fail2ban-client statistic` or `fail2ban-client statistics` - same as `fail2ban-client stats` (aliases for stats)
+ - `fail2ban-client status --all stats` - (undocumented, flavor "stats") returns statistic of all jails in form of python dict
+* `fail2ban-regex` extended to load settings from jail (by simple name it'd prefer jail to the filter now, gh-2655);
+ to load the settings from filter one could use:
+```diff
+- fail2ban-regex ... sshd ; # jail
++ fail2ban-regex ... sshd.conf ; # filter
+# or:
++ fail2ban-regex ... filter.d/sshd ; # filter
+```
+* better auto-detection for IPv6 support (`allowipv6 = auto` by default), trying to check sysctl net.ipv6.conf.all.disable_ipv6
+ (value read from `/proc/sys/net/ipv6/conf/all/disable_ipv6`) if available, otherwise it looks for local IPv6 addresses on network interfaces
+ (if available for the platform) and uses DNS to find a local IPv6 address only as a last resort
+* improve `ignoreself` by considering all local addresses from network interfaces additionally to IPs from hostnames (gh-3132)
+* `action.d/mikrotik.conf` - new action for mikrotik routerOS, adds and removes entries from address lists on the router (gh-2860)
+* `action.d/pf.conf` - pf action extended with support of `protocol=all` (gh-3503)
+* `action.d/smtp.py` - added optional support for TLS connections via the `ssl` arg.
+* `filter.d/dante.conf` - new filter for Dante SOCKS server (gh-2112)
+* `filter.d/exim.conf`, `filter.d/exim-spam.conf`:
+ - messages are prefiltered by `prefregex` now
+ - filter can bypass additional timestamp or pid that may be logged via systemd-journal or syslog-ng (gh-3060)
+ - rewrite host line regex to cover all of exim's varied log_selector states (gh-3263, gh-3701, gh-3702)
+ - fixed "dropped: too many ..." regex, also matching unrecognized commands now (gh-3502)
+* `filter.d/named-refused.conf` - denied now allows any reason in parentheses as suffix (gh-3697)
+* `filter.d/nginx-forbidden.conf` - new filter to ban forbidden locations, e. g. using `deny` directive (gh-2226)
+* `filter.d/routeros-auth.conf` - new filter detecting failed login attempts in the log produced by MikroTik RouterOS
+* `filter.d/sshd.conf`:
+ - avoid double counting for "maximum authentication attempts exceeded" (gh-3502)
+ - message "Disconnecting ... Too many authentication failures" is not a failure anymore
+ - mode `ddos`/`aggressive` extended to match new messages caused by port scanner, wrong payload on ssh port (gh-3486):
+ * message authentication code incorrect [preauth]
+ * connection corrupted [preauth]
+ * timeout before authentication
+
+
ver. 1.0.2 (2022/11/09) - finally-war-game-test-tape-not-a-nuclear-alarm
-----------
@@ -53,7 +118,7 @@ ver. 1.0.1 (2022/09/27) - energy-equals-mass-times-the-speed-of-light-squared
* [stability] solves race condition with uncontrolled growth of failure list (jail with too many matches,
that did not cause ban), behavior changed to ban ASAP, gh-2945
* fixes search for the best datepattern - e. g. if line is too short, boundaries check for previously known
- unprecise pattern may fail on incomplete lines (logging break-off, no flush, etc), gh-3020
+ imprecise pattern may fail on incomplete lines (logging break-off, no flush, etc), gh-3020
* [stability, performance] backend `systemd`:
- fixes error "local variable 'line' referenced before assignment", introduced in 55d7d9e2, gh-3097
- don't update database too often (every 10 ticks or ~ 10 seconds in production)
@@ -391,7 +456,7 @@ filter = flt[logtype=short]
* `filter.d/znc-adminlog.conf`: new filter for ZNC (IRC bouncer); requires the adminlog module to be loaded
### Enhancements
-* introduced new options: `dbmaxmatches` (fail2ban.conf) and `maxmatches` (jail.conf) to contol
+* introduced new options: `dbmaxmatches` (fail2ban.conf) and `maxmatches` (jail.conf) to control
how many matches per ticket fail2ban can hold in memory and store in database (gh-2402, gh-2118);
* fail2ban.conf: introduced new section `[Thread]` and option `stacksize` to configure default size
of the stack for threads running in fail2ban (gh-2356), it could be set in `fail2ban.local` to
@@ -501,7 +566,7 @@ ver. 0.10.3 (2018/04/04) - the-time-is-always-right-to-do-what-is-right
- fixed root login refused regex (optional port before preauth, gh-2080);
- avoid banning of legitimate users when pam_unix used in combination with other password method, so
bypass pam_unix failures if accepted available for this user gh-2070;
- - amend to gh-1263 with better handling of multiple attempts (failures for different user-names recognized immediatelly);
+ - amend to gh-1263 with better handling of multiple attempts (failures for different user-names recognized immediately);
- mode `ddos` (and `aggressive`) extended to catch `Connection closed by ... [preauth]`, so in DDOS mode
it counts failure on closing connection within preauth-stage (gh-2085);
* `action.d/abuseipdb.conf`: fixed curl cypher errors and comment quote-issue (gh-2044, gh-2101);
@@ -831,7 +896,7 @@ ver. 0.10.0-alpha-1 (2016/07/14) - ipv6-support-etc
sane environment in error case of `actioncheck`.
* Reporting via abuseipdb.com:
- Bans can now be reported to abuseipdb
- - Catagories must be set in the config
+ - Categories must be set in the config
- Relevant log lines included in report
### Enhancements
@@ -968,7 +1033,7 @@ releases.
- Rewritten without end-anchor ($), because of potential vulnerability on very long URLs.
* filter.d/apache-badbots.conf - extended to recognize Jorgee Vulnerability Scanner (gh-1882)
* filter.d/asterisk.conf
- - fixed failregex AMI Asterisk authentification failed (see gh-1302)
+ - fixed failregex AMI Asterisk authentication failed (see gh-1302)
- removed invalid (vulnerable) regex blocking IPs using forign data (from header "from")
thus not the IP-address that really originates the request (see gh-1927)
- fixed failregex for the SQL-injection attempts with single-quotes in connect-string (see gh-2011)
@@ -1268,7 +1333,7 @@ ver. 0.9.3 (2015/08/01) - lets-all-stay-friends
* `filter.d/roundcube-auth.conf`
- Updated regex to work with 'errors' log (1.0.5 and 1.1.1)
- Added regex to work with 'userlogins' log
-* `action.d/sendmail*.conf` - use LC_ALL (superseeding LC_TIME) to override
+* `action.d/sendmail*.conf` - use LC_ALL (superseding LC_TIME) to override
locale on systems with customized LC_ALL
* performance fix: minimizes connection overhead, close socket only at
communication end (gh-1099)
@@ -1438,7 +1503,7 @@ ver. 0.9.1 (2014/10/29) - better, faster, stronger
* Ignored IPs are no longer banned when being restored from persistent
database
* Manually unbanned IPs are now removed from persistent database, such they
- wont be banned again when Fail2Ban is restarted
+ won't be banned again when Fail2Ban is restarted
* Pass "bantime" parameter to the actions in default jail's action
definition(s)
* `filters.d/sieve.conf` - fixed typo in _daemon. Thanks Jisoo Park
@@ -1729,7 +1794,7 @@ those filters were used.
all platforms to ensure permissions are the same before and after a ban.
Closes gh-266. hostsdeny supports daemon_list now too.
* `action.d/bsd-ipfw` - action option unused. Change blocktype to port unreach
- instead of deny for consistancy.
+ instead of deny for consistency.
* `filter.d/dovecot` - added to support different dovecot failure
"..disallowed plaintext auth". Closes Debian bug #709324
* `filter.d/roundcube-auth` - timezone offset can be positive or negative
@@ -1919,7 +1984,7 @@ fail2ban-users mailing list and IRC.
### New Features
- Yaroslav Halchenko
* [9ba27353] Add support for `jail.d/{confilefile}` and `fail2ban.d/{configfile}`
- to provide additional flexibility to system adminstrators. Thanks to
+ to provide additional flexibility to system administrators. Thanks to
beilber for the idea. Closes gh-114.
* [3ce53e87] Add exim filter.
- Erwan Ben Souiden
@@ -2070,7 +2135,7 @@ ver. 0.8.7 (2012/07/31) - stable
* [47c03a2] files/nagios - spelling/grammar fixes
* [b083038] updated Free Software Foundation's address
* [9092a63] changed TLDs to invalid domains, in accordance with RFC 2606
- * [642d9af,3282f86] reformated printing of jail's name to be consistent
+ * [642d9af,3282f86] reformatted printing of jail's name to be consistent
with init's info messages
* [3282f86] uniform use of capitalized Jail in the messages
- Leonardo Chiquitto
@@ -2415,7 +2480,7 @@ ver. 0.6.1 (2006/03/16) - stable
- Fixed crash when time format does not match data
- Propagated patch from Debian to fix fail2ban search path addition to the path
search list: now it is added first. Thanks to Nick Craig-Wood
-- Added SMTP authentification for mail notification. Thanks to Markus Hoffmann
+- Added SMTP authentication for mail notification. Thanks to Markus Hoffmann
- Removed debug mode as it is confusing for people
- Added parsing of timestamp in TAI64N format (#1275325). Thanks to Mark
Edgington
@@ -2448,7 +2513,7 @@ ver. 0.5.5 (2005/10/26) - beta
further adjusted by upstream author).
* Added -f command line parameter for [findtime].
* Added a cleanup of firewall rules on emergency shutdown when unknown
- exception is catched.
+ exception is caught.
* Fail2ban should not crash now if a wrong file name is specified in config.
* reordered code a bit so that log targets are setup right after background
and then only loglevel (verbose, debug) is processed, so the warning could
diff --git a/FILTERS b/FILTERS
index 2ed6281d..18b8825c 100644
--- a/FILTERS
+++ b/FILTERS
@@ -129,7 +129,7 @@ Date/Time
---------
At the moment, Fail2Ban depends on log lines to have time stamps. That is why
-before starting to develop failregex, check if your log line format known to
+before starting to develop failregex, check if your log line format is known to
Fail2Ban. Copy the time component from the log line and append an IP address to
test with following command::
diff --git a/MANIFEST b/MANIFEST
index fec09dde..972a2b48 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -40,6 +40,7 @@ config/action.d/mail.conf
config/action.d/mail-whois-common.conf
config/action.d/mail-whois.conf
config/action.d/mail-whois-lines.conf
+config/action.d/mikrotik.conf
config/action.d/mynetwatchman.conf
config/action.d/netscaler.conf
config/action.d/nftables-allports.conf
@@ -90,6 +91,7 @@ config/filter.d/counter-strike.conf
config/filter.d/courier-auth.conf
config/filter.d/courier-smtp.conf
config/filter.d/cyrus-imap.conf
+config/filter.d/dante.conf
config/filter.d/directadmin.conf
config/filter.d/domino-smtp.conf
config/filter.d/dovecot.conf
@@ -121,6 +123,8 @@ config/filter.d/nagios.conf
config/filter.d/named-refused.conf
config/filter.d/nginx-bad-request.conf
config/filter.d/nginx-botsearch.conf
+config/filter.d/nginx-error-common.conf
+config/filter.d/nginx-forbidden.conf
config/filter.d/nginx-http-auth.conf
config/filter.d/nginx-limit-req.conf
config/filter.d/nsd.conf
@@ -138,6 +142,7 @@ config/filter.d/pure-ftpd.conf
config/filter.d/qmail.conf
config/filter.d/recidive.conf
config/filter.d/roundcube-auth.conf
+config/filter.d/routeros-auth.conf
config/filter.d/scanlogd.conf
config/filter.d/screensharingd.conf
config/filter.d/selinux-common.conf
@@ -175,7 +180,6 @@ CONTRIBUTING.md
COPYING
.coveragerc
DEVELOP
-fail2ban-2to3
fail2ban/client/actionreader.py
fail2ban/client/beautifier.py
fail2ban/client/configparserinc.py
@@ -191,6 +195,8 @@ fail2ban/client/filterreader.py
fail2ban/client/__init__.py
fail2ban/client/jailreader.py
fail2ban/client/jailsreader.py
+fail2ban/compat/asynchat.py
+fail2ban/compat/asyncore.py
fail2ban/exceptions.py
fail2ban/helpers.py
fail2ban/__init__.py
@@ -204,7 +210,6 @@ fail2ban/server/datedetector.py
fail2ban/server/datetemplate.py
fail2ban/server/failmanager.py
fail2ban/server/failregex.py
-fail2ban/server/filtergamin.py
fail2ban/server/filterpoll.py
fail2ban/server/filter.py
fail2ban/server/filterpyinotify.py
@@ -272,7 +277,7 @@ fail2ban/tests/files/config/apache-auth/noentry/.htaccess
fail2ban/tests/files/config/apache-auth/README
fail2ban/tests/files/database_v1.db
fail2ban/tests/files/database_v2.db
-fail2ban/tests/files/filter.d/substition.conf
+fail2ban/tests/files/filter.d/substitution.conf
fail2ban/tests/files/filter.d/testcase01.conf
fail2ban/tests/files/filter.d/testcase02.conf
fail2ban/tests/files/filter.d/testcase02.local
@@ -300,6 +305,7 @@ fail2ban/tests/files/logs/counter-strike
fail2ban/tests/files/logs/courier-auth
fail2ban/tests/files/logs/courier-smtp
fail2ban/tests/files/logs/cyrus-imap
+fail2ban/tests/files/logs/dante
fail2ban/tests/files/logs/directadmin
fail2ban/tests/files/logs/domino-smtp
fail2ban/tests/files/logs/dovecot
@@ -329,6 +335,7 @@ fail2ban/tests/files/logs/nagios
fail2ban/tests/files/logs/named-refused
fail2ban/tests/files/logs/nginx-bad-request
fail2ban/tests/files/logs/nginx-botsearch
+fail2ban/tests/files/logs/nginx-forbidden
fail2ban/tests/files/logs/nginx-http-auth
fail2ban/tests/files/logs/nginx-limit-req
fail2ban/tests/files/logs/nsd
@@ -346,6 +353,7 @@ fail2ban/tests/files/logs/pure-ftpd
fail2ban/tests/files/logs/qmail
fail2ban/tests/files/logs/recidive
fail2ban/tests/files/logs/roundcube-auth
+fail2ban/tests/files/logs/routeros-auth
fail2ban/tests/files/logs/scanlogd
fail2ban/tests/files/logs/screensharingd
fail2ban/tests/files/logs/selinux-ssh
diff --git a/README.md b/README.md
index 6bf94c25..601d72ca 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
/ _|__ _(_) |_ ) |__ __ _ _ _
| _/ _` | | |/ /| '_ \/ _` | ' \
|_| \__,_|_|_/___|_.__/\__,_|_||_|
- v1.0.1.dev1 20??/??/??
+ v1.1.0.dev1 20??/??/??
## Fail2Ban: ban hosts that cause multiple authentication errors
@@ -29,26 +29,27 @@ and the website: https://www.fail2ban.org
Installation:
-------------
-**It is possible that Fail2Ban is already packaged for your distribution. In
-this case, you should use that instead.**
+Fail2Ban is likely already packaged for your Linux distribution and [can be installed with a simple command](https://github.com/fail2ban/fail2ban/wiki/How-to-install-fail2ban-packages).
+
+If your distribution is not listed, you can install from GitHub:
Required:
-- [Python2 >= 2.7 or Python >= 3.2](https://www.python.org) or [PyPy](https://pypy.org)
-- python-setuptools, python-distutils or python3-setuptools for installation from source
+- [Python >= 3.5](https://www.python.org) or [PyPy3](https://pypy.org)
+- python-setuptools, python-distutils (or python3-setuptools) for installation from source
Optional:
- [pyinotify >= 0.8.3](https://github.com/seb-m/pyinotify), may require:
* Linux >= 2.6.13
-- [gamin >= 0.0.21](http://www.gnome.org/~veillard/gamin)
- [systemd >= 204](http://www.freedesktop.org/wiki/Software/systemd) and python bindings:
* [python-systemd package](https://www.freedesktop.org/software/systemd/python-systemd/index.html)
- [dnspython](http://www.dnspython.org/)
+- [pyasyncore](https://pypi.org/project/pyasyncore/) and [pyasynchat](https://pypi.org/project/pyasynchat/) (normally bundled with fail2ban, needed for python 3.12+ only)
To install:
- tar xvfj fail2ban-1.0.1.tar.bz2
- cd fail2ban-1.0.1
+ tar xvfj fail2ban-master.tar.bz2
+ cd fail2ban-master
sudo python setup.py install
Alternatively, you can clone the source from GitHub to a directory of Your choice, and do the install from there. Pick the correct branch, for example, master or 0.11
@@ -90,11 +91,7 @@ fail2ban(1) and jail.conf(5) manpages for further references.
Code status:
------------
-* travis-ci.org: [![tests status](https://secure.travis-ci.org/fail2ban/fail2ban.svg?branch=master)](https://travis-ci.org/fail2ban/fail2ban?branch=master) / [![tests status](https://secure.travis-ci.org/fail2ban/fail2ban.svg?branch=0.11)](https://travis-ci.org/fail2ban/fail2ban?branch=0.11) (0.11 branch) / [![tests status](https://secure.travis-ci.org/fail2ban/fail2ban.svg?branch=0.10)](https://travis-ci.org/fail2ban/fail2ban?branch=0.10) (0.10 branch)
-
-* coveralls.io: [![Coverage Status](https://coveralls.io/repos/fail2ban/fail2ban/badge.svg?branch=master)](https://coveralls.io/github/fail2ban/fail2ban?branch=master) / [![Coverage Status](https://coveralls.io/repos/fail2ban/fail2ban/badge.svg?branch=0.11)](https://coveralls.io/github/fail2ban/fail2ban?branch=0.11) (0.11 branch) / [![Coverage Status](https://coveralls.io/repos/fail2ban/fail2ban/badge.svg?branch=0.10)](https://coveralls.io/github/fail2ban/fail2ban?branch=0.10) / (0.10 branch)
-
-* codecov.io: [![codecov.io](https://codecov.io/gh/fail2ban/fail2ban/coverage.svg?branch=master)](https://codecov.io/gh/fail2ban/fail2ban/branch/master) / [![codecov.io](https://codecov.io/gh/fail2ban/fail2ban/coverage.svg?branch=0.11)](https://codecov.io/gh/fail2ban/fail2ban/branch/0.11) (0.11 branch) / [![codecov.io](https://codecov.io/gh/fail2ban/fail2ban/coverage.svg?branch=0.10)](https://codecov.io/gh/fail2ban/fail2ban/branch/0.10) (0.10 branch)
+* [![CI](https://github.com/fail2ban/fail2ban/actions/workflows/main.yml/badge.svg)](https://github.com/fail2ban/fail2ban/actions/workflows/main.yml)
Contact:
--------
diff --git a/RELEASE b/RELEASE
index 2b2bc58e..96f402a0 100644
--- a/RELEASE
+++ b/RELEASE
@@ -13,7 +13,7 @@ Preparation
* Check distribution patches and see if they can be included
* https://apps.fedoraproject.org/packages/fail2ban/sources
- * http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/net-analyzer/fail2ban/
+ * https://gitweb.gentoo.org/repo/gentoo.git/tree/net-analyzer/fail2ban
* http://svnweb.freebsd.org/ports/head/security/py-fail2ban/
* https://build.opensuse.org/package/show?package=fail2ban&project=openSUSE%3AFactory
* http://sophie.zarb.org/sources/fail2ban (Mageia)
@@ -49,7 +49,7 @@ Preparation
ad-hoc bash script to run in a clean clone:
- find -type f | grep -v -e '\.git' -e '/doc/' -e '\.travis' -e MANIFEST | sed -e 's,^\./,,g' | while read f; do grep -ne "^$f\$" MANIFEST >/dev/null || echo "$f" ; done
+ find -type f | grep -v -e '\.git' -e '/doc/' -e MANIFEST | sed -e 's,^\./,,g' | while read f; do grep -ne "^$f\$" MANIFEST >/dev/null || echo "$f" ; done
or an alternative for comparison with previous release
@@ -115,7 +115,7 @@ Pre Release
* Arch Linux:
- * https://www.archlinux.org/packages/community/any/fail2ban/
+ * https://www.archlinux.org/packages/extra/any/fail2ban/
* Debian: Yaroslav Halchenko
@@ -134,7 +134,7 @@ Pre Release
* Gentoo: netmon@gentoo.org
- * http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/net-analyzer/fail2ban/metadata.xml?view=markup
+ * https://gitweb.gentoo.org/repo/gentoo.git/tree/net-analyzer/fail2ban/metadata.xml
* https://bugs.gentoo.org/buglist.cgi?quicksearch=fail2ban
* openSUSE: Stephan Kulow
diff --git a/THANKS b/THANKS
index 9dd2e47c..7c008c2c 100644
--- a/THANKS
+++ b/THANKS
@@ -22,6 +22,7 @@ Andrey G. Grozin
Andy Fragen
Arturo 'Buanzo' Busleiman
Axel Thimm
+Balazs Mateffy
Bas van den Dikkenberg
Beau Raines
Bill Heaton
diff --git a/bin/fail2ban-client b/bin/fail2ban-client
index 5e6843ed..31a701aa 100755
--- a/bin/fail2ban-client
+++ b/bin/fail2ban-client
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
diff --git a/bin/fail2ban-regex b/bin/fail2ban-regex
index 09044f0a..e60d2542 100755
--- a/bin/fail2ban-regex
+++ b/bin/fail2ban-regex
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
#
diff --git a/bin/fail2ban-server b/bin/fail2ban-server
index 03dc0fd3..23f2fa90 100755
--- a/bin/fail2ban-server
+++ b/bin/fail2ban-server
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
diff --git a/bin/fail2ban-testcases b/bin/fail2ban-testcases
index ba3d90b9..48aae4b5 100755
--- a/bin/fail2ban-testcases
+++ b/bin/fail2ban-testcases
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
"""Script to run Fail2Ban tests battery
diff --git a/config/action.d/bsd-ipfw.conf b/config/action.d/bsd-ipfw.conf
index 444192d3..d0029454 100644
--- a/config/action.d/bsd-ipfw.conf
+++ b/config/action.d/bsd-ipfw.conf
@@ -80,7 +80,7 @@ block = ip
# Option: blocktype
# Notes.: How to block the traffic. Use a action from man 5 ipfw
# Common values: deny, unreach port, reset
-# ACTION defination at the top of man ipfw for allowed values.
+# ACTION definition at the top of man ipfw for allowed values.
# Values: STRING
#
blocktype = unreach port
diff --git a/config/action.d/cloudflare-token.conf b/config/action.d/cloudflare-token.conf
index 8c5c37de..ff5f5c4e 100644
--- a/config/action.d/cloudflare-token.conf
+++ b/config/action.d/cloudflare-token.conf
@@ -50,11 +50,12 @@ actionban = curl -s -X POST "<_cf_api_url>" \
# unix timestamp of the ban time
# Values: CMD
#
-actionunban = id=$(curl -s -X GET "<_cf_api_url>?mode=&notes=&configuration.target=&configuration.value=" \
- <_cf_api_prms> \
- | awk -F"[,:}]" '{for(i=1;i<=NF;i++){if($i~/'id'\042/){print $(i+1)}}}' \
- | tr -d ' "' \
- | head -n 1)
+actionunban = id=$(curl -s -X GET "<_cf_api_url>" \
+ --data-urlencode "mode=" --data-urlencode "notes=" --data-urlencode "configuration.target=" --data-urlencode "configuration.value=" \
+ <_cf_api_prms> \
+ | awk -F"[,:}]" '{for(i=1;i<=NF;i++){if($i~/'id'\042/){print $(i+1)}}}' \
+ | tr -d ' "' \
+ | head -n 1)
if [ -z "$id" ]; then echo ": id for cannot be found using target "; exit 0; fi; \
curl -s -X DELETE "<_cf_api_url>/$id" \
<_cf_api_prms> \
@@ -67,7 +68,7 @@ _cf_api_prms = -H "Authorization: Bearer " -H "Content-Type: applicatio
# Declare your Cloudflare Authorization Bearer Token in the [DEFAULT] section of your jail.local file.
-# The Cloudflare of hte domain you want to manage.
+# The Cloudflare of the domain you want to manage.
#
# cfzone =
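The reworked `actionunban` above hands the lookup parameters to `curl --data-urlencode` so values containing spaces or special characters reach the Cloudflare API intact. A rough Python illustration of the same encoding step (the parameter values are placeholders):

```python
from urllib.parse import urlencode

# Placeholder lookup parameters, similar in shape to what the action passes to curl;
# urlencode escapes spaces and special characters so the query string stays valid.
params = {
    "mode": "block",
    "notes": "Fail2Ban jail-name",   # contains a space -> must be encoded
    "configuration.target": "ip",
    "configuration.value": "192.0.2.1",
}
print(urlencode(params))
# mode=block&notes=Fail2Ban+jail-name&configuration.target=ip&configuration.value=192.0.2.1
```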
diff --git a/config/action.d/firewallcmd-ipset.conf b/config/action.d/firewallcmd-ipset.conf
index c36ba694..c5282c62 100644
--- a/config/action.d/firewallcmd-ipset.conf
+++ b/config/action.d/firewallcmd-ipset.conf
@@ -35,7 +35,7 @@ actionunban = /actionunban>
[ipstype_ipset]
-actionstart = ipset -exist create hash:ip timeout
+actionstart = ipset -exist create hash:ip timeout maxelem
actionflush = ipset flush
@@ -47,7 +47,7 @@ actionunban = ipset -exist del
[ipstype_firewalld]
-actionstart = firewall-cmd --direct --new-ipset= --type=hash:ip --option=timeout=
+actionstart = firewall-cmd --direct --new-ipset= --type=hash:ip --option=timeout= --option=maxelem=
# TODO: there doesn't seem to be an explicit way to invoke the ipset flush function using firewall-cmd
actionflush =
@@ -77,7 +77,13 @@ default-ipsettime = 0
# Values: [ NUM ] Default: 0 (managed by fail2ban by unban)
ipsettime = 0
-# expresion to caclulate timeout from bantime, example:
+# Option: maxelem
+# Notes: maximum number of elements which can be stored in the ipset
+# You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
+
+# expression to calculate timeout from bantime, example:
# banaction = %(known/banaction)s[ipsettime='']
timeout-bantime = $([ "" -le 2147483 ] && echo "" || echo 0)
@@ -118,4 +124,4 @@ firewalld_familyopt = --option=family=inet6
# DEV NOTES:
#
# Author: Edgar Hoch, Daniel Black, Sergey Brester and Mihail Politaev
-# firewallcmd-new / iptables-ipset-proto6 combined for maximium goodness
+# firewallcmd-new / iptables-ipset-proto6 combined for maximum goodness
diff --git a/config/action.d/iptables-ipset-proto4.conf b/config/action.d/iptables-ipset-proto4.conf
index 37624284..51648dbb 100644
--- a/config/action.d/iptables-ipset-proto4.conf
+++ b/config/action.d/iptables-ipset-proto4.conf
@@ -27,7 +27,7 @@ before = iptables.conf
# Notes.: command executed on demand at the first ban (or at the start of Fail2Ban if actionstart_on_demand is set to false).
# Values: CMD
#
-actionstart = ipset --create f2b- iphash
+actionstart = ipset --create f2b- maxelem iphash
<_ipt_add_rules>
@@ -61,6 +61,14 @@ actionban = ipset --test f2b- || ipset --add f2b-
#
actionunban = ipset --test f2b- && ipset --del f2b-
-# Several capabilities used internaly:
+# Several capabilities used internally:
rule-jump = -m set --match-set f2b- src -j
+
+[Init]
+
+# Option: maxelem
+# Notes: maximum number of elements which can be stored in the ipset
+# You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
diff --git a/config/action.d/iptables-ipset.conf b/config/action.d/iptables-ipset.conf
index b44e6ec4..07f89415 100644
--- a/config/action.d/iptables-ipset.conf
+++ b/config/action.d/iptables-ipset.conf
@@ -24,7 +24,7 @@ before = iptables.conf
# Notes.: command executed on demand at the first ban (or at the start of Fail2Ban if actionstart_on_demand is set to false).
# Values: CMD
#
-actionstart = ipset -exist create hash:ip timeout
+actionstart = ipset -exist create hash:ip timeout maxelem
<_ipt_add_rules>
# Option: actionflush
@@ -59,7 +59,7 @@ actionban = ipset -exist add timeout
#
actionunban = ipset -exist del
-# Several capabilities used internaly:
+# Several capabilities used internally:
rule-jump = -m set --match-set src -j
@@ -76,7 +76,13 @@ default-ipsettime = 0
# Values: [ NUM ] Default: 0 (managed by fail2ban by unban)
ipsettime = 0
-# expresion to caclulate timeout from bantime, example:
+# Option: maxelem
+# Notes: maximum number of elements which can be stored in the ipset
+# You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
+
+# expression to calculate timeout from bantime, example:
# banaction = %(known/banaction)s[ipsettime='']
timeout-bantime = $([ "" -le 2147483 ] && echo "" || echo 0)
diff --git a/config/action.d/iptables.conf b/config/action.d/iptables.conf
index 67d496f5..382716ac 100644
--- a/config/action.d/iptables.conf
+++ b/config/action.d/iptables.conf
@@ -62,7 +62,7 @@ pre-rule =
rule-jump = -j <_ipt_rule_target>
-# Several capabilities used internaly:
+# Several capabilities used internally:
_ipt_for_proto-iter = for proto in $(echo '' | sed 's/,/ /g'); do
_ipt_for_proto-done = done
diff --git a/config/action.d/ipthreat.conf b/config/action.d/ipthreat.conf
index 193a60f2..5a14fa3d 100644
--- a/config/action.d/ipthreat.conf
+++ b/config/action.d/ipthreat.conf
@@ -47,7 +47,7 @@
# BadBot 256 Bad bot that is not honoring robots.txt or just flooding with too many requests, etc
# Compromised 512 The ip has been taken over by malware or botnet
# Phishing 1024 The ip is involved in phishing or spoofing
-# Iot 2048 The ip has targetted an iot (Internet of Things) device
+# Iot 2048 The ip has targeted an iot (Internet of Things) device
# PortScan 4096 Port scan
# See https://ipthreat.net/bulkreportformat for more information
# ```
diff --git a/config/action.d/mikrotik.conf b/config/action.d/mikrotik.conf
new file mode 100644
index 00000000..9343c86b
--- /dev/null
+++ b/config/action.d/mikrotik.conf
@@ -0,0 +1,84 @@
+# Fail2Ban configuration file
+#
+# Mikrotik routerOS action to add/remove address-list entries
+#
+# Author: Duncan Bellamy
+# based on forum.mikrotik.com post by pakjebakmeel
+#
+# in the instructions:
+# (10.0.0.1 is ip of mikrotik router)
+# (10.0.0.2 is ip of fail2ban machine)
+#
+# on fail2ban machine:
+# sudo mkdir /var/lib/fail2ban/ssh
+# sudo chmod 700 /var/lib/fail2ban/ssh
+# sudo ssh-keygen -N "" -f /var/lib/fail2ban/ssh/fail2ban_id_rsa
+# sudo scp /var/lib/fail2ban/ssh/fail2ban_id_rsa.pub admin@10.0.0.1:/
+# ssh admin@10.0.0.1
+#
+# on mikrotik router:
+# /user add name=miki-f2b group=write address=10.0.0.2 password=""
+# /user ssh-keys import public-key-file=fail2ban_id_rsa.pub user=miki-f2b
+# /quit
+#
+# on fail2ban machine:
+# (check password login fails)
+# ssh miki-f2b@10.0.0.1
+# (check private key works)
+# sudo ssh -i /var/lib/fail2ban/ssh/fail2ban_id_rsa miki-f2b@10.0.0.1
+#
+# Then create rules on the mikrotik router that use the address
+# list(s) maintained by fail2ban, e.g. in the forward chain
+# drop from the address list, or in the forward chain drop
+# from the address list to the server
+#
+# example extract from jail.local overriding some defaults
+# action = mikrotik[keyfile="%(mkeyfile)s", user="%(muser)s", host="%(mhost)s", list="%(mlist)s"]
+#
+# ignoreip = 127.0.0.1/8 192.168.0.0/24
+
+# mkeyfile = /etc/fail2ban/ssh/mykey_id_rsa
+# muser = myuser
+# mhost = 192.168.0.1
+# mlist = BAD LIST
+
+[Definition]
+
+actionstart =
+
+actionstop = %(actionflush)s
+
+actionflush = %(command)s "/ip firewall address-list remove [find list=\"%(list)s\" comment~\"%(startcomment)s-*\"]"
+
+actioncheck =
+
+actionban = %(command)s "/ip firewall address-list add list=\"%(list)s\" address= comment=%(comment)s"
+
+actionunban = %(command)s "/ip firewall address-list remove [find list=\"%(list)s\" comment=%(comment)s]"
+
+command = ssh -l %(user)s -p%(port)s -i %(keyfile)s %(host)s
+
+# Option: user
+# Notes.: username to use when connecting to routerOS
+user =
+# Option: port
+# Notes.: port to use when connecting to routerOS
+port = 22
+# Option: keyfile
+# Notes.: ssh private key to use for connecting to routerOS
+keyfile =
+# Option: host
+# Notes.: hostname or ip of router
+host =
+# Option: list
+# Notes.: name of "address-list" to use on router
+list = Fail2Ban
+# Option: startcomment
+# Notes.: used as a prefix to all comments, and used to match for flushing rules
+startcomment = f2b-
+# Option: comment
+# Notes.: comment to use on routerOS (must be unique as used for ip address removal)
+comment = %(startcomment)s-
+
+[Init]
+name="%(__name__)s"
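For reference, each ban/unban boils down to a single ssh call against RouterOS. A standalone Python sketch of the equivalent ban command, using the example user/host/key from the instructions above; the banned address is a placeholder that fail2ban would normally substitute:

```python
import subprocess

# Values taken from the example setup above; the banned IP is a placeholder that
# fail2ban substitutes from the ticket at runtime.
user, port, keyfile, host = "miki-f2b", "22", "/var/lib/fail2ban/ssh/fail2ban_id_rsa", "10.0.0.1"
addr_list, comment, banned_ip = "Fail2Ban", "f2b-sshd", "192.0.2.1"

ros_cmd = '/ip firewall address-list add list="%s" address=%s comment=%s' % (
    addr_list, banned_ip, comment)
subprocess.run(["ssh", "-l", user, "-p", port, "-i", keyfile, host, ros_cmd], check=True)
```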
diff --git a/config/action.d/netscaler.conf b/config/action.d/netscaler.conf
index 87f7e7bf..0432031f 100644
--- a/config/action.d/netscaler.conf
+++ b/config/action.d/netscaler.conf
@@ -5,7 +5,7 @@
# The script will add offender IPs to a dataset on netscaler, the dataset can then be used to block the IPs at a cs/vserver or global level
# This dataset is then used to block IPs using responder policies on the netscaler.
#
-# The script assumes using HTTPS with unsecure certificate to access the netscaler,
+# The script assumes using HTTPS with insecure certificate to access the netscaler,
# if you have a valid certificate installed remove the -k from the curl lines, or if you want http change it accordingly (and remove the -k)
#
# This action depends on curl
diff --git a/config/action.d/nftables.conf b/config/action.d/nftables.conf
index 77cf3661..98f3c64e 100644
--- a/config/action.d/nftables.conf
+++ b/config/action.d/nftables.conf
@@ -44,7 +44,7 @@ match = >
#
rule_stat = %(match)s saddr @
-# optional interator over protocol's:
+# optional iterator over protocol's:
_nft_for_proto-custom-iter =
_nft_for_proto-custom-done =
_nft_for_proto-allports-iter =
diff --git a/config/action.d/pf.conf b/config/action.d/pf.conf
index 933b4de0..7181ed96 100644
--- a/config/action.d/pf.conf
+++ b/config/action.d/pf.conf
@@ -4,6 +4,7 @@
#
# Author: Nick Hilliard
# Modified by: Alexander Koeppe making PF work seamless and with IPv4 and IPv6
+# Modified by: Balazs Mateffy adding allproto option so all traffic gets blocked from the malicious source
#
#
@@ -26,9 +27,11 @@
# }
# to your main pf ruleset, where "namei" are the names of the jails
# which invoke this action
+# to block all protocols use the pf[protocol=all] option
actionstart = echo "table <-> persist counters" | -f-
port=""; if [ "$port" != "" ] && case "$port" in \{*) false;; esac; then port="{$port}"; fi
- echo " proto from <-> to " | -f-
+ protocol=""; if [ "$protocol" != "all" ]; then protocol="proto $protocol"; else protocol=all; fi
+ echo " $protocol from <-> to " | -f-
# Option: start_on_demand - to start action on demand
# Example: `action=pf[actionstart_on_demand=true]`
@@ -98,6 +101,7 @@ tablename = f2b
#
# The action you want pf to take.
# Probably, you want "block quick", but adjust as needed.
+# If you want to log all blocked traffic use "block log quick"
block = block quick
# Option: protocol
diff --git a/config/action.d/shorewall-ipset-proto6.conf b/config/action.d/shorewall-ipset-proto6.conf
index eacb53d9..fade8107 100644
--- a/config/action.d/shorewall-ipset-proto6.conf
+++ b/config/action.d/shorewall-ipset-proto6.conf
@@ -51,7 +51,7 @@
# Values: CMD
#
actionstart = if ! ipset -quiet -name list f2b- >/dev/null;
- then ipset -quiet -exist create f2b- hash:ip timeout ;
+ then ipset -quiet -exist create f2b- hash:ip timeout maxelem ;
fi
# Option: actionstop
@@ -88,6 +88,14 @@ default-ipsettime = 0
# Values: [ NUM ] Default: 0 (managed by fail2ban by unban)
ipsettime = 0
-# expresion to caclulate timeout from bantime, example:
+# expression to calculate timeout from bantime, example:
# banaction = %(known/banaction)s[ipsettime='']
timeout-bantime = $([ "" -le 2147483 ] && echo "" || echo 0)
+
+[Init]
+
+# Option: maxelem
+# Notes: maximum number of elements which can be stored in the ipset
+# You may want to increase this for long-duration/high-volume jails
+# Values: [ NUM ] Default: 65536
+maxelem = 65536
diff --git a/config/action.d/smtp.py b/config/action.d/smtp.py
index 5c27d0ff..40eda1b5 100644
--- a/config/action.d/smtp.py
+++ b/config/action.d/smtp.py
@@ -75,7 +75,7 @@ class SMTPAction(ActionBase):
"""
def __init__(
- self, jail, name, host="localhost", user=None, password=None,
+ self, jail, name, host="localhost", ssl=False, user=None, password=None,
sendername="Fail2Ban", sender="fail2ban", dest="root", matches=None):
"""Initialise action.
@@ -88,6 +88,8 @@ class SMTPAction(ActionBase):
host : str, optional
SMTP host, of host:port format. Default host "localhost" and
port "25"
+ ssl : bool, optional
+ Whether to use TLS for the SMTP connection or not. Default False.
user : str, optional
Username used for authentication with SMTP server.
password : str, optional
@@ -109,7 +111,7 @@ class SMTPAction(ActionBase):
super(SMTPAction, self).__init__(jail, name)
self.host = host
- #TODO: self.ssl = ssl
+ self.ssl = ssl
self.user = user
self.password =password
@@ -155,10 +157,18 @@ class SMTPAction(ActionBase):
msg['To'] = self.toaddr
msg['Date'] = formatdate()
- smtp = smtplib.SMTP()
+ smtp_host, smtp_port = self.host.split(':')
+ smtp = smtplib.SMTP(host=smtp_host, port=smtp_port)
try:
+ r = smtp.connect(host=smtp_host, port=smtp_port)
self._logSys.debug("Connected to SMTP '%s', response: %i: %s",
- self.host, *smtp.connect(self.host))
+ self.host, *r)
+
+ if self.ssl: # pragma: no cover
+ r = smtp.starttls()[0];
+ if r != 220: # pragma: no cover
+ raise Exception("Failed to starttls() on '%s': %s" % (self.host, r))
+
if self.user and self.password: # pragma: no cover (ATM no tests covering that)
smtp.login(self.user, self.password)
failed_recipients = smtp.sendmail(
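For orientation, the flow the action follows after this change (connect, optional STARTTLS, optional login, send) looks roughly like the sketch below; the server, credentials and addresses are placeholders, and the optional-port parsing is shown only for completeness:

```python
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate

# Placeholder settings -- the action reads these from its parameters.
host, use_ssl, user, password = "mail.example.org:587", True, None, None

msg = MIMEText("test notification")
msg["Subject"], msg["From"], msg["To"], msg["Date"] = (
    "[Fail2Ban] test", "fail2ban", "root", formatdate())

# Accept both "host" and "host:port" forms.
smtp_host, _, smtp_port = host.partition(":")
smtp = smtplib.SMTP(host=smtp_host, port=int(smtp_port or 25))
try:
    if use_ssl:
        code = smtp.starttls()[0]          # upgrade the session to TLS
        if code != 220:
            raise RuntimeError("starttls() refused on %s: %s" % (host, code))
    if user and password:
        smtp.login(user, password)
    smtp.sendmail("fail2ban", ["root"], msg.as_string())
finally:
    smtp.quit()
```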
diff --git a/config/filter.d/apache-auth.conf b/config/filter.d/apache-auth.conf
index 40f6d6e3..e80ac9ac 100644
--- a/config/filter.d/apache-auth.conf
+++ b/config/filter.d/apache-auth.conf
@@ -64,7 +64,7 @@ ignoreregex =
# ^user .*: one-time-nonce mismatch - sending new nonce\s*$
# ^realm mismatch - got `(?:[^']*|.*?)' but no realm specified\s*$
#
-# Because url/referer are foreign input, short form of regex used if long enough to idetify failure.
+# Because url/referer are foreign input, short form of regex used if long enough to identify failure.
#
# Author: Cyril Jaquier
# Major edits by Daniel Black and Ben Rubson.
diff --git a/config/filter.d/apache-common.conf b/config/filter.d/apache-common.conf
index 6577fe7d..7932066d 100644
--- a/config/filter.d/apache-common.conf
+++ b/config/filter.d/apache-common.conf
@@ -29,7 +29,7 @@ apache-prefix = >
apache-pref-ignore =
-_apache_error_client = \[(:?error|\S+:\S+)\]( \[pid \d+(:\S+ \d+)?\])? \[client (:\d{1,5})?\]
+_apache_error_client = \[(:?error|\S+:\S+)\]( \[pid \d+(:\S+ \d+)?\])? \[(?:client|remote) (:\d{1,5})?\]
datepattern = {^LN-BEG}
diff --git a/config/filter.d/dante.conf b/config/filter.d/dante.conf
new file mode 100644
index 00000000..3026414f
--- /dev/null
+++ b/config/filter.d/dante.conf
@@ -0,0 +1,16 @@
+# Fail2Ban filter for dante
+#
+# Make sure you have "log: error" set in your "client pass" directive
+#
+
+[INCLUDES]
+before = common.conf
+
+[Definition]
+_daemon = danted
+
+failregex = ^%(__prefix_line)sinfo: block\(\d\): tcp/accept \]: \.\d+ \S+: error after reading \d+ bytes? in \d+ seconds?: (?:could not access|system password authentication failed for|pam_authenticate\(\) for) user "[^"]+ "
+
+[Init]
+journalmatch = _SYSTEMD_UNIT=danted.service
+
diff --git a/config/filter.d/exim-common.conf b/config/filter.d/exim-common.conf
index 36644e94..78c093bb 100644
--- a/config/filter.d/exim-common.conf
+++ b/config/filter.d/exim-common.conf
@@ -9,12 +9,43 @@ after = exim-common.local
[Definition]
-host_info_pre = (?:H=([\w.-]+ )?(?:\(\S+\) )?)?
-host_info_suf = (?::\d+)?(?: I=\[\S+\](:\d+)?)?(?: U=\S+)?(?: P=e?smtp)?(?: F=(?:<>|[^@]+@\S+))?\s
-host_info = %(host_info_pre)s\[\]%(host_info_suf)s
-pid = (?: \[\d+\]| \w+ exim\[\d+\]:)?
+_fields_grp = (?: (?!H=)[A-Za-z]{1,4}(?:=\S+)?)*
+host_info = %(_fields_grp)s (?:H=)?(?:[\w.-]+)? ?(?:\(\S+\))? ?\[\](?::\d+)?%(_fields_grp)s
+pid = (?:\s?\[\d+\]|\s?[\w\.-]+ exim\[\d+\]:){0,2}
-# DEV Notes:
-# From exim source code: ./src/receive.c:add_host_info_for_log
-#
-# Author: Daniel Black
+logtype = file
+_add_pref = /_add_pref>
+
+__prefix_line = %(pid)s%(_add_pref)s
+
+[lt_journal]
+_add_pref = (?: \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})?
+
+[lt_file]
+_add_pref =
+
+# DEV Notes
+# ------------
+# Host string happens:
+# H=[ip address]
+# H=(helo_name) [ip address]
+# H=host_name [ip address]
+# H=host_name (helo_name) [ip address]
+# flags H=host_name (helo_name) [ip address] flags
+# where only [ip address] always visible, ignore ident
+# From exim source code:
+# src/src/host.c:host_and_ident()
+# src/receive.c:add_host_info_for_log()
+
+# Substitution of `_fields_grp` bypasses all flags but H
+# Summary of Fields in Log Lines depending on log_selector
+# https://www.exim.org/exim-html-current/doc/html/spec_html/ch-log_files.html
+# at version exim-4.97.1
+# ---
+
+# Authors:
+# Cyril Jaquier
+# Daniel Black (rewrote with strong regexs)
+# Sergey G. Brester aka sebres (optimization, rewrite to prefregex, reviews)
+# Martin O'Neal (added additional regexs to detect authentication failures, protocol errors, and drops)
+# Vladimir Varlamov (host line definition)
diff --git a/config/filter.d/exim-spam.conf b/config/filter.d/exim-spam.conf
index 733c884b..a1833157 100644
--- a/config/filter.d/exim-spam.conf
+++ b/config/filter.d/exim-spam.conf
@@ -26,11 +26,13 @@ before = exim-common.conf
[Definition]
-failregex = ^%(pid)s \S+ F=(<>|\S+@\S+) %(host_info)srejected by local_scan\(\): .{0,256}$
- ^%(pid)s %(host_info)sF=(<>|[^@]+@\S+) rejected RCPT [^@]+@\S+: .*dnsbl.*\s*$
- ^%(pid)s \S+ %(host_info)sF=(<>|[^@]+@\S+) rejected after DATA: This message contains a virus \(\S+\)\.\s*$
- ^%(pid)s \S+ SA: Action: flagged as Spam but accepted: score=\d+\.\d+ required=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=\S+ \[\]\) for $
- ^%(pid)s \S+ SA: Action: silently tossed message: score=\d+\.\d+ required=\d+\.\d+ trigger=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=(\S+ )?\[\]\) for \S+$
+prefregex = ^%(__prefix_line)s.+ $
+
+failregex = ^\s?\S+%(host_info)s rejected by local_scan\(\): .{0,256}$
+ ^%(host_info)s rejected RCPT [^@]+@\S+: .*dnsbl.*\s*$
+ ^\s?\S+%(host_info)s rejected after DATA: This message contains a virus \(\S+\)\.\s*$
+ ^\s?\S+ SA: Action: flagged as Spam but accepted: score=\d+\.\d+ required=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=\S+ \[\]\) for $
+ ^\s?\S+ SA: Action: silently tossed message: score=\d+\.\d+ required=\d+\.\d+ trigger=\d+\.\d+ \(scanned in \d+/\d+ secs \| Message-Id: \S+\)\. From \S+ \(host=(\S+ )?\[\]\) for \S+$
ignoreregex =
@@ -43,8 +45,6 @@ ignoreregex =
honeypot = trap@example.com
-# DEV Notes:
-# The %(host_info) defination contains a match
-#
-# Author: Cyril Jaquier
-# Daniel Black (rewrote with strong regexs)
+# DEV Notes
+# -----------
+# The %(host_info) definition contains a match. No space before. See exim-common.conf
diff --git a/config/filter.d/exim.conf b/config/filter.d/exim.conf
index 6a8c12c5..cca32de3 100644
--- a/config/filter.d/exim.conf
+++ b/config/filter.d/exim.conf
@@ -13,21 +13,20 @@ before = exim-common.conf
[Definition]
-# Fre-filter via "prefregex" is currently inactive because of too different failure syntax in exim-log (testing needed):
-#prefregex = ^%(pid)s \b(?:\w+ authenticator failed|([\w\-]+ )?SMTP (?:(?:call|connection) from|protocol(?: synchronization)? error)|no MAIL in|(?:%(host_info_pre)s\[[^\]]+\]%(host_info_suf)s(?:sender verify fail|rejected RCPT|dropped|AUTH command))).+ $
+prefregex = ^%(__prefix_line)s.+ $
-failregex = ^%(pid)s %(host_info)ssender verify fail for <\S+>: (?:Unknown user|Unrouteable address|all relevant MX records point to non-existent hosts)\s*$
- ^%(pid)s \w+ authenticator failed for (?:[^\[\( ]* )?(?:\(\S*\) )?\[\](?::\d+)?(?: I=\[\S+\](:\d+)?)?: 535 Incorrect authentication data( \(set_id=.*\)|: \d+ Time\(s\))?\s*$
- ^%(pid)s %(host_info)srejected RCPT [^@]+@\S+: (?:relay not permitted|Sender verify failed|Unknown user|Unrouteable address)\s*$
- ^%(pid)s SMTP protocol synchronization error \([^)]*\): rejected (?:connection from|"\S+") %(host_info)s(?:next )?input=".*"\s*$
- ^%(pid)s SMTP call from (?:[^\[\( ]* )?%(host_info)sdropped: too many (?:nonmail commands|syntax or protocol errors) \(last (?:command )?was "[^"]*"\)\s*$
- ^%(pid)s SMTP protocol error in "[^"]+(?:"+[^"]*(?="))*?" %(host_info)sAUTH command used when not advertised\s*$
- ^%(pid)s no MAIL in SMTP connection from (?:[^\[\( ]* )?(?:\(\S*\) )?%(host_info)sD=\d\S*s(?: C=\S*)?\s*$
- ^%(pid)s (?:[\w\-]+ )?SMTP connection from (?:[^\[\( ]* )?(?:\(\S*\) )?%(host_info)sclosed by DROP in ACL\s*$
+failregex = ^%(host_info)s sender verify fail for <\S+>: (?:Unknown user|Unrouteable address|all relevant MX records point to non-existent hosts)\s*$
+ ^\s?\w+ authenticator failed for%(host_info)s: 535 Incorrect authentication data(?: \(set_id=.*\)|: \d+ Time\(s\))?\s*$
+ ^%(host_info)s rejected RCPT [^@]+@\S+: (?:relay not permitted|Sender verify failed|Unknown user|Unrouteable address)\s*$
+ ^\s?SMTP protocol synchronization error \([^)]*\): rejected (?:connection from|"\S+")%(host_info)s (?:next )?input=".*"\s*$
+ ^\s?SMTP call from%(host_info)s dropped: too many (?:(?:nonmail|unrecognized) commands|syntax or protocol errors)
+ ^\s?SMTP protocol error in "[^"]+(?:"+[^"]*(?="))*?"%(host_info)s [A-Z]+ (?:command used when not advertised|authentication mechanism not supported)\s*$
+ ^\s?no MAIL in SMTP connection from%(host_info)s
+ ^\s?(?:[\w\-]+ )?SMTP connection from%(host_info)s closed by DROP in ACL\s*$
>
-mdre-aggressive = ^%(pid)s no host name found for IP address $
- ^%(pid)s no IP address found for host \S+ \(during SMTP connection from \[\]\)$
+mdre-aggressive = ^\s?no host name found for IP address $
+ ^\s?no IP address found for host \S+ \(during SMTP connection from%(host_info)s\)$
mdre-normal =
@@ -42,13 +41,10 @@ mode = normal
ignoreregex =
-# DEV Notes:
-# The %(host_info) defination contains a match
+# DEV Notes
+# -----------
+# The %(host_info) definition contains a match. No space before. See exim-common.conf
#
# SMTP protocol synchronization error \([^)]*\) <- This needs to be non-greedy
-# to void capture beyond ")" to avoid a DoS Injection vulnerabilty as input= is
+# to void capture beyond ")" to avoid a DoS Injection vulnerability as input= is
# user injectable data.
-#
-# Author: Cyril Jaquier
-# Daniel Black (rewrote with strong regexs)
-# Martin O'Neal (added additional regexs to detect authentication failures, protocol errors, and drops)
diff --git a/config/filter.d/mongodb-auth.conf b/config/filter.d/mongodb-auth.conf
index 66c27abb..d02227e0 100644
--- a/config/filter.d/mongodb-auth.conf
+++ b/config/filter.d/mongodb-auth.conf
@@ -1,4 +1,4 @@
-# Fail2Ban filter for unsuccesfull MongoDB authentication attempts
+# Fail2Ban filter for unsuccessful MongoDB authentication attempts
#
# Logfile /var/log/mongodb/mongodb.log
#
@@ -23,7 +23,7 @@ maxlines = 10
#
# Regarding the multiline regex:
#
-# There can be a nunber of non-related lines between the first and second part
+# There can be a number of non-related lines between the first and second part
# of this regex maxlines of 10 is quite generious.
#
# Note the capture __connid, includes the connection ID, used in second part of regex.
diff --git a/config/filter.d/mysqld-auth.conf b/config/filter.d/mysqld-auth.conf
index 930c9b5a..4afd4ada 100644
--- a/config/filter.d/mysqld-auth.conf
+++ b/config/filter.d/mysqld-auth.conf
@@ -1,4 +1,4 @@
-# Fail2Ban filter for unsuccesful MySQL authentication attempts
+# Fail2Ban filter for unsuccessful MySQL authentication attempts
#
#
# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld]:
@@ -17,7 +17,7 @@ before = common.conf
_daemon = mysqld
-failregex = ^%(__prefix_line)s(?:(?:\d{6}|\d{4}-\d{2}-\d{2})[ T]\s?\d{1,2}:\d{2}:\d{2} )?(?:\d+ )?\[\w+\] (?:\[[^\]]+\] )*Access denied for user '[^']+ '@'' (to database '[^']*'|\(using password: (YES|NO)\))*\s*$
+failregex = ^%(__prefix_line)s(?:(?:\d{6}|\d{4}-\d{2}-\d{2})[ T]\s?\d{1,2}:\d{2}:\d{2} )?(?:\d+ )?\[\w+\] (?:\[[^\]]+\] )*Access denied for user '[^']+ '@''(?:\s+(?:to database '[^']*'|\(using password: (?:YES|NO)\)){1,2})?\s*$
ignoreregex =
diff --git a/config/filter.d/named-refused.conf b/config/filter.d/named-refused.conf
index 798f66e6..5e8b0624 100644
--- a/config/filter.d/named-refused.conf
+++ b/config/filter.d/named-refused.conf
@@ -37,7 +37,7 @@ _category_re = (?:%(_category)s: )?
# this can be optional (for instance if we match named native log files)
__line_prefix=\s*(?:\S+ %(__daemon_combs_re)s\s+)?%(_category_re)s
-prefregex = ^%(__line_prefix)s(?:(?:error|info):\s*)?client(?: @\S*)? #\S+(?: \([\S.]+\))?: .+ \s(?:denied|\(NOTAUTH\))\s*$
+prefregex = ^%(__line_prefix)s(?:(?:error|info):\s*)?client(?: @\S*)? #\S+(?: \([\S.]+\))?: .+ \s(?:denied(?: \([^\)]*\))?|\(NOTAUTH\))\s*$
failregex = ^(?:view (?:internal|external): )?query(?: \(cache\))?
^zone transfer
diff --git a/config/filter.d/nginx-error-common.conf b/config/filter.d/nginx-error-common.conf
new file mode 100644
index 00000000..efc9829a
--- /dev/null
+++ b/config/filter.d/nginx-error-common.conf
@@ -0,0 +1,32 @@
+# Generic nginx error_log configuration items (to be used as interpolations) in other
+# filters monitoring nginx error-logs
+#
+
+[DEFAULT]
+
+# Type of log file or log format (file, short, journal):
+logtype = file
+
+# Daemon definition is to be specialized (if needed) in .conf file
+_daemon = nginx
+
+# Common line prefixes (beginnings) which could be used in filters
+#
+# [bsdverbose]? [hostname] [vserver tag] daemon_id spaces
+#
+# This can be optional (for instance if we match named native log files)
+__prefix = <lt_<logtype>/__prefix>
+
+__err_type = error
+
+__prefix_line = %(__prefix)s\[%(__err_type)s\] \d+#\d+: \*\d+\s+
+
+
+[lt_file]
+__prefix = \s*
+
+[lt_short]
+__prefix = \s*(?:(?!\[)\S+ %(_daemon)s\[\d+\]: [^\[]*)?
+
+[lt_journal]
+__prefix = %(lt_short/__prefix)s
diff --git a/config/filter.d/nginx-forbidden.conf b/config/filter.d/nginx-forbidden.conf
new file mode 100644
index 00000000..6df33465
--- /dev/null
+++ b/config/filter.d/nginx-forbidden.conf
@@ -0,0 +1,29 @@
+# fail2ban filter configuration for nginx forbidden accesses
+#
+# If you have configured nginx to forbid some paths in your webserver, e.g.:
+#
+# location ~ /\. {
+# deny all;
+# }
+#
+# if a client tries to access https://yoursite/.user.ini then you will see
+# in nginx error log:
+#
+# 2018/09/14 19:03:05 [error] 2035#2035: *9134 access forbidden by rule, client: 10.20.30.40, server: www.example.net, request: "GET /.user.ini HTTP/1.1", host: "www.example.net", referrer: "https://www.example.net"
+#
+# By carefully setting this filter we ban every IP that tries too many times to
+# access forbidden resources.
+#
+# Author: Michele Bologna https://www.michelebologna.net/
+
+[INCLUDES]
+
+before = nginx-error-common.conf
+
+[Definition]
+failregex = ^%(__prefix_line)saccess forbidden by rule, client:
+ignoreregex =
+
+datepattern = {^LN-BEG}
+
+journalmatch = _SYSTEMD_UNIT=nginx.service + _COMM=nginx
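Since the comment above already gives a concrete error-log line, here is a hedged, self-contained sketch of how such a line is matched once the date has been consumed. `prefix_line` is a hand-written simplification of `%(__prefix_line)s` from nginx-error-common.conf, and the named group stands in for the address capture the real filter performs:

```python
import re

# Simplified stand-in for %(__prefix_line)s (file logtype) plus the failregex above.
prefix_line = r"\s*\[error\] \d+#\d+: \*\d+\s+"
failregex = re.compile(prefix_line + r"access forbidden by rule, client: (?P<host>\S+),")

# Sample line from the comment, with the leading date removed as fail2ban would do:
msg = ('[error] 2035#2035: *9134 access forbidden by rule, client: 10.20.30.40, '
       'server: www.example.net, request: "GET /.user.ini HTTP/1.1", '
       'host: "www.example.net", referrer: "https://www.example.net"')

print(failregex.match(msg).group("host"))  # 10.20.30.40
```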
diff --git a/config/filter.d/nginx-http-auth.conf b/config/filter.d/nginx-http-auth.conf
index 71806e85..454e9186 100644
--- a/config/filter.d/nginx-http-auth.conf
+++ b/config/filter.d/nginx-http-auth.conf
@@ -1,14 +1,23 @@
# fail2ban filter configuration for nginx
+[INCLUDES]
+
+before = nginx-error-common.conf
[Definition]
mode = normal
-mdre-auth = ^\s*\[error\] \d+#\d+: \*\d+ user "(?:[^"]+|.*?)":? (?:password mismatch|was not found in "[^\"]*"), client: , server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(?:, referrer: "\S+")?\s*$
-mdre-fallback = ^\s*\[crit\] \d+#\d+: \*\d+ SSL_do_handshake\(\) failed \(SSL: error:\S+(?: \S+){1,3} too (?:long|short)\)[^,]*, client:
+__err_type = <_ertp-<mode>>
+_ertp-auth = error
+mdre-auth = ^%(__prefix_line)suser "(?:[^"]+|.*?)":? (?:password mismatch|was not found in "[^\"]*"), client: , server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(?:, referrer: "\S+")?\s*$
+_ertp-fallback = crit
+mdre-fallback = ^%(__prefix_line)sSSL_do_handshake\(\) failed \(SSL: error:\S+(?: \S+){1,3} too (?:long|short)\)[^,]*, client:
+
+_ertp-normal = %(_ertp-auth)s
mdre-normal = %(mdre-auth)s
+_ertp-aggressive = (?:%(_ertp-auth)s|%(_ertp-fallback)s)
mdre-aggressive = %(mdre-auth)s
%(mdre-fallback)s
diff --git a/config/filter.d/nginx-limit-req.conf b/config/filter.d/nginx-limit-req.conf
index 2f45e831..29d37d09 100644
--- a/config/filter.d/nginx-limit-req.conf
+++ b/config/filter.d/nginx-limit-req.conf
@@ -23,6 +23,10 @@
# ...
#
+[INCLUDES]
+
+before = nginx-error-common.conf
+
[Definition]
# Specify following expression to define exact zones, if you want to ban IPs limited
@@ -33,13 +37,16 @@
#
ngx_limit_req_zones = [^"]+
+# Depending on limit_req_log_level directive (may be: info | notice | warn | error):
+__err_type = [a-z]+
+
# Use following full expression if you should range limit request to specified
# servers, requests, referrers etc. only :
#
-# failregex = ^\s*\[[a-z]+\] \d+#\d+: \*\d+ limiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>, server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(, referrer: "\S+")?\s*$
+# failregex = ^%(__prefix_line)slimiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>, server: \S*, request: "\S+ \S+ HTTP/\d+\.\d+", host: "\S+"(, referrer: "\S+")?\s*$
# Shortly, much faster and stable version of regexp:
-failregex = ^\s*\[[a-z]+\] \d+#\d+: \*\d+ limiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>,
+failregex = ^%(__prefix_line)slimiting requests, excess: [\d\.]+ by zone "(?:%(ngx_limit_req_zones)s)", client: <HOST>,
ignoreregex =
diff --git a/config/filter.d/postfix.conf b/config/filter.d/postfix.conf
index b374f472..5497504e 100644
--- a/config/filter.d/postfix.conf
+++ b/config/filter.d/postfix.conf
@@ -10,17 +10,17 @@ before = common.conf
[Definition]
-_daemon = postfix(-\w+)?/\w+(?:/smtp[ds])?
+_daemon = postfix(-\w+)?/[^/\[:\s]+(?:/smtp[ds])?
_port = (?::\d+)?
_pref = [A-Z]{4}
prefregex = ^%(__prefix_line)s<mdpr-<mode>> <F-CONTENT>.+</F-CONTENT>$
# Extended RE for normal mode to match reject by unknown users or undeliverable address, can be set to empty to avoid this:
-exre-user = |[Uu](?:ser unknown|ndeliverable address)
+exre-user = |[Uu](?:ser unknown|ndeliverable address) ; pragma: codespell-ignore
mdpr-normal = (?:\w+: (?:milter-)?reject:|(?:improper command pipelining|too many errors) after \S+)
-mdre-normal=^%(_pref)s from [^[]*\[<HOST>\]%(_port)s: [45][50][04] [45]\.\d\.\d+ (?:(?:<[^>]*>)?: )?(?:(?:Helo command|(?:Sender|Recipient) address) rejected: )?(?:Service unavailable|(?:Client host|Command|Data command) rejected|Relay access denied|(?:Host|Domain) not found|need fully-qualified hostname|match%(exre-user)s)\b
+mdre-normal=^%(_pref)s from [^[]*\[<HOST>\]%(_port)s: [45][50][04] [45]\.\d\.\d+ (?:(?:<[^>]*>)?: )?(?:(?:Helo command|(?:Sender|Recipient) address) rejected: )?(?:Service unavailable|Access denied|(?:Client host|Command|Data command) rejected|Relay access denied|Malformed DNS server reply|(?:Host|Domain) not found|need fully-qualified hostname|match%(exre-user)s)\b
            ^from [^[]*\[<HOST>\]%(_port)s:?
mdpr-auth = warning:
@@ -38,7 +38,7 @@ mdre-more = %(mdre-normal)s
# Includes some of the log messages described in
# <http://www.postfix.org/POSTSCREEN_README.html>.
-mdpr-ddos = (?:lost connection after(?! DATA) [A-Z]+|disconnect(?= from \S+(?: \S+=\d+)* auth=0/(?:[1-9]|\d\d+))|(?:PREGREET \d+|HANGUP) after \S+|COMMAND (?:TIME|COUNT|LENGTH) LIMIT)
+mdpr-ddos = (?:lost connection after (?!(?:DATA|AUTH)\b)[A-Z]+|disconnect(?= from \S+(?: \S+=\d+)* auth=0/(?:[1-9]|\d\d+))|(?:PREGREET \d+|HANGUP) after \S+|COMMAND (?:TIME|COUNT|LENGTH) LIMIT)
mdre-ddos = ^from [^[]*\[<HOST>\]%(_port)s:?
mdpr-extra = (?:%(mdpr-auth)s|%(mdpr-normal)s)
@@ -76,6 +76,6 @@ ignoreregex =
[Init]
-journalmatch = _SYSTEMD_UNIT=postfix.service
+journalmatch = _SYSTEMD_UNIT=postfix.service _SYSTEMD_UNIT=postfix@-.service
# Author: Cyril Jaquier
diff --git a/config/filter.d/recidive.conf b/config/filter.d/recidive.conf
index 63833cab..86d939bb 100644
--- a/config/filter.d/recidive.conf
+++ b/config/filter.d/recidive.conf
@@ -19,7 +19,7 @@
# common.local
before = common.conf
-[Definition]
+[DEFAULT]
_daemon = (?:fail2ban(?:-server|\.actions)\s*)
@@ -29,10 +29,23 @@ _jailname = recidive
failregex = ^%(__prefix_line)s(?:\s*fail2ban\.actions\s*%(__pid_re)s?:\s+)?NOTICE\s+\[(?!%(_jailname)s\])(?:.*)\]\s+Ban\s+<HOST>\s*$
+[lt_short]
+_daemon = (?:fail2ban(?:-server|\.actions)?\s*)
+failregex = ^%(__prefix_line)s(?:\s*fail2ban(?:\.actions)?\s*%(__pid_re)s?:\s+)?(?:NOTICE\s+)?\[(?!%(_jailname)s\])(?:.*)\]\s+Ban\s+<HOST>\s*$
+
+[lt_journal]
+_daemon =
+failregex =
+
+[Definition]
+
+_daemon = <lt_<logtype>/_daemon>
+failregex = <lt_<logtype>/failregex>
+
datepattern = ^{DATE}
ignoreregex =
-journalmatch = _SYSTEMD_UNIT=fail2ban.service PRIORITY=5
+journalmatch = _SYSTEMD_UNIT=fail2ban.service
# Author: Tom Hendrikx, modifications by Amir Caspi
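The recidive rewrite selects `_daemon`/`failregex` per logtype via the `lt_*` sections; the net effect for file logs is still "match fail2ban's own NOTICE Ban lines from other jails". A rough Python sketch of that match, with a hypothetical log line, a hand-simplified prefix, and a named group in place of the address tag:

```python
import re

jailname = "recidive"
# Simplified file-logtype failregex: date and __prefix_line handling omitted.
failregex = re.compile(
    r"fail2ban\.actions\s*(?:\[\d+\])?:\s+NOTICE\s+"
    r"\[(?!" + jailname + r"\])(?:.*)\]\s+Ban\s+(?P<host>\S+)\s*$")

line = "2024-01-01 00:00:00,000 fail2ban.actions [123]: NOTICE [sshd] Ban 192.0.2.1"
print(failregex.search(line).group("host"))   # 192.0.2.1
# A "NOTICE [recidive] Ban ..." line would be skipped by the (?!recidive\]) lookahead.
```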
diff --git a/config/filter.d/routeros-auth.conf b/config/filter.d/routeros-auth.conf
new file mode 100644
index 00000000..090296d4
--- /dev/null
+++ b/config/filter.d/routeros-auth.conf
@@ -0,0 +1,10 @@
+# Fail2Ban filter for failure attempts in MikroTik RouterOS
+#
+#
+
+[Definition]
+
+failregex = ^\s*\S+ system,error,critical login failure for user .*? from via \S+$
+
+# Author: Vit Kabele
+
diff --git a/config/filter.d/selinux-common.conf b/config/filter.d/selinux-common.conf
index b3e0ae4f..dc9616d2 100644
--- a/config/filter.d/selinux-common.conf
+++ b/config/filter.d/selinux-common.conf
@@ -14,7 +14,7 @@
[Definition]
-failregex = ^type=%(_type)s msg=audit\(:\d+\): (user )?pid=\d+ uid=%(_uid)s auid=%(_auid)s ses=\d+ subj=%(_subj)s msg='%(_msg)s'$
+failregex = ^type=%(_type)s msg=audit\(:\d+\): (?:user )?pid=\d+ uid=%(_uid)s auid=%(_auid)s ses=\d+ subj=%(_subj)s msg='%(_msg)s'(?:\x1D|$)
ignoreregex =
diff --git a/config/filter.d/selinux-ssh.conf b/config/filter.d/selinux-ssh.conf
index 6955094f..f5aa9b08 100644
--- a/config/filter.d/selinux-ssh.conf
+++ b/config/filter.d/selinux-ssh.conf
@@ -15,11 +15,13 @@ _subj = (?:unconfined_u|system_u):system_r:sshd_t:s0-s0:c0\.c1023
_exe =/usr/sbin/sshd
_terminal = ssh
-_msg = op=\S+ acct=(?P<_quote_acct>"?)\S+(?P=_quote_acct) exe="%(_exe)s" hostname=(\?|(\d+\.){3}\d+) addr= terminal=%(_terminal)s res=failed
+_anygrp = (?!acct=|exe=|addr=|terminal=|res=)\w+=(?:"[^"]+"|\S*)
+
+_msg = (?:%(_anygrp)s )*acct=(?:"[^"]+ "|\S+ ) exe="%(_exe)s" (?:%(_anygrp)s )*addr= terminal=%(_terminal)s res=failed
# DEV Notes:
#
-# Note: USER_LOGIN is ignored as this is the duplicate messsage
+# Note: USER_LOGIN is ignored as this is the duplicate message
# ssh logs after 3 USER_AUTH failures.
#
# Author: Daniel Black
diff --git a/config/filter.d/slapd.conf b/config/filter.d/slapd.conf
index 22cf4304..791decd2 100644
--- a/config/filter.d/slapd.conf
+++ b/config/filter.d/slapd.conf
@@ -13,13 +13,11 @@ before = common.conf
_daemon = slapd
-failregex = ^(?P<__prefix>%(__prefix_line)s)conn=(?P<_conn_>\d+) fd=\d+ ACCEPT from IP=:\d{1,5} \(IP=\S+\)\s*(?P=__prefix)conn=(?P=_conn_) op=\d+ RESULT(?:\s(?!err)\S+=\S*)* err=49 text=[\w\s]*$
+prefregex = ^%(__prefix_line)sconn=\d+ (?: (?:fd|op)=\d+){0,2} (?=ACCEPT|RESULT).+ $
+
+failregex = ^ACCEPT from IP=:\d{1,5}\s+
+ ^RESULT(?:\s(?!err)\S+=\S*)* err=49\b
ignoreregex =
-[Init]
-
-# "maxlines" is number of log lines to buffer for multi-line regex searches
-maxlines = 20
-
-# Author: Andrii Melnyk
+# Author: Andrii Melnyk, Sergey G. Brester
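The slapd filter drops the 20-line multiline buffer in favour of fail2ban's two-stage matching: `prefregex` normalizes the `conn=... fd=.../op=...` prefix and hands the rest to the short `failregex` alternatives. A hedged sketch of that idea with made-up log lines; the capture group emulates the content hand-off, and `\S+` stands in for the address tag the real filter captures:

```python
import re

# Stage 1: prefregex selects the interesting part of the line.
prefregex = re.compile(r"conn=\d+ (?:(?:fd|op)=\d+ ){0,2}((?:ACCEPT|RESULT).+)$")
# Stage 2: the short failregex alternatives run only on that part.
failregex = [re.compile(r"^ACCEPT from IP=\S+:\d{1,5}\s"),
             re.compile(r"^RESULT(?:\s(?!err)\S+=\S*)* err=49\b")]

lines = ["conn=1000 fd=12 ACCEPT from IP=192.0.2.1:50000 (IP=0.0.0.0:389) ",
         "conn=1000 op=0 RESULT tag=97 err=49 text="]
for line in lines:
    content = prefregex.search(line).group(1)
    print(any(fr.search(content) for fr in failregex))   # True, True
```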
diff --git a/config/filter.d/sogo-auth.conf b/config/filter.d/sogo-auth.conf
index 4155f89e..c01b0353 100644
--- a/config/filter.d/sogo-auth.conf
+++ b/config/filter.d/sogo-auth.conf
@@ -1,4 +1,4 @@
-# Fail2ban filter for SOGo authentcation
+# Fail2ban filter for SOGo authentication
#
# Log file usually in /var/log/sogo/sogo.log
diff --git a/config/filter.d/sshd.conf b/config/filter.d/sshd.conf
index d5d189b0..a954774c 100644
--- a/config/filter.d/sshd.conf
+++ b/config/filter.d/sshd.conf
@@ -24,8 +24,8 @@ __pref = (?:(?:error|fatal): (?:PAM: )?)?
#__suff = (?: port \d+)?(?: \[preauth\])?\s*
__suff = (?: (?:port \d+|on \S+|\[preauth\])){0,3}\s*
__on_port_opt = (?: (?:port \d+|on \S+)){0,2}
-# close by authenticating user:
-__authng_user = (?: (?:invalid|authenticating) user \S+|.*? )?
+# close by authenticating user (don't use after %(__authng_user)s because of catch-all `.*?`):
+__authng_user = (?: (?:by|from))?(?: (?:invalid|authenticating) user \S+|.*? )?(?: from)?
# for all possible (also future) forms of "no matching (cipher|mac|MAC|compression method|key exchange method|host key type) found",
# see ssherr.c for all possible SSH_ERR_..._ALG_MATCH errors.
@@ -38,21 +38,21 @@ __pam_auth = pam_[a-z]+
prefregex = ^%(__prefix_line)s %(__pref)s.+ $
-cmnfailre = ^[aA]uthentication (?:failure|error|failed) for .* from ( via \S+)?%(__suff)s$
- ^User not known to the underlying authentication module for .* from %(__suff)s$
+cmnfailre = ^[aA]uthentication (?:failure|error|failed) for .*? (?:from )?( via \S+)?%(__suff)s$
+ ^User not known to the underlying authentication module for .*? (?:from )?%(__suff)s$
>
^Failed for (?Pinvalid user )?(?P\S+)|(?(cond_inv)(?:(?! from ).)*?|[^:]+) from %(__on_port_opt)s(?: ssh\d*)?(?(cond_user): |(?:(?:(?! from ).)*)$)
^ROOT LOGIN REFUSED FROM
- ^[iI](?:llegal|nvalid) user .*? from %(__suff)s$
- ^User \S+|.*? from not allowed because not listed in AllowUsers%(__suff)s$
- ^User \S+|.*? from not allowed because listed in DenyUsers%(__suff)s$
- ^User \S+|.*? from not allowed because not in any group%(__suff)s$
+ ^[iI](?:llegal|nvalid) user .*? (?:from )?%(__suff)s$
+ ^User \S+|.*? (?:from )? not allowed because not listed in AllowUsers%(__suff)s$
+ ^User \S+|.*? (?:from )? not allowed because listed in DenyUsers%(__suff)s$
+ ^User \S+|.*? (?:from )? not allowed because not in any group%(__suff)s$
^refused connect from \S+ \(\)
^Received disconnect from %(__on_port_opt)s:\s*3: .*: Auth fail%(__suff)s$
- ^User \S+|.*? from not allowed because a group is listed in DenyGroups%(__suff)s$
- ^User \S+|.*? from not allowed because none of user's groups are listed in AllowGroups%(__suff)s$
+ ^User \S+|.*? (?:from )? not allowed because a group is listed in DenyGroups%(__suff)s$
+ ^User \S+|.*? (?:from )? not allowed because none of user's groups are listed in AllowGroups%(__suff)s$
^%(__pam_auth)s\(sshd:auth\):\s+authentication failure; (?:\s+(?:(?:logname|e?uid|tty)=\S*)){0,4}\s+ruser=\S* \s+rhost=(?:\s+user=\S* )?%(__suff)s$
- ^maximum authentication attempts exceeded for .* from %(__on_port_opt)s(?: ssh\d*)?%(__suff)s$
+ ^maximum authentication attempts exceeded for (?:invalid user )?.*? (?:from )?%(__on_port_opt)s(?: ssh\d*)?%(__suff)s$
^User \S+|.*? not allowed because account is locked%(__suff)s
^Disconnecting (?: from)?(?: (?:invalid|authenticating)) user \S+ %(__on_port_opt)s:\s*Change of username or service not allowed:\s*.*\[preauth\]\s*$
^Disconnecting: Too many authentication failures(?: for \S+|.*? )?%(__suff)s$
@@ -68,24 +68,25 @@ cmnfailed = >
mdre-normal =
# used to differentiate "connection closed" with and without `[preauth]` (fail/nofail cases in ddos mode)
-mdre-normal-other = ^(Connection (?:closed|reset)|Disconnected) (?:by|from)%(__authng_user)s (?:%(__suff)s|\s*)$
+mdre-normal-other = ^(?:Connection (?:closed|reset)|Disconnect(?:ed|ing)) %(__authng_user)s %(__on_port_opt)s(?:: (?!Too many authentication failures)[^\[]+)?(?: \[preauth\])?\s*$
-mdre-ddos = ^Did not receive identification string from
+mdre-ddos = ^(?:Did not receive identification string from|Timeout before authentication for)
^kex_exchange_identification: (?:read: )?(?:[Cc]lient sent invalid protocol identifier|[Cc]onnection (?:closed by remote host|reset by peer))
- ^Bad protocol version identification '.*' from
+ ^Bad protocol version identification '(?:[^']|.*?)' (?:from )?%(__suff)s$
^SSH: Server;Ltype: (?:Authname|Version|Kex);Remote: -\d+;[A-Z]\w+:
^Read from socket failed: Connection reset by peer
- ^banner exchange: Connection from <__on_port_opt>: invalid format
+ ^(?:banner exchange|ssh_dispatch_run_fatal): Connection from <__on_port_opt>: (?:invalid format|(?:message authentication code incorrect|[Cc]onnection corrupted) \[preauth\])
+
# same as mdre-normal-other, but as failure (without with [preauth] and with on no preauth phase as helper to identify address):
-mdre-ddos-other = ^(Connection (?:closed|reset)|Disconnected) (?:by|from)%(__authng_user)s %(__on_port_opt)s\s+\[preauth\]\s*$
- ^(Connection (?:closed|reset)|Disconnected) (?:by|from)%(__authng_user)s (?:%(__on_port_opt)s|\s*)$
+mdre-ddos-other = ^(?:Connection (?:closed|reset)|Disconnect(?:ed|ing)) %(__authng_user)s %(__on_port_opt)s(?:: (?!Too many authentication failures)[^\[]+)?\s+\[preauth\]\s*$
+ ^(?:Connection (?:closed|reset)|Disconnect(?:ed|ing)) %(__authng_user)s (?:%(__on_port_opt)s(?:: (?!Too many authentication failures)[^\[]+)?|\s*)$
mdre-extra = ^Received disconnect from %(__on_port_opt)s:\s*14: No(?: supported)? authentication methods available
^Unable to negotiate with %(__on_port_opt)s: no matching <__alg_match> found.
^Unable to negotiate a <__alg_match>
^no matching <__alg_match> found:
# part of mdre-ddos-other, but user name is supplied (invalid/authenticating) on [preauth] phase only:
-mdre-extra-other = ^Disconnected (?: from)?(?: (?:invalid|authenticating)) user \S+|.*? %(__on_port_opt)s \[preauth\]\s*$
+mdre-extra-other = ^Disconnected (?: from)?(?: (?:invalid|authenticating)) user \S+|.*? (?:from )?%(__on_port_opt)s \[preauth\]\s*$
mdre-aggressive = %(mdre-ddos)s
%(mdre-extra)s
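Several of the rewritten sshd expressions rely on the `__suff` definition shown above, which accepts up to three of `port N`, `on X` and `[preauth]` in any order. A quick check of that building block in isolation:

```python
import re

# __suff as defined in the hunk above.
suff = r"(?: (?:port \d+|on \S+|\[preauth\])){0,3}\s*"

for tail in ("", " port 22", " port 22 [preauth]", " on eth0 port 22 [preauth]"):
    print(bool(re.fullmatch(suff, tail)))   # True for all four
```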
diff --git a/config/filter.d/traefik-auth.conf b/config/filter.d/traefik-auth.conf
index 8022fee1..5be16909 100644
--- a/config/filter.d/traefik-auth.conf
+++ b/config/filter.d/traefik-auth.conf
@@ -5,7 +5,7 @@
#
# To use 'traefik-auth' filter you have to configure your Traefik instance to write
# the access logs as describe in https://docs.traefik.io/configuration/logs/#access-logs
-# into a log file on host and specifiy users for Basic Authentication
+# into a log file on host and specify users for Basic Authentication
# https://docs.traefik.io/configuration/entrypoints/#basic-authentication
#
# Example:
@@ -51,7 +51,7 @@
[Definition]
-# Parameter "method" can be used to specifiy request method
+# Parameter "method" can be used to specify request method
req-method = \S+
# Usage example (for jail.local):
# filter = traefik-auth[req-method="GET|POST|HEAD"]
diff --git a/config/jail.conf b/config/jail.conf
index fe8db527..01e1fdf7 100644
--- a/config/jail.conf
+++ b/config/jail.conf
@@ -97,7 +97,9 @@ before = paths-debian.conf
# ignorecommand = /path/to/command
ignorecommand =
-# "bantime" is the number of seconds that a host is banned.
+# "bantime" is the amount of time that a host is banned, integer in seconds or
+# time abbreviation format (m - minutes, h - hours, d - days, w - weeks, mo - months, y - years).
+# This is treated as the initial ban time if bantime.increment gets enabled.
bantime = 10m
# A host is banned if it has generated "maxretry" during the last "findtime"
@@ -111,19 +113,17 @@ maxretry = 5
maxmatches = %(maxretry)s
# "backend" specifies the backend used to get files modification.
-# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
+# Available options are "pyinotify", "polling", "systemd" and "auto".
# This option can be overridden in each jail as well.
#
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
# If pyinotify is not installed, Fail2ban will use auto.
-# gamin: requires Gamin (a file alteration monitor) to be installed.
-# If Gamin is not installed, Fail2ban will use auto.
# polling: uses a polling algorithm which does not require external libraries.
# systemd: uses systemd python library to access the systemd journal.
# Specifying "logpath" is not valid for this backend.
# See "journalmatch" in the jails associated filter config
# auto: will try to use the following backends, in order:
-# pyinotify, gamin, polling.
+# pyinotify, polling.
#
# Note: if systemd backend is chosen as the default but you enable a jail
# for which logs are present only in its own log files, specify some other
@@ -395,6 +395,10 @@ logpath = %(nginx_error_log)s
port = http,https
logpath = %(nginx_access_log)s
+[nginx-forbidden]
+port = http,https
+logpath = %(nginx_error_log)s
+
# Ban attackers that try to use PHP's URL-fopen() functionality
# through GET/POST variables. - Experimental, with more than a year
# of usage in production environments.
@@ -958,6 +962,9 @@ port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
+[routeros-auth]
+port = ssh,http,https
+logpath = /var/log/MikroTik/router.log
[zoneminder]
# Zoneminder HTTP/HTTPS web interface auth
@@ -978,3 +985,8 @@ banaction = %(banaction_allports)s
[monitorix]
port = 8080
logpath = /var/log/monitorix-httpd
+
+[dante]
+port = 1080
+logpath = %(syslog_daemon)s
+
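The new bantime comment describes values either as plain seconds or in the abbreviation format (m, h, d, w, mo, y). The sketch below is a hypothetical converter for single `<number><unit>` values only, just to make the format concrete; fail2ban's own parser is richer (it also accepts combined expressions), and the month/year lengths here are simplifications:

```python
import re

# Illustrative only: 30-day months and 365-day years are assumptions.
UNITS = {"": 1, "s": 1, "m": 60, "h": 3600, "d": 86400,
         "w": 7 * 86400, "mo": 30 * 86400, "y": 365 * 86400}

def bantime_to_seconds(value):
    num, unit = re.fullmatch(r"(\d+)\s*([a-z]*)", value.strip()).groups()
    return int(num) * UNITS[unit]

print(bantime_to_seconds("10m"), bantime_to_seconds("1h"), bantime_to_seconds("600"))
# 600 3600 600
```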
diff --git a/config/paths-common.conf b/config/paths-common.conf
index 4f6a5f71..ad9f6f28 100644
--- a/config/paths-common.conf
+++ b/config/paths-common.conf
@@ -67,7 +67,7 @@ proftpd_backend = %(default_backend)s
pureftpd_log = %(syslog_ftp)s
pureftpd_backend = %(default_backend)s
-# ftp, daemon and then local7 are tried at configure time however it is overwriteable at configure time
+# ftp, daemon and then local7 are tried at configure time however it is overwritable at configure time
#
wuftpd_log = %(syslog_ftp)s
wuftpd_backend = %(default_backend)s
diff --git a/doc/fail2ban.server.filtergamin.rst b/doc/fail2ban.server.filtergamin.rst
deleted file mode 100644
index f248772f..00000000
--- a/doc/fail2ban.server.filtergamin.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-fail2ban.server.filtergamin module
-==================================
-
-.. automodule:: fail2ban.server.filtergamin
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/fail2ban.server.rst b/doc/fail2ban.server.rst
index dc5d1dee..e15ae021 100644
--- a/doc/fail2ban.server.rst
+++ b/doc/fail2ban.server.rst
@@ -13,7 +13,6 @@ fail2ban.server package
fail2ban.server.failmanager
fail2ban.server.failregex
fail2ban.server.filter
- fail2ban.server.filtergamin
fail2ban.server.filterpoll
fail2ban.server.filterpyinotify
fail2ban.server.filtersystemd
diff --git a/fail2ban-2to3 b/fail2ban-2to3
deleted file mode 100755
index 2015ed5b..00000000
--- a/fail2ban-2to3
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-# This script carries out conversion of fail2ban to python3
-# A backup of any converted files are created with ".bak"
-# extension
-
-set -eu
-
-if 2to3 -w --no-diffs bin/* fail2ban;then
- echo "Success!" >&2
- exit 0
-else
- echo "Fail!" >&2
- exit 1
-fi
diff --git a/fail2ban/client/actionreader.py b/fail2ban/client/actionreader.py
index 88b0aca1..5a57338b 100644
--- a/fail2ban/client/actionreader.py
+++ b/fail2ban/client/actionreader.py
@@ -89,11 +89,11 @@ class ActionReader(DefinitionInitConfigReader):
stream = list()
stream.append(head + ["addaction", self._name])
multi = []
- for opt, optval in opts.iteritems():
+ for opt, optval in opts.items():
if opt in self._configOpts and not opt.startswith('known/'):
multi.append([opt, optval])
if self._initOpts:
- for opt, optval in self._initOpts.iteritems():
+ for opt, optval in self._initOpts.items():
if opt not in self._configOpts and not opt.startswith('known/'):
multi.append([opt, optval])
if len(multi) > 1:
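This hunk is the first of many mechanical Python 3 ports in the patch: `dict.iteritems()` is gone, and `items()` already returns a view. The related `list(...)` wrappers that appear in later hunks exist because a view must not be iterated while the dict is being mutated; a tiny illustration:

```python
opts = {"maxretry": 5, "bantime": "10m"}

# Python 3: items() is a view, cheap to iterate, no .iteritems() needed.
for opt, optval in opts.items():
    print(opt, optval)

# Snapshot the keys before mutating, as the later configparserinc hunk does
# with list(s.keys()):
for opt in list(opts.keys()):
    opts["known/" + opt] = opts.pop(opt)
```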
diff --git a/fail2ban/client/beautifier.py b/fail2ban/client/beautifier.py
index 97cd38b2..7ef173a6 100644
--- a/fail2ban/client/beautifier.py
+++ b/fail2ban/client/beautifier.py
@@ -71,24 +71,78 @@ class Beautifier:
elif inC[0] == "echo":
msg = ' '.join(msg)
elif inC[0:1] == ['status']:
- if len(inC) > 1:
- # Display information
- msg = ["Status for the jail: %s" % inC[1]]
+ def jail_stat(response, pref=""):
+ # Display jail information
for n, res1 in enumerate(response):
- prefix1 = "`-" if n == len(response) - 1 else "|-"
+ prefix1 = pref + ("`-" if n == len(response) - 1 else "|-")
msg.append("%s %s" % (prefix1, res1[0]))
- prefix1 = " " if n == len(response) - 1 else "| "
+ prefix1 = pref + (" " if n == len(response) - 1 else "| ")
for m, res2 in enumerate(res1[1]):
prefix2 = prefix1 + ("`-" if m == len(res1[1]) - 1 else "|-")
val = " ".join(map(str, res2[1])) if isinstance(res2[1], list) else res2[1]
msg.append("%s %s:\t%s" % (prefix2, res2[0], val))
+ if len(inC) > 1 and inC[1] != "--all":
+ msg = ["Status for the jail: %s" % inC[1]]
+ jail_stat(response)
else:
+ jstat = None
+ if len(inC) > 1: # --all
+ jstat = response[-1]
+ response = response[:-1]
msg = ["Status"]
for n, res1 in enumerate(response):
- prefix1 = "`-" if n == len(response) - 1 else "|-"
+ prefix1 = "`-" if not jstat and n == len(response) - 1 else "|-"
val = " ".join(map(str, res1[1])) if isinstance(res1[1], list) else res1[1]
msg.append("%s %s:\t%s" % (prefix1, res1[0], val))
+ if jstat:
+ msg.append("`- Status for the jails:")
+ i = 0
+ for n, j in jstat.items():
+ i += 1
+ prefix1 = "`-" if i == len(jstat) else "|-"
+ msg.append(" %s Jail: %s" % (prefix1, n))
+ jail_stat(j, " " if i == len(jstat) else " | ")
msg = "\n".join(msg)
+ elif inC[0:1] == ['stats'] or inC[0:1] == ['statistics']:
+ def _statstable(response):
+ tophead = ["Jail", "Backend", "Filter", "Actions"]
+ headers = ["", "", "cur", "tot", "cur", "tot"]
+ minlens = [8, 8, 3, 3, 3, 3]
+ ralign = [0, 0, 1, 1, 1, 1]
+ rows = [[n, r[0], *r[1], *r[2]] for n, r in response.items()]
+ lens = []
+ for i in range(len(rows[0])):
+ col = (len(str(s[i])) for s in rows)
+ lens.append(max(minlens[i], max(col)))
+ rfmt = []
+ hfmt = []
+ for i in range(len(rows[0])):
+ f = "%%%ds" if ralign[i] else "%%-%ds"
+ rfmt.append(f % lens[i])
+ hfmt.append(f % lens[i])
+ rfmt = [rfmt[0], rfmt[1], "%s \u2502 %s" % (rfmt[2], rfmt[3]), "%s \u2502 %s" % (rfmt[4], rfmt[5])]
+ hfmt = [hfmt[0], hfmt[1], "%s \u2502 %s" % (hfmt[2], hfmt[3]), "%s \u2502 %s" % (hfmt[4], hfmt[5])]
+ tlens = [lens[0], lens[1], 3 + lens[2] + lens[3], 3 + lens[4] + lens[5]]
+ tfmt = [hfmt[0], hfmt[1], "%%-%ds" % (tlens[2],), "%%-%ds" % (tlens[3],)]
+ tsep = tfmt[0:2]
+ rfmt = " \u2551 ".join(rfmt)
+ hfmt = " \u2551 ".join(hfmt)
+ tfmt = " \u2551 ".join(tfmt)
+ tsep = " \u2551 ".join(tsep)
+ separator = ((tsep % tuple(tophead[0:2])) + " \u255F\u2500" +
+ ("\u2500\u256B\u2500".join(['\u2500' * n for n in tlens[2:]])) + '\u2500')
+ ret = []
+ ret.append(tfmt % tuple(["", ""]+tophead[2:]))
+ ret.append(separator)
+ ret.append(hfmt % tuple(headers))
+ separator = "\u2550\u256C\u2550".join(['\u2550' * n for n in tlens]) + '\u2550'
+ ret.append(separator)
+ for row in rows:
+ ret.append(rfmt % tuple(row))
+ separator = "\u2550\u2569\u2550".join(['\u2550' * n for n in tlens]) + '\u2550'
+ ret.append(separator)
+ return ret
+ msg = "\n".join(_statstable(response))
elif len(inC) < 2:
pass # to few cmd args for below
elif inC[1] == "syslogsocket":
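The new `stats` branch renders a box-drawing table whose column widths are derived from the longest cell of each column; the response layout `{jail: [backend, [cur, tot], [cur, tot]]}` is inferred from the list comprehension above. A reduced, hypothetical sketch of the same width computation (plain ASCII separators, invented header labels and sample data):

```python
response = {"sshd": ["systemd", [2, 15], [1, 8]],
            "nginx-forbidden": ["polling", [0, 3], [0, 1]]}

rows = [[n, r[0], *r[1], *r[2]] for n, r in response.items()]
header = ["Jail", "Backend", "F-cur", "F-tot", "A-cur", "A-tot"]
widths = [max(len(str(x)) for x in col) for col in zip(header, *rows)]

fmt = " | ".join("%%-%ds" % w for w in widths)
print(fmt % tuple(header))
print("-+-".join("-" * w for w in widths))
for row in rows:
    print(fmt % tuple(row))
```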
diff --git a/fail2ban/client/configparserinc.py b/fail2ban/client/configparserinc.py
index cc4ada0a..11b9a461 100644
--- a/fail2ban/client/configparserinc.py
+++ b/fail2ban/client/configparserinc.py
@@ -29,49 +29,36 @@ import re
import sys
from ..helpers import getLogger
-if sys.version_info >= (3,): # pragma: 2.x no cover
+# SafeConfigParser deprecated from Python 3.2 (renamed to ConfigParser)
+from configparser import ConfigParser as SafeConfigParser, BasicInterpolation, \
+ InterpolationMissingOptionError, NoOptionError, NoSectionError
- # SafeConfigParser deprecated from Python 3.2 (renamed to ConfigParser)
- from configparser import ConfigParser as SafeConfigParser, BasicInterpolation, \
- InterpolationMissingOptionError, NoOptionError, NoSectionError
+# And interpolation of __name__ was simply removed, thus we need to
+# decorate default interpolator to handle it
+class BasicInterpolationWithName(BasicInterpolation):
+ """Decorator to bring __name__ interpolation back.
- # And interpolation of __name__ was simply removed, thus we need to
- # decorate default interpolator to handle it
- class BasicInterpolationWithName(BasicInterpolation):
- """Decorator to bring __name__ interpolation back.
+ Original handling of __name__ was removed because of
+ functional deficiencies: http://bugs.python.org/issue10489
- Original handling of __name__ was removed because of
- functional deficiencies: http://bugs.python.org/issue10489
+ commit v3.2a4-105-g61f2761
+ Author: Lukasz Langa
+ Date: Sun Nov 21 13:41:35 2010 +0000
- commit v3.2a4-105-g61f2761
- Author: Lukasz Langa
- Date: Sun Nov 21 13:41:35 2010 +0000
+ Issue #10489: removed broken `__name__` support from configparser
- Issue #10489: removed broken `__name__` support from configparser
+ But should be fine to reincarnate for our use case
+ """
+ def _interpolate_some(self, parser, option, accum, rest, section, map,
+ *args, **kwargs):
+ if section and not (__name__ in map):
+ map = map.copy() # just to be safe
+ map['__name__'] = section
+ # try to wrap section options like %(section/option)s:
+ parser._map_section_options(section, option, rest, map)
+ return super(BasicInterpolationWithName, self)._interpolate_some(
+ parser, option, accum, rest, section, map, *args, **kwargs)
- But should be fine to reincarnate for our use case
- """
- def _interpolate_some(self, parser, option, accum, rest, section, map,
- *args, **kwargs):
- if section and not (__name__ in map):
- map = map.copy() # just to be safe
- map['__name__'] = section
- # try to wrap section options like %(section/option)s:
- parser._map_section_options(section, option, rest, map)
- return super(BasicInterpolationWithName, self)._interpolate_some(
- parser, option, accum, rest, section, map, *args, **kwargs)
-
-else: # pragma: 3.x no cover
- from ConfigParser import SafeConfigParser, \
- InterpolationMissingOptionError, NoOptionError, NoSectionError
-
- # Interpolate missing known/option as option from default section
- SafeConfigParser._cp_interpolate_some = SafeConfigParser._interpolate_some
- def _interpolate_some(self, option, accum, rest, section, map, *args, **kwargs):
- # try to wrap section options like %(section/option)s:
- self._map_section_options(section, option, rest, map)
- return self._cp_interpolate_some(option, accum, rest, section, map, *args, **kwargs)
- SafeConfigParser._interpolate_some = _interpolate_some
def _expandConfFilesWithLocal(filenames):
"""Expands config files with local extension.
@@ -129,20 +116,14 @@ after = 1.conf
CONDITIONAL_RE = re.compile(r"^(\w+)(\?.+)$")
- if sys.version_info >= (3,2):
- # overload constructor only for fancy new Python3's
- def __init__(self, share_config=None, *args, **kwargs):
- kwargs = kwargs.copy()
- kwargs['interpolation'] = BasicInterpolationWithName()
- kwargs['inline_comment_prefixes'] = ";"
- super(SafeConfigParserWithIncludes, self).__init__(
- *args, **kwargs)
- self._cfg_share = share_config
-
- else:
- def __init__(self, share_config=None, *args, **kwargs):
- SafeConfigParser.__init__(self, *args, **kwargs)
- self._cfg_share = share_config
+ # overload constructor only for fancy new Python3's
+ def __init__(self, share_config=None, *args, **kwargs):
+ kwargs = kwargs.copy()
+ kwargs['interpolation'] = BasicInterpolationWithName()
+ kwargs['inline_comment_prefixes'] = ";"
+ super(SafeConfigParserWithIncludes, self).__init__(
+ *args, **kwargs)
+ self._cfg_share = share_config
def get_ex(self, section, option, raw=False, vars={}):
"""Get an option value for a given section.
@@ -327,7 +308,7 @@ after = 1.conf
# mix it with defaults:
return set(opts.keys()) | set(self._defaults)
# only own option names:
- return opts.keys()
+ return list(opts.keys())
def read(self, filenames, get_includes=True):
if not isinstance(filenames, list):
@@ -356,7 +337,7 @@ after = 1.conf
ret += i
# merge defaults and all sections to self:
alld.update(cfg.get_defaults())
- for n, s in cfg.get_sections().iteritems():
+ for n, s in cfg.get_sections().items():
# conditional sections
cond = SafeConfigParserWithIncludes.CONDITIONAL_RE.match(n)
if cond:
@@ -366,14 +347,14 @@ after = 1.conf
del(s['__name__'])
except KeyError:
pass
- for k in s.keys():
+ for k in list(s.keys()):
v = s.pop(k)
s[k + cond] = v
s2 = alls.get(n)
if isinstance(s2, dict):
# save previous known values, for possible using in local interpolations later:
self.merge_section('KNOWN/'+n,
- dict(filter(lambda i: i[0] in s, s2.iteritems())), '')
+ dict([i for i in iter(s2.items()) if i[0] in s]), '')
# merge section
s2.update(s)
else:
@@ -385,10 +366,7 @@ after = 1.conf
if logSys.getEffectiveLevel() <= logLevel:
logSys.log(logLevel, " Reading file: %s", fileNamesFull[0])
# read file(s) :
- if sys.version_info >= (3,2): # pragma: no cover
- return SafeConfigParser.read(self, fileNamesFull, encoding='utf-8')
- else:
- return SafeConfigParser.read(self, fileNamesFull)
+ return SafeConfigParser.read(self, fileNamesFull, encoding='utf-8')
def merge_section(self, section, options, pref=None):
alls = self.get_sections()
@@ -400,7 +378,7 @@ after = 1.conf
sec.update(options)
return
sk = {}
- for k, v in options.iteritems():
+ for k, v in options.items():
if not k.startswith(pref) and k != '__name__':
sk[pref+k] = v
sec.update(sk)
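`BasicInterpolationWithName` above restores `%(__name__)s` by injecting the section name into the interpolation map before delegating to the stock interpolator (fail2ban additionally hooks its `%(section/option)s` mapping in the same place). A standalone sketch of just the `__name__` part, independent of fail2ban's classes:

```python
from configparser import ConfigParser, BasicInterpolation

class InterpolationWithName(BasicInterpolation):
    """Make %(__name__)s expand to the current section name again."""
    def _interpolate_some(self, parser, option, accum, rest, section, map, *args, **kwargs):
        if section and '__name__' not in map:
            map = dict(map, __name__=section)   # copy; don't mutate parser state
        return super()._interpolate_some(parser, option, accum, rest, section, map, *args, **kwargs)

cfg = ConfigParser(interpolation=InterpolationWithName())
cfg.read_string("[sshd]\nbanner = jail %(__name__)s enabled\n")
print(cfg.get("sshd", "banner"))   # jail sshd enabled
```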
diff --git a/fail2ban/client/configreader.py b/fail2ban/client/configreader.py
index 1b5a56a2..6b65e71a 100644
--- a/fail2ban/client/configreader.py
+++ b/fail2ban/client/configreader.py
@@ -26,7 +26,7 @@ __license__ = "GPL"
import glob
import os
-from ConfigParser import NoOptionError, NoSectionError
+from configparser import NoOptionError, NoSectionError
from .configparserinc import sys, SafeConfigParserWithIncludes, logLevel
from ..helpers import getLogger, _as_bool, _merge_dicts, substituteRecursiveTags
@@ -98,7 +98,7 @@ class ConfigReader():
def read(self, name, once=True):
""" Overloads a default (not shared) read of config reader.
- To prevent mutiple reads of config files with it includes, reads into
+		To prevent multiple reads of config files with their includes, reads into
the config reader, if it was not yet cached/shared by 'name'.
"""
# already shared ?
@@ -183,7 +183,7 @@ class ConfigReader():
class ConfigReaderUnshared(SafeConfigParserWithIncludes):
"""Unshared config reader (previously ConfigReader).
- Do not use this class (internal not shared/cached represenation).
+ Do not use this class (internal not shared/cached representation).
Use ConfigReader instead.
"""
@@ -221,7 +221,7 @@ class ConfigReaderUnshared(SafeConfigParserWithIncludes):
config_files += sorted(glob.glob('%s/*.local' % config_dir))
# choose only existing ones
- config_files = filter(os.path.exists, config_files)
+ config_files = list(filter(os.path.exists, config_files))
if len(config_files):
# at least one config exists and accessible
@@ -277,7 +277,7 @@ class ConfigReaderUnshared(SafeConfigParserWithIncludes):
# TODO: validate error handling here.
except NoOptionError:
if not optvalue is None:
- logSys.warning("'%s' not defined in '%s'. Using default one: %r"
+ logSys.debug("'%s' not defined in '%s'. Using default one: %r"
% (optname, sec, optvalue))
values[optname] = optvalue
# elif logSys.getEffectiveLevel() <= logLevel:
diff --git a/fail2ban/client/csocket.py b/fail2ban/client/csocket.py
index 88795674..ed2d2516 100644
--- a/fail2ban/client/csocket.py
+++ b/fail2ban/client/csocket.py
@@ -47,7 +47,7 @@ class CSocket:
def send(self, msg, nonblocking=False, timeout=None):
# Convert every list member to string
- obj = dumps(map(CSocket.convert, msg), HIGHEST_PROTOCOL)
+ obj = dumps(list(map(CSocket.convert, msg)), HIGHEST_PROTOCOL)
self.__csock.send(obj)
self.__csock.send(CSPROTO.END)
return self.receive(self.__csock, nonblocking, timeout)
@@ -72,7 +72,7 @@ class CSocket:
@staticmethod
def convert(m):
"""Convert every "unexpected" member of message to string"""
- if isinstance(m, (basestring, bool, int, float, list, dict, set)):
+ if isinstance(m, (str, bool, int, float, list, dict, set)):
return m
else: # pragma: no cover
return str(m)
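Wrapping `map(...)` in `list(...)` before `dumps()` is not cosmetic: on Python 3 `map` returns a lazy iterator that pickle refuses to serialize. A quick demonstration with plain pickle, outside of CSocket:

```python
import pickle

msg = ["status", "sshd"]
try:
    pickle.dumps(map(str, msg), pickle.HIGHEST_PROTOCOL)
except TypeError as e:
    print("lazy map object:", e)          # cannot pickle 'map' object

data = pickle.dumps(list(map(str, msg)), pickle.HIGHEST_PROTOCOL)
print(pickle.loads(data))                  # ['status', 'sshd']
```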
diff --git a/fail2ban/client/fail2banclient.py b/fail2ban/client/fail2banclient.py
index f3b0f7b2..e8fa410d 100755
--- a/fail2ban/client/fail2banclient.py
+++ b/fail2ban/client/fail2banclient.py
@@ -45,7 +45,7 @@ def _thread_name():
return threading.current_thread().__class__.__name__
def input_command(): # pragma: no cover
- return raw_input(PROMPT)
+ return input(PROMPT)
##
#
@@ -456,7 +456,7 @@ class Fail2banClient(Fail2banCmdLine, Thread):
return False
finally:
self._alive = False
- for s, sh in _prev_signals.iteritems():
+ for s, sh in _prev_signals.items():
signal.signal(s, sh)
diff --git a/fail2ban/client/fail2bancmdline.py b/fail2ban/client/fail2bancmdline.py
index c2f6d0be..62008288 100644
--- a/fail2ban/client/fail2bancmdline.py
+++ b/fail2ban/client/fail2bancmdline.py
@@ -27,7 +27,7 @@ import sys
from ..version import version, normVersion
from ..protocol import printFormatted
-from ..helpers import getLogger, str2LogLevel, getVerbosityFormat, BrokenPipeError
+from ..helpers import getLogger, str2LogLevel, getVerbosityFormat
# Gets the instance of the logger.
logSys = getLogger("fail2ban")
diff --git a/fail2ban/client/fail2banregex.py b/fail2ban/client/fail2banregex.py
index b1795588..d3615c8a 100644
--- a/fail2ban/client/fail2banregex.py
+++ b/fail2ban/client/fail2banregex.py
@@ -40,10 +40,10 @@ import os
import shlex
import sys
import time
-import urllib
+import urllib.request, urllib.parse, urllib.error
from optparse import OptionParser, Option
-from ConfigParser import NoOptionError, NoSectionError, MissingSectionHeaderError
+from configparser import NoOptionError, NoSectionError, MissingSectionHeaderError
try: # pragma: no cover
from ..server.filtersystemd import FilterSystemd
@@ -51,7 +51,7 @@ except ImportError:
FilterSystemd = None
from ..version import version, normVersion
-from .filterreader import FilterReader
+from .jailreader import FilterReader, JailReader, NoJailError
from ..server.filter import Filter, FileContainer, MyTime
from ..server.failregex import Regex, RegexException
@@ -67,9 +67,9 @@ def debuggexURL(sample, regex, multiline=False, useDns="yes"):
'flavor': 'python'
}
if multiline: args['flags'] = 'm'
- return 'https://www.debuggex.com/?' + urllib.urlencode(args)
+ return 'https://www.debuggex.com/?' + urllib.parse.urlencode(args)
-def output(args): # pragma: no cover (overriden in test-cases)
+def output(args): # pragma: no cover (overridden in test-cases)
print(args)
def shortstr(s, l=53):
@@ -246,7 +246,7 @@ class Fail2banRegex(object):
def __init__(self, opts):
# set local protected members from given options:
- self.__dict__.update(dict(('_'+o,v) for o,v in opts.__dict__.iteritems()))
+ self.__dict__.update(dict(('_'+o,v) for o,v in opts.__dict__.items()))
self._opts = opts
self._maxlines_set = False # so we allow to override maxlines in cmdline
self._datepattern_set = False
@@ -280,7 +280,7 @@ class Fail2banRegex(object):
self._filter.setUseDns(opts.usedns)
self._filter.returnRawHost = opts.raw
self._filter.checkAllRegex = opts.checkAllRegex and not opts.out
- # ignore pending (without ID/IP), added to matches if it hits later (if ID/IP can be retreved)
+ # ignore pending (without ID/IP), added to matches if it hits later (if ID/IP can be retrieved)
self._filter.ignorePending = bool(opts.out)
# callback to increment ignored RE's by index (during process):
self._filter.onIgnoreRegex = self._onIgnoreRegex
@@ -312,12 +312,18 @@ class Fail2banRegex(object):
def _dumpRealOptions(self, reader, fltOpt):
realopts = {}
combopts = reader.getCombined()
+ if isinstance(reader, FilterReader):
+ _get_opt = lambda k: reader.get('Definition', k)
+ elif reader.filter: # JailReader for jail with filter:
+ _get_opt = lambda k: reader.filter.get('Definition', k)
+ else: # JailReader for jail without filter:
+ _get_opt = lambda k: None
# output all options that are specified in filter-argument as well as some special (mostly interested):
- for k in ['logtype', 'datepattern'] + fltOpt.keys():
+ for k in ['logtype', 'datepattern'] + list(fltOpt.keys()):
# combined options win, but they contain only a sub-set in filter expected keys,
# so get the rest from definition section:
try:
- realopts[k] = combopts[k] if k in combopts else reader.get('Definition', k)
+ realopts[k] = combopts[k] if k in combopts else _get_opt(k)
except NoOptionError: # pragma: no cover
pass
self.output("Real filter options : %r" % realopts)
@@ -330,16 +336,26 @@ class Fail2banRegex(object):
fltName = value
fltFile = None
fltOpt = {}
+ jail = None
if regextype == 'fail':
if re.search(r'(?ms)^/{0,3}[\w/_\-.]+(?:\[.*\])?$', value):
try:
fltName, fltOpt = extractOptions(value)
+ if not re.search(r'(?ms)(?:/|\.(?:conf|local)$)', fltName): # name of jail?
+ try:
+ jail = JailReader(fltName, force_enable=True,
+ share_config=self.share_config, basedir=basedir)
+ jail.read()
+ except NoJailError:
+ jail = None
if "." in fltName[~5:]:
tryNames = (fltName,)
else:
tryNames = (fltName, fltName + '.conf', fltName + '.local')
for fltFile in tryNames:
- if not "/" in fltFile:
+ if os.path.dirname(fltFile) == 'filter.d':
+ fltFile = os.path.join(basedir, fltFile)
+ elif not "/" in fltFile:
if os.path.basename(basedir) == 'filter.d':
fltFile = os.path.join(basedir, fltFile)
else:
@@ -354,8 +370,25 @@ class Fail2banRegex(object):
output(" while parsing: %s" % (value,))
if self._verbose: raise(e)
return False
+
+ readercommands = None
+ # if it is jail:
+ if jail:
+ self.output( "Use %11s jail : %s" % ('', fltName) )
+ if fltOpt:
+ self.output( "Use jail/flt options : %r" % fltOpt )
+ if not fltOpt: fltOpt = {}
+ fltOpt['backend'] = self._backend
+ ret = jail.getOptions(addOpts=fltOpt)
+ if not ret:
+ output('ERROR: Failed to get jail for %r' % (value,))
+ return False
+ # show real options if expected:
+ if self._verbose > 1 or logSys.getEffectiveLevel()<=logging.DEBUG:
+ self._dumpRealOptions(jail, fltOpt)
+ readercommands = jail.convert(allow_no_files=True)
# if it is filter file:
- if fltFile is not None:
+ elif fltFile is not None:
if (basedir == self._opts.config
or os.path.basename(basedir) == 'filter.d'
or ("." not in fltName[~5:] and "/" not in fltName)
@@ -364,16 +397,17 @@ class Fail2banRegex(object):
if os.path.basename(basedir) == 'filter.d':
basedir = os.path.dirname(basedir)
fltName = os.path.splitext(os.path.basename(fltName))[0]
- self.output( "Use %11s filter file : %s, basedir: %s" % (regex, fltName, basedir) )
+ self.output( "Use %11s file : %s, basedir: %s" % ('filter', fltName, basedir) )
else:
## foreign file - readexplicit this file and includes if possible:
- self.output( "Use %11s file : %s" % (regex, fltName) )
+ self.output( "Use %11s file : %s" % ('filter', fltName) )
basedir = None
if not os.path.isabs(fltName): # avoid join with "filter.d" inside FilterReader
fltName = os.path.abspath(fltName)
if fltOpt:
self.output( "Use filter options : %r" % fltOpt )
- reader = FilterReader(fltName, 'fail2ban-regex-jail', fltOpt, share_config=self.share_config, basedir=basedir)
+ reader = FilterReader(fltName, 'fail2ban-regex-jail', fltOpt,
+ share_config=self.share_config, basedir=basedir)
ret = None
try:
if basedir is not None:
@@ -398,6 +432,7 @@ class Fail2banRegex(object):
# to stream:
readercommands = reader.convert()
+ if readercommands:
regex_values = {}
for opt in readercommands:
if opt[0] == 'multi-set':
@@ -440,7 +475,7 @@ class Fail2banRegex(object):
self.output( "Use %11s line : %s" % (regex, shortstr(value)) )
regex_values = {regextype: [RegexStat(value)]}
- for regextype, regex_values in regex_values.iteritems():
+ for regextype, regex_values in regex_values.items():
regex = regextype + 'regex'
setattr(self, "_" + regex, regex_values)
for regex in regex_values:
@@ -476,7 +511,7 @@ class Fail2banRegex(object):
ret.append(match)
else:
is_ignored = True
- if self._opts.out: # (formated) output - don't need stats:
+ if self._opts.out: # (formatted) output - don't need stats:
return None, ret, None
# prefregex stats:
if self._filter.prefRegex:
@@ -532,13 +567,13 @@ class Fail2banRegex(object):
def _out(ret):
for r in ret:
for r in r[3].get('matches'):
- if not isinstance(r, basestring):
+ if not isinstance(r, str):
r = ''.join(r for r in r)
output(r)
elif ofmt == 'row':
def _out(ret):
for r in ret:
- output('[%r,\t%r,\t%r],' % (r[1],r[2],dict((k,v) for k, v in r[3].iteritems() if k != 'matches')))
+ output('[%r,\t%r,\t%r],' % (r[1],r[2],dict((k,v) for k, v in r[3].items() if k != 'matches')))
elif '<' not in ofmt:
def _out(ret):
for r in ret:
@@ -573,7 +608,7 @@ class Fail2banRegex(object):
# wrap multiline tag (msg) interpolations to single line:
for r, v in rows:
for r in r[3].get('matches'):
- if not isinstance(r, basestring):
+ if not isinstance(r, str):
r = ''.join(r for r in r)
r = v.replace("\x00msg\x00", r)
output(r)
@@ -595,7 +630,7 @@ class Fail2banRegex(object):
continue
line_datetimestripped, ret, is_ignored = self.testRegex(line)
- if self._opts.out: # (formated) output:
+ if self._opts.out: # (formatted) output:
if len(ret) > 0 and not is_ignored: out(ret)
continue
@@ -639,9 +674,9 @@ class Fail2banRegex(object):
ans = [[]]
for arg in [l, regexlist]:
ans = [ x + [y] for x in ans for y in arg ]
- b = map(lambda a: a[0] + ' | ' + a[1].getFailRegex() + ' | ' +
+ b = [a[0] + ' | ' + a[1].getFailRegex() + ' | ' +
debuggexURL(self.encode_line(a[0]), a[1].getFailRegex(),
- multiline, self._opts.usedns), ans)
+ multiline, self._opts.usedns) for a in ans]
pprint_list([x.rstrip() for x in b], header)
else:
output( "%s too many to print. Use --print-all-%s " \
@@ -789,7 +824,15 @@ class Fail2banRegex(object):
return True
+def _loc_except_hook(exctype, value, traceback):
+ if (exctype != BrokenPipeError and exctype != IOError or value.errno != 32):
+ return sys.__excepthook__(exctype, value, traceback)
+ # pipe seems to be closed (head / tail / etc), thus simply exit:
+ sys.exit(0)
+
def exec_command_line(*args):
+ sys.excepthook = _loc_except_hook; # stop on closed/broken pipe
+
logging.exitOnIOError = True
parser = get_opt_parser()
(opts, args) = parser.parse_args(*args)
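The new `_loc_except_hook` makes `fail2ban-regex ... | head` exit quietly instead of dumping a traceback once the reading side closes the pipe. The same pattern in a standalone, hypothetical script (errno 32 is EPIPE, as in the hook above):

```python
import errno
import sys

def _quiet_broken_pipe(exctype, value, tb):
    # reader side of the pipe (head, less, ...) is gone: exit instead of tracing back
    if exctype is BrokenPipeError or \
       (issubclass(exctype, OSError) and getattr(value, "errno", None) == errno.EPIPE):
        sys.exit(0)
    return sys.__excepthook__(exctype, value, tb)

sys.excepthook = _quiet_broken_pipe

for i in range(1000000):
    print(i)   # `python3 script.py | head -n1` now exits via the hook, without the usual traceback
```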
diff --git a/fail2ban/client/fail2banserver.py b/fail2ban/client/fail2banserver.py
index eee78d5f..7e7f492d 100644
--- a/fail2ban/client/fail2banserver.py
+++ b/fail2ban/client/fail2banserver.py
@@ -45,7 +45,7 @@ class Fail2banServer(Fail2banCmdLine):
@staticmethod
def startServerDirect(conf, daemon=True, setServer=None):
- logSys.debug(" direct starting of server in %s, deamon: %s", os.getpid(), daemon)
+ logSys.debug(" direct starting of server in %s, daemon: %s", os.getpid(), daemon)
from ..server.server import Server
server = None
try:
@@ -120,7 +120,7 @@ class Fail2banServer(Fail2banCmdLine):
if frk: # pragma: no cover
os.execv(exe, args)
else:
- # use P_WAIT instead of P_NOWAIT (to prevent defunct-zomby process), it startet as daemon, so parent exit fast after fork):
+				# use P_WAIT instead of P_NOWAIT (to prevent a defunct/zombie process); it is started as a daemon, so the parent exits fast after fork:
ret = os.spawnv(os.P_WAIT, exe, args)
if ret != 0: # pragma: no cover
raise OSError(ret, "Unknown error by executing server %r with %r" % (args[1], exe))
diff --git a/fail2ban/client/filterreader.py b/fail2ban/client/filterreader.py
index 24341014..0b6c0172 100644
--- a/fail2ban/client/filterreader.py
+++ b/fail2ban/client/filterreader.py
@@ -71,7 +71,7 @@ class FilterReader(DefinitionInitConfigReader):
@staticmethod
def _fillStream(stream, opts, jailName):
prio0idx = 0
- for opt, value in opts.iteritems():
+ for opt, value in opts.items():
# Do not send a command if the value is not set (empty).
if value is None: continue
if opt in ("failregex", "ignoreregex"):
diff --git a/fail2ban/client/jailreader.py b/fail2ban/client/jailreader.py
index 37746d4c..e7242bfd 100644
--- a/fail2ban/client/jailreader.py
+++ b/fail2ban/client/jailreader.py
@@ -29,16 +29,19 @@ import json
import os.path
import re
-from .configreader import ConfigReaderUnshared, ConfigReader
+from .configreader import ConfigReaderUnshared, ConfigReader, NoSectionError
from .filterreader import FilterReader
from .actionreader import ActionReader
from ..version import version
-from ..helpers import getLogger, extractOptions, splitWithOptions, splitwords
+from ..helpers import _merge_dicts, getLogger, extractOptions, splitWithOptions, splitwords
# Gets the instance of the logger.
logSys = getLogger(__name__)
+class NoJailError(ValueError):
+ pass
+
class JailReader(ConfigReader):
def __init__(self, name, force_enable=False, **kwargs):
@@ -64,7 +67,7 @@ class JailReader(ConfigReader):
# Before returning -- verify that requested section
# exists at all
if not (self.__name in self.sections()):
- raise ValueError("Jail %r was not found among available"
+ raise NoJailError("Jail %r was not found among available"
% self.__name)
return out
@@ -117,9 +120,9 @@ class JailReader(ConfigReader):
}
_configOpts.update(FilterReader._configOpts)
- _ignoreOpts = set(['action', 'filter', 'enabled'] + FilterReader._configOpts.keys())
+ _ignoreOpts = set(['action', 'filter', 'enabled', 'backend'] + list(FilterReader._configOpts.keys()))
- def getOptions(self):
+ def getOptions(self, addOpts=None):
basedir = self.getBaseDir()
@@ -136,6 +139,8 @@ class JailReader(ConfigReader):
shouldExist=True)
if not self.__opts: # pragma: no cover
raise JailDefError("Init jail options failed")
+ if addOpts:
+ self.__opts = _merge_dicts(self.__opts, addOpts)
if not self.isEnabled():
return True
@@ -147,6 +152,8 @@ class JailReader(ConfigReader):
filterName, filterOpt = extractOptions(flt)
except ValueError as e:
raise JailDefError("Invalid filter definition %r: %s" % (flt, e))
+ if addOpts:
+ filterOpt = _merge_dicts(filterOpt, addOpts)
self.__filter = FilterReader(
filterName, self.__name, filterOpt,
share_config=self.share_config, basedir=basedir)
@@ -219,6 +226,15 @@ class JailReader(ConfigReader):
return False
return True
+ @property
+ def filter(self):
+ return self.__filter
+
+ def getCombined(self):
+ if not self.__filter:
+ return self.__opts
+ return _merge_dicts(self.__opts, self.__filter.getCombined())
+
def convert(self, allow_no_files=False):
"""Convert read before __opts to the commands stream
@@ -235,14 +251,15 @@ class JailReader(ConfigReader):
if e:
stream.extend([['config-error', "Jail '%s' skipped, because of wrong configuration: %s" % (self.__name, e)]])
return stream
- # fill jail with filter options, using filter (only not overriden in jail):
+ # fill jail with filter options, using filter (only not overridden in jail):
if self.__filter:
stream.extend(self.__filter.convert())
# and using options from jail:
FilterReader._fillStream(stream, self.__opts, self.__name)
- for opt, value in self.__opts.iteritems():
+ backend = self.__opts.get('backend', 'auto')
+ for opt, value in self.__opts.items():
if opt == "logpath":
- if self.__opts.get('backend', '').startswith("systemd"): continue
+ if backend.startswith("systemd"): continue
found_files = 0
for path in value.split("\n"):
path = path.rsplit(" ", 1)
@@ -260,8 +277,6 @@ class JailReader(ConfigReader):
if not allow_no_files:
raise ValueError(msg)
logSys.warning(msg)
- elif opt == "backend":
- backend = value
elif opt == "ignoreip":
stream.append(["set", self.__name, "addignoreip"] + splitwords(value))
elif opt not in JailReader._ignoreOpts:
diff --git a/fail2ban/compat/asynchat.py b/fail2ban/compat/asynchat.py
new file mode 100644
index 00000000..0671b774
--- /dev/null
+++ b/fail2ban/compat/asynchat.py
@@ -0,0 +1,310 @@
+# -*- Mode: Python; tab-width: 4 -*-
+# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
+# Author: Sam Rushing
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc..).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+for example:
+Say you build an async nntp client using this class. At the start
+of the connection, you'll have self.terminator set to '\r\n', in
+order to process the single-line greeting. Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
+"""
+try:
+ import asyncore
+except ImportError:
+ from . import asyncore
+from collections import deque
+
+
+class async_chat(asyncore.dispatcher):
+ """This is an abstract class. You must derive from this class, and add
+ the two methods collect_incoming_data() and found_terminator()"""
+
+ # these are overridable defaults
+
+ ac_in_buffer_size = 65536
+ ac_out_buffer_size = 65536
+
+ # we don't want to enable the use of encoding by default, because that is a
+ # sign of an application bug that we don't want to pass silently
+
+ use_encoding = 0
+ encoding = 'latin-1'
+
+ def __init__(self, sock=None, map=None):
+ # for string terminator matching
+ self.ac_in_buffer = b''
+
+ # we use a list here rather than io.BytesIO for a few reasons...
+ # del lst[:] is faster than bio.truncate(0)
+ # lst = [] is faster than bio.truncate(0)
+ self.incoming = []
+
+ # we toss the use of the "simple producer" and replace it with
+ # a pure deque, which the original fifo was a wrapping of
+ self.producer_fifo = deque()
+ asyncore.dispatcher.__init__(self, sock, map)
+
+ def collect_incoming_data(self, data):
+ raise NotImplementedError("must be implemented in subclass")
+
+ def _collect_incoming_data(self, data):
+ self.incoming.append(data)
+
+ def _get_data(self):
+ d = b''.join(self.incoming)
+ del self.incoming[:]
+ return d
+
+ def found_terminator(self):
+ raise NotImplementedError("must be implemented in subclass")
+
+ def set_terminator(self, term):
+ """Set the input delimiter.
+
+ Can be a fixed string of any length, an integer, or None.
+ """
+ if isinstance(term, str) and self.use_encoding:
+ term = bytes(term, self.encoding)
+ elif isinstance(term, int) and term < 0:
+ raise ValueError('the number of received bytes must be positive')
+ self.terminator = term
+
+ def get_terminator(self):
+ return self.terminator
+
+ # grab some more data from the socket,
+ # throw it to the collector method,
+ # check for the terminator,
+ # if found, transition to the next state.
+
+ def handle_read(self):
+
+ try:
+ data = self.recv(self.ac_in_buffer_size)
+ except BlockingIOError:
+ return
+ except OSError:
+ self.handle_error()
+ return
+
+ if isinstance(data, str) and self.use_encoding:
+            data = bytes(data, self.encoding)
+ self.ac_in_buffer = self.ac_in_buffer + data
+
+ # Continue to search for self.terminator in self.ac_in_buffer,
+ # while calling self.collect_incoming_data. The while loop
+ # is necessary because we might read several data+terminator
+ # combos with a single recv(4096).
+
+ while self.ac_in_buffer:
+ lb = len(self.ac_in_buffer)
+ terminator = self.get_terminator()
+ if not terminator:
+ # no terminator, collect it all
+ self.collect_incoming_data(self.ac_in_buffer)
+ self.ac_in_buffer = b''
+ elif isinstance(terminator, int):
+ # numeric terminator
+ n = terminator
+ if lb < n:
+ self.collect_incoming_data(self.ac_in_buffer)
+ self.ac_in_buffer = b''
+ self.terminator = self.terminator - lb
+ else:
+ self.collect_incoming_data(self.ac_in_buffer[:n])
+ self.ac_in_buffer = self.ac_in_buffer[n:]
+ self.terminator = 0
+ self.found_terminator()
+ else:
+ # 3 cases:
+ # 1) end of buffer matches terminator exactly:
+ # collect data, transition
+ # 2) end of buffer matches some prefix:
+ # collect data to the prefix
+ # 3) end of buffer does not match any prefix:
+ # collect data
+ terminator_len = len(terminator)
+ index = self.ac_in_buffer.find(terminator)
+ if index != -1:
+ # we found the terminator
+ if index > 0:
+ # don't bother reporting the empty string
+ # (source of subtle bugs)
+ self.collect_incoming_data(self.ac_in_buffer[:index])
+ self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+ # This does the Right Thing if the terminator
+ # is changed here.
+ self.found_terminator()
+ else:
+ # check for a prefix of the terminator
+ index = find_prefix_at_end(self.ac_in_buffer, terminator)
+ if index:
+ if index != lb:
+ # we found a prefix, collect up to the prefix
+ self.collect_incoming_data(self.ac_in_buffer[:-index])
+ self.ac_in_buffer = self.ac_in_buffer[-index:]
+ break
+ else:
+ # no prefix, collect it all
+ self.collect_incoming_data(self.ac_in_buffer)
+ self.ac_in_buffer = b''
+
+ def handle_write(self):
+ self.initiate_send()
+
+ def handle_close(self):
+ self.close()
+
+ def push(self, data):
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError('data argument must be byte-ish (%r)' %
+                            type(data))
+ sabs = self.ac_out_buffer_size
+ if len(data) > sabs:
+ for i in range(0, len(data), sabs):
+ self.producer_fifo.append(data[i:i+sabs])
+ else:
+ self.producer_fifo.append(data)
+ self.initiate_send()
+
+ def push_with_producer(self, producer):
+ self.producer_fifo.append(producer)
+ self.initiate_send()
+
+ def readable(self):
+ "predicate for inclusion in the readable for select()"
+ # cannot use the old predicate, it violates the claim of the
+ # set_terminator method.
+
+ # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+ return 1
+
+ def writable(self):
+ "predicate for inclusion in the writable for select()"
+ return self.producer_fifo or (not self.connected)
+
+ def close_when_done(self):
+ "automatically close this channel once the outgoing queue is empty"
+ self.producer_fifo.append(None)
+
+ def initiate_send(self):
+ while self.producer_fifo and self.connected:
+ first = self.producer_fifo[0]
+ # handle empty string/buffer or None entry
+ if not first:
+ del self.producer_fifo[0]
+ if first is None:
+ self.handle_close()
+ return
+
+ # handle classic producer behavior
+ obs = self.ac_out_buffer_size
+ try:
+ data = first[:obs]
+ except TypeError:
+ data = first.more()
+ if data:
+ self.producer_fifo.appendleft(data)
+ else:
+ del self.producer_fifo[0]
+ continue
+
+ if isinstance(data, str) and self.use_encoding:
+ data = bytes(data, self.encoding)
+
+ # send the data
+ try:
+ num_sent = self.send(data)
+ except OSError:
+ self.handle_error()
+ return
+
+ if num_sent:
+ if num_sent < len(data) or obs < len(first):
+ self.producer_fifo[0] = first[num_sent:]
+ else:
+ del self.producer_fifo[0]
+ # we tried to send some actual data
+ return
+
+ def discard_buffers(self):
+ # Emergencies only!
+ self.ac_in_buffer = b''
+ del self.incoming[:]
+ self.producer_fifo.clear()
+
+
+class simple_producer:
+
+ def __init__(self, data, buffer_size=512):
+ self.data = data
+ self.buffer_size = buffer_size
+
+ def more(self):
+ if len(self.data) > self.buffer_size:
+ result = self.data[:self.buffer_size]
+ self.data = self.data[self.buffer_size:]
+ return result
+ else:
+ result = self.data
+ self.data = b''
+ return result
+
+
+# Given 'haystack', see if any prefix of 'needle' is at its end. This
+# assumes an exact match has already been checked. Return the number of
+# characters matched.
+# for example:
+# f_p_a_e("qwerty\r", "\r\n") => 1
+# f_p_a_e("qwertydkjf", "\r\n") => 0
+# f_p_a_e("qwerty\r\n", "\r\n") =>
+
+# this could maybe be made faster with a computed regex?
+# [answer: no; circa Python-2.0, Jan 2001]
+# new python: 28961/s
+# old python: 18307/s
+# re: 12820/s
+# regex: 14035/s
+
+def find_prefix_at_end(haystack, needle):
+ l = len(needle) - 1
+ while l and not haystack.endswith(needle[:l]):
+ l -= 1
+ return l
diff --git a/fail2ban/compat/asyncore.py b/fail2ban/compat/asyncore.py
new file mode 100644
index 00000000..eeea4888
--- /dev/null
+++ b/fail2ban/compat/asyncore.py
@@ -0,0 +1,642 @@
+# -*- Mode: Python -*-
+# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
+# Author: Sam Rushing
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time". Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads. It's really only practical if your program
+is largely I/O bound. If your program is CPU bound, then pre-emptive
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background." Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
+"""
+
+import select
+import socket
+import sys
+import time
+import warnings
+
+import os
+from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
+ ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
+ errorcode
+
+_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
+ EBADF})
+
+try:
+ socket_map
+except NameError:
+ socket_map = {}
+
+def _strerror(err):
+ try:
+ return os.strerror(err)
+ except (ValueError, OverflowError, NameError):
+ if err in errorcode:
+ return errorcode[err]
+ return "Unknown error %s" %err
+
+class ExitNow(Exception):
+ pass
+
+_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
+
+def read(obj):
+ try:
+ obj.handle_read_event()
+ except _reraised_exceptions:
+ raise
+ except:
+ obj.handle_error()
+
+def write(obj):
+ try:
+ obj.handle_write_event()
+ except _reraised_exceptions:
+ raise
+ except:
+ obj.handle_error()
+
+def _exception(obj):
+ try:
+ obj.handle_expt_event()
+ except _reraised_exceptions:
+ raise
+ except:
+ obj.handle_error()
+
+def readwrite(obj, flags):
+ try:
+ if flags & select.POLLIN:
+ obj.handle_read_event()
+ if flags & select.POLLOUT:
+ obj.handle_write_event()
+ if flags & select.POLLPRI:
+ obj.handle_expt_event()
+ if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
+ obj.handle_close()
+ except OSError as e:
+ if e.errno not in _DISCONNECTED:
+ obj.handle_error()
+ else:
+ obj.handle_close()
+ except _reraised_exceptions:
+ raise
+ except:
+ obj.handle_error()
+
+def poll(timeout=0.0, map=None):
+ if map is None:
+ map = socket_map
+ if map:
+ r = []; w = []; e = []
+ for fd, obj in list(map.items()):
+ is_r = obj.readable()
+ is_w = obj.writable()
+ if is_r:
+ r.append(fd)
+ # accepting sockets should not be writable
+ if is_w and not obj.accepting:
+ w.append(fd)
+ if is_r or is_w:
+ e.append(fd)
+ if [] == r == w == e:
+ time.sleep(timeout)
+ return
+
+ r, w, e = select.select(r, w, e, timeout)
+
+ for fd in r:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ read(obj)
+
+ for fd in w:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ write(obj)
+
+ for fd in e:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ _exception(obj)
+
+def poll2(timeout=0.0, map=None):
+ # Use the poll() support added to the select module in Python 2.0
+ if map is None:
+ map = socket_map
+ if timeout is not None:
+ # timeout is in milliseconds
+ timeout = int(timeout*1000)
+ pollster = select.poll()
+ if map:
+ for fd, obj in list(map.items()):
+ flags = 0
+ if obj.readable():
+ flags |= select.POLLIN | select.POLLPRI
+ # accepting sockets should not be writable
+ if obj.writable() and not obj.accepting:
+ flags |= select.POLLOUT
+ if flags:
+ pollster.register(fd, flags)
+
+ r = pollster.poll(timeout)
+ for fd, flags in r:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ readwrite(obj, flags)
+
+poll3 = poll2 # Alias for backward compatibility
+
+def loop(timeout=30.0, use_poll=False, map=None, count=None):
+ if map is None:
+ map = socket_map
+
+ if use_poll and hasattr(select, 'poll'):
+ poll_fun = poll2
+ else:
+ poll_fun = poll
+
+ if count is None:
+ while map:
+ poll_fun(timeout, map)
+
+ else:
+ while map and count > 0:
+ poll_fun(timeout, map)
+ count = count - 1
+
+class dispatcher:
+
+ debug = False
+ connected = False
+ accepting = False
+ connecting = False
+ closing = False
+ addr = None
+ ignore_log_types = frozenset({'warning'})
+
+ def __init__(self, sock=None, map=None):
+ if map is None:
+ self._map = socket_map
+ else:
+ self._map = map
+
+ self._fileno = None
+
+ if sock:
+ # Set to nonblocking just to make sure for cases where we
+ # get a socket from a blocking source.
+ sock.setblocking(False)
+ self.set_socket(sock, map)
+ self.connected = True
+ # The constructor no longer requires that the socket
+ # passed be connected.
+ try:
+ self.addr = sock.getpeername()
+ except OSError as err:
+ if err.errno in (ENOTCONN, EINVAL):
+ # To handle the case where we got an unconnected
+ # socket.
+ self.connected = False
+ else:
+ # The socket is broken in some unknown way, alert
+ # the user and remove it from the map (to prevent
+ # polling of broken sockets).
+ self.del_channel(map)
+ raise
+ else:
+ self.socket = None
+
+ def __repr__(self):
+ status = [self.__class__.__module__+"."+self.__class__.__qualname__]
+ if self.accepting and self.addr:
+ status.append('listening')
+ elif self.connected:
+ status.append('connected')
+ if self.addr is not None:
+ try:
+ status.append('%s:%d' % self.addr)
+ except TypeError:
+ status.append(repr(self.addr))
+ return '<%s at %#x>' % (' '.join(status), id(self))
+
+ def add_channel(self, map=None):
+ #self.log_info('adding channel %s' % self)
+ if map is None:
+ map = self._map
+ map[self._fileno] = self
+
+ def del_channel(self, map=None):
+ fd = self._fileno
+ if map is None:
+ map = self._map
+ if fd in map:
+ #self.log_info('closing channel %d:%s' % (fd, self))
+ del map[fd]
+ self._fileno = None
+
+ def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
+ self.family_and_type = family, type
+ sock = socket.socket(family, type)
+ sock.setblocking(False)
+ self.set_socket(sock)
+
+ def set_socket(self, sock, map=None):
+ self.socket = sock
+ self._fileno = sock.fileno()
+ self.add_channel(map)
+
+ def set_reuse_addr(self):
+ # try to re-use a server port if possible
+ try:
+ self.socket.setsockopt(
+ socket.SOL_SOCKET, socket.SO_REUSEADDR,
+ self.socket.getsockopt(socket.SOL_SOCKET,
+ socket.SO_REUSEADDR) | 1
+ )
+ except OSError:
+ pass
+
+ # ==================================================
+ # predicates for select()
+ # these are used as filters for the lists of sockets
+ # to pass to select().
+ # ==================================================
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ return True
+
+ # ==================================================
+ # socket object methods.
+ # ==================================================
+
+ def listen(self, num):
+ self.accepting = True
+ if os.name == 'nt' and num > 5:
+ num = 5
+ return self.socket.listen(num)
+
+ def bind(self, addr):
+ self.addr = addr
+ return self.socket.bind(addr)
+
+ def connect(self, address):
+ self.connected = False
+ self.connecting = True
+ err = self.socket.connect_ex(address)
+ if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
+ or err == EINVAL and os.name == 'nt':
+ self.addr = address
+ return
+ if err in (0, EISCONN):
+ self.addr = address
+ self.handle_connect_event()
+ else:
+ raise OSError(err, errorcode[err])
+
+ def accept(self):
+ # XXX can return either an address pair or None
+ try:
+ conn, addr = self.socket.accept()
+ except TypeError:
+ return None
+ except OSError as why:
+ if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
+ return None
+ else:
+ raise
+ else:
+ return conn, addr
+
+ def send(self, data):
+ try:
+ result = self.socket.send(data)
+ return result
+ except OSError as why:
+ if why.errno == EWOULDBLOCK:
+ return 0
+ elif why.errno in _DISCONNECTED:
+ self.handle_close()
+ return 0
+ else:
+ raise
+
+ def recv(self, buffer_size):
+ try:
+ data = self.socket.recv(buffer_size)
+ if not data:
+ # a closed connection is indicated by signaling
+ # a read condition, and having recv() return 0.
+ self.handle_close()
+ return b''
+ else:
+ return data
+ except OSError as why:
+ # winsock sometimes raises ENOTCONN
+ if why.errno in _DISCONNECTED:
+ self.handle_close()
+ return b''
+ else:
+ raise
+
+ def close(self):
+ self.connected = False
+ self.accepting = False
+ self.connecting = False
+ self.del_channel()
+ if self.socket is not None:
+ try:
+ self.socket.close()
+ except OSError as why:
+ if why.errno not in (ENOTCONN, EBADF):
+ raise
+
+ # log and log_info may be overridden to provide more sophisticated
+ # logging and warning methods. In general, log is for 'hit' logging
+ # and 'log_info' is for informational, warning and error logging.
+
+ def log(self, message):
+ sys.stderr.write('log: %s\n' % str(message))
+
+ def log_info(self, message, type='info'):
+ if type not in self.ignore_log_types:
+ print('%s: %s' % (type, message))
+
+ def handle_read_event(self):
+ if self.accepting:
+ # accepting sockets are never connected, they "spawn" new
+ # sockets that are connected
+ self.handle_accept()
+ elif not self.connected:
+ if self.connecting:
+ self.handle_connect_event()
+ self.handle_read()
+ else:
+ self.handle_read()
+
+ def handle_connect_event(self):
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ raise OSError(err, _strerror(err))
+ self.handle_connect()
+ self.connected = True
+ self.connecting = False
+
+ def handle_write_event(self):
+ if self.accepting:
+ # Accepting sockets shouldn't get a write event.
+ # We will pretend it didn't happen.
+ return
+
+ if not self.connected:
+ if self.connecting:
+ self.handle_connect_event()
+ self.handle_write()
+
+ def handle_expt_event(self):
+ # handle_expt_event() is called if there might be an error on the
+ # socket, or if there is OOB data
+ # check for the error condition first
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ # we can get here when select.select() says that there is an
+ # exceptional condition on the socket
+ # since there is an error, we'll go ahead and close the socket
+ # like we would in a subclassed handle_read() that received no
+ # data
+ self.handle_close()
+ else:
+ self.handle_expt()
+
+ def handle_error(self):
+ nil, t, v, tbinfo = compact_traceback()
+
+ # sometimes a user repr method will crash.
+ try:
+ self_repr = repr(self)
+ except:
+ self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
+
+ self.log_info(
+ 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+ self_repr,
+ t,
+ v,
+ tbinfo
+ ),
+ 'error'
+ )
+ self.handle_close()
+
+ def handle_expt(self):
+ self.log_info('unhandled incoming priority event', 'warning')
+
+ def handle_read(self):
+ self.log_info('unhandled read event', 'warning')
+
+ def handle_write(self):
+ self.log_info('unhandled write event', 'warning')
+
+ def handle_connect(self):
+ self.log_info('unhandled connect event', 'warning')
+
+ def handle_accept(self):
+ pair = self.accept()
+ if pair is not None:
+ self.handle_accepted(*pair)
+
+ def handle_accepted(self, sock, addr):
+ sock.close()
+ self.log_info('unhandled accepted event', 'warning')
+
+ def handle_close(self):
+ self.log_info('unhandled close event', 'warning')
+ self.close()
+
+# ---------------------------------------------------------------------------
+# adds simple buffered output capability, useful for simple clients.
+# [for more sophisticated usage use asynchat.async_chat]
+# ---------------------------------------------------------------------------
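+
+# A minimal usage sketch (hypothetical subclass name; socket creation and
+# connect() as in the dispatcher example above): the inherited send() only
+# appends to out_buffer, which handle_write()/initiate_send() drain later.
+#
+#   class greeter(dispatcher_with_send):
+#       def handle_connect(self):
+#           self.send(b'hello\r\n')
+#       def handle_read(self):
+#           print(self.recv(1024))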
+
+class dispatcher_with_send(dispatcher):
+
+ def __init__(self, sock=None, map=None):
+ dispatcher.__init__(self, sock, map)
+ self.out_buffer = b''
+
+ def initiate_send(self):
+ num_sent = 0
+ num_sent = dispatcher.send(self, self.out_buffer[:65536])
+ self.out_buffer = self.out_buffer[num_sent:]
+
+ def handle_write(self):
+ self.initiate_send()
+
+ def writable(self):
+ return (not self.connected) or len(self.out_buffer)
+
+ def send(self, data):
+ if self.debug:
+ self.log_info('sending %s' % repr(data))
+ self.out_buffer = self.out_buffer + data
+ self.initiate_send()
+
+# ---------------------------------------------------------------------------
+# used for debugging.
+# ---------------------------------------------------------------------------
+
+def compact_traceback():
+ t, v, tb = sys.exc_info()
+ tbinfo = []
+ if not tb: # Must have a traceback
+ raise AssertionError("traceback does not exist")
+ while tb:
+ tbinfo.append((
+ tb.tb_frame.f_code.co_filename,
+ tb.tb_frame.f_code.co_name,
+ str(tb.tb_lineno)
+ ))
+ tb = tb.tb_next
+
+ # just to be safe
+ del tb
+
+ file, function, line = tbinfo[-1]
+ info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
+ return (file, function, line), t, v, info
+
+def close_all(map=None, ignore_all=False):
+ if map is None:
+ map = socket_map
+ for x in list(map.values()):
+ try:
+ x.close()
+ except OSError as x:
+ if x.errno == EBADF:
+ pass
+ elif not ignore_all:
+ raise
+ except _reraised_exceptions:
+ raise
+ except:
+ if not ignore_all:
+ raise
+ map.clear()
+
+# Asynchronous File I/O:
+#
+# After a little research (reading man pages on various unixen, and
+# digging through the linux kernel), I've determined that select()
+# isn't meant for doing asynchronous file i/o.
+# Heartening, though - reading linux/mm/filemap.c shows that linux
+# supports asynchronous read-ahead. So _MOST_ of the time, the data
+# will be sitting in memory for us already when we go to read it.
+#
+# What other OS's (besides NT) support async file i/o? [VMS?]
+#
+# Regardless, this is useful for pipes, and stdin/stdout...
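+#
+# A minimal usage sketch (hypothetical subclass name, for illustration only):
+# watch a pipe or stdin with file_dispatcher; file_wrapper dup()'s the fd, so
+# the caller keeps ownership of the original descriptor.
+#
+#   class stdin_watcher(file_dispatcher):
+#       def handle_read(self):
+#           data = self.recv(4096)
+#           if data:
+#               print('read %d bytes from stdin' % len(data))
+#
+#   # stdin_watcher(sys.stdin); loop()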
+
+if os.name == 'posix':
+ class file_wrapper:
+ # Here we override just enough to make a file
+ # look like a socket for the purposes of asyncore.
+ # The passed fd is automatically os.dup()'d
+
+ def __init__(self, fd):
+ self.fd = os.dup(fd)
+
+ def __del__(self):
+ if self.fd >= 0:
+ warnings.warn("unclosed file %r" % self, ResourceWarning,
+ source=self)
+ self.close()
+
+ def recv(self, *args):
+ return os.read(self.fd, *args)
+
+ def send(self, *args):
+ return os.write(self.fd, *args)
+
+ def getsockopt(self, level, optname, buflen=None):
+ if (level == socket.SOL_SOCKET and
+ optname == socket.SO_ERROR and
+ not buflen):
+ return 0
+ raise NotImplementedError("Only asyncore specific behaviour "
+ "implemented.")
+
+ read = recv
+ write = send
+
+ def close(self):
+ if self.fd < 0:
+ return
+ fd = self.fd
+ self.fd = -1
+ os.close(fd)
+
+ def fileno(self):
+ return self.fd
+
+ class file_dispatcher(dispatcher):
+
+ def __init__(self, fd, map=None):
+ dispatcher.__init__(self, None, map)
+ self.connected = True
+ try:
+ fd = fd.fileno()
+ except AttributeError:
+ pass
+ self.set_file(fd)
+ # set it to non-blocking mode
+ os.set_blocking(fd, False)
+
+ def set_file(self, fd):
+ self.socket = file_wrapper(fd)
+ self._fileno = self.socket.fileno()
+ self.add_channel()
diff --git a/fail2ban/helpers.py b/fail2ban/helpers.py
index 5c1750a6..fe62ae1e 100644
--- a/fail2ban/helpers.py
+++ b/fail2ban/helpers.py
@@ -31,6 +31,7 @@ import traceback
from threading import Lock
from .server.mytime import MyTime
+import importlib
try:
import ctypes
@@ -47,30 +48,6 @@ if PREFER_ENC.startswith('ANSI_'): # pragma: no cover
elif all((os.getenv(v) in (None, "") for v in ('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG'))):
PREFER_ENC = 'UTF-8';
-# py-2.x: try to minimize influence of sporadic conversion errors on python 2.x,
-# caused by implicit converting of string/unicode (e. g. `str(u"\uFFFD")` produces an error
-# if default encoding is 'ascii');
-if sys.version_info < (3,): # pragma: 3.x no cover
- # correct default (global system) encoding (mostly UTF-8):
- def __resetDefaultEncoding(encoding):
- global PREFER_ENC
- ode = sys.getdefaultencoding().upper()
- if ode == 'ASCII' and ode != PREFER_ENC.upper():
- # setdefaultencoding is normally deleted after site initialized, so hack-in using load of sys-module:
- _sys = sys
- if not hasattr(_sys, "setdefaultencoding"):
- try:
- from imp import load_dynamic as __ldm
- _sys = __ldm('_sys', 'sys')
- except ImportError: # pragma: no cover - only if load_dynamic fails
- reload(sys)
- _sys = sys
- if hasattr(_sys, "setdefaultencoding"):
- _sys.setdefaultencoding(encoding)
- # override to PREFER_ENC:
- __resetDefaultEncoding(PREFER_ENC)
- del __resetDefaultEncoding
-
# todo: rewrite explicit (and implicit) str-conversions via encode/decode with IO-encoding (sys.stdout.encoding),
# e. g. inside tags-replacement by command-actions, etc.
@@ -84,41 +61,24 @@ if sys.version_info < (3,): # pragma: 3.x no cover
# [True, True, False]; # -- python2
# [True, False, True]; # -- python3
#
-if sys.version_info >= (3,): # pragma: 2.x no cover
- def uni_decode(x, enc=PREFER_ENC, errors='strict'):
- try:
- if isinstance(x, bytes):
- return x.decode(enc, errors)
- return x
- except (UnicodeDecodeError, UnicodeEncodeError): # pragma: no cover - unsure if reachable
- if errors != 'strict':
- raise
- return x.decode(enc, 'replace')
- def uni_string(x):
- if not isinstance(x, bytes):
- return str(x)
- return x.decode(PREFER_ENC, 'replace')
-else: # pragma: 3.x no cover
- def uni_decode(x, enc=PREFER_ENC, errors='strict'):
- try:
- if isinstance(x, unicode):
- return x.encode(enc, errors)
- return x
- except (UnicodeDecodeError, UnicodeEncodeError): # pragma: no cover - unsure if reachable
- if errors != 'strict':
- raise
- return x.encode(enc, 'replace')
- if sys.getdefaultencoding().upper() != 'UTF-8': # pragma: no cover - utf-8 is default encoding now
- def uni_string(x):
- if not isinstance(x, unicode):
- return str(x)
- return x.encode(PREFER_ENC, 'replace')
- else:
- uni_string = str
-
+def uni_decode(x, enc=PREFER_ENC, errors='strict'):
+ try:
+ if isinstance(x, bytes):
+ return x.decode(enc, errors)
+ return x
+ except (UnicodeDecodeError, UnicodeEncodeError): # pragma: no cover - unsure if reachable
+ if errors != 'strict':
+ raise
+ return x.decode(enc, 'replace')
+def uni_string(x):
+ if not isinstance(x, bytes):
+ return str(x)
+ return x.decode(PREFER_ENC, 'replace')
+def uni_bytes(x):
+ return bytes(x, 'UTF-8')
def _as_bool(val):
- return bool(val) if not isinstance(val, basestring) \
+ return bool(val) if not isinstance(val, str) \
else val.lower() in ('1', 'on', 'true', 'yes')
@@ -223,11 +183,6 @@ def __stopOnIOError(logSys=None, logHndlr=None): # pragma: no cover
pass
sys.exit(0)
-try:
- BrokenPipeError = BrokenPipeError
-except NameError: # pragma: 3.x no cover
- BrokenPipeError = IOError
-
__origLog = logging.Logger._log
def __safeLog(self, level, msg, args, **kwargs):
"""Safe log inject to avoid possible errors by unsafe log-handlers,
@@ -327,38 +282,19 @@ def splitwords(s):
"""
if not s:
return []
- return filter(bool, map(lambda v: v.strip(), re.split('[ ,\n]+', s)))
+ return list(filter(bool, [v.strip() for v in re.split(r'[\s,]+', s)]))
-if sys.version_info >= (3,5):
- eval(compile(r'''if 1:
- def _merge_dicts(x, y):
- """Helper to merge dicts.
- """
- if y:
- return {**x, **y}
- return x
-
- def _merge_copy_dicts(x, y):
- """Helper to merge dicts to guarantee a copy result (r is never x).
- """
+def _merge_dicts(x, y):
+ """Helper to merge dicts.
+ """
+ if y:
return {**x, **y}
- ''', __file__, 'exec'))
-else:
- def _merge_dicts(x, y):
- """Helper to merge dicts.
- """
- r = x
- if y:
- r = x.copy()
- r.update(y)
- return r
- def _merge_copy_dicts(x, y):
- """Helper to merge dicts to guarantee a copy result (r is never x).
- """
- r = x.copy()
- if y:
- r.update(y)
- return r
+ return x
+
+def _merge_copy_dicts(x, y):
+ """Helper to merge dicts to guarantee a copy result (r is never x).
+ """
+ return {**x, **y}
#
# Following function used for parse options from parameter (e.g. `name[p1=0, p2="..."][p3='...']`).
@@ -444,7 +380,7 @@ def substituteRecursiveTags(inptags, conditional='',
while True:
repFlag = False
# substitute each value:
- for tag in tags.iterkeys():
+ for tag in tags.keys():
# ignore escaped or already done (or in ignore list):
if tag in ignore or tag in done: continue
# ignore replacing callable items from calling map - should be converted on demand only (by get):
@@ -484,7 +420,7 @@ def substituteRecursiveTags(inptags, conditional='',
m = tre_search(value, m.end())
continue
# if calling map - be sure we've string:
- if not isinstance(repl, basestring): repl = uni_string(repl)
+ if not isinstance(repl, str): repl = uni_string(repl)
value = value.replace('<%s>' % rtag, repl)
#logSys.log(5, 'value now: %s' % value)
# increment reference count:
@@ -517,10 +453,7 @@ if _libcap:
Side effect: name can be silently truncated to 15 bytes (16 bytes with NTS zero)
"""
try:
- if sys.version_info >= (3,): # pragma: 2.x no cover
- name = name.encode()
- else: # pragma: 3.x no cover
- name = bytes(name)
+ name = name.encode()
_libcap.prctl(15, name) # PR_SET_NAME = 15
except: # pragma: no cover
pass
diff --git a/fail2ban/protocol.py b/fail2ban/protocol.py
index a81c6657..077091f7 100644
--- a/fail2ban/protocol.py
+++ b/fail2ban/protocol.py
@@ -58,6 +58,8 @@ protocol = [
["banned", "return jails with banned IPs as dictionary"],
["banned ... ]", "return list(s) of jails where given IP(s) are banned"],
["status", "gets the current status of the server"],
+["status --all [FLAVOR]", "gets the current status of all jails, with optional flavor or extended info"],
+["stat[istic]s", "gets the current statistics of all jails as table"],
["ping", "tests if the server is alive"],
["echo", "for internal usage, returns back and outputs a given string"],
["help", "return this output"],
diff --git a/fail2ban/server/action.py b/fail2ban/server/action.py
index 16ff6621..760a73d7 100644
--- a/fail2ban/server/action.py
+++ b/fail2ban/server/action.py
@@ -114,9 +114,9 @@ class CallingMap(MutableMapping, object):
def _asdict(self, calculated=False, checker=None):
d = dict(self.data, **self.storage)
if not calculated:
- return dict((n,v) for n,v in d.iteritems() \
+ return dict((n,v) for n,v in d.items() \
if not callable(v) or n in self.CM_REPR_ITEMS)
- for n,v in d.items():
+ for n,v in list(d.items()):
if callable(v):
try:
# calculate:
@@ -182,7 +182,7 @@ class CallingMap(MutableMapping, object):
return self.__class__(_merge_copy_dicts(self.data, self.storage))
-class ActionBase(object):
+class ActionBase(object, metaclass=ABCMeta):
"""An abstract base class for actions in Fail2Ban.
Action Base is a base definition of what methods need to be in
@@ -212,7 +212,6 @@ class ActionBase(object):
Any additional arguments specified in `jail.conf` or passed
via `fail2ban-client` will be passed as keyword arguments.
"""
- __metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
@@ -423,7 +422,7 @@ class CommandAction(ActionBase):
if not callable(family): # pragma: no cover
return self.__substCache.get(key, {}).get(family)
# family as expression - use it to filter values:
- return [v for f, v in self.__substCache.get(key, {}).iteritems() if family(f)]
+ return [v for f, v in self.__substCache.get(key, {}).items() if family(f)]
cmd = args[0]
if cmd: # set:
try:
@@ -435,7 +434,7 @@ class CommandAction(ActionBase):
try:
famd = self.__substCache[key]
cmd = famd.pop(family)
- for family, v in famd.items():
+ for family, v in list(famd.items()):
if v == cmd:
del famd[family]
except KeyError: # pragma: no cover
@@ -451,7 +450,7 @@ class CommandAction(ActionBase):
res = True
err = 'Script error'
if not family: # all started:
- family = [famoper for (famoper,v) in self.__started.iteritems() if v]
+ family = [famoper for (famoper,v) in self.__started.items() if v]
for famoper in family:
try:
cmd = self._getOperation(tag, famoper)
@@ -631,7 +630,7 @@ class CommandAction(ActionBase):
and executes the resulting command.
"""
# collect started families, may be started on demand (conditional):
- family = [f for (f,v) in self.__started.iteritems() if v & 3 == 3]; # started and contains items
+ family = [f for (f,v) in self.__started.items() if v & 3 == 3]; # started and contains items
# if nothing contains items:
if not family: return True
# flush:
@@ -656,7 +655,7 @@ class CommandAction(ActionBase):
"""
# collect started families, if started on demand (conditional):
if family is None:
- family = [f for (f,v) in self.__started.iteritems() if v]
+ family = [f for (f,v) in self.__started.items() if v]
# if no started (on demand) actions:
if not family: return True
self.__started = {}
@@ -690,7 +689,7 @@ class CommandAction(ActionBase):
ret = True
# for each started family:
if self.actioncheck:
- for (family, started) in self.__started.items():
+ for (family, started) in list(self.__started.items()):
if started and not self._invariantCheck(family, beforeRepair):
# reset started flag and command of executed operation:
self.__started[family] = 0
diff --git a/fail2ban/server/actions.py b/fail2ban/server/actions.py
index fa045ab5..26e80107 100644
--- a/fail2ban/server/actions.py
+++ b/fail2ban/server/actions.py
@@ -156,11 +156,11 @@ class Actions(JailThread, Mapping):
else:
if hasattr(self, '_reload_actions'):
# reload actions after all parameters set via stream:
- for name, initOpts in self._reload_actions.iteritems():
+ for name, initOpts in self._reload_actions.items():
if name in self._actions:
self._actions[name].reload(**(initOpts if initOpts else {}))
# remove obsolete actions (untouched by reload process):
- delacts = OrderedDict((name, action) for name, action in self._actions.iteritems()
+ delacts = OrderedDict((name, action) for name, action in self._actions.items()
if name not in self._reload_actions)
if len(delacts):
# unban all tickets using removed actions only:
@@ -217,7 +217,7 @@ class Actions(JailThread, Mapping):
return lst
if len(ids) == 1:
return 1 if ids[0] in lst else 0
- return map(lambda ip: 1 if ip in lst else 0, ids)
+ return [1 if ip in lst else 0 for ip in ids]
def getBanList(self, withTime=False):
"""Returns the list of banned IP addresses.
@@ -288,7 +288,7 @@ class Actions(JailThread, Mapping):
if not isinstance(ip, IPAddr):
ipa = IPAddr(ip)
if not ipa.isSingle: # subnet (mask/cidr) or raw (may be dns/hostname):
- ips = filter(ipa.contains, self.banManager.getBanList())
+ ips = list(filter(ipa.contains, self.banManager.getBanList()))
if ips:
return self.removeBannedIP(ips, db, ifexists)
# not found:
@@ -305,7 +305,7 @@ class Actions(JailThread, Mapping):
"""
if actions is None:
actions = self._actions
- for name, action in reversed(actions.items()):
+ for name, action in reversed(list(actions.items())):
try:
action.stop()
except Exception as e:
@@ -328,7 +328,7 @@ class Actions(JailThread, Mapping):
True when the thread exits nicely.
"""
cnt = 0
- for name, action in self._actions.iteritems():
+ for name, action in self._actions.items():
try:
action.start()
except Exception as e:
@@ -505,7 +505,7 @@ class Actions(JailThread, Mapping):
Observers.Main.add('banFound', bTicket, self._jail, btime)
logSys.notice("[%s] %sBan %s", self._jail.name, ('' if not bTicket.restored else 'Restore '), ip)
# do actions :
- for name, action in self._actions.iteritems():
+ for name, action in self._actions.items():
try:
if bTicket.restored and getattr(action, 'norestored', False):
continue
@@ -543,13 +543,13 @@ class Actions(JailThread, Mapping):
# avoid too often checks:
if not rebanacts and MyTime.time() > self.__lastConsistencyCheckTM + 3:
self.__lastConsistencyCheckTM = MyTime.time()
- for action in self._actions.itervalues():
+ for action in self._actions.values():
if hasattr(action, 'consistencyCheck'):
action.consistencyCheck()
# check epoch in order to reban it:
if bTicket.banEpoch < self.banEpoch:
if not rebanacts: rebanacts = dict(
- (name, action) for name, action in self._actions.iteritems()
+ (name, action) for name, action in self._actions.items()
if action.banEpoch > bTicket.banEpoch)
cnt += self.__reBan(bTicket, actions=rebanacts)
else: # pragma: no cover - unexpected: ticket is not banned for some reasons - reban using all actions:
@@ -576,8 +576,8 @@ class Actions(JailThread, Mapping):
ip = ticket.getID()
aInfo = self._getActionInfo(ticket)
if log:
- logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % actions.keys()[0] if len(actions) == 1 else ''))
- for name, action in actions.iteritems():
+ logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % list(actions.keys())[0] if len(actions) == 1 else ''))
+ for name, action in actions.items():
try:
logSys.debug("[%s] action %r: reban %s", self._jail.name, name, ip)
if not aInfo.immutable: aInfo.reset()
@@ -601,7 +601,7 @@ class Actions(JailThread, Mapping):
if not self.banManager._inBanList(ticket): return
# do actions :
aInfo = None
- for name, action in self._actions.iteritems():
+ for name, action in self._actions.items():
try:
if ticket.restored and getattr(action, 'norestored', False):
continue
@@ -650,7 +650,7 @@ class Actions(JailThread, Mapping):
cnt = 0
# first we'll execute flush for actions supporting this operation:
unbactions = {}
- for name, action in (actions if actions is not None else self._actions).iteritems():
+ for name, action in (actions if actions is not None else self._actions).items():
try:
if hasattr(action, 'flush') and (not isinstance(action, CommandAction) or action.actionflush):
logSys.notice("[%s] Flush ticket(s) with %s", self._jail.name, name)
@@ -670,7 +670,7 @@ class Actions(JailThread, Mapping):
action.consistencyCheck(_beforeRepair)
continue
# fallback to single unbans:
- logSys.debug(" Unban tickets each individualy")
+ logSys.debug(" Unban tickets each individually")
unbactions[name] = action
actions = unbactions
# flush the database also:
@@ -705,7 +705,7 @@ class Actions(JailThread, Mapping):
aInfo = self._getActionInfo(ticket)
if log:
logSys.notice("[%s] Unban %s", self._jail.name, ip)
- for name, action in unbactions.iteritems():
+ for name, action in unbactions.items():
try:
logSys.debug("[%s] action %r: unban %s", self._jail.name, name, ip)
if not aInfo.immutable: aInfo.reset()
@@ -721,9 +721,11 @@ class Actions(JailThread, Mapping):
"""Status of current and total ban counts and current banned IP list.
"""
# TODO: Allow this list to be printed as 'status' output
- supported_flavors = ["short", "basic", "cymru"]
+ supported_flavors = ["short", "basic", "stats", "cymru"]
if flavor is None or flavor not in supported_flavors:
logSys.warning("Unsupported extended jail status flavor %r. Supported: %s" % (flavor, supported_flavors))
+ if flavor == "stats":
+ return (self.banManager.size(), self.banManager.getBanTotal())
# Always print this information (basic)
if flavor != "short":
banned = self.banManager.getBanList()
diff --git a/fail2ban/server/asyncserver.py b/fail2ban/server/asyncserver.py
index e3400737..0c36d846 100644
--- a/fail2ban/server/asyncserver.py
+++ b/fail2ban/server/asyncserver.py
@@ -25,8 +25,14 @@ __copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
from pickle import dumps, loads, HIGHEST_PROTOCOL
-import asynchat
-import asyncore
+try:
+ import asynchat
+except ImportError:
+ from ..compat import asynchat
+try:
+ import asyncore
+except ImportError:
+ from ..compat import asyncore
import errno
import fcntl
import os
@@ -178,7 +184,7 @@ def loop(active, timeout=None, use_poll=False, err_count=None):
elif err_count['listen'] > 100: # pragma: no cover - normally unreachable
if (
e.args[0] == errno.EMFILE # [Errno 24] Too many open files
- or sum(err_count.itervalues()) > 1000
+ or sum(err_count.values()) > 1000
):
logSys.critical("Too many errors - critical count reached %r", err_count)
break
@@ -220,7 +226,7 @@ class AsyncServer(asyncore.dispatcher):
elif self.__errCount['accept'] > 100:
if (
(isinstance(e, socket.error) and e.args[0] == errno.EMFILE) # [Errno 24] Too many open files
- or sum(self.__errCount.itervalues()) > 1000
+ or sum(self.__errCount.values()) > 1000
):
logSys.critical("Too many errors - critical count reached %r", self.__errCount)
self.stop()
diff --git a/fail2ban/server/banmanager.py b/fail2ban/server/banmanager.py
index 9168d5b8..d3e89820 100644
--- a/fail2ban/server/banmanager.py
+++ b/fail2ban/server/banmanager.py
@@ -103,7 +103,7 @@ class BanManager:
return list(self.__banList.keys())
with self.__lock:
lst = []
- for ticket in self.__banList.itervalues():
+ for ticket in self.__banList.values():
eob = ticket.getEndOfBanTime(self.__banTime)
lst.append((ticket,eob))
lst.sort(key=lambda t: t[1])
@@ -161,7 +161,7 @@ class BanManager:
return return_dict
# get ips in lock:
with self.__lock:
- banIPs = [banData.getIP() for banData in self.__banList.values()]
+ banIPs = [banData.getIP() for banData in list(self.__banList.values())]
# get cymru info:
try:
for ip in banIPs:
@@ -333,7 +333,7 @@ class BanManager:
# Gets the list of ticket to remove (thereby correct next unban time).
unBanList = {}
nextUnbanTime = BanTicket.MAX_TIME
- for fid,ticket in self.__banList.iteritems():
+ for fid,ticket in self.__banList.items():
# current time greater as end of ban - timed out:
eob = ticket.getEndOfBanTime(self.__banTime)
if time > eob:
@@ -349,15 +349,15 @@ class BanManager:
if len(unBanList):
if len(unBanList) / 2.0 <= len(self.__banList) / 3.0:
# few as 2/3 should be removed - remove particular items:
- for fid in unBanList.iterkeys():
+ for fid in unBanList.keys():
del self.__banList[fid]
else:
# create new dictionary without items to be deleted:
- self.__banList = dict((fid,ticket) for fid,ticket in self.__banList.iteritems() \
+ self.__banList = dict((fid,ticket) for fid,ticket in self.__banList.items() \
if fid not in unBanList)
# return list of tickets:
- return unBanList.values()
+ return list(unBanList.values())
##
# Flush the ban list.
@@ -367,7 +367,7 @@ class BanManager:
def flushBanList(self):
with self.__lock:
- uBList = self.__banList.values()
+ uBList = list(self.__banList.values())
self.__banList = dict()
return uBList
diff --git a/fail2ban/server/database.py b/fail2ban/server/database.py
index 877cbb93..294164c3 100644
--- a/fail2ban/server/database.py
+++ b/fail2ban/server/database.py
@@ -45,55 +45,24 @@ def _json_default(x):
x = list(x)
return uni_string(x)
-if sys.version_info >= (3,): # pragma: 2.x no cover
- def _json_dumps_safe(x):
- try:
- x = json.dumps(x, ensure_ascii=False, default=_json_default).encode(
- PREFER_ENC, 'replace')
- except Exception as e:
- # adapter handler should be exception-safe
- logSys.error('json dumps failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
- x = '{}'
- return x
+def _json_dumps_safe(x):
+ try:
+ x = json.dumps(x, ensure_ascii=False, default=_json_default).encode(
+ PREFER_ENC, 'replace')
+ except Exception as e:
+ # adapter handler should be exception-safe
+ logSys.error('json dumps failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
+ x = '{}'
+ return x
- def _json_loads_safe(x):
- try:
- x = json.loads(x.decode(PREFER_ENC, 'replace'))
- except Exception as e:
- # converter handler should be exception-safe
- logSys.error('json loads failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
- x = {}
- return x
-else: # pragma: 3.x no cover
- def _normalize(x):
- if isinstance(x, dict):
- return dict((_normalize(k), _normalize(v)) for k, v in x.iteritems())
- elif isinstance(x, (list, set)):
- return [_normalize(element) for element in x]
- elif isinstance(x, unicode):
- # in 2.x default text_factory is unicode - so return proper unicode here:
- return x.encode(PREFER_ENC, 'replace').decode(PREFER_ENC)
- elif isinstance(x, basestring):
- return x.decode(PREFER_ENC, 'replace')
- return x
-
- def _json_dumps_safe(x):
- try:
- x = json.dumps(_normalize(x), ensure_ascii=False, default=_json_default)
- except Exception as e:
- # adapter handler should be exception-safe
- logSys.error('json dumps failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
- x = '{}'
- return x
-
- def _json_loads_safe(x):
- try:
- x = json.loads(x.decode(PREFER_ENC, 'replace'))
- except Exception as e:
- # converter handler should be exception-safe
- logSys.error('json loads failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
- x = {}
- return x
+def _json_loads_safe(x):
+ try:
+ x = json.loads(x.decode(PREFER_ENC, 'replace'))
+ except Exception as e:
+ # converter handler should be exception-safe
+ logSys.error('json loads failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
+ x = {}
+ return x
sqlite3.register_adapter(dict, _json_dumps_safe)
sqlite3.register_converter("JSON", _json_loads_safe)
@@ -135,7 +104,7 @@ class Fail2BanDb(object):
sqlite3.OperationalError
Error connecting/creating a SQLite3 database.
RuntimeError
- If exisiting database fails to update to new schema.
+ If existing database fails to update to new schema.
Attributes
----------
@@ -525,7 +494,7 @@ class Fail2BanDb(object):
Parameters
----------
jail : Jail
- If specified, will only reutrn logs belonging to the jail.
+ If specified, will only return logs belonging to the jail.
Returns
-------
diff --git a/fail2ban/server/datetemplate.py b/fail2ban/server/datetemplate.py
index e02772d8..5dc721ae 100644
--- a/fail2ban/server/datetemplate.py
+++ b/fail2ban/server/datetemplate.py
@@ -227,8 +227,10 @@ class DateEpoch(DateTemplate):
self.name = "LongEpoch" if not pattern else pattern
epochRE = r"\d{10,11}(?:\d{3}(?:\.\d{1,6}|\d{3})?)?"
if pattern:
- # pattern should capture/cut out the whole match:
- regex = "(" + RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern) + ")"
+			# regex should match the whole pattern, but capture only the grouped part (or the whole match if no group is specified):
+ regex = RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern)
+ if not RE_GROUPED.search(pattern):
+ regex = "(" + regex + ")"
self._grpIdx = 2
self.setRegex(regex)
elif not lineBeginOnly:
@@ -355,7 +357,7 @@ class DatePatternRegex(DateTemplate):
class DateTai64n(DateTemplate):
- """A date template which matches TAI64N formate timestamps.
+ """A date template which matches TAI64N format timestamps.
Attributes
----------
diff --git a/fail2ban/server/failmanager.py b/fail2ban/server/failmanager.py
index 3c71d51a..2effc450 100644
--- a/fail2ban/server/failmanager.py
+++ b/fail2ban/server/failmanager.py
@@ -55,7 +55,7 @@ class FailManager:
def getFailCount(self):
# may be slow on large list of failures, should be used for test purposes only...
with self.__lock:
- return len(self.__failList), sum([f.getRetry() for f in self.__failList.values()])
+ return len(self.__failList), sum([f.getRetry() for f in list(self.__failList.values())])
def setMaxRetry(self, value):
self.__maxRetry = value
@@ -116,7 +116,7 @@ class FailManager:
# in case of having many active failures, it should be ran only
# if debug level is "low" enough
failures_summary = ', '.join(['%s:%d' % (k, v.getRetry())
- for k,v in self.__failList.iteritems()])
+ for k,v in self.__failList.items()])
logSys.log(logLevel, "Total # of detected failures: %d. Current failures from %d IPs (IP:count): %s"
% (self.__failTotal, len(self.__failList), failures_summary))
@@ -129,7 +129,7 @@ class FailManager:
def cleanup(self, time):
time -= self.__maxTime
with self.__lock:
- todelete = [fid for fid,item in self.__failList.iteritems() \
+ todelete = [fid for fid,item in self.__failList.items() \
if item.getTime() <= time]
if len(todelete) == len(self.__failList):
# remove all:
@@ -143,7 +143,7 @@ class FailManager:
del self.__failList[fid]
else:
# create new dictionary without items to be deleted:
- self.__failList = dict((fid,item) for fid,item in self.__failList.iteritems() \
+ self.__failList = dict((fid,item) for fid,item in self.__failList.items() \
if item.getTime() > time)
self.__bgSvc.service()
diff --git a/fail2ban/server/failregex.py b/fail2ban/server/failregex.py
index a9b144af..85636f36 100644
--- a/fail2ban/server/failregex.py
+++ b/fail2ban/server/failregex.py
@@ -22,7 +22,6 @@ __copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import re
-import sre_constants
import sys
from .ipdns import IPAddr
@@ -143,9 +142,7 @@ class Regex:
self._regex = regex
self._altValues = []
self._tupleValues = []
- for k in filter(
- lambda k: len(k) > len(COMPLNAME_PRE[0]), self._regexObj.groupindex
- ):
+ for k in [k for k in self._regexObj.groupindex if len(k) > len(COMPLNAME_PRE[0])]:
n = COMPLNAME_CRE.match(k)
if n:
g, n = n.group(1), mapTag2Opt(n.group(2))
@@ -157,7 +154,7 @@ class Regex:
self._tupleValues.sort()
self._altValues = self._altValues if len(self._altValues) else None
self._tupleValues = self._tupleValues if len(self._tupleValues) else None
- except sre_constants.error as e:
+ except re.error as e:
raise RegexException("Unable to compile regular expression '%s':\n%s" %
(regex, e))
# set fetch handler depending on presence of alternate (or tuple) tags:
@@ -235,7 +232,7 @@ class Regex:
#
@staticmethod
def _tupleLinesBuf(tupleLines):
- return "\n".join(map(lambda v: "".join(v[::2]), tupleLines)) + "\n"
+ return "\n".join(["".join(v[::2]) for v in tupleLines]) + "\n"
##
# Searches the regular expression.
@@ -243,11 +240,11 @@ class Regex:
# Sets an internal cache (match object) in order to avoid searching for
# the pattern again. This method must be called before calling any other
# method of this object.
- # @param a list of tupples. The tupples are ( prematch, datematch, postdatematch )
+ # @param a list of tuples. The tuples are ( prematch, datematch, postdatematch )
def search(self, tupleLines, orgLines=None):
buf = tupleLines
- if not isinstance(tupleLines, basestring):
+ if not isinstance(tupleLines, str):
buf = Regex._tupleLinesBuf(tupleLines)
self._matchCache = self._regexObj.search(buf)
if self._matchCache:
diff --git a/fail2ban/server/filter.py b/fail2ban/server/filter.py
index 68968284..f8b36cf6 100644
--- a/fail2ban/server/filter.py
+++ b/fail2ban/server/filter.py
@@ -307,7 +307,7 @@ class Filter(JailThread):
dd = DateDetector()
dd.default_tz = self.__logtimezone
if not isinstance(pattern, (list, tuple)):
- pattern = filter(bool, map(str.strip, re.split('\n+', pattern)))
+ pattern = list(filter(bool, list(map(str.strip, re.split('\n+', pattern)))))
for pattern in pattern:
dd.appendTemplate(pattern)
self.dateDetector = dd
@@ -635,7 +635,7 @@ class Filter(JailThread):
e = m.end(1)
m = line[s:e]
tupleLine = (line[:s], m, line[e:])
- if m: # found and not empty - retrive date:
+ if m: # found and not empty - retrieve date:
date = self.dateDetector.getTime(m, timeMatch)
if date is not None:
# Lets get the time part
@@ -666,7 +666,7 @@ class Filter(JailThread):
if self.checkFindTime and date is not None:
# if in operation (modifications have been really found):
if self.inOperation:
- # if weird date - we'd simulate now for timeing issue (too large deviation from now):
+ # if weird date - we'd simulate now for timing issue (too large deviation from now):
delta = int(date - MyTime.time())
if abs(delta) > 60:
# log timing issue as warning once per day:
@@ -800,7 +800,7 @@ class Filter(JailThread):
if (nfflgs & 4) == 0 and not mlfidGroups.get('mlfpending', 0):
mlfidGroups.pop("matches", None)
# overwrite multi-line failure with all values, available in fail:
- mlfidGroups.update(((k,v) for k,v in fail.iteritems() if v is not None))
+ mlfidGroups.update(((k,v) for k,v in fail.items() if v is not None))
# new merged failure data:
fail = mlfidGroups
# if forget (disconnect/reset) - remove cached entry:
@@ -944,7 +944,7 @@ class Filter(JailThread):
ip = fid
raw = True
# if mlfid case (not failure):
- if ip is None:
+ if fid is None and ip is None:
if ll <= 7: logSys.log(7, "No failure-id by mlfid %r in regex %s: %s",
mlfid, failRegexIndex, fail.get('mlfforget', "waiting for identifier"))
fail['mlfpending'] = 1; # mark failure is pending
@@ -978,6 +978,8 @@ class Filter(JailThread):
def status(self, flavor="basic"):
"""Status of failures detected by filter.
"""
+ if flavor == "stats":
+ return (self.failManager.size(), self.failManager.getFailTotal())
ret = [("Currently failed", self.failManager.size()),
("Total failed", self.failManager.getFailTotal())]
return ret
@@ -1045,7 +1047,7 @@ class FileFilter(Filter):
# @return log paths
def getLogPaths(self):
- return self.__logs.keys()
+ return list(self.__logs.keys())
##
# Get the log containers
@@ -1053,7 +1055,7 @@ class FileFilter(Filter):
# @return log containers
def getLogs(self):
- return self.__logs.values()
+ return list(self.__logs.values())
##
# Get the count of log containers
@@ -1079,7 +1081,7 @@ class FileFilter(Filter):
def setLogEncoding(self, encoding):
encoding = super(FileFilter, self).setLogEncoding(encoding)
- for log in self.__logs.itervalues():
+ for log in self.__logs.values():
log.setEncoding(encoding)
def getLog(self, path):
@@ -1255,7 +1257,9 @@ class FileFilter(Filter):
"""Status of Filter plus files being monitored.
"""
ret = super(FileFilter, self).status(flavor=flavor)
- path = self.__logs.keys()
+ if flavor == "stats":
+ return ret
+ path = list(self.__logs.keys())
ret.append(("File list", path))
return ret
@@ -1277,7 +1281,7 @@ class FileFilter(Filter):
if self._pendDBUpdates and self.jail.database:
self._updateDBPending()
# stop files monitoring:
- for path in self.__logs.keys():
+ for path in list(self.__logs.keys()):
self.delLogPath(path)
def stop(self):
@@ -1530,7 +1534,7 @@ class FileContainer:
def __iter__(self):
return self
- def next(self):
+ def __next__(self):
line = self.readline()
if line is None:
self.close()
diff --git a/fail2ban/server/filtergamin.py b/fail2ban/server/filtergamin.py
deleted file mode 100644
index c5373445..00000000
--- a/fail2ban/server/filtergamin.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
-# vi: set ft=python sts=4 ts=4 sw=4 noet :
-
-# This file is part of Fail2Ban.
-#
-# Fail2Ban is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# Fail2Ban is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Fail2Ban; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-
-# Author: Cyril Jaquier, Yaroslav Halchenko
-
-__author__ = "Cyril Jaquier, Yaroslav Halchenko"
-__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2012 Yaroslav Halchenko"
-__license__ = "GPL"
-
-import fcntl
-import time
-
-import gamin
-
-from .failmanager import FailManagerEmpty
-from .filter import FileFilter
-from .mytime import MyTime
-from .utils import Utils
-from ..helpers import getLogger
-
-# Gets the instance of the logger.
-logSys = getLogger(__name__)
-
-
-##
-# Log reader class.
-#
-# This class reads a log file and detects login failures or anything else
-# that matches a given regular expression. This class is instanciated by
-# a Jail object.
-
-class FilterGamin(FileFilter):
-
- ##
- # Constructor.
- #
- # Initialize the filter object with default values.
- # @param jail the jail object
-
- def __init__(self, jail):
- FileFilter.__init__(self, jail)
- # Gamin monitor
- self.monitor = gamin.WatchMonitor()
- fd = self.monitor.get_fd()
- flags = fcntl.fcntl(fd, fcntl.F_GETFD)
- fcntl.fcntl(fd, fcntl.F_SETFD, flags|fcntl.FD_CLOEXEC)
- logSys.debug("Created FilterGamin")
-
- def callback(self, path, event):
- logSys.log(4, "Got event: " + repr(event) + " for " + path)
- if event in (gamin.GAMCreated, gamin.GAMChanged, gamin.GAMExists):
- logSys.debug("File changed: " + path)
-
- self.ticks += 1
- self.getFailures(path)
-
- ##
- # Add a log file path
- #
- # @param path log file path
-
- def _addLogPath(self, path):
- self.monitor.watch_file(path, self.callback)
-
- ##
- # Delete a log path
- #
- # @param path the log file to delete
-
- def _delLogPath(self, path):
- self.monitor.stop_watch(path)
-
- def _handleEvents(self):
- ret = False
- mon = self.monitor
- while mon and mon.event_pending() > 0:
- mon.handle_events()
- mon = self.monitor
- ret = True
- return ret
-
- ##
- # Main loop.
- #
- # This function is the main loop of the thread. It checks if the
- # file has been modified and looks for failures.
- # @return True when the thread exits nicely
-
- def run(self):
- # Gamin needs a loop to collect and dispatch events
- while self.active:
- if self.idle:
- # wait a little bit here for not idle, to prevent hi-load:
- if not Utils.wait_for(lambda: not self.active or not self.idle,
- self.sleeptime * 10, self.sleeptime
- ):
- self.ticks += 1
- continue
- Utils.wait_for(lambda: not self.active or self._handleEvents(),
- self.sleeptime)
- self.ticks += 1
- if self.ticks % 10 == 0:
- self.performSvc()
-
- logSys.debug("[%s] filter terminated", self.jailName)
- return True
-
- def stop(self):
- super(FilterGamin, self).stop()
- self.__cleanup()
-
- ##
- # Desallocates the resources used by Gamin.
-
- def __cleanup(self):
- if not self.monitor:
- return
- for filename in self.getLogPaths():
- self.monitor.stop_watch(filename)
- self.monitor = None
diff --git a/fail2ban/server/filterpoll.py b/fail2ban/server/filterpoll.py
index 196955e5..8a289cea 100644
--- a/fail2ban/server/filterpoll.py
+++ b/fail2ban/server/filterpoll.py
@@ -173,4 +173,4 @@ class FilterPoll(FileFilter):
return False
def getPendingPaths(self):
- return self.__file404Cnt.keys()
+ return list(self.__file404Cnt.keys())
diff --git a/fail2ban/server/filterpyinotify.py b/fail2ban/server/filterpyinotify.py
index 16b6cfd5..81bc7de3 100644
--- a/fail2ban/server/filterpyinotify.py
+++ b/fail2ban/server/filterpyinotify.py
@@ -155,7 +155,7 @@ class FilterPyinotify(FileFilter):
except KeyError: pass
def getPendingPaths(self):
- return self.__pending.keys()
+ return list(self.__pending.keys())
def _checkPending(self):
if not self.__pending:
@@ -173,7 +173,9 @@ class FilterPyinotify(FileFilter):
if not chkpath(path): # not found - prolong for next time
if retardTM < 60: retardTM *= 2
if minTime > retardTM: minTime = retardTM
- self.__pending[path][0] = retardTM
+ try:
+ self.__pending[path][0] = retardTM
+ except KeyError: pass
continue
logSys.log(logging.MSG, "Log presence detected for %s %s",
"directory" if isDir else "file", path)
@@ -181,7 +183,7 @@ class FilterPyinotify(FileFilter):
self.__pendingChkTime = time.time()
self.__pendingMinTime = minTime
# process now because we've missed it in monitoring:
- for path, isDir in found.iteritems():
+ for path, isDir in found.items():
self._delPending(path)
# refresh monitoring of this:
if isDir is not None:
diff --git a/fail2ban/server/filtersystemd.py b/fail2ban/server/filtersystemd.py
index a83b7a13..5aea9fda 100644
--- a/fail2ban/server/filtersystemd.py
+++ b/fail2ban/server/filtersystemd.py
@@ -253,7 +253,7 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
return ((logline[:0], date[0] + ' ', logline.replace('\n', '\\n')), date[1])
def seekToTime(self, date):
- if isinstance(date, (int, long)):
+ if isinstance(date, int):
date = float(date)
self.__journal.seek_realtime(date)
@@ -344,7 +344,7 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
except OSError:
pass
if self.idle:
- # because journal.wait will returns immediatelly if we have records in journal,
+ # because journal.wait returns immediately if we have records in the journal,
# just wait a little bit here for not idle, to prevent hi-load:
if not Utils.wait_for(lambda: not self.active or not self.idle,
self.sleeptime * 10, self.sleeptime
@@ -429,12 +429,14 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
def status(self, flavor="basic"):
ret = super(FilterSystemd, self).status(flavor=flavor)
+ if flavor == "stats":
+ return ret
ret.append(("Journal matches",
[" + ".join(" ".join(match) for match in self.__matches)]))
return ret
def _updateDBPending(self):
- """Apply pending updates (jornal position) to database.
+ """Apply pending updates (journal position) to database.
"""
db = self.jail.database
while True:
diff --git a/fail2ban/server/ipdns.py b/fail2ban/server/ipdns.py
index d917d031..7ca1e432 100644
--- a/fail2ban/server/ipdns.py
+++ b/fail2ban/server/ipdns.py
@@ -92,14 +92,14 @@ class DNSUtils:
# retrieve ips
ips = set()
saveerr = None
- for fam, ipfam in ((socket.AF_INET, IPAddr.FAM_IPv4), (socket.AF_INET6, IPAddr.FAM_IPv6)):
+ for fam in ((socket.AF_INET,socket.AF_INET6) if DNSUtils.IPv6IsAllowed() else (socket.AF_INET,)):
try:
for result in socket.getaddrinfo(dns, None, fam, 0, socket.IPPROTO_TCP):
# if getaddrinfo returns something unexpected:
if len(result) < 4 or not len(result[4]): continue
# get ip from `(2, 1, 6, '', ('127.0.0.1', 0))`,be sure we've an ip-string
# (some python-versions resp. host configurations causes returning of integer there):
- ip = IPAddr(str(result[4][0]), ipfam)
+ ip = IPAddr(str(result[4][0]), IPAddr._AF2FAM(fam))
if ip.isValid:
ips.add(ip)
except Exception as e:
@@ -154,17 +154,18 @@ class DNSUtils:
# try find cached own hostnames (this tuple-key cannot be used elsewhere):
key = ('self','hostname', fqdn)
name = DNSUtils.CACHE_ipToName.get(key)
+ if name is not None:
+ return name
# get it using different ways (hostname, fully-qualified or vice versa):
- if name is None:
- name = ''
- for hostname in (
- (getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
- ):
- try:
- name = hostname()
- break
- except Exception as e: # pragma: no cover
- logSys.warning("Retrieving own hostnames failed: %s", e)
+ name = ''
+ for hostname in (
+ (getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
+ ):
+ try:
+ name = hostname()
+ break
+ except Exception as e: # pragma: no cover
+ logSys.warning("Retrieving own hostnames failed: %s", e)
# cache and return :
DNSUtils.CACHE_ipToName.set(key, name)
return name
@@ -177,15 +178,35 @@ class DNSUtils:
"""Get own host names of self"""
# try find cached own hostnames:
names = DNSUtils.CACHE_ipToName.get(DNSUtils._getSelfNames_key)
+ if names is not None:
+ return names
# get it using different ways (a set with names of localhost, hostname, fully qualified):
- if names is None:
- names = set([
- 'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
- ]) - set(['']) # getHostname can return ''
+ names = set([
+ 'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
+ ]) - set(['']) # getHostname can return ''
# cache and return :
DNSUtils.CACHE_ipToName.set(DNSUtils._getSelfNames_key, names)
return names
+ # key to find cached network interfaces IPs (this tuple-key cannot be used elsewhere):
+ _getNetIntrfIPs_key = ('netintrf','ips')
+
+ @staticmethod
+ def getNetIntrfIPs():
+ """Get own IP addresses of self"""
+ # to find cached own IPs:
+ ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getNetIntrfIPs_key)
+ if ips is not None:
+ return ips
+ # try to obtain from network interfaces if possible (implemented for this platform):
+ try:
+ ips = IPAddrSet([a for ni, a in DNSUtils._NetworkInterfacesAddrs()])
+ except:
+ ips = IPAddrSet()
+ # cache and return :
+ DNSUtils.CACHE_nameToIp.set(DNSUtils._getNetIntrfIPs_key, ips)
+ return ips
+
# key to find cached own IPs (this tuple-key cannot be used elsewhere):
_getSelfIPs_key = ('self','ips')
@@ -194,20 +215,54 @@ class DNSUtils:
"""Get own IP addresses of self"""
# to find cached own IPs:
ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getSelfIPs_key)
- # get it using different ways (a set with IPs of localhost, hostname, fully qualified):
- if ips is None:
- ips = set()
- for hostname in DNSUtils.getSelfNames():
- try:
- ips |= set(DNSUtils.textToIp(hostname, 'yes'))
- except Exception as e: # pragma: no cover
- logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
+ if ips is not None:
+ return ips
+ # first try to obtain them from the network interfaces if possible (implemented for this platform):
+ ips = IPAddrSet(DNSUtils.getNetIntrfIPs())
+ # extend it using different ways (a set with IPs of localhost, hostname, fully qualified):
+ for hostname in DNSUtils.getSelfNames():
+ try:
+ ips |= IPAddrSet(DNSUtils.dnsToIp(hostname))
+ except Exception as e: # pragma: no cover
+ logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
# cache and return :
DNSUtils.CACHE_nameToIp.set(DNSUtils._getSelfIPs_key, ips)
return ips
_IPv6IsAllowed = None
+ @staticmethod
+ def _IPv6IsSupportedBySystem():
+ if not socket.has_ipv6:
+ return False
+ # try to check sysctl net.ipv6.conf.all.disable_ipv6:
+ try:
+ with open('/proc/sys/net/ipv6/conf/all/disable_ipv6', 'rb') as f:
+ # if 1 - disabled, 0 - enabled
+ return not int(f.read())
+ except:
+ pass
+ s = None
+ try:
+ # try to create INET6 socket:
+ s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ # bind it to free port for any interface supporting IPv6:
+ s.bind(("", 0));
+ return True
+ except Exception as e: # pragma: no cover
+ if hasattr(e, 'errno'):
+ import errno
+ # negative (-9 'Address family not supported', etc) or not available/supported:
+ if e.errno < 0 or e.errno in (errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT):
+ return False
+ # in use:
+ if e.errno in (errno.EADDRINUSE, errno.EACCES): # normally unreachable (free port and root)
+ return True
+ finally:
+ if s: s.close()
+ # unable to detect:
+ return None
+
@staticmethod
def setIPv6IsAllowed(value):
DNSUtils._IPv6IsAllowed = value
@@ -224,7 +279,17 @@ class DNSUtils:
v = DNSUtils.CACHE_nameToIp.get(DNSUtils._IPv6IsAllowed_key)
if v is not None:
return v
- v = any((':' in ip.ntoa) for ip in DNSUtils.getSelfIPs())
+ v = DNSUtils._IPv6IsSupportedBySystem()
+ if v is None:
+ # detect by IPs of host:
+ ips = DNSUtils.getNetIntrfIPs()
+ if not ips:
+ DNSUtils._IPv6IsAllowed = True; # avoid self recursion from getSelfIPs -> dnsToIp -> IPv6IsAllowed
+ try:
+ ips = DNSUtils.getSelfIPs()
+ finally:
+ DNSUtils._IPv6IsAllowed = None
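+ # consider IPv6 allowed if at least one of the obtained IPs is an IPv6 address: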
+ v = any((':' in ip.ntoa) for ip in ips)
DNSUtils.CACHE_nameToIp.set(DNSUtils._IPv6IsAllowed_key, v)
return v
@@ -239,9 +304,11 @@ class IPAddr(object):
"""
IP_4_RE = r"""(?:\d{1,3}\.){3}\d{1,3}"""
- IP_6_RE = r"""(?:[0-9a-fA-F]{1,4}::?|::){1,7}(?:[0-9a-fA-F]{1,4}|(?<=:):)"""
+ IP_6_RE = r"""(?:[0-9a-fA-F]{1,4}::?|:){1,7}(?:[0-9a-fA-F]{1,4}|(?<=:):)"""
IP_4_6_CRE = re.compile(
r"""^(?:(?P%s)|\[?(?P%s)\]?)$""" % (IP_4_RE, IP_6_RE))
+ IP_W_CIDR_CRE = re.compile(
+ r"""^(%s|%s)/(?:(\d+)|(%s|%s))$""" % (IP_4_RE, IP_6_RE, IP_4_RE, IP_6_RE))
# An IPv4 compatible IPv6 to be reused (see below)
IP6_4COMPAT = None
@@ -255,6 +322,9 @@ class IPAddr(object):
CIDR_UNSPEC = -1
FAM_IPv4 = CIDR_RAW - socket.AF_INET
FAM_IPv6 = CIDR_RAW - socket.AF_INET6
+ @staticmethod
+ def _AF2FAM(v):
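+ # map socket address family (AF_INET/AF_INET6) to internal FAM_IPv4/FAM_IPv6: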
+ return IPAddr.CIDR_RAW - v
def __new__(cls, ipstr, cidr=CIDR_UNSPEC):
if cidr == IPAddr.CIDR_UNSPEC and isinstance(ipstr, (tuple, list)):
@@ -292,13 +362,17 @@ class IPAddr(object):
# test mask:
if "/" not in ipstr:
return ipstr, IPAddr.CIDR_UNSPEC
- s = ipstr.split('/', 1)
- # IP address without CIDR mask
- if len(s) > 2:
- raise ValueError("invalid ipstr %r, too many plen representation" % (ipstr,))
- if "." in s[1] or ":" in s[1]: # 255.255.255.0 resp. ffff:: style mask
- s[1] = IPAddr.masktoplen(s[1])
- s[1] = long(s[1])
+ s = IPAddr.IP_W_CIDR_CRE.match(ipstr)
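+ # no valid CIDR notation - handle as plain address without prefix length: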
+ if s is None:
+ return ipstr, IPAddr.CIDR_UNSPEC
+ s = list(s.groups())
+ if s[2]: # 255.255.255.0 resp. ffff:: style mask
+ s[1] = IPAddr.masktoplen(s[2])
+ del s[2]
+ try:
+ s[1] = int(s[1])
+ except ValueError:
+ return ipstr, IPAddr.CIDR_UNSPEC
return s
def __init(self, ipstr, cidr=CIDR_UNSPEC):
@@ -332,7 +406,7 @@ class IPAddr(object):
# mask out host portion if prefix length is supplied
if cidr is not None and cidr >= 0:
- mask = ~(0xFFFFFFFFL >> cidr)
+ mask = ~(0xFFFFFFFF >> cidr)
self._addr &= mask
self._plen = cidr
@@ -344,13 +418,13 @@ class IPAddr(object):
# mask out host portion if prefix length is supplied
if cidr is not None and cidr >= 0:
- mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFL >> cidr)
+ mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> cidr)
self._addr &= mask
self._plen = cidr
# if IPv6 address is a IPv4-compatible, make instance a IPv4
elif self.isInNet(IPAddr.IP6_4COMPAT):
- self._addr = lo & 0xFFFFFFFFL
+ self._addr = lo & 0xFFFFFFFF
self._family = socket.AF_INET
self._plen = 32
else:
@@ -360,7 +434,7 @@ class IPAddr(object):
return repr(self.ntoa)
def __str__(self):
- return self.ntoa if isinstance(self.ntoa, basestring) else str(self.ntoa)
+ return self.ntoa if isinstance(self.ntoa, str) else str(self.ntoa)
def __reduce__(self):
"""IPAddr pickle-handler, that simply wraps IPAddr to the str
@@ -474,7 +548,7 @@ class IPAddr(object):
elif self.isIPv6:
# convert network to host byte order
hi = self._addr >> 64
- lo = self._addr & 0xFFFFFFFFFFFFFFFFL
+ lo = self._addr & 0xFFFFFFFFFFFFFFFF
binary = struct.pack("!QQ", hi, lo)
if self._plen and self._plen < 128:
add = "/%d" % self._plen
@@ -532,9 +606,9 @@ class IPAddr(object):
if self.family != net.family:
return False
if self.isIPv4:
- mask = ~(0xFFFFFFFFL >> net.plen)
+ mask = ~(0xFFFFFFFF >> net.plen)
elif self.isIPv6:
- mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFL >> net.plen)
+ mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> net.plen)
else:
return False
@@ -545,13 +619,16 @@ class IPAddr(object):
"""
return isinstance(ip, IPAddr) and (ip == self or ip.isInNet(self))
+ def __contains__(self, ip):
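+ # support "ip in net" membership syntax: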
+ return self.contains(ip)
+
# Pre-calculated map: addr to maskplen
def __getMaskMap():
m6 = (1 << 128)-1
m4 = (1 << 32)-1
mmap = {m6: 128, m4: 32, 0: 0}
m = 0
- for i in xrange(0, 128):
+ for i in range(0, 128):
m |= 1 << i
if i < 32:
mmap[m ^ m4] = 32-1-i
@@ -587,10 +664,142 @@ class IPAddr(object):
if not match:
return None
ipstr = match.group('IPv4')
- if ipstr != '':
+ if ipstr is not None and ipstr != '':
return ipstr
return match.group('IPv6')
# An IPv4 compatible IPv6 to be reused
IPAddr.IP6_4COMPAT = IPAddr("::ffff:0:0", 96)
+
+
+class IPAddrSet(set):
+
+ hasSubNet = False
+
+ def __init__(self, ips=[]):
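+ # normalize all entries to IPAddr and note whether the set contains subnets (non-single addresses):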
+ ips2 = set()
+ for ip in ips:
+ if not isinstance(ip, IPAddr): ip = IPAddr(ip)
+ ips2.add(ip)
+ self.hasSubNet |= not ip.isSingle
+ set.__init__(self, ips2)
+
+ def add(self, ip):
+ if not isinstance(ip, IPAddr): ip = IPAddr(ip)
+ self.hasSubNet |= not ip.isSingle
+ set.add(self, ip)
+
+ def __contains__(self, ip):
+ if not isinstance(ip, IPAddr): ip = IPAddr(ip)
+ # IP can be found directly or be contained in one of the subnets:
+ return set.__contains__(self, ip) or (self.hasSubNet and any(n.contains(ip) for n in self))
+
+
+def _NetworkInterfacesAddrs(withMask=False):
+
+ # Closure lazily loading the required modules and libc, defining _NetworkInterfacesAddrs on demand:
+ # Currently tested on Linux only (TODO: implement for MacOS, Solaris, etc)
+ try:
+ from ctypes import (
+ Structure, Union, POINTER,
+ pointer, get_errno, cast,
+ c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint16, c_uint32
+ )
+ import ctypes.util
+ import ctypes
+
+ class struct_sockaddr(Structure):
+ _fields_ = [
+ ('sa_family', c_ushort),
+ ('sa_data', c_byte * 14),]
+
+ class struct_sockaddr_in(Structure):
+ _fields_ = [
+ ('sin_family', c_ushort),
+ ('sin_port', c_uint16),
+ ('sin_addr', c_byte * 4)]
+
+ class struct_sockaddr_in6(Structure):
+ _fields_ = [
+ ('sin6_family', c_ushort),
+ ('sin6_port', c_uint16),
+ ('sin6_flowinfo', c_uint32),
+ ('sin6_addr', c_byte * 16),
+ ('sin6_scope_id', c_uint32)]
+
+ class union_ifa_ifu(Union):
+ _fields_ = [
+ ('ifu_broadaddr', POINTER(struct_sockaddr)),
+ ('ifu_dstaddr', POINTER(struct_sockaddr)),]
+
+ class struct_ifaddrs(Structure):
+ pass
+ struct_ifaddrs._fields_ = [
+ ('ifa_next', POINTER(struct_ifaddrs)),
+ ('ifa_name', c_char_p),
+ ('ifa_flags', c_uint),
+ ('ifa_addr', POINTER(struct_sockaddr)),
+ ('ifa_netmask', POINTER(struct_sockaddr)),
+ ('ifa_ifu', union_ifa_ifu),
+ ('ifa_data', c_void_p),]
+
+ libc = ctypes.CDLL(ctypes.util.find_library('c') or "")
+ if not libc.getifaddrs: # pragma: no cover
+ raise NotImplementedError('libc.getifaddrs is not available')
+
+ def ifap_iter(ifap):
+ ifa = ifap.contents
+ while True:
+ yield ifa
+ if not ifa.ifa_next:
+ break
+ ifa = ifa.ifa_next.contents
+
+ def getfamaddr(ifa, withMask=False):
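+ # convert the sockaddr of an interface to IPAddr (optionally appending its netmask as suffix):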
+ sa = ifa.ifa_addr.contents
+ fam = sa.sa_family
+ if fam == socket.AF_INET:
+ sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
+ addr = socket.inet_ntop(fam, sa.sin_addr)
+ if withMask:
+ nm = ifa.ifa_netmask.contents
+ if nm is not None and nm.sa_family == socket.AF_INET:
+ nm = cast(pointer(nm), POINTER(struct_sockaddr_in)).contents
+ addr += '/'+socket.inet_ntop(fam, nm.sin_addr)
+ return IPAddr(addr)
+ elif fam == socket.AF_INET6:
+ sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
+ addr = socket.inet_ntop(fam, sa.sin6_addr)
+ if withMask:
+ nm = ifa.ifa_netmask.contents
+ if nm is not None and nm.sa_family == socket.AF_INET6:
+ nm = cast(pointer(nm), POINTER(struct_sockaddr_in6)).contents
+ addr += '/'+socket.inet_ntop(fam, nm.sin6_addr)
+ return IPAddr(addr)
+ return None
+
+ def _NetworkInterfacesAddrs(withMask=False):
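+ # obtain the linked list of interface addresses from libc, yield (name, address) pairs and free it afterwards: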
+ ifap = POINTER(struct_ifaddrs)()
+ result = libc.getifaddrs(pointer(ifap))
+ if result != 0:
+ raise OSError(get_errno())
+ del result
+ try:
+ for ifa in ifap_iter(ifap):
+ name = ifa.ifa_name.decode("UTF-8")
+ addr = getfamaddr(ifa, withMask)
+ if addr:
+ yield name, addr
+ finally:
+ libc.freeifaddrs(ifap)
+
+ except Exception as e: # pragma: no cover
+ _init_error = NotImplementedError(e)
+ def _NetworkInterfacesAddrs(withMask=False):
+ raise _init_error
+
+ DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
+ return _NetworkInterfacesAddrs(withMask)
+
+DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
diff --git a/fail2ban/server/jail.py b/fail2ban/server/jail.py
index 2c84e475..0f8e3566 100644
--- a/fail2ban/server/jail.py
+++ b/fail2ban/server/jail.py
@@ -26,7 +26,7 @@ __license__ = "GPL"
import logging
import math
import random
-import Queue
+import queue
from .actions import Actions
from ..helpers import getLogger, _as_bool, extractOptions, MyTime
@@ -66,7 +66,7 @@ class Jail(object):
#Known backends. Each backend should have corresponding __initBackend method
# yoh: stored in a list instead of a tuple since only
# list had .index until 2.6
- _BACKENDS = ['pyinotify', 'gamin', 'polling', 'systemd']
+ _BACKENDS = ['pyinotify', 'polling', 'systemd']
def __init__(self, name, backend = "auto", db=None):
self.__db = db
@@ -76,13 +76,14 @@ class Jail(object):
"might not function correctly. Please shorten"
% name)
self.__name = name
- self.__queue = Queue.Queue()
+ self.__queue = queue.Queue()
self.__filter = None
# Extra parameters for increase ban time
self._banExtra = {};
logSys.info("Creating new jail '%s'" % self.name)
+ self._realBackend = None
if backend is not None:
- self._setBackend(backend)
+ self._realBackend = self._setBackend(backend)
self.backend = backend
def __repr__(self):
@@ -113,7 +114,7 @@ class Jail(object):
else:
logSys.info("Initiated %r backend" % b)
self.__actions = Actions(self)
- return # we are done
+ return b # we are done
except ImportError as e: # pragma: no cover
# Log debug if auto, but error if specific
logSys.log(
@@ -127,25 +128,19 @@ class Jail(object):
"Failed to initialize any backend for Jail %r" % self.name)
def _initPolling(self, **kwargs):
- from filterpoll import FilterPoll
+ from .filterpoll import FilterPoll
logSys.info("Jail '%s' uses poller %r" % (self.name, kwargs))
self.__filter = FilterPoll(self, **kwargs)
- def _initGamin(self, **kwargs):
- # Try to import gamin
- from filtergamin import FilterGamin
- logSys.info("Jail '%s' uses Gamin %r" % (self.name, kwargs))
- self.__filter = FilterGamin(self, **kwargs)
-
def _initPyinotify(self, **kwargs):
# Try to import pyinotify
- from filterpyinotify import FilterPyinotify
+ from .filterpyinotify import FilterPyinotify
logSys.info("Jail '%s' uses pyinotify %r" % (self.name, kwargs))
self.__filter = FilterPyinotify(self, **kwargs)
def _initSystemd(self, **kwargs): # pragma: systemd no cover
# Try to import systemd
- from filtersystemd import FilterSystemd
+ from .filtersystemd import FilterSystemd
logSys.info("Jail '%s' uses systemd %r" % (self.name, kwargs))
self.__filter = FilterSystemd(self, **kwargs)
@@ -191,10 +186,15 @@ class Jail(object):
def status(self, flavor="basic"):
"""The status of the jail.
"""
+ fstat = self.filter.status(flavor=flavor)
+ astat = self.actions.status(flavor=flavor)
+ if flavor == "stats":
+ backend = type(self.filter).__name__.replace('Filter', '').lower()
+ return [self._realBackend or self.backend, fstat, astat]
return [
- ("Filter", self.filter.status(flavor=flavor)),
- ("Actions", self.actions.status(flavor=flavor)),
- ]
+ ("Filter", fstat),
+ ("Actions", astat),
+ ]
@property
def hasFailTickets(self):
@@ -219,7 +219,7 @@ class Jail(object):
try:
ticket = self.__queue.get(False)
return ticket
- except Queue.Empty:
+ except queue.Empty:
return False
def setBanTimeExtra(self, opt, value):
@@ -294,10 +294,10 @@ class Jail(object):
correctBanTime=correctBanTime, maxmatches=self.filter.failManager.maxMatches
):
try:
- #logSys.debug('restored ticket: %s', ticket)
- if self.filter.inIgnoreIPList(ticket.getID(), log_ignore=True): continue
# mark ticked was restored from database - does not put it again into db:
ticket.restored = True
+ #logSys.debug('restored ticket: %s', ticket)
+ if self.filter._inIgnoreIPList(ticket.getID(), ticket): continue
# correct start time / ban time (by the same end of ban):
btm = ticket.getBanTime(forbantime)
diftm = MyTime.time() - ticket.getTime()
diff --git a/fail2ban/server/jails.py b/fail2ban/server/jails.py
index 27e12ddf..eaaa9518 100644
--- a/fail2ban/server/jails.py
+++ b/fail2ban/server/jails.py
@@ -67,8 +67,7 @@ class Jails(Mapping):
"""
with self.__lock:
if name in self._jails:
- if noduplicates:
- raise DuplicateJailException(name)
+ raise DuplicateJailException(name)
else:
self._jails[name] = Jail(name, backend, db)
diff --git a/fail2ban/server/jailthread.py b/fail2ban/server/jailthread.py
index 67955a06..d87f8ed0 100644
--- a/fail2ban/server/jailthread.py
+++ b/fail2ban/server/jailthread.py
@@ -78,14 +78,9 @@ class JailThread(Thread):
print(e)
self.run = run_with_except_hook
- if sys.version_info >= (3,): # pragma: 2.x no cover
- def _bootstrap(self):
- prctl_set_th_name(self.name)
- return super(JailThread, self)._bootstrap();
- else: # pragma: 3.x no cover
- def __bootstrap(self):
- prctl_set_th_name(self.name)
- return Thread._Thread__bootstrap(self)
+ def _bootstrap(self):
+ prctl_set_th_name(self.name)
+ return super(JailThread, self)._bootstrap();
@abstractmethod
def status(self, flavor="basic"): # pragma: no cover - abstract
@@ -125,9 +120,6 @@ class JailThread(Thread):
if self.active is not None:
super(JailThread, self).join()
-## python 2.x replace binding of private __bootstrap method:
-if sys.version_info < (3,): # pragma: 3.x no cover
- JailThread._Thread__bootstrap = JailThread._JailThread__bootstrap
## python 3.9, restore isAlive method:
-elif not hasattr(JailThread, 'isAlive'): # pragma: 2.x no cover
+if not hasattr(JailThread, 'isAlive'):
JailThread.isAlive = JailThread.is_alive
diff --git a/fail2ban/server/mytime.py b/fail2ban/server/mytime.py
index 315d8a30..ff46b7ef 100644
--- a/fail2ban/server/mytime.py
+++ b/fail2ban/server/mytime.py
@@ -165,7 +165,7 @@ class MyTime:
@returns number (calculated seconds from expression "val")
"""
- if isinstance(val, (int, long, float, complex)):
+ if isinstance(val, (int, float, complex)):
return val
# replace together standing abbreviations, example '1d12h' -> '1d 12h':
val = MyTime._str2sec_prep.sub(r" \1", val)
diff --git a/fail2ban/server/observer.py b/fail2ban/server/observer.py
index b1c9b37d..31858ecc 100644
--- a/fail2ban/server/observer.py
+++ b/fail2ban/server/observer.py
@@ -161,7 +161,7 @@ class ObserverThread(JailThread):
self.pulse_notify()
def add_wn(self, *event):
- """Add a event to queue withouth notifying thread to wake up.
+ """Add a event to queue without notifying thread to wake up.
"""
## lock and add new event to queue:
with self._queue_lock:
@@ -465,7 +465,7 @@ class ObserverThread(JailThread):
return banTime
def banFound(self, ticket, jail, btime):
- """ Notify observer a ban occured for ip
+ """ Notify observer a ban occurred for ip
Observer will check ip was known (bad) and possibly increase/prolong a ban time
Secondary we will actualize the bans and bips (bad ip) in database
@@ -507,7 +507,7 @@ class ObserverThread(JailThread):
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
def prolongBan(self, ticket, jail):
- """ Notify observer a ban occured for ip
+ """ Notify observer a ban occurred for ip
Observer will check ip was known (bad) and possibly increase/prolong a ban time
Secondary we will actualize the bans and bips (bad ip) in database
@@ -521,7 +521,7 @@ class ObserverThread(JailThread):
except Exception as e:
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
-# Global observer initial created in server (could be later rewriten via singleton)
+# Global observer initial created in server (could be later rewritten via singleton)
class _Observers:
def __init__(self):
self.Main = None
diff --git a/fail2ban/server/server.py b/fail2ban/server/server.py
index 660f7918..e438c4ca 100644
--- a/fail2ban/server/server.py
+++ b/fail2ban/server/server.py
@@ -58,11 +58,6 @@ except ImportError: # pragma: no cover
def _thread_name():
return threading.current_thread().__class__.__name__
-try:
- FileExistsError
-except NameError: # pragma: 3.x no cover
- FileExistsError = OSError
-
def _make_file_path(name):
"""Creates path of file (last level only) on demand"""
name = os.path.dirname(name)
@@ -209,7 +204,7 @@ class Server:
# Restore default signal handlers:
if _thread_name() == '_MainThread':
- for s, sh in self.__prev_signals.iteritems():
+ for s, sh in self.__prev_signals.items():
signal.signal(s, sh)
# Give observer a small chance to complete its work before exit
@@ -227,7 +222,7 @@ class Server:
obsMain.stop()
# Explicit close database (server can leave in a thread,
- # so delayed GC can prevent commiting changes)
+ # so delayed GC can prevent committing changes)
if self.__db:
self.__db.close()
self.__db = None
@@ -287,10 +282,10 @@ class Server:
logSys.info("Stopping all jails")
with self.__lock:
# 1st stop all jails (signal and stop actions/filter thread):
- for name in self.__jails.keys():
+ for name in list(self.__jails.keys()):
self.delJail(name, stop=True, join=False)
# 2nd wait for end and delete jails:
- for name in self.__jails.keys():
+ for name in list(self.__jails.keys()):
self.delJail(name, stop=False, join=True)
def clearCaches(self):
@@ -328,7 +323,7 @@ class Server:
if "--restart" in opts:
self.stopAllJail()
# first set all affected jail(s) to idle and reset filter regex and other lists/dicts:
- for jn, jail in self.__jails.iteritems():
+ for jn, jail in self.__jails.items():
if name == '--all' or jn == name:
jail.idle = True
self.__reload_state[jn] = jail
@@ -339,7 +334,7 @@ class Server:
# end reload, all affected (or new) jails have already all new parameters (via stream) and (re)started:
with self.__lock:
deljails = []
- for jn, jail in self.__jails.iteritems():
+ for jn, jail in self.__jails.items():
# still in reload state:
if jn in self.__reload_state:
# remove jails that are not reloaded (untouched, so not in new configuration)
@@ -539,7 +534,7 @@ class Server:
jails = [self.__jails[name]]
else:
# in all jails:
- jails = self.__jails.values()
+ jails = list(self.__jails.values())
# unban given or all (if value is None):
cnt = 0
ifexists |= (name is None)
@@ -553,7 +548,7 @@ class Server:
jails = [self.__jails[name]]
else:
# in all jails:
- jails = self.__jails.values()
+ jails = list(self.__jails.values())
# check banned ids:
res = []
if name is None and ids:
@@ -603,20 +598,29 @@ class Server:
def isAlive(self, jailnum=None):
if jailnum is not None and len(self.__jails) != jailnum:
return 0
- for jail in self.__jails.values():
+ for jail in list(self.__jails.values()):
if not jail.isAlive():
return 0
return 1
# Status
- def status(self):
+ def status(self, name="", flavor="basic"):
try:
self.__lock.acquire()
- jails = list(self.__jails)
- jails.sort()
- jailList = ", ".join(jails)
- ret = [("Number of jail", len(self.__jails)),
- ("Jail list", jailList)]
+ jails = sorted(self.__jails.items())
+ if flavor != "stats":
+ jailList = [n for n, j in jails]
+ ret = [
+ ("Number of jail", len(jailList)),
+ ("Jail list", ", ".join(jailList))
+ ]
+ if name == '--all':
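+ # gather status of every jail; for "stats" flavor return the per-jail dictionary only: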
+ jstat = dict(jails)
+ for n, j in jails:
+ jstat[n] = j.status(flavor=flavor)
+ if flavor == "stats":
+ return jstat
+ ret.append(jstat)
return ret
finally:
self.__lock.release()
@@ -725,14 +729,8 @@ class Server:
# Remove the handler.
logger.removeHandler(handler)
# And try to close -- it might be closed already
- try:
- handler.flush()
- handler.close()
- except (ValueError, KeyError): # pragma: no cover
- # Is known to be thrown after logging was shutdown once
- # with older Pythons -- seems to be safe to ignore there
- if sys.version_info < (3,) or sys.version_info >= (3, 2):
- raise
+ handler.flush()
+ handler.close()
# detailed format by deep log levels (as DEBUG=10):
if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
if self.__verbose is None:
@@ -818,7 +816,7 @@ class Server:
return DNSUtils.setIPv6IsAllowed(value)
def setThreadOptions(self, value):
- for o, v in value.iteritems():
+ for o, v in value.items():
if o == 'stacksize':
threading.stack_size(int(v)*1024)
else: # pragma: no cover
@@ -936,32 +934,16 @@ class Server:
# the default value (configurable).
try:
fdlist = self.__get_fdlist()
- maxfd = -1
- except:
- try:
- maxfd = os.sysconf("SC_OPEN_MAX")
- except (AttributeError, ValueError):
- maxfd = 256 # default maximum
- fdlist = xrange(maxfd+1)
-
- # urandom should not be closed in Python 3.4.0. Fixed in 3.4.1
- # http://bugs.python.org/issue21207
- if sys.version_info[0:3] == (3, 4, 0): # pragma: no cover
- urandom_fd = os.open("/dev/urandom", os.O_RDONLY)
- for fd in fdlist:
- try:
- if not os.path.sameopenfile(urandom_fd, fd):
- os.close(fd)
- except OSError: # ERROR (ignore)
- pass
- os.close(urandom_fd)
- elif maxfd == -1:
for fd in fdlist:
try:
os.close(fd)
except OSError: # ERROR (ignore)
pass
- else:
+ except:
+ try:
+ maxfd = os.sysconf("SC_OPEN_MAX")
+ except (AttributeError, ValueError):
+ maxfd = 256 # default maximum
os.closerange(0, maxfd)
# Redirect the standard file descriptors to /dev/null.
diff --git a/fail2ban/server/strptime.py b/fail2ban/server/strptime.py
index 12be163a..9b6b85df 100644
--- a/fail2ban/server/strptime.py
+++ b/fail2ban/server/strptime.py
@@ -60,7 +60,7 @@ timeRE['H'] = r"(?P<H>[0-1]?\d|2[0-3])"
timeRE['M'] = r"(?P<M>[0-5]?\d)"
timeRE['S'] = r"(?P<S>[0-5]?\d|6[0-1])"
-# Extend build-in TimeRE with some exact patterns
+# Extend built-in TimeRE with some exact patterns
# exact two-digit patterns:
timeRE['Exd'] = r"(?P<d>[1-2]\d|0[1-9]|3[0-1])"
timeRE['Exm'] = r"(?P<m>0[1-9]|1[0-2])"
@@ -99,7 +99,7 @@ def _updateTimeRE():
if len(exprset) > 1 else "".join(exprset)
exprset = set( cent(now[0].year + i) for i in (-1, distance) )
if len(now) > 1 and now[1]:
- exprset |= set( cent(now[1].year + i) for i in xrange(-1, now[0].year-now[1].year+1, distance) )
+ exprset |= set( cent(now[1].year + i) for i in range(-1, now[0].year-now[1].year+1, distance) )
return grp(sorted(list(exprset)))
# more precise year patterns, within same century of last year and
@@ -116,7 +116,7 @@ def _updateTimeRE():
_updateTimeRE()
def getTimePatternRE():
- keys = timeRE.keys()
+ keys = list(timeRE.keys())
patt = (r"%%(%%|%s|[%s])" % (
"|".join([k for k in keys if len(k) > 1]),
"".join([k for k in keys if len(k) == 1]),
@@ -171,7 +171,7 @@ def zone2offset(tz, dt):
"""
if isinstance(tz, int):
return tz
- if isinstance(tz, basestring):
+ if isinstance(tz, str):
return validateTimeZone(tz)
tz, tzo = tz
if tzo is None or tzo == '': # without offset
@@ -208,7 +208,7 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
year = month = day = tzoffset = \
weekday = julian = week_of_year = None
hour = minute = second = fraction = 0
- for key, val in found_dict.iteritems():
+ for key, val in found_dict.items():
if val is None: continue
# Directives not explicitly handled below:
# c, x, X
@@ -307,7 +307,7 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
day = now.day
assume_today = True
- # Actully create date
+ # Actually create date
date_result = datetime.datetime(
year, month, day, hour, minute, second, fraction)
# Correct timezone if not supplied in the log linge
diff --git a/fail2ban/server/ticket.py b/fail2ban/server/ticket.py
index 96e67773..72573ec4 100644
--- a/fail2ban/server/ticket.py
+++ b/fail2ban/server/ticket.py
@@ -55,7 +55,7 @@ class Ticket(object):
self._time = time if time is not None else MyTime.time()
self._data = {'matches': matches or [], 'failures': 0}
if data is not None:
- for k,v in data.iteritems():
+ for k,v in data.items():
if v is not None:
self._data[k] = v
if ticket:
@@ -88,7 +88,7 @@ class Ticket(object):
def setID(self, value):
# guarantee using IPAddr instead of unicode, str for the IP
- if isinstance(value, basestring):
+ if isinstance(value, str):
value = IPAddr(value)
self._id = value
@@ -180,7 +180,7 @@ class Ticket(object):
if len(args) == 1:
# todo: if support >= 2.7 only:
# self._data = {k:v for k,v in args[0].iteritems() if v is not None}
- self._data = dict([(k,v) for k,v in args[0].iteritems() if v is not None])
+ self._data = dict([(k,v) for k,v in args[0].items() if v is not None])
# add k,v list or dict (merge):
elif len(args) == 2:
self._data.update((args,))
@@ -191,7 +191,7 @@ class Ticket(object):
# filter (delete) None values:
# todo: if support >= 2.7 only:
# self._data = {k:v for k,v in self._data.iteritems() if v is not None}
- self._data = dict([(k,v) for k,v in self._data.iteritems() if v is not None])
+ self._data = dict([(k,v) for k,v in self._data.items() if v is not None])
def getData(self, key=None, default=None):
# return whole data dict:
@@ -200,17 +200,17 @@ class Ticket(object):
# return default if not exists:
if not self._data:
return default
- if not isinstance(key,(str,unicode,type(None),int,float,bool,complex)):
+ if not isinstance(key,(str,type(None),int,float,bool,complex)):
# return filtered by lambda/function:
if callable(key):
# todo: if support >= 2.7 only:
# return {k:v for k,v in self._data.iteritems() if key(k)}
- return dict([(k,v) for k,v in self._data.iteritems() if key(k)])
+ return dict([(k,v) for k,v in self._data.items() if key(k)])
# return filtered by keys:
if hasattr(key, '__iter__'):
# todo: if support >= 2.7 only:
# return {k:v for k,v in self._data.iteritems() if k in key}
- return dict([(k,v) for k,v in self._data.iteritems() if k in key])
+ return dict([(k,v) for k,v in self._data.items() if k in key])
# return single value of data:
return self._data.get(key, default)
@@ -257,7 +257,7 @@ class FailTicket(Ticket):
as estimation from rate by previous known interval (if it exceeds the findTime)
"""
if time > self._time:
- # expand current interval and attemps count (considering maxTime):
+ # expand current interval and attempts count (considering maxTime):
if self._firstTime < time - maxTime:
# adjust retry calculated as estimation from rate by previous known interval:
self._retry = int(round(self._retry / float(time - self._firstTime) * maxTime))
diff --git a/fail2ban/server/transmitter.py b/fail2ban/server/transmitter.py
index 6de60f94..92d591f0 100644
--- a/fail2ban/server/transmitter.py
+++ b/fail2ban/server/transmitter.py
@@ -144,6 +144,8 @@ class Transmitter:
return self.__commandGet(command[1:])
elif name == "status":
return self.status(command[1:])
+ elif name in ("stats", "statistic", "statistics"):
+ return self.__server.status("--all", "stats")
elif name == "version":
return version.version
elif name == "config-error":
@@ -488,7 +490,7 @@ class Transmitter:
opt = command[1][len("bantime."):]
return self.__server.getBanTimeExtra(name, opt)
elif command[1] == "actions":
- return self.__server.getActions(name).keys()
+ return list(self.__server.getActions(name).keys())
elif command[1] == "action":
actionname = command[2]
actionvalue = command[3]
@@ -512,11 +514,10 @@ class Transmitter:
def status(self, command):
if len(command) == 0:
return self.__server.status()
- elif len(command) == 1:
+ elif len(command) >= 1 and len(command) <= 2:
name = command[0]
- return self.__server.statusJail(name)
- elif len(command) == 2:
- name = command[0]
- flavor = command[1]
+ flavor = command[1] if len(command) == 2 else "basic"
+ if name == "--all":
+ return self.__server.status("--all", flavor)
return self.__server.statusJail(name, flavor=flavor)
raise Exception("Invalid command (no status)")
diff --git a/fail2ban/server/utils.py b/fail2ban/server/utils.py
index 18073ea7..02a6bc6d 100644
--- a/fail2ban/server/utils.py
+++ b/fail2ban/server/utils.py
@@ -32,10 +32,7 @@ import time
from ..helpers import getLogger, _merge_dicts, uni_decode
from collections import OrderedDict
-if sys.version_info >= (3, 3):
- import importlib.machinery
-else:
- import imp
+import importlib.machinery
# Gets the instance of the logger.
logSys = getLogger(__name__)
@@ -53,7 +50,7 @@ _RETCODE_HINTS = {
# Dictionary to lookup signal name from number
signame = dict((num, name)
- for name, num in signal.__dict__.iteritems() if name.startswith("SIG"))
+ for name, num in signal.__dict__.items() if name.startswith("SIG"))
class Utils():
"""Utilities provide diverse static methods like executes OS shell commands, etc.
@@ -140,7 +137,7 @@ class Utils():
if not isinstance(realCmd, list):
realCmd = [realCmd]
i = len(realCmd)-1
- for k, v in varsDict.iteritems():
+ for k, v in varsDict.items():
varsStat += "%s=$%s " % (k, i)
realCmd.append(v)
i += 1
@@ -355,10 +352,6 @@ class Utils():
def load_python_module(pythonModule):
pythonModuleName = os.path.splitext(
os.path.basename(pythonModule))[0]
- if sys.version_info >= (3, 3):
- mod = importlib.machinery.SourceFileLoader(
- pythonModuleName, pythonModule).load_module()
- else:
- mod = imp.load_source(
- pythonModuleName, pythonModule)
+ mod = importlib.machinery.SourceFileLoader(
+ pythonModuleName, pythonModule).load_module()
return mod
diff --git a/fail2ban/tests/action_d/test_smtp.py b/fail2ban/tests/action_d/test_smtp.py
index 8d20055a..6ad99978 100644
--- a/fail2ban/tests/action_d/test_smtp.py
+++ b/fail2ban/tests/action_d/test_smtp.py
@@ -18,72 +18,23 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
-import smtpd
import threading
import unittest
import re
import sys
-if sys.version_info >= (3, 3):
- import importlib
-else:
- import imp
+import importlib
from ..dummyjail import DummyJail
-
from ..utils import CONFIG_DIR, asyncserver, Utils, uni_decode
-class TestSMTPServer(smtpd.SMTPServer):
- def __init__(self, *args):
- smtpd.SMTPServer.__init__(self, *args)
+class _SMTPActionTestCase():
+
+ def _reset_smtpd(self):
+ for a in ('mailfrom', 'org_data', 'data'):
+ if hasattr(self.smtpd, a): delattr(self.smtpd, a)
self.ready = False
- def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
- self.peer = peer
- self.mailfrom = mailfrom
- self.rcpttos = rcpttos
- self.org_data = data
- # replace new line (with tab or space) for possible mime translations (word wrap),
- self.data = re.sub(r"\n[\t ]", " ", uni_decode(data))
- self.ready = True
-
-
-class SMTPActionTest(unittest.TestCase):
-
- def setUp(self):
- """Call before every test case."""
- unittest.F2B.SkipIfCfgMissing(action='smtp.py')
- super(SMTPActionTest, self).setUp()
- self.jail = DummyJail()
- pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
- pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
- if sys.version_info >= (3, 3):
- customActionModule = importlib.machinery.SourceFileLoader(
- pythonModuleName, pythonModule).load_module()
- else:
- customActionModule = imp.load_source(
- pythonModuleName, pythonModule)
-
- self.smtpd = TestSMTPServer(("localhost", 0), None)
- port = self.smtpd.socket.getsockname()[1]
-
- self.action = customActionModule.Action(
- self.jail, "test", host="localhost:%i" % port)
-
- ## because of bug in loop (see loop in asyncserver.py) use it's loop instead of asyncore.loop:
- self._active = True
- self._loop_thread = threading.Thread(
- target=asyncserver.loop, kwargs={'active': lambda: self._active})
- self._loop_thread.daemon = True
- self._loop_thread.start()
-
- def tearDown(self):
- """Call after every test case."""
- self.smtpd.close()
- self._active = False
- self._loop_thread.join()
- super(SMTPActionTest, self).tearDown()
-
def _exec_and_wait(self, doaction, timeout=3, short=False):
if short: timeout /= 25
self.smtpd.ready = False
@@ -94,6 +45,7 @@ class SMTPActionTest(unittest.TestCase):
self._exec_and_wait(self.action.start)
self.assertEqual(self.smtpd.mailfrom, "fail2ban")
self.assertEqual(self.smtpd.rcpttos, ["root"])
+ self.action.ssl = False # ensure it works without TLS as a sanity check
self.assertTrue(
"Subject: [Fail2Ban] %s: started" % self.jail.name
in self.smtpd.data)
@@ -160,3 +112,201 @@ class SMTPActionTest(unittest.TestCase):
self.assertTrue("From: %s <%s>" %
(self.action.fromname, self.action.fromaddr) in self.smtpd.data)
self.assertEqual(set(self.smtpd.rcpttos), set(["test@example.com", "test2@example.com"]))
+
+try:
+ import smtpd
+
+ class TestSMTPServer(smtpd.SMTPServer):
+
+ def __init__(self, *args):
+ smtpd.SMTPServer.__init__(self, *args)
+ self.ready = False
+
+ def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
+ self.peer = peer
+ self.mailfrom = mailfrom
+ self.rcpttos = rcpttos
+ self.org_data = data
+ # replace new line (with tab or space) for possible mime translations (word wrap),
+ self.data = re.sub(r"\n[\t ]", " ", uni_decode(data))
+ self.ready = True
+
+
+ class SMTPActionTest(unittest.TestCase, _SMTPActionTestCase):
+
+ def setUpClass():
+ """Call before tests."""
+ unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+
+ cls = SMTPActionTest
+ cls.smtpd = TestSMTPServer(("localhost", 0), None)
+ cls.port = cls.smtpd.socket.getsockname()[1]
+
+ ## because of bug in loop (see loop in asyncserver.py) use it's loop instead of asyncore.loop:
+ cls._active = True
+ cls._loop_thread = threading.Thread(
+ target=asyncserver.loop, kwargs={'active': lambda: cls._active})
+ cls._loop_thread.daemon = True
+ cls._loop_thread.start()
+
+ def tearDownClass():
+ """Call after tests."""
+ cls = SMTPActionTest
+ cls.smtpd.close()
+ cls._active = False
+ cls._loop_thread.join()
+
+ def setUp(self):
+ """Call before every test case."""
+ unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+ super(SMTPActionTest, self).setUp()
+ self.jail = DummyJail()
+ pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
+ pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
+ customActionModule = importlib.machinery.SourceFileLoader(
+ pythonModuleName, pythonModule).load_module()
+
+ self.action = customActionModule.Action(
+ self.jail, "test", host="localhost:%i" % self.port)
+
+ def tearDown(self):
+ """Call after every test case."""
+ self._reset_smtpd()
+ super(SMTPActionTest, self).tearDown()
+
+except ImportError as e:
+ print("I: Skipping smtp tests: %s" % e)
+
+
+try:
+ import asyncio
+ from aiosmtpd.controller import Controller
+ import socket
+ import ssl
+ import tempfile
+
+ class TestSMTPHandler:
+ def __init__(self, *args):
+ self.ready = False
+
+ async def handle_DATA(self, server, session, envelope):
+ self.peer = session.peer
+ self.mailfrom = envelope.mail_from
+ self.rcpttos = envelope.rcpt_tos
+ self.org_data = envelope.content.decode()
+ # normalize CRLF -> LF:
+ self.data = re.sub(r"\r\n", "\n", uni_decode(self.org_data))
+ self.ready = True
+ return '250 OK'
+
+ async def handle_exception(self, error):
+ print(error)
+ return '542 Internal server error'
+
+
+ class AIOSMTPActionTest(unittest.TestCase, _SMTPActionTestCase):
+
+ @classmethod
+ def create_temp_self_signed_cert(cls):
+ """
+ Create a self signed SSL certificate in temporary files for host
+ 'localhost'
+
+ Returns a tuple containing the certificate file name and the key
+ file name.
+
+ The cert (ECC:256, 100years) created with:
+ openssl req -x509 -out /tmp/f2b-localhost.crt -keyout /tmp/f2b-localhost.key -days 36500 -newkey ec:<(openssl ecparam -name prime256v1) -nodes -sha256 \
+ -subj '/CN=localhost' -extensions EXT -config <( \
+ printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth" \
+ )
+ cat /tmp/f2b-localhost.*
+ rm /tmp/f2b-localhost.*
+
+ """
+ if hasattr(cls, 'crtfiles'): return cls.crtfiles
+ cls.crtfiles = crtfiles = (tempfile.mktemp(".crt", "f2b_cert_"), tempfile.mktemp(".key", "f2b_cert_"))
+ with open(crtfiles[0], 'w') as f:
+ f.write(
+ '-----BEGIN CERTIFICATE-----\n'
+ 'MIIBhDCCASugAwIBAgIUCuW168kD3G7XrpFwGHwE6vGfoJkwCgYIKoZIzj0EAwIw\n'
+ 'FDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTIzMTIzMDE3NDUzNFoYDzIxMjMxMjA2\n'
+ 'MTc0NTM0WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjO\n'
+ 'PQMBBwNCAARDa8BO/UE4axzvnOQ/pCc/ZTp351X1TqIfjEFaMoZOItz1/MW3ZCuS\n'
+ '2vuby3rMn0WZ59RWVotBqA6lcMVcgDq3o1kwVzAUBgNVHREEDTALgglsb2NhbGhv\n'
+ 'c3QwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBS8\n'
+ 'kH1Ucuq+wlex5DxxHDe1kKGdcjAKBggqhkjOPQQDAgNHADBEAiBmv05+BvXWMzLg\n'
+ 'TtF4McoQNrU/0TTKhV8o+mgd+47tMAIgaaSNRnfjGIfJMbXg7Bh53qOIu5+lnm1b\n'
+ 'ySygMgFmePs=\n'
+ '-----END CERTIFICATE-----\n'
+ )
+ with open(crtfiles[1], 'w') as f:
+ f.write(
+ '-----BEGIN PRIVATE KEY-----\n'
+ 'MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgoBGcojKPZMYut7aP\n'
+ 'JGe2GW+2lVV0zJpgCsZ7816a9uqhRANCAARDa8BO/UE4axzvnOQ/pCc/ZTp351X1\n'
+ 'TqIfjEFaMoZOItz1/MW3ZCuS2vuby3rMn0WZ59RWVotBqA6lcMVcgDq3\n'
+ '-----END PRIVATE KEY-----\n'
+ )
+ # return file names
+ return crtfiles
+
+ @classmethod
+ def _del_cert(cls):
+ if hasattr(cls, 'crtfiles') and cls.crtfiles:
+ for f in cls.crtfiles:
+ try:
+ os.unlink(f)
+ except FileNotFoundError: pass
+ cls.crtfiles = None
+
+ @staticmethod
+ def _free_port():
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ s.bind(('localhost', 0))
+ return s.getsockname()[1]
+
+ def setUpClass():
+ """Call before tests."""
+ unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+
+ cert_file, cert_key = AIOSMTPActionTest.create_temp_self_signed_cert()
+ ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ssl_context.load_cert_chain(cert_file, cert_key)
+
+ cls = AIOSMTPActionTest
+ cls.port = cls._free_port()
+ cls.smtpd = TestSMTPHandler()
+ cls.controller = Controller(cls.smtpd, hostname='localhost', server_hostname='localhost', port=cls.port,
+ server_kwargs={'tls_context': ssl_context, 'require_starttls': False})
+ # Run the event loop in a separate thread.
+ cls.controller.start()
+
+ def tearDownClass():
+ """Call after tests."""
+ cls = AIOSMTPActionTest
+ cls.controller.stop()
+ cls._del_cert()
+
+ def setUp(self):
+ """Call before every test case."""
+ unittest.F2B.SkipIfCfgMissing(action='smtp.py')
+ super(AIOSMTPActionTest, self).setUp()
+ self.jail = DummyJail()
+ pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
+ pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
+ customActionModule = importlib.machinery.SourceFileLoader(
+ pythonModuleName, pythonModule).load_module()
+
+ self.action = customActionModule.Action(
+ self.jail, "test", host="localhost:%i" % self.port)
+
+ self.action.ssl = True
+
+ def tearDown(self):
+ """Call after every test case."""
+ self._reset_smtpd()
+ super(AIOSMTPActionTest, self).tearDown()
+
+except ImportError as e:
+ print("I: Skipping SSL smtp tests: %s" % e)
diff --git a/fail2ban/tests/actiontestcase.py b/fail2ban/tests/actiontestcase.py
index ce5de483..c353376c 100644
--- a/fail2ban/tests/actiontestcase.py
+++ b/fail2ban/tests/actiontestcase.py
@@ -70,7 +70,7 @@ class CommandActionTest(LogCaptureTestCase):
lambda: substituteRecursiveTags({'A': '', 'B': ''}))
self.assertRaises(ValueError,
lambda: substituteRecursiveTags({'A': '', 'B': '', 'C': ''}))
- # Unresolveable substition
+ # Unresolveable substitution
self.assertRaises(ValueError,
lambda: substituteRecursiveTags({'A': 'to= fromip=', 'C': '', 'B': '', 'D': ''}))
self.assertRaises(ValueError,
@@ -242,14 +242,14 @@ class CommandActionTest(LogCaptureTestCase):
setattr(self.__action, 'ab', "")
setattr(self.__action, 'x?family=inet6', "")
# produce self-referencing properties except:
- self.assertRaisesRegexp(ValueError, r"properties contain self referencing definitions",
+ self.assertRaisesRegex(ValueError, r"properties contain self referencing definitions",
lambda: self.__action.replaceTag("",
self.__action._properties, conditional="family=inet4")
)
- # remore self-referencing in props:
+ # remove self-referencing in props:
delattr(self.__action, 'ac')
# produce self-referencing query except:
- self.assertRaisesRegexp(ValueError, r"possible self referencing definitions in query",
+ self.assertRaisesRegex(ValueError, r"possible self referencing definitions in query",
lambda: self.__action.replaceTag(""*30,
self.__action._properties, conditional="family=inet6")
)
@@ -276,7 +276,7 @@ class CommandActionTest(LogCaptureTestCase):
conditional="family=inet6", cache=cache),
"Text 890-567 text 567 '567'")
self.assertTrue(len(cache) >= 3)
- # set one parameter - internal properties and cache should be reseted:
+ # set one parameter - internal properties and cache should be reset:
setattr(self.__action, 'xyz', "000-")
self.assertEqual(len(cache), 0)
# test againg, should have 000 instead of 890:
diff --git a/fail2ban/tests/banmanagertestcase.py b/fail2ban/tests/banmanagertestcase.py
index cf25ac0f..2c0c4c4f 100644
--- a/fail2ban/tests/banmanagertestcase.py
+++ b/fail2ban/tests/banmanagertestcase.py
@@ -177,7 +177,7 @@ class StatusExtendedCymruInfo(unittest.TestCase):
super(StatusExtendedCymruInfo, self).setUp()
unittest.F2B.SkipIfNoNetwork()
setUpMyTime()
- self.__ban_ip = iter(DNSUtils.dnsToIp("resolver1.opendns.com")).next()
+ self.__ban_ip = next(iter(DNSUtils.dnsToIp("resolver1.opendns.com")))
self.__asn = "36692"
self.__country = "US"
self.__rir = "arin"
diff --git a/fail2ban/tests/clientbeautifiertestcase.py b/fail2ban/tests/clientbeautifiertestcase.py
index 79a0ff54..defedbe1 100644
--- a/fail2ban/tests/clientbeautifiertestcase.py
+++ b/fail2ban/tests/clientbeautifiertestcase.py
@@ -70,8 +70,8 @@ class BeautifierTest(unittest.TestCase):
def testStatus(self):
self.b.setInputCmd(["status"])
- response = (("Number of jails", 0), ("Jail list", ["ssh", "exim4"]))
- output = "Status\n|- Number of jails:\t0\n`- Jail list:\tssh exim4"
+ response = (("Number of jails", 2), ("Jail list", ", ".join(["ssh", "exim4"])))
+ output = "Status\n|- Number of jails:\t2\n`- Jail list:\tssh, exim4"
self.assertEqual(self.b.beautify(response), output)
self.b.setInputCmd(["status", "ssh"])
@@ -105,6 +105,90 @@ class BeautifierTest(unittest.TestCase):
output += " `- Banned IP list: 192.168.0.1 10.2.2.1 2001:db8::1"
self.assertEqual(self.b.beautify(response), output)
+ self.b.setInputCmd(["status", "--all"])
+ response = (("Number of jails", 2), ("Jail list", ", ".join(["ssh", "exim4"])), {
+ "ssh": (
+ ("Filter", [
+ ("Currently failed", 0),
+ ("Total failed", 0),
+ ("File list", "/var/log/auth.log")
+ ]
+ ),
+ ("Actions", [
+ ("Currently banned", 3),
+ ("Total banned", 3),
+ ("Banned IP list", [
+ IPAddr("192.168.0.1"),
+ IPAddr("::ffff:10.2.2.1"),
+ IPAddr("2001:db8::1")
+ ]
+ )
+ ]
+ )
+ ),
+ "exim4": (
+ ("Filter", [
+ ("Currently failed", 3),
+ ("Total failed", 6),
+ ("File list", "/var/log/exim4/mainlog")
+ ]
+ ),
+ ("Actions", [
+ ("Currently banned", 0),
+ ("Total banned", 0),
+ ("Banned IP list", []
+ )
+ ]
+ )
+ )
+ })
+ output = (
+ "Status\n"
+ + "|- Number of jails:\t2\n"
+ + "|- Jail list:\tssh, exim4\n"
+ + "`- Status for the jails:\n"
+ + " |- Jail: ssh\n"
+ + " | |- Filter\n"
+ + " | | |- Currently failed: 0\n"
+ + " | | |- Total failed: 0\n"
+ + " | | `- File list: /var/log/auth.log\n"
+ + " | `- Actions\n"
+ + " | |- Currently banned: 3\n"
+ + " | |- Total banned: 3\n"
+ + " | `- Banned IP list: 192.168.0.1 10.2.2.1 2001:db8::1\n"
+ + " `- Jail: exim4\n"
+ + " |- Filter\n"
+ + " | |- Currently failed: 3\n"
+ + " | |- Total failed: 6\n"
+ + " | `- File list: /var/log/exim4/mainlog\n"
+ + " `- Actions\n"
+ + " |- Currently banned: 0\n"
+ + " |- Total banned: 0\n"
+ + " `- Banned IP list: "
+ )
+ self.assertEqual(self.b.beautify(response), output)
+
+ def testStatusStats(self):
+ self.b.setInputCmd(["stats"])
+ response = {
+ "ssh": ["systemd", (3, 6), (12, 24)],
+ "exim4": ["pyinotify", (6, 12), (20, 20)],
+ "jail-with-long-name": ["polling", (0, 0), (0, 0)]
+ }
+ output = (""
+ + " ? ? Filter ? Actions \n"
+ + "Jail ? Backend ????????????????????????\n"
+ + " ? ? cur ? tot ? cur ? tot\n"
+ + "????????????????????????????????????????????????????????\n"
+ + "ssh ? systemd ? 3 ? 6 ? 12 ? 24\n"
+ + "exim4 ? pyinotify ? 6 ? 12 ? 20 ? 20\n"
+ + "jail-with-long-name ? polling ? 0 ? 0 ? 0 ? 0\n"
+ + "????????????????????????????????????????????????????????"
+ )
+ response = self.b.beautify(response).encode('ascii', 'replace').decode('ascii')
+ self.assertEqual(response, output)
+
+
def testFlushLogs(self):
self.b.setInputCmd(["flushlogs"])
self.assertEqual(self.b.beautify("rolled over"), "logs: rolled over")
diff --git a/fail2ban/tests/clientreadertestcase.py b/fail2ban/tests/clientreadertestcase.py
index 37083a06..0388fd3d 100644
--- a/fail2ban/tests/clientreadertestcase.py
+++ b/fail2ban/tests/clientreadertestcase.py
@@ -61,6 +61,7 @@ class ConfigReaderTest(unittest.TestCase):
def tearDown(self):
"""Call after every test case."""
shutil.rmtree(self.d)
+ super(ConfigReaderTest, self).tearDown()
def _write(self, fname, value=None, content=None):
# verify if we don't need to create .d directory
@@ -337,7 +338,7 @@ class JailReaderTest(LogCaptureTestCase):
self.assertTrue(jail.getOptions())
self.assertTrue(jail.isEnabled())
stream = jail.convert()
- # check filter options are overriden with values specified directly in jail:
+ # check filter options are overridden with values specified directly in jail:
# prefregex:
self.assertEqual([['set', 'sshd-override-flt-opts', 'prefregex', '^Test']],
[o for o in stream if len(o) > 2 and o[2] == 'prefregex'])
@@ -419,7 +420,7 @@ class JailReaderTest(LogCaptureTestCase):
# And multiple groups (`][` instead of `,`)
result = extractOptions(option.replace(',', ']['))
expected2 = (expected[0],
- dict((k, v.replace(',', '][')) for k, v in expected[1].iteritems())
+ dict((k, v.replace(',', '][')) for k, v in expected[1].items())
)
self.assertEqual(expected2, result)
@@ -565,7 +566,7 @@ class FilterReaderTest(LogCaptureTestCase):
def testFilterReaderSubstitionDefault(self):
output = [['set', 'jailname', 'addfailregex', 'to=sweet@example.com fromip=<IP>']]
- filterReader = FilterReader('substition', "jailname", {},
+ filterReader = FilterReader('substitution', "jailname", {},
share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
@@ -585,7 +586,7 @@ class FilterReaderTest(LogCaptureTestCase):
def testFilterReaderSubstitionSet(self):
output = [['set', 'jailname', 'addfailregex', 'to=sour@example.com fromip=<IP>']]
- filterReader = FilterReader('substition', "jailname", {'honeypot': 'sour@example.com'},
+ filterReader = FilterReader('substitution', "jailname", {'honeypot': 'sour@example.com'},
share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
@@ -595,8 +596,8 @@ class FilterReaderTest(LogCaptureTestCase):
def testFilterReaderSubstitionKnown(self):
output = [['set', 'jailname', 'addfailregex', '^to=test,sweet@example.com,test2,sweet@example.com fromip=<IP>$']]
filterName, filterOpt = extractOptions(
- 'substition[failregex="^$", honeypot=",", sweet="test,,test2"]')
- filterReader = FilterReader('substition', "jailname", filterOpt,
+ 'substitution[failregex="^$", honeypot=",", sweet="test,,test2"]')
+ filterReader = FilterReader('substitution', "jailname", filterOpt,
share_config=TEST_FILES_DIR_SHARE_CFG, basedir=TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
@@ -606,8 +607,8 @@ class FilterReaderTest(LogCaptureTestCase):
def testFilterReaderSubstitionSection(self):
output = [['set', 'jailname', 'addfailregex', '^\\s*to=fail2ban@localhost fromip=<IP>\\s*$']]
filterName, filterOpt = extractOptions(
- 'substition[failregex="^\\s*\\s*$", honeypot=""]')
- filterReader = FilterReader('substition', "jailname", filterOpt,
+ 'substitution[failregex="^\\s*