Merge branch '_enh/test_backends' -- fixing inotify backend, RF backends, unittests

* _enh/test_backends:
  RF: reordered tests + enabled gamin now that its fix is pending in Debian
  ENH+BF: filtergamin -- to be more inline with current design of filterinotify
  ENH: 1 more sleep_4_poll to guarantee difference in time stamp
  ENH: few more delays for cases relying on time stamps
  ENH: tests much more robust now across pythons 2.4 -- 2.7
  BF+RF: pyinotify refreshes watcher upon CREATE, unified/simplified *(add|del)LogPath among *Filters
  ENH: fail2ban-testcases -- custom logging format to ease debugging, non-0 exit code in case of failure
  ENH: Filter's testcases -- rename, del + list again --- a bit unstable, might still fail from time to time
  BF: pyinotify -- monitor the parent directory for IN_CREATE + process freshly added file (Closes gh-44)
  ENH: first working unittest for checking polling and inotify backends
  RF/BF: just use pyinotify.ThreadedNotifier thread in filterpyinotify
  RF: filter.py -- single readline in a loop
  ENH: FilterPoll -- adjusted some msgs + allowed to operate without jail (for testing)
  Minor additional comment to DEVELOP
  ENH: extended test LogfileMonitor
pull/8/merge
Yaroslav Halchenko 2012-07-20 09:50:08 -04:00
commit dca5634717
7 changed files with 527 additions and 227 deletions

DEVELOP

@@ -137,7 +137,18 @@ filter*.py
Implementations of FileFilter's for specific backends. Derived
classes should provide an implementation of `run` and usually
override `addLogPath`, `delLogPath` methods.
override `addLogPath`, `delLogPath` methods. In their run() method they all,
one way or another, provide
  try:
      while True:
          ticket = self.failManager.toBan()
          self.jail.putFailTicket(ticket)
  except FailManagerEmpty:
      self.failManager.cleanup(MyTime.time())
thus channeling "ban tickets" from their failManager to a
corresponding jail.
action.py
~~~~~~~~~
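
To make the filter*.py contract described in DEVELOP above concrete, here is a rough, hypothetical sketch of a minimal backend. FileFilter, FailManagerEmpty, MyTime and the methods called on them are the real classes and calls used throughout this change; the FilterNaive class itself and the server-package import paths are illustrative assumptions, not part of the commit.

    # Hypothetical sketch, not part of this commit: a naive backend that
    # re-reads every monitored file each cycle and channels the resulting
    # "ban tickets" from its failManager into the jail, as DEVELOP describes.
    import time

    from filter import FileFilter            # import paths assume the server package
    from failmanager import FailManagerEmpty
    from mytime import MyTime

    class FilterNaive(FileFilter):

        def __init__(self, jail):
            FileFilter.__init__(self, jail)

        def run(self):
            self.setActive(True)
            while self._isActive():
                if not self.getIdle():
                    # backend-specific change detection would go here;
                    # this sketch simply re-reads every file
                    for container in self.getLogPath():
                        self.getFailures(container.getFileName())
                    try:
                        while True:
                            ticket = self.failManager.toBan()
                            self.jail.putFailTicket(ticket)
                    except FailManagerEmpty:
                        self.failManager.cleanup(MyTime.time())
                time.sleep(self.getSleepTime())
            return True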

fail2ban-testcases

@@ -63,20 +63,19 @@ parser = get_opt_parser()
(opts, files) = parser.parse_args()
assert(not len(files))
# Set the time to a fixed, known value
# Sun Aug 14 12:00:00 CEST 2005
# yoh: we need to adjust TZ to match the one used by Cyril so all the timestamps match
old_TZ = os.environ.get('TZ', None)
os.environ['TZ'] = 'Europe/Zurich'
time.tzset()
MyTime.setTime(1124013600)
# Gets the instance of the logger.
#
# Logging
#
logSys = logging.getLogger("fail2ban")
# Add the default logging handler
stdout = logging.StreamHandler(sys.stdout)
logSys.addHandler(stdout)
# Numerical level of verbosity corresponding to a log "level"
verbosity = {'debug': 3,
'info': 2,
'warn': 1,
'error': 1,
'fatal': 0,
None: 1}[opts.log_level]
if opts.log_level is not None:
# so we had explicit settings
logSys.setLevel(getattr(logging, opts.log_level.upper()))
@@ -86,9 +85,27 @@ else:
# unless error occurs
logSys.setLevel(getattr(logging, 'FATAL'))
# Add the default logging handler
stdout = logging.StreamHandler(sys.stdout)
# Custom log format for the verbose tests runs
if verbosity > 1:
stdout.setFormatter(logging.Formatter(' %(asctime)-15s %(thread)s %(message)s'))
else:
# just prefix with the space
stdout.setFormatter(logging.Formatter(' %(message)s'))
logSys.addHandler(stdout)
#
# Let know the version
#
if not opts.log_level or opts.log_level != 'fatal':
print "Fail2ban " + version + " test suite. Please wait..."
#
# Gather the tests
#
tests = unittest.TestSuite()
# Server
@@ -102,6 +119,20 @@ tests.addTest(unittest.makeSuite(banmanagertestcase.AddFailure))
# ClientReader
tests.addTest(unittest.makeSuite(clientreadertestcase.JailReaderTest))
# Filter
tests.addTest(unittest.makeSuite(filtertestcase.IgnoreIP))
tests.addTest(unittest.makeSuite(filtertestcase.LogFile))
tests.addTest(unittest.makeSuite(filtertestcase.LogFileMonitor))
tests.addTest(unittest.makeSuite(filtertestcase.GetFailures))
tests.addTest(unittest.makeSuite(filtertestcase.DNSUtilsTests))
# DateDetector
tests.addTest(unittest.makeSuite(datedetectortestcase.DateDetectorTest))
#
# Extensive use-tests of different available filters backends
#
from server.filterpoll import FilterPoll
filters = [FilterPoll] # always available
@@ -110,8 +141,7 @@ filters = [FilterPoll] # always available
# with good old unittest
try:
from server.filtergamin import FilterGamin
# That shmug plain doesn't work and stalls things ATM
# filters.append(FilterGamin)
filters.append(FilterGamin)
except:
pass
@@ -125,33 +155,30 @@ for Filter_ in filters:
tests.addTest(unittest.makeSuite(
filtertestcase.get_monitor_failures_testcase(Filter_)))
# yoh: adding them (in particular datadetectortestscase before above
# get_monitor_failures_testcase's makes them fail (probably due
# to additional thread making it busier or smth like
# that)... TODO
# Filter
tests.addTest(unittest.makeSuite(filtertestcase.IgnoreIP))
tests.addTest(unittest.makeSuite(filtertestcase.LogFile))
tests.addTest(unittest.makeSuite(filtertestcase.GetFailures))
tests.addTest(unittest.makeSuite(filtertestcase.DNSUtilsTests))
#
# Run the tests
#
testRunner = unittest.TextTestRunner(verbosity=verbosity)
# DateDetector
tests.addTest(unittest.makeSuite(datedetectortestcase.DateDetectorTest))
try:
# Set the time to a fixed, known value
# Sun Aug 14 12:00:00 CEST 2005
# yoh: we need to adjust TZ to match the one used by Cyril so all the timestamps match
old_TZ = os.environ.get('TZ', None)
os.environ['TZ'] = 'Europe/Zurich'
time.tzset()
MyTime.setTime(1124013600)
# Tests runner
testRunner = unittest.TextTestRunner(
verbosity={'debug': 3,
'info': 2,
'warn': 1,
'error': 1,
'fatal': 0,
None: 1}[opts.log_level])
testRunner.run(tests)
tests_results = testRunner.run(tests)
# Just for the sake of it reset the TZ
# yoh is planing to move all this into setup/teardown methods within tests
os.environ.pop('TZ')
if old_TZ:
os.environ['TZ'] = old_TZ
time.tzset()
finally:
# Just for the sake of it reset the TZ
# yoh: move all this into setup/teardown methods within tests
os.environ.pop('TZ')
if old_TZ:
os.environ['TZ'] = old_TZ
time.tzset()
if not tests_results.wasSuccessful():
sys.exit(1)
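
The inline comment above plans to move this fixed-clock/TZ fixture into per-test setUp/tearDown. A hedged sketch of what that could look like follows; the TimeFixedTestCase name and the server.mytime import path are assumptions, and resetting MyTime itself is omitted because its reset API is not part of the shown diff.

    # Hypothetical sketch only: the fixed-TZ / fixed-clock setup from the
    # runner expressed as per-test setUp/tearDown, as the comment suggests.
    import os
    import time
    import unittest

    from server.mytime import MyTime    # import path assumed


    class TimeFixedTestCase(unittest.TestCase):

        def setUp(self):
            # Sun Aug 14 12:00:00 CEST 2005 -- the same instant the runner pins
            self._old_tz = os.environ.get('TZ', None)
            os.environ['TZ'] = 'Europe/Zurich'
            time.tzset()
            MyTime.setTime(1124013600)

        def tearDown(self):
            # restore the original timezone; restoring MyTime is left out of
            # this sketch because its reset API is not shown in this diff
            if self._old_tz is None:
                os.environ.pop('TZ', None)
            else:
                os.environ['TZ'] = self._old_tz
            time.tzset()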

server/filter.py

@@ -395,8 +395,19 @@ class FileFilter(Filter):
# @param path log file path
def addLogPath(self, path, tail = False):
container = FileContainer(path, tail)
self.__logPath.append(container)
if self.containsLogPath(path):
logSys.error(path + " already exists")
else:
container = FileContainer(path, tail)
self.__logPath.append(container)
logSys.info("Added logfile = %s" % path)
self._addLogPath(path) # backend specific
def _addLogPath(self, path):
# nothing to do by default
# to be overriden by backends
pass
##
# Delete a log path
@@ -407,8 +418,15 @@ class FileFilter(Filter):
for log in self.__logPath:
if log.getFileName() == path:
self.__logPath.remove(log)
logSys.info("Removed logfile = %s" % path)
self._delLogPath(path)
return
def _delLogPath(self, path):
# nothing to do by default
# to be overriden by backends
pass
##
# Get the log file path
#
@@ -455,14 +473,12 @@ class FileFilter(Filter):
logSys.exception(e)
return False
line = container.readline()
while not line == "":
if not self._isActive():
# The jail has been stopped
while True:
line = container.readline()
if (line == "") or not self._isActive():
# The jail reached the bottom or has been stopped
break
self.processLineAndAdd(line)
# Read a new line.
line = container.readline()
container.close()
return True
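
With the duplicate check, list bookkeeping and logging now living in FileFilter.addLogPath()/delLogPath() above, a backend only supplies the two private hooks. A minimal hypothetical backend illustrating that split (the FilterTracing class is illustrative only; only the hook names come from this commit):

    # Hypothetical sketch: the base class handles the "already exists" check
    # and logging, so a backend only implements _addLogPath/_delLogPath.
    from filter import FileFilter    # import path assumes the server package


    class FilterTracing(FileFilter):

        def __init__(self, jail):
            FileFilter.__init__(self, jail)
            self._watched = set()

        def _addLogPath(self, path):
            # called by FileFilter.addLogPath() only for genuinely new paths
            self._watched.add(path)

        def _delLogPath(self, path):
            # called by FileFilter.delLogPath() after the path was removed
            # from its internal list
            self._watched.discard(path)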

server/filtergamin.py

@@ -17,14 +17,10 @@
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
# $Revision$
# Author: Cyril Jaquier, Yaroslav Halchenko
__author__ = "Cyril Jaquier"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2012 Yaroslav Halchenko"
__license__ = "GPL"
from failmanager import FailManagerEmpty
@@ -50,7 +46,7 @@ class FilterGamin(FileFilter):
#
# Initialize the filter object with default values.
# @param jail the jail object
def __init__(self, jail):
FileFilter.__init__(self, jail)
self.__modified = False
@@ -63,36 +59,43 @@ class FilterGamin(FileFilter):
logSys.debug("Got event: " + `event` + " for " + path)
if event in (gamin.GAMCreated, gamin.GAMChanged, gamin.GAMExists):
logSys.debug("File changed: " + path)
self.getFailures(path)
self.__modified = True
self._process_file(path)
def _process_file(self, path):
"""Process a given file
TODO -- RF:
this is a common logic and must be shared/provided by FileFilter
"""
self.getFailures(path)
try:
while True:
ticket = self.failManager.toBan()
self.jail.putFailTicket(ticket)
except FailManagerEmpty:
self.failManager.cleanup(MyTime.time())
self.dateDetector.sortTemplate()
self.__modified = False
##
# Add a log file path
#
# @param path log file path
def addLogPath(self, path, tail = False):
if self.containsLogPath(path):
logSys.error(path + " already exists")
else:
self.monitor.watch_file(path, self.callback)
FileFilter.addLogPath(self, path, tail)
logSys.info("Added logfile = %s" % path)
def _addLogPath(self, path):
self.monitor.watch_file(path, self.callback)
##
# Delete a log path
#
# @param path the log file to delete
def delLogPath(self, path):
if not self.containsLogPath(path):
logSys.error(path + " is not monitored")
else:
self.monitor.stop_watch(path)
FileFilter.delLogPath(self, path)
logSys.info("Removed logfile = %s" % path)
def _delLogPath(self, path):
self.monitor.stop_watch(path)
##
# Main loop.
#
@@ -102,30 +105,22 @@ class FilterGamin(FileFilter):
def run(self):
self.setActive(True)
# Gamin needs a loop to collect and dispatch events
while self._isActive():
if not self.getIdle():
# We cannot block here because we want to be able to
# exit.
if self.monitor.event_pending():
self.monitor.handle_events()
if self.__modified:
try:
while True:
ticket = self.failManager.toBan()
self.jail.putFailTicket(ticket)
except FailManagerEmpty:
self.failManager.cleanup(MyTime.time())
self.dateDetector.sortTemplate()
self.__modified = False
time.sleep(self.getSleepTime())
else:
time.sleep(self.getSleepTime())
# Cleanup Gamin
self.__cleanup()
time.sleep(self.getSleepTime())
logSys.debug(self.jail.getName() + ": filter terminated")
return True
def stop(self):
super(FilterGamin, self).stop()
self.__cleanup()
##
# Desallocates the resources used by Gamin.
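
For readers unfamiliar with the Gamin bindings driving the loop above, here is a tiny standalone sketch of the same poll-and-dispatch pattern. Only watch_file, event_pending, handle_events, stop_watch and the GAM* constants appear in the diff itself; gamin.WatchMonitor(), the example path, and the callback signature are assumptions about python-gamin.

    # Standalone Gamin sketch (assumes python-gamin and gamin.WatchMonitor();
    # the callback takes *args to stay agnostic about the binding's argument
    # order).  It mirrors the poll-and-dispatch loop in FilterGamin.run().
    import time
    import gamin

    WATCHED = "/var/log/auth.log"    # example path, purely illustrative


    def callback(*args):
        # just show whatever the binding delivers for the watched file
        print("gamin event: %r" % (args,))


    monitor = gamin.WatchMonitor()
    monitor.watch_file(WATCHED, callback)
    try:
        while True:
            # collect pending events and dispatch them to callback()
            if monitor.event_pending():
                monitor.handle_events()
            time.sleep(1)
    finally:
        monitor.stop_watch(WATCHED)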

server/filterpoll.py

@@ -17,14 +17,13 @@
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
# $Revision$
# Author: Cyril Jaquier, Yaroslav Halchenko
#
__author__ = "Cyril Jaquier"
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier; 2012 Yaroslav Halchenko"
__license__ = "GPL"
from failmanager import FailManagerEmpty
@@ -50,7 +49,7 @@ class FilterPoll(FileFilter):
#
# Initialize the filter object with default values.
# @param jail the jail object
def __init__(self, jail):
FileFilter.__init__(self, jail)
self.__modified = False
@@ -64,29 +63,19 @@ class FilterPoll(FileFilter):
#
# @param path log file path
def addLogPath(self, path, tail = False):
if self.containsLogPath(path):
logSys.error(path + " already exists")
else:
self.__lastModTime[path] = 0
self.__file404Cnt[path] = 0
FileFilter.addLogPath(self, path, tail)
logSys.info("Added logfile = %s" % path)
def _addLogPath(self, path):
self.__lastModTime[path] = 0
self.__file404Cnt[path] = 0
##
# Delete a log path
#
# @param path the log file to delete
def delLogPath(self, path):
if not self.containsLogPath(path):
logSys.error(path + " is not monitored")
else:
del self.__lastModTime[path]
del self.__file404Cnt[path]
FileFilter.delLogPath(self, path)
logSys.info("Removed logfile = %s" % path)
def _delLogPath(self, path):
del self.__lastModTime[path]
del self.__file404Cnt[path]
##
# Main loop.
#
@@ -100,8 +89,9 @@ class FilterPoll(FileFilter):
if not self.getIdle():
# Get file modification
for container in self.getLogPath():
if self.isModified(container.getFileName()):
self.getFailures(container.getFileName())
filename = container.getFileName()
if self.isModified(filename):
self.getFailures(filename)
self.__modified = True
if self.__modified:
@@ -116,7 +106,8 @@ class FilterPoll(FileFilter):
time.sleep(self.getSleepTime())
else:
time.sleep(self.getSleepTime())
logSys.debug(self.jail.getName() + ": filter terminated")
logSys.debug((self.jail and self.jail.getName() or "jailless") +
" filter terminated")
return True
##
@@ -124,7 +115,7 @@
#
# Checks if the log file has been modified using os.stat().
# @return True if log file has been modified
def isModified(self, filename):
try:
logStats = os.stat(filename)
@@ -135,11 +126,15 @@
logSys.debug(filename + " has been modified")
self.__lastModTime[filename] = logStats.st_mtime
return True
except OSError:
logSys.error("Unable to get stat on " + filename)
self.__file404Cnt[filename] = self.__file404Cnt[filename] + 1
except OSError, e:
logSys.error("Unable to get stat on %s because of: %s"
% (filename, e))
self.__file404Cnt[filename] += 1
if self.__file404Cnt[filename] > 2:
logSys.warn("Too much read error. Set the jail idle")
self.jail.setIdle(True)
logSys.warn("Too many errors. Setting the jail idle")
if self.jail:
self.jail.setIdle(True)
else:
logSys.warn("No jail is assigned to %s" % self)
self.__file404Cnt[filename] = 0
return False
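
Stripped of the jail plumbing, the change detection that FilterPoll.isModified() performs above is just an os.stat() mtime comparison. A stdlib-only illustration (hypothetical helper, not project code):

    # Stdlib-only illustration of mtime-based polling as in FilterPoll.isModified().
    import os

    _last_mtime = {}

    def is_modified(filename):
        try:
            mtime = os.stat(filename).st_mtime
        except OSError, e:            # file vanished or is unreadable
            print("unable to stat %s: %s" % (filename, e))
            return False
        if _last_mtime.get(filename, 0) != mtime:
            # on Python 2.4 st_mtime is an int, which is why the test helpers
            # sleep a full second before expecting a change to be noticed
            _last_mtime[filename] = mtime
            return True
        return False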

server/filterpyinotify.py

@@ -29,6 +29,8 @@ from mytime import MyTime
import time, logging, pyinotify
from os.path import dirname, sep as pathsep
# Gets the instance of the logger.
logSys = logging.getLogger("fail2ban.filter")
@@ -55,7 +57,29 @@ class FilterPyinotify(FileFilter):
logSys.debug("Created FilterPyinotify")
def callback(self, path):
def callback(self, event):
path = event.pathname
if event.mask == pyinotify.IN_CREATE:
# check if that is a file we care about
if not path in self.__watches:
logSys.debug("Ignoring creation of %s we do not monitor" % path)
return
else:
# we need to substitute the watcher with a new one, so first
# remove old one
self._delFileWatcher(path)
# place a new one
self._addFileWatcher(path)
self._process_file(path)
def _process_file(self, path):
"""Process a given file
TODO -- RF:
this is a common logic and must be shared/provided by FileFilter
"""
self.getFailures(path)
try:
while True:
@@ -66,77 +90,93 @@ class FilterPyinotify(FileFilter):
self.dateDetector.sortTemplate()
self.__modified = False
def _addFileWatcher(self, path):
wd = self.__monitor.add_watch(path, pyinotify.IN_MODIFY)
self.__watches.update(wd)
logSys.debug("Added file watcher for %s" % path)
# process the file since we did get even
self._process_file(path)
def _delFileWatcher(self, path):
wdInt = self.__watches[path]
wd = self.__monitor.rm_watch(wdInt)
if wd[wdInt]:
del self.__watches[path]
logSys.debug("Removed file watcher for %s" % path)
return True
else:
return False
##
# Add a log file path
#
# @param path log file path
def addLogPath(self, path, tail=False):
if self.containsLogPath(path):
logSys.error(path + " already exists")
else:
wd = self.__monitor.add_watch(path, pyinotify.IN_MODIFY)
self.__watches.update(wd)
FileFilter.addLogPath(self, path, tail)
logSys.info("Added logfile = %s" % path)
def _addLogPath(self, path):
path_dir = dirname(path)
if not (path_dir in self.__watches):
# we need to watch also the directory for IN_CREATE
self.__watches.update(
self.__monitor.add_watch(path_dir, pyinotify.IN_CREATE))
logSys.debug("Added monitor for the parent directory %s" % path_dir)
##
self._addFileWatcher(path)
##
# Delete a log path
#
# @param path the log file to delete
def delLogPath(self, path):
if not self.containsLogPath(path):
logSys.error(path + " is not monitored")
else:
wdInt = self.__watches[path]
wd = self.__monitor.rm_watch(wdInt)
if wd[wdInt]:
del self.__watches[path]
FileFilter.delLogPath(self, path)
logSys.info("Removed logfile = %s" % path)
else:
logSys.error("Failed to remove watch on path: %s", path)
def _delLogPath(self, path):
if not self._delFileWatcher(path):
logSys.error("Failed to remove watch on path: %s", path)
path_dir = dirname(path)
if not len([k for k in self.__watches
if k.startswith(path_dir + pathsep)]):
# Remove watches for the directory
# since there is no other monitored file under this directory
wdInt = self.__watches.pop(path_dir)
_ = self.__monitor.rm_watch(wdInt)
logSys.debug("Removed monitor for the parent directory %s" % path_dir)
##
# Main loop.
#
# This function is the main loop of the thread. It checks if the
# file has been modified and looks for failures.
# @return True when the thread exits nicely
# Since all detection is offloaded to pyinotifier -- no manual
# loop is necessary
def run(self):
self.setActive(True)
self.__notifier = pyinotify.ThreadedNotifier(self.__monitor,
ProcessPyinotify(self))
self.__notifier.start()
while self._isActive():
if not self.getIdle():
self.__notifier.process_events()
if self.__notifier.check_events():
self.__notifier.read_events()
else:
time.sleep(self.getSleepTime())
# Cleanup pyinotify
self.__cleanup()
logSys.debug(self.jail.getName() + ": filter terminated")
logSys.debug("pyinotifier started for %s." % self.jail.getName())
# TODO: verify that there is nothing really to be done for
# idle jails
return True
##
# Call super.stop() and then stop the 'Notifier'
def stop(self):
# Call super to set __isRunning
super(FilterPyinotify, self).stop()
# Now stop the Notifier, otherwise we're deadlocked
# Stop the notifier thread
self.__notifier.stop()
self.__notifier.join() # to not exit before notifier does
self.__cleanup() # for pedantic ones
##
# Deallocates the resources used by pyinotify.
def __cleanup(self):
del self.__notifier
del self.__monitor
self.__notifier = None
self.__monitor = None
class ProcessPyinotify(pyinotify.ProcessEvent):
@@ -152,4 +192,4 @@ class ProcessPyinotify(pyinotify.ProcessEvent):
# just need default, since using mask on watch to limit events
def process_default(self, event):
logSys.debug("Callback for Event: %s" % event)
self.__FileFilter.callback(event.pathname)
self.__FileFilter.callback(event)
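
The rotation fix above hinges on watching the parent directory for IN_CREATE in addition to each file for IN_MODIFY, re-arming the file watch when the log reappears. A self-contained pyinotify sketch of that pattern; the Handler class, example path and demo sleep are assumptions, while WatchManager, ThreadedNotifier, ProcessEvent, add_watch and the IN_* masks are the same pyinotify API used in the diff.

    # Standalone sketch of the "directory IN_CREATE + file IN_MODIFY" pattern
    # used by FilterPyinotify (hypothetical handler and example path).
    import time
    from os.path import dirname

    import pyinotify

    LOGFILE = "/var/log/auth.log"    # example path


    class Handler(pyinotify.ProcessEvent):
        def process_default(self, event):
            if event.mask == pyinotify.IN_CREATE:
                if event.pathname != LOGFILE:
                    return    # a file we do not monitor was created
                # the log was recreated (e.g. rotated and reopened):
                # re-arm its watch -- the real filter also drops the stale
                # watch on the old inode first
                wm.add_watch(LOGFILE, pyinotify.IN_MODIFY)
            print("would process %s" % event.pathname)


    wm = pyinotify.WatchManager()
    notifier = pyinotify.ThreadedNotifier(wm, Handler())
    notifier.start()
    wm.add_watch(dirname(LOGFILE), pyinotify.IN_CREATE)    # parent directory
    wm.add_watch(LOGFILE, pyinotify.IN_MODIFY)             # the file itself
    try:
        time.sleep(60)    # let events flow for a minute in this demo
    finally:
        notifier.stop()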

testcases/filtertestcase.py

@@ -24,6 +24,7 @@ __license__ = "GPL"
import unittest
import os
import sys
import time
import tempfile
@@ -46,25 +47,43 @@ def _killfile(f, name):
except:
pass
def _assert_equal_entries(utest, found, output):
def _sleep_4_poll():
"""PollFilter relies on file timestamps - so we might need to
sleep to guarantee that they differ
"""
if sys.version_info[:2] <= (2,4):
# on old Python st_mtime is int, so we should give
# at least 1 sec so polling filter could detect
# the change
time.sleep(1.)
else:
time.sleep(0.1)
def _assert_equal_entries(utest, found, output, count=None):
"""Little helper to unify comparisons with the target entries
and report helpful failure reports instead of millions of seconds ;)
"""
utest.assertEqual(found[:2], output[:2])
utest.assertEqual(found[0], output[0]) # IP
utest.assertEqual(found[1], count or output[1]) # count
found_time, output_time = \
time.localtime(found[2]),\
time.localtime(output[2])
utest.assertEqual(found_time, output_time)
if len(output) > 3: # match matches
if len(output) > 3 and count is None: # match matches
# do not check if custom count (e.g. going through them twice)
utest.assertEqual(repr(found[3]), repr(output[3]))
def _assert_correct_last_attempt(utest, filter_, output):
def _assert_correct_last_attempt(utest, filter_, output, count=None):
"""Additional helper to wrap most common test case
Test filter to contain target ticket
"""
ticket = filter_.failManager.toBan()
if isinstance(filter_, DummyJail):
ticket = filter_.getFailTicket()
else:
# when we are testing without jails
ticket = filter_.failManager.toBan()
attempts = ticket.getAttempt()
date = ticket.getTime()
@@ -72,13 +91,17 @@ def _assert_correct_last_attempt(utest, filter_, output):
matches = ticket.getMatches()
found = (ip, attempts, date, matches)
_assert_equal_entries(utest, found, output)
_assert_equal_entries(utest, found, output, count)
def _copy_lines_between_files(fin, fout, n=None, skip=0, mode='a', terminal_line=""):
"""Copy lines from one file to another (which might be already open)
Returns open fout
"""
if sys.version_info[:2] <= (2,4):
# on old Python st_mtime is int, so we should give at least 1 sec so
# polling filter could detect the change
time.sleep(1)
if isinstance(fin, str):
fin = open(fin, 'r')
if isinstance(fout, str):
@@ -153,9 +176,151 @@ class LogFile(unittest.TestCase):
self.assertTrue(self.filter.isModified(LogFile.FILENAME))
class LogFileMonitor(unittest.TestCase):
"""Few more tests for FilterPoll API
"""
def setUp(self):
"""Call before every test case."""
self.filter = self.name = 'NA'
_, self.name = tempfile.mkstemp('fail2ban', 'monitorfailures')
self.file = open(self.name, 'a')
self.filter = FilterPoll(None)
self.filter.addLogPath(self.name)
self.filter.setActive(True)
self.filter.addFailRegex("(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>")
def tearDown(self):
_killfile(self.file, self.name)
pass
def isModified(self, delay=2.):
"""Wait up to `delay` sec to assure that it was modified or not
"""
time0 = time.time()
while time.time() < time0 + delay:
if self.filter.isModified(self.name):
return True
time.sleep(0.1)
return False
def notModified(self):
# shorter wait time for not modified status
return not self.isModified(0.4)
def testNewChangeViaIsModified(self):
# it is a brand new one -- so first we think it is modified
self.assertTrue(self.isModified())
# but not any longer
self.assertTrue(self.notModified())
self.assertTrue(self.notModified())
_sleep_4_poll() # to guarantee freshier mtime
for i in range(4): # few changes
# unless we write into it
self.file.write("line%d\n" % i)
self.file.flush()
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
_sleep_4_poll() # to guarantee freshier mtime
os.rename(self.name, self.name + '.old')
# we are not signaling as modified whenever
# it gets away
self.assertTrue(self.notModified())
f = open(self.name, 'a')
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
_sleep_4_poll()
f.write("line%d\n" % i)
f.flush()
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
_killfile(f, self.name)
_killfile(self.name, self.name + '.old')
pass
def testNewChangeViaGetFailures_simple(self):
# suck in lines from this sample log file
self.filter.getFailures(self.name)
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
# Now let's feed it with entries from the file
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=5)
self.filter.getFailures(self.name)
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
# and it should have not been enough
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=5)
self.filter.getFailures(self.name)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
def testNewChangeViaGetFailures_rewrite(self):
#
# if we rewrite the file at once
self.file.close()
_copy_lines_between_files(GetFailures.FILENAME_01, self.name)
self.filter.getFailures(self.name)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
# What if file gets overridden
# yoh: skip so we skip those 2 identical lines which our
# filter "marked" as the known beginning, otherwise it
# would not detect "rotation"
self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name,
skip=3, mode='w')
self.filter.getFailures(self.name)
#self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
def testNewChangeViaGetFailures_move(self):
#
# if we move file into a new location while it has been open already
self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name,
n=14, mode='w')
self.filter.getFailures(self.name)
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
self.assertEqual(self.filter.failManager.getFailTotal(), 2)
# move aside, but leaving the handle still open...
os.rename(self.name, self.name + '.bak')
_copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=14)
self.filter.getFailures(self.name)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
self.assertEqual(self.filter.failManager.getFailTotal(), 3)
from threading import Lock
class DummyJail(object):
"""A simple 'jail' to suck in all the tickets generated by Filter's
"""
def __init__(self):
self.lock = Lock()
self.queue = []
def __len__(self):
try:
self.lock.acquire()
return len(self.queue)
finally:
self.lock.release()
def putFailTicket(self, ticket):
try:
self.lock.acquire()
self.queue.append(ticket)
finally:
self.lock.release()
def getFailTicket(self):
try:
self.lock.acquire()
return self.queue.pop()
finally:
self.lock.release()
def getName(self):
return "DummyJail #%s with %d tickets" % (id(self), len(self))
def get_monitor_failures_testcase(Filter_):
"""Generator of TestCase's for different filters
"""Generator of TestCase's for different filters/backends
"""
class MonitorFailures(unittest.TestCase):
@@ -164,13 +329,26 @@ def get_monitor_failures_testcase(Filter_):
self.filter = self.name = 'NA'
_, self.name = tempfile.mkstemp('fail2ban', 'monitorfailures')
self.file = open(self.name, 'a')
self.filter = Filter_(None)
self.jail = DummyJail()
self.filter = Filter_(self.jail)
self.filter.addLogPath(self.name)
self.filter.setActive(True)
self.filter.addFailRegex("(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>")
self.filter.start()
# If filter is polling it would sleep a bit to guarantee that
# we have initial time-stamp difference to trigger "actions"
self._sleep_4_poll()
#print "D: started filter %s" % self.filter
def tearDown(self):
#print "D: SLEEPING A BIT"
#import time; time.sleep(5)
#print "D: TEARING DOWN"
self.filter.stop()
#print "D: WAITING FOR FILTER TO STOP"
self.filter.join() # wait for the thread to terminate
#print "D: KILLING THE FILE"
_killfile(self.file, self.name)
pass
@@ -178,100 +356,138 @@ def get_monitor_failures_testcase(Filter_):
return "MonitorFailures%s(%s)" \
% (Filter_, hasattr(self, 'name') and self.name or 'tempfile')
def isModified(self, delay=2.):
def isFilled(self, delay=2.):
"""Wait up to `delay` sec to assure that it was modified or not
"""
time0 = time.time()
while time.time() < time0 + delay:
if self.filter.isModified(self.name):
if len(self.jail):
return True
time.sleep(0.1)
return False
def notModified(self):
def _sleep_4_poll(self):
# Since FilterPoll relies on time stamps and some
# actions might be happening too fast in the tests,
# sleep a bit to guarantee reliable time stamps
if isinstance(self.filter, FilterPoll):
_sleep_4_poll()
def isEmpty(self, delay=0.4):
# shorter wait time for not modified status
return not self.isModified(0.4)
return not self.isFilled(delay)
def _testNewChangeViaIsModified(self):
if not hasattr(self.filter, 'isModified'):
raise unittest.SkipTest(
"%s does not have isModified (present only in poll atm"
% (self.filter,))
# it is a brand new one -- so first we think it is modified
self.assertTrue(self.isModified())
# but not any longer
self.assertTrue(self.notModified())
self.assertTrue(self.notModified())
for i in range(4): # few changes
# unless we write into it
self.file.write("line%d\n" % i)
self.file.flush()
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
os.rename(self.name, self.name + '.old')
# we are not signaling as modified whenever
# it gets away
self.assertTrue(self.notModified())
f = open(self.name, 'a')
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
f.write("line%d\n" % i)
f.flush()
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
_killfile(f, self.name)
_killfile(self.name, self.name + '.old')
pass
def assert_correct_last_attempt(self, failures, count=None):
self.assertTrue(self.isFilled(10)) # give Filter a chance to react
_assert_correct_last_attempt(self, self.jail, failures, count=count)
def testNewChangeViaGetFailures_simple(self):
def test_grow_file(self):
# suck in lines from this sample log file
self.filter.getFailures(self.name)
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
# Now let's feed it with entries from the file
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=5)
self.filter.getFailures(self.name)
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
# and it should have not been enough
# and our dummy jail is empty as well
self.assertFalse(len(self.jail))
# since it should have not been enough
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=5)
self.filter.getFailures(self.name)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
self.assertTrue(self.isFilled(6))
# so we sleep for up to 2 sec for it not to become empty,
# and meanwhile pass to other thread(s) and filter should
# have gathered new failures and passed them into the
# DummyJail
self.assertEqual(len(self.jail), 1)
# and there should be no "stuck" ticket in failManager
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
self.assertEqual(len(self.jail), 0)
def testNewChangeViaGetFailures_rewrite(self):
#
#return
# just for fun let's copy all of them again and see if that results
# in a new ban
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
def test_rewrite_file(self):
# if we rewrite the file at once
self.file.close()
_copy_lines_between_files(GetFailures.FILENAME_01, self.name)
self.filter.getFailures(self.name)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
# What if file gets overriden
# What if file gets overridden
# yoh: skip so we skip those 2 identical lines which our
# filter "marked" as the known beginning, otherwise it
# would not detect "rotation"
self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name,
skip=3, mode='w')
self.filter.getFailures(self.name)
#self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
def testNewChangeViaGetFailures_move(self):
#
def test_move_file(self):
# if we move file into a new location while it has been open already
self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name,
n=14, mode='w')
self.filter.getFailures(self.name)
self.assertTrue(self.isEmpty(2))
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
self.assertEqual(self.filter.failManager.getFailTotal(), 2)
self.assertEqual(self.filter.failManager.getFailTotal(), 2) # Fails with Poll from time to time
# move aside, but leaving the handle still open...
os.rename(self.name, self.name + '.bak')
_copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=14)
self.filter.getFailures(self.name)
_assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
self.assertEqual(self.filter.failManager.getFailTotal(), 3)
# now remove the moved file
_killfile(None, self.name + '.bak')
_copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
self.assertEqual(self.filter.failManager.getFailTotal(), 6)
def test_new_bogus_file(self):
# to make sure that watching whole directory does not effect
_copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
# create a bogus file in the same directory and see if that doesn't affect
open(self.name + '.bak2', 'w').write('')
_copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
self.assertEqual(self.filter.failManager.getFailTotal(), 6)
_killfile(None, self.name + '.bak2')
def test_delLogPath(self):
# Smoke test for removing of the path from being watched
# basic full test
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100)
self.assert_correct_last_attempt(GetFailures.FAILURES_01)
# and now remove the LogPath
self.filter.delLogPath(self.name)
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100)
# so we should get no more failures detected
self.assertTrue(self.isEmpty(2))
# but then if we add it back again
self.filter.addLogPath(self.name)
# Tricky catch here is that it should get them from the
# tail written before, so let's not copy anything yet
#_copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100)
# we should detect the failures
self.assert_correct_last_attempt(GetFailures.FAILURES_01, count=6) # was needed if we write twice above
# now copy and get even more
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100)
# yoh: not sure why count here is not 9... TODO
self.assert_correct_last_attempt(GetFailures.FAILURES_01)#, count=9)
return MonitorFailures
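
For completeness, this is how the generated MonitorFailures case is consumed: one suite per backend that can actually be imported. The loop mirrors the fail2ban-testcases hunk above; the FilterPyinotify try/except and the import paths are assumptions, since that part of the runner is not shown in this diff.

    # Usage sketch mirroring fail2ban-testcases: build one MonitorFailures
    # suite per backend that is importable on this system and run them.
    import unittest

    from testcases import filtertestcase        # import paths assumed
    from server.filterpoll import FilterPoll

    filters = [FilterPoll]    # always available
    try:
        from server.filtergamin import FilterGamin
        filters.append(FilterGamin)
    except Exception:         # python-gamin not installed
        pass
    try:
        from server.filterpyinotify import FilterPyinotify
        filters.append(FilterPyinotify)
    except Exception:         # pyinotify not installed
        pass

    tests = unittest.TestSuite()
    for Filter_ in filters:
        tests.addTest(unittest.makeSuite(
            filtertestcase.get_monitor_failures_testcase(Filter_)))
    unittest.TextTestRunner(verbosity=2).run(tests)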