- filtering of test cases extended to match methods in addition to classes (a regexp matching a method name now selects only that method, not the whole class);

- new options introduced for "fail2ban-testcases": "-g" or "--no-gamin" and "-m" or "--memory-db"; both are implied if "-f" or "--fast" is specified,
  for example: `fail2ban-testcases -ngm` runs faster (because no network, no gamin, memory database)
  but uses the same default sleep intervals as in production (in contrast to -nf);
- seekToTime rewritten: accuracy increased, extended to all FileFilter backends (PyInotify and Gamin as well), test cases extended, etc.; a simplified sketch of the half-interval seek follows this list;
- general performance optimized and code reviewed;
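
A minimal standalone sketch of that half-interval (binary) seek, for illustration only: it assumes every line starts with a plain, ascending epoch timestamp and the file is opened in binary mode, whereas the real seekToTime below parses arbitrary date formats via the DateDetector (the function name `seek_to_time` and its layout are mine, not fail2ban's):

```python
import io

def seek_to_time(f, target):
    """Position f at the first line whose leading epoch time is >= target."""
    f.seek(0, io.SEEK_END)
    lo, hi = 0, f.tell()
    found = hi                        # fall back to EOF if every line is older
    while lo < hi:
        mid = (lo + hi) // 2
        f.seek(max(0, mid - 1))       # step back one byte, then skip to the
        if mid:                       # next line start (a mid that already
            f.readline()              # sits on a line start stays put)
        pos = f.tell()
        raw = f.readline()
        if not raw:                   # ran past EOF: the answer is to the left
            hi = mid
            continue
        if float(raw.split(None, 1)[0].decode("ascii")) >= target:
            found = min(found, pos)   # candidate line; keep the leftmost one
            hi = mid
        else:
            lo = f.tell()             # line is older, so is everything before it
    f.seek(found)
    return found
```

The new `accuracy` parameter of the rewritten seekToTime below plays a guarding role: it roughly bounds how many non-moving iterations are tolerated before the search settles on the smallest position found.
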
pull/1346/head
sebres 2015-07-17 21:05:50 +02:00
parent 6faffe3201
commit cf3cf27fa3
8 changed files with 301 additions and 143 deletions


@ -58,6 +58,12 @@ def get_opt_parser():
Option('-n', "--no-network", action="store_true",
dest="no_network",
help="Do not run tests that require the network"),
Option('-g', "--no-gamin", action="store_true",
dest="no_gamin",
help="Do not run tests that require the gamin"),
Option('-m', "--memory-db", action="store_true",
dest="memory_db",
help="Run database tests using memory instead of file"),
Option('-f', "--fast", action="store_true",
dest="fast",
help="Try to increase speed of the tests, decreasing of wait intervals, memory database"),


@ -560,6 +560,7 @@ class FileFilter(Filter):
Filter.__init__(self, jail, **kwargs)
## The log file path.
self.__logs = dict()
self.__autoSeek = dict()
self.setLogEncoding("auto")
##
@ -567,7 +568,7 @@ class FileFilter(Filter):
#
# @param path log file path
def addLogPath(self, path, tail=False):
def addLogPath(self, path, tail=False, autoSeek=True):
if path in self.__logs:
logSys.error(path + " already exists")
else:
@ -579,6 +580,11 @@ class FileFilter(Filter):
log.setPos(lastpos)
self.__logs[path] = log
logSys.info("Added logfile = %s (pos = %s, hash = %s)" , path, log.getPos(), log.getHash())
if autoSeek:
# if default, seek to "current time" - "find time":
if isinstance(autoSeek, bool):
autoSeek = MyTime.time() - self.getFindTime()
self.__autoSeek[path] = autoSeek
self._addLogPath(path) # backend specific
def _addLogPath(self, path):
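
Illustrative calls for the new parameter (a sketch: `flt` stands for an assumed, already configured FileFilter instance, and the paths are placeholders):

```python
flt.addLogPath("/var/log/auth.log")                  # default autoSeek=True: first
                                                     # getFailures() seeks to now - findTime
flt.addLogPath("/var/log/secure", autoSeek=False)    # no initial seek, read from start
flt.addLogPath("/var/log/messages", autoSeek=1417512352)  # seek to this epoch time instead
```
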
@ -673,7 +679,7 @@ class FileFilter(Filter):
# MyTime.time()-self.findTime. When a failure is detected, a FailTicket
# is created and is added to the FailManager.
def getFailures(self, filename, startTime=None):
def getFailures(self, filename):
log = self.getLog(filename)
if log is None:
logSys.error("Unable to get failures in " + filename)
@ -695,13 +701,17 @@ class FileFilter(Filter):
logSys.exception(e)
return False
# prevent reading big files completely the first time (after service start), initial seek to start time using the half-interval search algorithm:
if log.getPos() == 0 and startTime is not None:
# seek to find-time on first use only (prevents performance decline when polling big files)
if self.__autoSeek.get(filename):
startTime = self.__autoSeek[filename]
del self.__autoSeek[filename]
# prevent reading big files completely the first time (after service start),
# initial seek to the start time using the half-interval search algorithm:
try:
# startTime = MyTime.time() - self.getFindTime()
self.seekToTime(log, startTime)
except Exception, e: # pragma: no cover
logSys.error("Error during seek to start time in \"%s\"", filename)
raise
logSys.exception(e)
return False
@ -726,71 +736,88 @@ class FileFilter(Filter):
# Seeks to the line with the given date (using the half-interval search algorithm), to start polling from it
#
def seekToTime(self, container, date):
def seekToTime(self, container, date, accuracy=3):
fs = container.getFileSize()
if logSys.getEffectiveLevel() <= logging.DEBUG:
logSys.debug("Seek to find time %s (%s), file size %s", date,
datetime.datetime.fromtimestamp(date).strftime("%Y-%m-%d %H:%M:%S"), fs)
date -= 0.009
minp = 0
minp = container.getPos()
maxp = fs
lastpos = 0
lastFew = 0
lastTime = None
tryPos = minp
lastPos = -1
foundPos = 0
foundTime = None
cntr = 0
unixTime = None
lasti = 0
movecntr = 1
movecntr = accuracy
while maxp > minp:
i = int(minp + (maxp - minp) / 2)
pos = container.seek(i)
if tryPos is None:
pos = int(minp + (maxp - minp) / 2)
else:
pos, tryPos = tryPos, None
# because container seek will go to start of next line (minus CRLF):
pos = max(0, pos-2)
seekpos = pos = container.seek(pos)
cntr += 1
# within the next 5 lines, try to find any valid datetime:
lncntr = 5;
dateTimeMatch = None
llen = 0
if lastpos == pos:
i = pos
nextp = None
while True:
line = container.readline()
if not line:
break
llen += len(line)
l = line.rstrip('\r\n')
(timeMatch, template) = self.dateDetector.matchTime(l)
(timeMatch, template) = self.dateDetector.matchTime(line)
if timeMatch:
dateTimeMatch = self.dateDetector.getTime2(l[timeMatch.start():timeMatch.end()], (timeMatch, template))
dateTimeMatch = self.dateDetector.getTime2(line[timeMatch.start():timeMatch.end()], (timeMatch, template))
else:
nextp = container.tell()
if nextp > maxp:
pos = seekpos
break
pos = nextp
if not dateTimeMatch and lncntr:
lncntr -= 1
continue
break
# not found at this step - stop searching
if dateTimeMatch:
unixTime = dateTimeMatch[0]
if unixTime >= date:
if foundTime is None or unixTime <= foundTime:
foundPos = pos
foundTime = unixTime
if pos == maxp:
pos = seekpos
if pos < maxp:
maxp = pos
else:
if foundTime is None or unixTime >= foundTime:
foundPos = pos
foundTime = unixTime
if nextp is None:
nextp = container.tell()
pos = nextp
if pos > minp:
minp = pos
# if we can't move (position not changed)
if i + llen == lasti:
if pos == lastPos:
movecntr -= 1
if movecntr <= 0:
break
lasti = i + llen;
# not found at this step - stop searching
if not dateTimeMatch:
# we have found a large area without any date matched
# or the end of search - try the min position (because it can be the end of the previous line):
if minp != lastPos:
lastPos = tryPos = minp
continue
break
unixTime = dateTimeMatch[0]
if unixTime >= date:
maxp = i
else:
minp = i + llen
lastFew = pos;
lastTime = unixTime
lastpos = pos
# if the found position has a time greater than the given one - use the smallest time we have found
if unixTime is None or unixTime > date:
unixTime = lastTime
lastpos = container.seek(lastFew, False)
else:
lastpos = container.seek(lastpos, False)
container.setPos(lastpos)
lastPos = pos
# always use the smallest pos that could be found:
foundPos = container.seek(minp, False)
container.setPos(foundPos)
if logSys.getEffectiveLevel() <= logging.DEBUG:
logSys.debug("Position %s from %s, found time %s (%s) within %s seeks", lastpos, fs, unixTime,
(datetime.datetime.fromtimestamp(unixTime).strftime("%Y-%m-%d %H:%M:%S") if unixTime is not None else ''), cntr)
logSys.debug("Position %s from %s, found time %s (%s) within %s seeks", lastPos, fs, foundTime,
(datetime.datetime.fromtimestamp(foundTime).strftime("%Y-%m-%d %H:%M:%S") if foundTime is not None else ''), cntr)
def status(self, flavor="basic"):
"""Status of Filter plus files being monitored.
@ -898,11 +925,15 @@ class FileContainer:
# seek to given position
h.seek(offs, 0)
# goto end of next line
if endLine:
if offs and endLine:
h.readline()
# get current real position
return h.tell()
def tell(self):
# get current real position
return self.__handler.tell()
@staticmethod
def decode_line(filename, enc, line):
try:


@ -58,7 +58,6 @@ class FilterPoll(FileFilter):
## The time of the last modification of the file.
self.__prevStats = dict()
self.__file404Cnt = dict()
self.__initial = dict()
logSys.debug("Created FilterPoll")
##
@ -109,11 +108,7 @@ class FilterPoll(FileFilter):
modlst = []
Utils.wait_for(lambda: self.getModified(modlst), self.sleeptime)
for filename in modlst:
# set start time to now - findTime, for first use only (prevents a performance bug when polling big files)
self.getFailures(filename,
(MyTime.time() - self.getFindTime()) if not self.__initial.get(filename) else None
)
self.__initial[filename] = True
self.getFailures(filename)
self.__modified = True
if self.__modified:


@ -176,6 +176,9 @@ class FailTicket(Ticket):
self.__retry = self._data['failures'];
def setRetry(self, value):
""" Set artificial retry count, normally equal failures / attempt,
used in incremental features (BanTimeIncr) to increase retry count for bad IPs
"""
self.__retry = value
if not self._data['failures']:
self._data['failures'] = 1
@ -184,6 +187,9 @@ class FailTicket(Ticket):
self._data['matches'] = []
def getRetry(self):
""" Returns failures / attempt count or
artificial retry count increased for bad IPs
"""
return max(self.__retry, self._data['failures'])
def inc(self, matches=None, attempt=1, count=1):
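
A small behavioral sketch of the retry accounting (the constructor arguments and the effect of inc() are assumed here; setRetry/getRetry follow the code above):

```python
t = FailTicket("192.0.2.1", 1417512352)  # hypothetical constructor arguments
t.inc(attempt=2)    # assumed to record two real failures
t.setRetry(5)       # e.g. BanTimeIncr raises the artificial retry count
assert t.getRetry() == 5   # max(retry, failures): the artificial count wins
```
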


@ -36,18 +36,20 @@ from ..server.actions import Actions
from .dummyjail import DummyJail
try:
from ..server.database import Fail2BanDb as Fail2BanDb
# for test performance, use memory instead of a file:
def TestFail2BanDb(filename):
if unittest.F2B.fast:
return Fail2BanDb(':memory:')
return Fail2BanDb(filename)
except ImportError:
except ImportError: # pragma: no cover
Fail2BanDb = None
from .utils import LogCaptureTestCase
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files")
# for test performance, use memory instead of a file:
def getFail2BanDb(filename):
if unittest.F2B.memory_db: # pragma: no cover
return Fail2BanDb(':memory:')
return Fail2BanDb(filename)
class DatabaseTest(LogCaptureTestCase):
def setUp(self):
@ -59,8 +61,10 @@ class DatabaseTest(LogCaptureTestCase):
"available.")
elif Fail2BanDb is None:
return
self.dbFilename = None
if not unittest.F2B.memory_db:
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
self.db = TestFail2BanDb(self.dbFilename)
self.db = getFail2BanDb(self.dbFilename)
def tearDown(self):
"""Call after every test case."""
@ -68,6 +72,7 @@ class DatabaseTest(LogCaptureTestCase):
if Fail2BanDb is None: # pragma: no cover
return
# Cleanup
if self.dbFilename is not None:
os.remove(self.dbFilename)
def testGetFilename(self):
@ -106,6 +111,9 @@ class DatabaseTest(LogCaptureTestCase):
def testUpdateDb(self):
if Fail2BanDb is None: # pragma: no cover
return
self.db = None
if self.dbFilename is None: # pragma: no cover
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
shutil.copyfile(
os.path.join(TEST_FILES_DIR, 'database_v1.db'), self.dbFilename)
self.db = Fail2BanDb(self.dbFilename)
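
The speed-up comes from SQLite's `:memory:` mode, which keeps the whole database in RAM and discards it on close. A generic sketch (the `bans` table is purely illustrative, not fail2ban's schema):

```python
import sqlite3

db = sqlite3.connect(":memory:")   # private, RAM-only database
db.execute("CREATE TABLE bans (ip TEXT, time INTEGER)")
db.execute("INSERT INTO bans VALUES (?, ?)", ("192.0.2.1", 1417512352))
assert db.execute("SELECT COUNT(*) FROM bans").fetchone()[0] == 1
db.close()                         # everything is discarded here
```
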


@ -81,6 +81,16 @@ def _killfile(f, name):
_killfile(None, name + '.bak')
def _maxWaitTime(wtime):
if unittest.F2B.fast:
wtime /= 10
return wtime
def _tm(time):
return datetime.datetime.fromtimestamp(time).strftime("%Y-%m-%d %H:%M:%S")
def _assert_equal_entries(utest, found, output, count=None):
"""Little helper to unify comparisons with the target entries
@ -122,9 +132,15 @@ def _assert_correct_last_attempt(utest, filter_, output, count=None):
Test filter to contain target ticket
"""
if isinstance(filter_, DummyJail):
# get fail ticket from jail
found = _ticket_tuple(filter_.getFailTicket())
else:
# when we are testing without jails
# wait for failures (up to max time)
Utils.wait_for(
lambda: filter_.failManager.getFailTotal() >= (count if count else output[1]),
_maxWaitTime(10))
# get fail ticket from filter
found = _ticket_tuple(filter_.failManager.toBan())
_assert_equal_entries(utest, found, output, count)
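
The waiting above relies on conditional polling instead of fixed sleeps. A minimal sketch of that pattern (not fail2ban's actual Utils.wait_for, whose signature may differ):

```python
import time

def wait_for(cond, timeout, interval=0.01):
    """Poll cond() until it returns a truthy value or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while True:
        res = cond()
        if res or time.time() > deadline:
            return res
        time.sleep(interval)
```
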
@ -352,58 +368,131 @@ class LogFileFilterPoll(unittest.TestCase):
self.assertTrue(self.filter.isModified(LogFileFilterPoll.FILENAME))
self.assertFalse(self.filter.isModified(LogFileFilterPoll.FILENAME))
def testSeekToTime(self):
def testSeekToTimeSmallFile(self):
fname = tempfile.mktemp(prefix='tmp_fail2ban', suffix='.log')
tm = lambda time: datetime.datetime.fromtimestamp(time).strftime("%Y-%m-%d %H:%M:%S")
time = 1417512352
f = open(fname, 'w')
fc = None
try:
fc = FileContainer(fname, self.filter.getLogEncoding())
fc.open()
fc.setPos(0); self.filter.seekToTime(fc, time)
try:
f.flush()
# empty :
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 0)
# one entry with exact time:
f.write("%s [sshd] error: PAM: failure len 1\n" % tm(time))
f.write("%s [sshd] error: PAM: failure len 1\n" % _tm(time))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
# one entry with smaller time:
# rewrite :
f.seek(0)
f.write("%s [sshd] error: PAM: failure len 1\n" % tm(time - 10))
f.truncate()
fc.close()
fc = FileContainer(fname, self.filter.getLogEncoding())
fc.open()
# no time - nothing should be found :
for i in xrange(10):
f.write("[sshd] error: PAM: failure len 1\n")
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 0)
f.write("%s [sshd] error: PAM: failure len 3 2 1\n" % tm(time - 9))
# rewrite
f.seek(0)
f.truncate()
fc.close()
fc = FileContainer(fname, self.filter.getLogEncoding())
fc.open()
# one entry with smaller time:
f.write("%s [sshd] error: PAM: failure len 2\n" % _tm(time - 10))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 0)
# add exact time between:
f.write("%s [sshd] error: PAM: failure\n" % tm(time - 1))
self.assertEqual(fc.getPos(), 53)
# two entries with smaller time:
f.write("%s [sshd] error: PAM: failure len 3 2 1\n" % _tm(time - 9))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 110)
# check move past the end (all times are smaller):
f.write("%s [sshd] error: PAM: failure\n" % _tm(time - 1))
f.flush()
self.assertEqual(fc.getFileSize(), 157)
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 157)
# still one exact line:
f.write("%s [sshd] error: PAM: Authentication failure\n" % tm(time))
f.write("%s [sshd] error: PAM: failure len 1\n" % tm(time))
f.write("%s [sshd] error: PAM: Authentication failure\n" % _tm(time))
f.write("%s [sshd] error: PAM: failure len 1\n" % _tm(time))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 110)
self.assertEqual(fc.getPos(), 157)
# add something hereafter:
f.write("%s [sshd] error: PAM: failure len 3 2 1\n" % tm(time + 2))
f.write("%s [sshd] error: PAM: Authentication failure\n" % tm(time + 3))
f.write("%s [sshd] error: PAM: failure len 3 2 1\n" % _tm(time + 2))
f.write("%s [sshd] error: PAM: Authentication failure\n" % _tm(time + 3))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 110)
self.assertEqual(fc.getPos(), 157)
# add something hereafter:
f.write("%s [sshd] error: PAM: failure\n" % tm(time + 9))
f.write("%s [sshd] error: PAM: failure len 3 2 1\n" % tm(time + 9))
f.write("%s [sshd] error: PAM: failure\n" % _tm(time + 9))
f.write("%s [sshd] error: PAM: failure len 4 3 2\n" % _tm(time + 9))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 110)
self.assertEqual(fc.getPos(), 157)
# start search from current pos :
fc.setPos(157); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 157)
# start search from current pos :
fc.setPos(110); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 157)
finally:
if fc:
fc.close()
_killfile(f, fname)
def testSeekToTimeLargeFile(self):
fname = tempfile.mktemp(prefix='tmp_fail2ban', suffix='.log')
time = 1417512352
f = open(fname, 'w')
fc = None
count = 1000 if unittest.F2B.fast else 10000
try:
fc = FileContainer(fname, self.filter.getLogEncoding())
fc.open()
f.seek(0)
# variable file length (ca. 45K or 450K before and hereafter):
# write lines with time smaller than the search time:
t = time - count - 1
for i in xrange(count):
f.write("%s [sshd] error: PAM: failure\n" % _tm(t))
t += 1
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 47*count)
# write lines with exact search time:
for i in xrange(10):
f.write("%s [sshd] error: PAM: failure\n" % _tm(time))
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 47*count)
fc.setPos(4*count); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 47*count)
# write lines with time greater than the search time:
t = time+1
for i in xrange(count//500):
for j in xrange(500):
f.write("%s [sshd] error: PAM: failure\n" % _tm(t))
t += 1
f.flush()
fc.setPos(0); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 47*count)
fc.setPos(53); self.filter.seekToTime(fc, time)
self.assertEqual(fc.getPos(), 47*count)
finally:
if fc:
fc.close()
_killfile(f, fname)
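
The expected offsets are plain arithmetic: each generated line is a 19-byte timestamp plus the 28-byte message " [sshd] error: PAM: failure\n", i.e. 47 bytes, so the `count` older lines end exactly at byte `47*count`:

```python
line = "2014-12-02 09:45:52 [sshd] error: PAM: failure\n"  # timestamp value is illustrative
assert len(line) == 47
```
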
@ -418,7 +507,7 @@ class LogFileMonitor(LogCaptureTestCase):
_, self.name = tempfile.mkstemp('fail2ban', 'monitorfailures')
self.file = open(self.name, 'a')
self.filter = FilterPoll(DummyJail())
self.filter.addLogPath(self.name)
self.filter.addLogPath(self.name, autoSeek=False)
self.filter.active = True
self.filter.addFailRegex("(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>")
@ -431,11 +520,12 @@ class LogFileMonitor(LogCaptureTestCase):
def isModified(self, delay=2.):
"""Wait up to `delay` sec to assure that it was modified or not
"""
return Utils.wait_for(lambda: self.filter.isModified(self.name), delay)
return Utils.wait_for(lambda: self.filter.isModified(self.name), _maxWaitTime(delay))
def notModified(self):
# shorter wait time for not modified status
return not self.isModified(4*Utils.DEFAULT_SLEEP_TIME)
def notModified(self, delay=2.):
"""Wait up to `delay` sec as long as it was not modified
"""
return Utils.wait_for(lambda: not self.filter.isModified(self.name), _maxWaitTime(delay))
def testUnaccessibleLogFile(self):
os.chmod(self.name, 0)
@ -478,7 +568,7 @@ class LogFileMonitor(LogCaptureTestCase):
os.rename(self.name, self.name + '.old')
# we are not signaling as modified whenever
# it gets away
self.assertTrue(self.notModified())
self.assertTrue(self.notModified(1))
f = open(self.name, 'a')
self.assertTrue(self.isModified())
self.assertTrue(self.notModified())
@ -562,7 +652,7 @@ def get_monitor_failures_testcase(Filter_):
self.file = open(self.name, 'a')
self.jail = DummyJail()
self.filter = Filter_(self.jail)
self.filter.addLogPath(self.name)
self.filter.addLogPath(self.name, autoSeek=False)
self.filter.active = True
self.filter.addFailRegex("(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>")
self.filter.start()
@ -587,21 +677,21 @@ def get_monitor_failures_testcase(Filter_):
def isFilled(self, delay=1.):
"""Wait up to `delay` sec to assure that it was modified or not
"""
return Utils.wait_for(self.jail.isFilled, delay)
return Utils.wait_for(self.jail.isFilled, _maxWaitTime(delay))
def _sleep_4_poll(self):
# Since FilterPoll relies on time stamps and some
# actions might be happening too fast in the tests,
# sleep a bit to guarantee reliable time stamps
if isinstance(self.filter, FilterPoll):
Utils.wait_for(self.filter.isAlive, 4*Utils.DEFAULT_SLEEP_TIME)
Utils.wait_for(self.filter.isAlive, _maxWaitTime(5))
def isEmpty(self, delay=4*Utils.DEFAULT_SLEEP_TIME):
def isEmpty(self, delay=_maxWaitTime(5)):
# shorter wait time for not modified status
return Utils.wait_for(self.jail.isEmpty, delay)
return Utils.wait_for(self.jail.isEmpty, _maxWaitTime(delay))
def assert_correct_last_attempt(self, failures, count=None):
self.assertTrue(self.isFilled(20)) # give Filter a chance to react
self.assertTrue(self.isFilled(10)) # give Filter a chance to react
_assert_correct_last_attempt(self, self.jail, failures, count=count)
def test_grow_file(self):
@ -616,7 +706,7 @@ def get_monitor_failures_testcase(Filter_):
# since it should have not been enough
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=5)
self.assertTrue(self.isFilled(6))
self.assertTrue(self.isFilled(10))
# so we sleep for up to 2 sec for it not to become empty,
# and meanwhile pass to other thread(s) and filter should
# have gathered new failures and passed them into the
@ -653,11 +743,11 @@ def get_monitor_failures_testcase(Filter_):
self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name,
n=14, mode='w')
# Poll might need more time
self.assertTrue(self.isEmpty(min(4, 100 * Utils.DEFAULT_SLEEP_TIME)),
self.assertTrue(self.isEmpty(_maxWaitTime(5)),
"Queue must be empty but it is not: %s."
% (', '.join([str(x) for x in self.jail.queue])))
self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan)
Utils.wait_for(lambda: self.filter.failManager.getFailTotal() == 2, 50 * Utils.DEFAULT_SLEEP_TIME)
Utils.wait_for(lambda: self.filter.failManager.getFailTotal() == 2, _maxWaitTime(10))
self.assertEqual(self.filter.failManager.getFailTotal(), 2)
# move aside, but leaving the handle still open...
@ -682,7 +772,7 @@ def get_monitor_failures_testcase(Filter_):
if interim_kill:
_killfile(None, self.name)
time.sleep(Utils.DEFAULT_SLEEP_TIME) # let them know
time.sleep(Utils.DEFAULT_SLEEP_INTERVAL) # let them know
# now create a new one to override old one
_copy_lines_between_files(GetFailures.FILENAME_01, self.name + '.new',
@ -729,10 +819,10 @@ def get_monitor_failures_testcase(Filter_):
_copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100)
# so we should get no more failures detected
self.assertTrue(self.isEmpty(200 * Utils.DEFAULT_SLEEP_TIME))
self.assertTrue(self.isEmpty(_maxWaitTime(10)))
# but then if we add it back again
self.filter.addLogPath(self.name)
# but then if we add it back again (no seek-to-time in FileFilter, because the file uses the same time)
self.filter.addLogPath(self.name, autoSeek=False)
# Tricky catch here is that it should get them from the
# tail written before, so let's not copy anything yet
#_copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100)
@ -789,14 +879,14 @@ def get_monitor_failures_journal_testcase(Filter_): # pragma: systemd no cover
def isFilled(self, delay=1.):
"""Wait up to `delay` sec to assure that it was modified or not
"""
return Utils.wait_for(self.jail.isFilled, delay)
return Utils.wait_for(self.jail.isFilled, _maxWaitTime(delay))
def isEmpty(self, delay=4*Utils.DEFAULT_SLEEP_TIME):
def isEmpty(self, delay=_maxWaitTime(5)):
# shorter wait time for not modified status
return Utils.wait_for(self.jail.isEmpty, delay)
return Utils.wait_for(self.jail.isEmpty, _maxWaitTime(delay))
def assert_correct_ban(self, test_ip, test_attempts):
self.assertTrue(self.isFilled(10)) # give Filter a chance to react
self.assertTrue(self.isFilled(_maxWaitTime(10))) # give Filter a chance to react
ticket = self.jail.getFailTicket()
attempts = ticket.getAttempt()
@ -819,7 +909,7 @@ def get_monitor_failures_journal_testcase(Filter_): # pragma: systemd no cover
_copy_lines_to_journal(
self.test_file, self.journal_fields, skip=2, n=3)
self.assertTrue(self.isFilled(6))
self.assertTrue(self.isFilled(10))
# so we sleep for up to 6 sec for it not to become empty,
# and meanwhile pass to other thread(s) and filter should
# have gathered new failures and passed them into the
@ -852,7 +942,7 @@ def get_monitor_failures_journal_testcase(Filter_): # pragma: systemd no cover
_copy_lines_to_journal(
self.test_file, self.journal_fields, n=5, skip=5)
# so we should get no more failures detected
self.assertTrue(self.isEmpty(200 * Utils.DEFAULT_SLEEP_TIME))
self.assertTrue(self.isEmpty(_maxWaitTime(10)))
# but then if we add it back again
self.filter.addJournalMatch([
@ -863,7 +953,7 @@ def get_monitor_failures_journal_testcase(Filter_): # pragma: systemd no cover
_copy_lines_to_journal(
self.test_file, self.journal_fields, n=6, skip=10)
# we should detect the failures
self.assertTrue(self.isFilled(6))
self.assertTrue(self.isFilled(10))
return MonitorJournalFailures
@ -931,7 +1021,7 @@ class GetFailures(LogCaptureTestCase):
filename = filename or GetFailures.FILENAME_01
failures = failures or GetFailures.FAILURES_01
self.filter.addLogPath(filename)
self.filter.addLogPath(filename, autoSeek=0)
self.filter.addFailRegex("(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>$")
self.filter.getFailures(filename)
_assert_correct_last_attempt(self, self.filter, failures)
@ -955,7 +1045,7 @@ class GetFailures(LogCaptureTestCase):
[u'Aug 14 11:%d:59 i60p295 sshd[12365]: Failed publickey for roehl from ::ffff:141.3.81.106 port 51332 ssh2'
% m for m in 53, 54, 57, 58])
self.filter.addLogPath(GetFailures.FILENAME_02)
self.filter.addLogPath(GetFailures.FILENAME_02, autoSeek=0)
self.filter.addFailRegex("Failed .* from <HOST>")
self.filter.getFailures(GetFailures.FILENAME_02)
_assert_correct_last_attempt(self, self.filter, output)
@ -963,25 +1053,35 @@ class GetFailures(LogCaptureTestCase):
def testGetFailures03(self):
output = ('203.162.223.135', 7, 1124013544.0)
self.filter.addLogPath(GetFailures.FILENAME_03)
self.filter.addLogPath(GetFailures.FILENAME_03, autoSeek=0)
self.filter.addFailRegex("error,relay=<HOST>,.*550 User unknown")
self.filter.getFailures(GetFailures.FILENAME_03)
_assert_correct_last_attempt(self, self.filter, output)
def testGetFailures03_seek(self):
def testGetFailures03_Seek1(self):
# same test as above but with a seek to 'Aug 14 11:55:04' - hence different output ...
output = ('203.162.223.135', 5, 1124013544.0)
self.filter.addLogPath(GetFailures.FILENAME_03)
self.filter.addLogPath(GetFailures.FILENAME_03, autoSeek=output[2] - 4*60)
self.filter.addFailRegex("error,relay=<HOST>,.*550 User unknown")
self.filter.getFailures(GetFailures.FILENAME_03, output[2] - 4*60 + 1)
self.filter.getFailures(GetFailures.FILENAME_03)
_assert_correct_last_attempt(self, self.filter, output)
def testGetFailures03_Seek2(self):
# same test as above but with a seek to 'Aug 14 11:59:04' - hence different output ...
output = ('203.162.223.135', 1, 1124013544.0)
self.filter.setMaxRetry(1)
self.filter.addLogPath(GetFailures.FILENAME_03, autoSeek=output[2])
self.filter.addFailRegex("error,relay=<HOST>,.*550 User unknown")
self.filter.getFailures(GetFailures.FILENAME_03)
_assert_correct_last_attempt(self, self.filter, output)
def testGetFailures04(self):
output = [('212.41.96.186', 4, 1124013600.0),
('212.41.96.185', 4, 1124017198.0)]
self.filter.addLogPath(GetFailures.FILENAME_04)
self.filter.addLogPath(GetFailures.FILENAME_04, autoSeek=0)
self.filter.addFailRegex("Invalid user .* <HOST>")
self.filter.getFailures(GetFailures.FILENAME_04)
@ -1048,7 +1148,7 @@ class GetFailures(LogCaptureTestCase):
filter_.active = True
filter_.failManager.setMaxRetry(1) # we might have just few failures
filter_.addLogPath(GetFailures.FILENAME_USEDNS)
filter_.addLogPath(GetFailures.FILENAME_USEDNS, autoSeek=False)
filter_.addFailRegex("Failed .* from <HOST>")
filter_.getFailures(GetFailures.FILENAME_USEDNS)
_assert_correct_last_attempt(self, filter_, output)
@ -1056,14 +1156,14 @@ class GetFailures(LogCaptureTestCase):
def testGetFailuresMultiRegex(self):
output = ('141.3.81.106', 8, 1124013541.0)
self.filter.addLogPath(GetFailures.FILENAME_02)
self.filter.addLogPath(GetFailures.FILENAME_02, autoSeek=False)
self.filter.addFailRegex("Failed .* from <HOST>")
self.filter.addFailRegex("Accepted .* from <HOST>")
self.filter.getFailures(GetFailures.FILENAME_02)
_assert_correct_last_attempt(self, self.filter, output)
def testGetFailuresIgnoreRegex(self):
self.filter.addLogPath(GetFailures.FILENAME_02)
self.filter.addLogPath(GetFailures.FILENAME_02, autoSeek=False)
self.filter.addFailRegex("Failed .* from <HOST>")
self.filter.addFailRegex("Accepted .* from <HOST>")
self.filter.addIgnoreRegex("for roehl")
@ -1075,7 +1175,7 @@ class GetFailures(LogCaptureTestCase):
def testGetFailuresMultiLine(self):
output = [("192.0.43.10", 2, 1124013599.0),
("192.0.43.11", 1, 1124013598.0)]
self.filter.addLogPath(GetFailures.FILENAME_MULTILINE)
self.filter.addLogPath(GetFailures.FILENAME_MULTILINE, autoSeek=False)
self.filter.addFailRegex("^.*rsyncd\[(?P<pid>\d+)\]: connect from .+ \(<HOST>\)$<SKIPLINES>^.+ rsyncd\[(?P=pid)\]: rsync error: .*$")
self.filter.setMaxLines(100)
self.filter.setMaxRetry(1)
@ -1093,7 +1193,7 @@ class GetFailures(LogCaptureTestCase):
def testGetFailuresMultiLineIgnoreRegex(self):
output = [("192.0.43.10", 2, 1124013599.0)]
self.filter.addLogPath(GetFailures.FILENAME_MULTILINE)
self.filter.addLogPath(GetFailures.FILENAME_MULTILINE, autoSeek=False)
self.filter.addFailRegex("^.*rsyncd\[(?P<pid>\d+)\]: connect from .+ \(<HOST>\)$<SKIPLINES>^.+ rsyncd\[(?P=pid)\]: rsync error: .*$")
self.filter.addIgnoreRegex("rsync error: Received SIGINT")
self.filter.setMaxLines(100)
@ -1109,7 +1209,7 @@ class GetFailures(LogCaptureTestCase):
output = [("192.0.43.10", 2, 1124013599.0),
("192.0.43.11", 1, 1124013598.0),
("192.0.43.15", 1, 1124013598.0)]
self.filter.addLogPath(GetFailures.FILENAME_MULTILINE)
self.filter.addLogPath(GetFailures.FILENAME_MULTILINE, autoSeek=False)
self.filter.addFailRegex("^.*rsyncd\[(?P<pid>\d+)\]: connect from .+ \(<HOST>\)$<SKIPLINES>^.+ rsyncd\[(?P=pid)\]: rsync error: .*$")
self.filter.addFailRegex("^.* sendmail\[.*, msgid=<(?P<msgid>[^>]+).*relay=\[<HOST>\].*$<SKIPLINES>^.+ spamd: result: Y \d+ .*,mid=<(?P=msgid)>(,bayes=[.\d]+)?(,autolearn=\S+)?\s*$")
self.filter.setMaxLines(100)


@ -76,12 +76,15 @@ class TransmitterBase(unittest.TestCase):
self.server.quit()
def setGetTest(self, cmd, inValue, outValue=(None,), outCode=0, jail=None, repr_=False):
"""Process set/get commands and compare both return values
with outValue if it was given, otherwise with inValue"""
setCmd = ["set", cmd, inValue]
getCmd = ["get", cmd]
if jail is not None:
setCmd.insert(1, jail)
getCmd.insert(1, jail)
# if outValue was not given (None is now also allowed as a return/compare value)
if outValue == (None,):
outValue = inValue
@ -173,7 +176,7 @@ class Transmitter(TransmitterBase):
self.assertEqual(self.transm.proceed(["sleep", "0.0001"]), (0, None))
def testDatabase(self):
if not unittest.F2B.fast:
if not unittest.F2B.memory_db:
tmp, tmpFilename = tempfile.mkstemp(".db", "fail2ban_")
else: # pragma: no cover
tmpFilename = ':memory:'
@ -208,7 +211,7 @@ class Transmitter(TransmitterBase):
self.assertEqual(self.transm.proceed(
["set", "dbfile", "None"]),
(0, None))
if not unittest.F2B.fast:
if not unittest.F2B.memory_db:
os.close(tmp)
os.unlink(tmpFilename)


@ -23,6 +23,7 @@ __copyright__ = "Copyright (c) 2013 Yaroslav Halchenko"
__license__ = "GPL"
import logging
import optparse
import os
import re
import sys
@ -35,6 +36,7 @@ from ..server.filter import DNSUtils
from ..server.mytime import MyTime
from ..server.utils import Utils
logSys = getLogger(__name__)
CONFIG_DIR = os.environ.get('FAIL2BAN_CONFIG_DIR', None)
@ -47,10 +49,14 @@ if not CONFIG_DIR:
CONFIG_DIR = '/etc/fail2ban'
class F2B():
def __init__(self, fast=False, no_network=False):
self.fast=fast
self.no_network=no_network
class F2B(optparse.Values):
def __init__(self, opts={}):
self.__dict__ = opts.__dict__ if opts else {
'fast': False, 'memory_db':False, 'no_gamin': False, 'no_network': False,
}
if self.fast:
self.memory_db = True
self.no_gamin = True
def SkipIfFast(self):
pass
def SkipIfNoNetwork(self):
@ -58,13 +64,11 @@ class F2B():
def initTests(opts):
if opts: # pragma: no cover
unittest.F2B = F2B(opts.fast, opts.no_network)
else:
unittest.F2B = F2B()
unittest.F2B = F2B(opts)
# --fast :
if unittest.F2B.fast: # pragma: no cover
# prevent long sleeping during test cases...
# racy: decrease default sleep intervals to run the tests faster
# (prevents long sleeping during test cases ... less time is spent sleeping):
Utils.DEFAULT_SLEEP_TIME = 0.0025
Utils.DEFAULT_SLEEP_INTERVAL = 0.0005
def F2B_SkipIfFast():
@ -74,7 +78,7 @@ def initTests(opts):
# sleep intervals are large - use a sleep replacement to check the requested sleep time:
_org_sleep = time.sleep
def _new_sleep(v):
if (v > Utils.DEFAULT_SLEEP_TIME):
if (v > Utils.DEFAULT_SLEEP_TIME): # pragma: no cover
raise ValueError('[BAD-CODE] Too long sleep interval: %s, try to use conditional Utils.wait_for instead' % v)
_org_sleep(min(v, Utils.DEFAULT_SLEEP_TIME))
time.sleep = _new_sleep
@ -103,7 +107,7 @@ def setUpMyTime():
def tearDownMyTime():
os.environ.pop('TZ')
if old_TZ:
if old_TZ: # pragma: no cover
os.environ['TZ'] = old_TZ
time.tzset()
MyTime.myTime = None
@ -135,11 +139,15 @@ def gatherTests(regexps=None, opts=None):
_regexps = [re.compile(r) for r in regexps]
def addTest(self, suite):
suite_str = str(suite)
matched = []
for test in suite:
s = str(test)
for r in self._regexps:
if r.search(suite_str):
super(FilteredTestSuite, self).addTest(suite)
return
if r.search(s):
matched.append(test)
break
for test in matched:
super(FilteredTestSuite, self).addTest(test)
tests = FilteredTestSuite()
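
This works because str(test) contains the method name followed by the class, e.g. "testMethod (module.Class)", so matching per test instead of per suite yields method-level filtering. A sketch with an assumed test name:

```python
import re

s = "testSeekToTimeSmallFile (fail2ban.tests.filtertestcase.LogFileFilterPoll)"
assert re.search("SeekToTime", s)            # selects just this method ...
assert not re.search("MonitorFailures", s)   # ... not every test in the class
```
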
@ -160,7 +168,7 @@ def gatherTests(regexps=None, opts=None):
try:
import dns
tests.addTest(unittest.makeSuite(banmanagertestcase.StatusExtendedCymruInfo))
except ImportError:
except ImportError: # pragma: no cover
pass
# ClientReaders
tests.addTest(unittest.makeSuite(clientreadertestcase.ConfigReaderTest))
@ -225,7 +233,8 @@ def gatherTests(regexps=None, opts=None):
try:
# because gamin can be very slow on some platforms (and can produce many failures
# with a fast sleep interval) - skip it for fast runs:
unittest.F2B.SkipIfFast()
if unittest.F2B.fast or unittest.F2B.no_gamin: # pragma: no cover
raise Exception('Skip, fast: %s, no_gamin: %s' % (unittest.F2B.fast, unittest.F2B.no_gamin))
from ..server.filtergamin import FilterGamin
filters.append(FilterGamin)
except Exception, e: # pragma: no cover