code review and minor repairs after merge with the performance branch (changed naming convention, wrongly resolved conflicts, etc.)

pull/1460/head
sebres 2015-12-29 14:45:56 +01:00
parent 21f058a9f7
commit 9d4f163e88
7 changed files with 57 additions and 47 deletions

View File

@ -8,9 +8,16 @@ Fail2Ban: Changelog
ver. 0.9.5 (2015/XX/XXX) - increment ban time
-----------
- Fixes:
* purge database will be executed now (within observer).
* restoring currently banned ip after service restart fixed
(now < timeofban + bantime), ignore old log failures (already banned)
- New Features:
* increment ban time (+ observer) functionality introduced.
Thanks Serg G. Brester (sebres)
* database functionality extended with bad ips.
ver. 0.9.4 (2015/XX/XXX) - wanna-be-released
-----------
@ -94,10 +101,6 @@ ver. 0.9.3 (2015/08/01) - lets-all-stay-friends
the emails. Adjust <grepopts> to augment the behavior.
- Fixes:
* purge database will be executed now (within observer).
* database functionality extended with bad ips.
* restoring currently banned ip after service restart fixed
(now < timeofban + bantime), ignore old log failures (already banned)
* reload in interactive mode appends all the jails twice (gh-825)
* reload server/jail failed if database used (but was not changed) and
some jail active (gh-1072)
@ -241,6 +244,7 @@ ver. 0.9.2 (2015/04/29) - better-quick-now-than-later
* Added syslogsocket configuration to fail2ban.conf
* Note in the jail.conf for the recidive jail to increase dbpurgeage (gh-964)
ver. 0.9.1 (2014/10/29) - better, faster, stronger
----------
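
The restart-restore fix in the new 0.9.5 entry above amounts to restoring a previously banned IP only while its ban is still in effect (now < timeofban + bantime). A minimal sketch of that check, with illustrative names only (not the actual Fail2Ban code):

import time

def should_restore(time_of_ban, ban_time, now=None):
    # Restore a ban after restart only if it has not yet expired,
    # i.e. now < timeofban + bantime (per the ChangeLog entry above).
    if now is None:
        now = time.time()
    return now < time_of_ban + ban_time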

View File

@ -683,7 +683,7 @@ class FileFilter(Filter):
# MyTime.time()-self.findTime. When a failure is detected, a FailTicket
# is created and is added to the FailManager.
def getFailures(self, filename, startTime=None):
def getFailures(self, filename):
log = self.getLog(filename)
if log is None:
logSys.error("Unable to get failures in " + filename)
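
The comment above describes the scan loop: only failures newer than MyTime.time()-self.findTime are kept, and each detected failure becomes a FailTicket handed to the FailManager. A stripped-down sketch of that flow, with hypothetical helper names (not Fail2Ban's real implementation):

def scan_for_failures(lines, match_failure, add_failure, find_time, now):
    # match_failure(line) is a hypothetical matcher returning (ip, unix_time) or None;
    # add_failure(ip, unix_time) stands in for FailManager.addFailure(FailTicket(...)).
    for line in lines:
        found = match_failure(line)
        if found is None:
            continue
        ip, unix_time = found
        if unix_time >= now - find_time:  # ignore failures older than the findtime window
            add_failure(ip, unix_time)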

View File

@ -58,7 +58,6 @@ class FilterPoll(FileFilter):
## The time of the last modification of the file.
self.__prevStats = dict()
self.__file404Cnt = dict()
self.__initial = dict()
logSys.debug("Created FilterPoll")
##

View File

@ -27,10 +27,12 @@ __license__ = "GPL"
import threading
from .jailthread import JailThread
from .failmanager import FailManagerEmpty
import os, logging, time, datetime, math, json, random
import sys
from ..helpers import getLogger
from .mytime import MyTime
from .utils import Utils
# Gets the instance of the logger.
logSys = getLogger(__name__)
@ -55,9 +57,14 @@ class ObserverThread(JailThread):
The time the thread sleeps for in the loop.
"""
# observer is event driven and its sleep is organized incrementally, so sleep intervals can be short:
DEFAULT_SLEEP_INTERVAL = Utils.DEFAULT_SLEEP_INTERVAL / 10
def __init__(self):
self.active = False
self.idle = False
# init thread
super(ObserverThread, self).__init__(name='Observer')
# before started - idle:
self.idle = True
## Event queue
self._queue_lock = threading.RLock()
self._queue = []
@ -71,8 +78,6 @@ class ObserverThread(JailThread):
self._paused = False
self.__db = None
self.__db_purge_interval = 60*60
# start thread
super(ObserverThread, self).__init__(name='Observer')
# observer is not a main thread:
self.daemon = True
@ -167,8 +172,8 @@ class ObserverThread(JailThread):
'db_set': self.db_set,
'db_purge': self.db_purge,
# service events of the observer itself:
'is_alive' : self.is_alive,
'is_active': self.is_active,
'is_alive' : self.isAlive,
'is_active': self.isActive,
'start': self.start,
'stop': self.stop,
'nop': lambda:(),
@ -208,7 +213,7 @@ class ObserverThread(JailThread):
continue
else:
## notify event deleted (shutdown) - just sleep a little bit (waiting for shutdown events, preventing high CPU usage)
time.sleep(0.001)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
## stop by shutdown and empty queue :
if not self.is_full:
break
@ -224,11 +229,11 @@ class ObserverThread(JailThread):
self.idle = True
return True
def is_alive(self):
def isAlive(self):
#logSys.debug("Observer alive...")
return True
def is_active(self, fromStr=None):
def isActive(self, fromStr=None):
# logSys.info("Observer alive, %s%s",
# 'active' if self.active else 'inactive',
# '' if fromStr is None else (", called from '%s'" % fromStr))
@ -266,7 +271,7 @@ class ObserverThread(JailThread):
def wait_empty(self, sleeptime=None):
"""Wait observer is running and returns if observer has no more events (queue is empty)
"""
time.sleep(0.001)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
if sleeptime is not None:
e = MyTime.time() + sleeptime
# block the queue with a no-op to be sure all real jobs are executed before the nop leaves the queue:
@ -277,16 +282,16 @@ class ObserverThread(JailThread):
while self.is_full:
if sleeptime is not None and MyTime.time() > e:
break
time.sleep(0.01)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
# wait for idle to be sure the last queue element is processed (events are popped before being processed):
self.wait_idle(0.01)
self.wait_idle(0.001)
return not self.is_full
def wait_idle(self, sleeptime=None):
"""Wait observer is running and returns if observer idle (observer sleeps)
"""
time.sleep(0.001)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
if self.idle:
return True
if sleeptime is not None:
@ -294,7 +299,7 @@ class ObserverThread(JailThread):
while not self.idle:
if sleeptime is not None and MyTime.time() > e:
break
time.sleep(0.01)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
return self.idle
@property
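
The two wait helpers above are the synchronization points used by the tests later in this commit; a typical usage sketch (assuming ObserverThread is imported as in the test module changed below):

obs = ObserverThread()
obs.start()
obs.wait_idle(1)    # block until the observer reports idle
obs.add('nop')      # enqueue a no-op service event
obs.wait_empty(5)   # block until the event queue has drained
obs.stop()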
@ -340,7 +345,7 @@ class ObserverThread(JailThread):
Observer will check whether the IP was already known (bad) and possibly increase the retry count
"""
# check jail active :
if not jail.is_alive():
if not jail.isAlive():
return
ip = ticket.getIP()
unixTime = ticket.getTime()
@ -371,10 +376,8 @@ class ObserverThread(JailThread):
logSys.info("[%s] Found %s, bad - %s, %s # -> %s%s", jail.name, ip,
datetime.datetime.fromtimestamp(unixTime).strftime("%Y-%m-%d %H:%M:%S"), banCount, retryCount,
(', Ban' if retryCount >= maxRetry else ''))
# remove matches from this ticket, because a ticket was already added by the filter itself
ticket.setMatches(None)
# retryCount-1, because the count was already incremented once by the filter itself
failManager.addFailure(ticket, retryCount - 1, True)
retryCount = failManager.addFailure(ticket, retryCount - 1, True)
# after observing, the increased count may be >= maxretry ...
if retryCount >= maxRetry:
@ -384,7 +387,7 @@ class ObserverThread(JailThread):
while True:
ticket = failManager.toBan(ip)
jail.putFailTicket(ticket)
except Exception:
except FailManagerEmpty:
failManager.cleanup(MyTime.time())
except Exception as e:
@ -409,7 +412,7 @@ class ObserverThread(JailThread):
new ban time.
"""
# check jail active :
if not jail.is_alive():
if not jail.isAlive() or not jail.database:
return
be = jail.getBanTimeExtra()
ip = ticket.getIP()
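
The incrBanTime hunk above recalculates a ban time from the jail's "ban time extra" settings (jail.getBanTimeExtra()). One common scheme, shown here purely as an illustration (the factor, cap and formula are assumptions, not necessarily what this commit computes), grows the ban exponentially with the previous ban count:

def next_ban_time(ban_time, ban_count, factor=2, max_time=None):
    # Illustrative only: increase the ban duration with each previous ban of the same IP.
    new_time = ban_time * (factor ** ban_count)
    if max_time is not None and new_time > max_time:
        new_time = max_time
    return new_time

# e.g. a 600 s base ban on the third offence: 600 * 2**2 == 2400 seconds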

View File

@ -24,19 +24,16 @@ __author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import sys
from ..helpers import getLogger
from .mytime import MyTime
# Gets the instance of the logger.
logSys = getLogger(__name__)
RESTORED = 0x01
class Ticket:
RESTORED = 0x01
def __init__(self, ip=None, time=None, matches=None, ticket=None):
"""Ticket constructor
@ -60,7 +57,7 @@ class Ticket:
def __str__(self):
return "%s: ip=%s time=%s bantime=%s bancount=%s #attempts=%d matches=%r" % \
(self.__class__.__name__.split('.')[-1], self.__ip, self._time,
self.__banTime, self.__banCount,
self._banTime, self._banCount,
self._data['failures'], self._data.get('matches', []))
def __repr__(self):
@ -125,10 +122,13 @@ class Ticket:
return self._data.get('matches', [])
def setRestored(self, value):
self._flags |= RESTORED
if value:
self._flags = Ticket.RESTORED
else:
self._flags &= ~(Ticket.RESTORED)
def getRestored(self):
return 1 if self._flags & RESTORED else 0
return self._flags & Ticket.RESTORED
def setData(self, *args, **argv):
# if overwrite - set data and filter None values:
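
The setRestored/getRestored change above moves the RESTORED marker into a class-level bit flag; a short usage sketch, assuming the Ticket class as shown in this hunk:

t = Ticket(ip='192.0.2.1', time=1451394356)
t.setRestored(True)
assert t.getRestored()        # truthy: the RESTORED bit is set
t.setRestored(False)
assert not t.getRestored()    # bit cleared again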

View File

@ -127,7 +127,7 @@ class DatabaseTest(LogCaptureTestCase):
os.remove(self.db._dbBackupFilename)
def testUpdateDb2(self):
if Fail2BanDb is None: # pragma: no cover
if Fail2BanDb is None or self.db.filename == ':memory:': # pragma: no cover
return
shutil.copyfile(
os.path.join(TEST_FILES_DIR, 'database_v2.db'), self.dbFilename)

View File

@ -33,14 +33,14 @@ import time
from ..server.mytime import MyTime
from ..server.ticket import FailTicket
from ..server.failmanager import FailManager
from ..server.banmanager import BanManager
from ..server.observer import Observers, ObserverThread
from ..server.utils import Utils
from .utils import LogCaptureTestCase
from ..server.filter import Filter
from .dummyjail import DummyJail
try:
from ..server.database import Fail2BanDb
except ImportError:
Fail2BanDb = None
from .databasetestcase import getFail2BanDb, Fail2BanDb
class BanTimeIncr(LogCaptureTestCase):
@ -191,7 +191,7 @@ class BanTimeIncrDB(unittest.TestCase):
elif Fail2BanDb is None:
return
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
self.db = Fail2BanDb(self.dbFilename)
self.db = getFail2BanDb(self.dbFilename)
self.jail = DummyJail()
self.jail.database = self.db
self.Observer = ObserverThread()
@ -199,13 +199,13 @@ class BanTimeIncrDB(unittest.TestCase):
def tearDown(self):
"""Call after every test case."""
super(BanTimeIncrDB, self).tearDown()
if Fail2BanDb is None: # pragma: no cover
return
# Cleanup
self.Observer.stop()
Observers.Main = None
os.remove(self.dbFilename)
super(BanTimeIncrDB, self).tearDown()
def incrBanTime(self, ticket, banTime=None):
jail = self.jail;
@ -457,7 +457,7 @@ class BanTimeIncrDB(unittest.TestCase):
self.db._purgeAge = -240*60*60
obs.add_named_timer('DB_PURGE', 0.001, 'db_purge')
# wait for timer ready
time.sleep(0.025)
obs.wait_idle(0.025)
# wait for ready
obs.add('nop')
obs.wait_empty(5)
@ -498,13 +498,17 @@ class BanTimeIncrDB(unittest.TestCase):
ticket2 = jail.getFailTicket()
if ticket2:
break
time.sleep(0.1)
time.sleep(Utils.DEFAULT_SLEEP_INTERVAL)
if MyTime.time() > to: # pragma: no cover
raise RuntimeError('unexpected timeout: wait 30 seconds instead of few ms.')
# check ticket and failure count:
self.assertFalse(not ticket2)
self.assertEqual(ticket2.getAttempt(), failManager.getMaxRetry())
self.assertEqual(ticket2.getRetry(), failManager.getMaxRetry())
# wrap FailTicket to BanTicket:
failticket2 = ticket2
ticket2 = BanManager.createBanTicket(failticket2)
self.assertEqual(ticket2, failticket2)
# add this ticket to ban (use observer only without ban manager):
obs.add('banFound', ticket2, jail, 10)
obs.wait_empty(5)
@ -568,7 +572,7 @@ class ObserverTest(LogCaptureTestCase):
obs = ObserverThread()
obs.start()
# wait for idle
obs.wait_idle(0.1)
obs.wait_idle(1)
# observer will replace test set:
o = set(['test'])
obs.add('call', o.clear)
@ -582,7 +586,7 @@ class ObserverTest(LogCaptureTestCase):
# observer will replace test set, but only after the pause ends:
obs.add('call', o.clear)
obs.add('call', o.add, 'test3')
obs.wait_empty(0.25)
obs.wait_empty(10 * Utils.DEFAULT_SLEEP_TIME)
self.assertTrue(obs.is_full)
self.assertEqual(o, set(['test2']))
obs.paused = False
@ -590,8 +594,8 @@ class ObserverTest(LogCaptureTestCase):
obs.wait_empty(1)
self.assertEqual(o, set(['test3']))
self.assertTrue(obs.is_active())
self.assertTrue(obs.is_alive())
self.assertTrue(obs.isActive())
self.assertTrue(obs.isAlive())
obs.stop()
obs = None