diff --git a/fail2ban/server/filter.py b/fail2ban/server/filter.py index 1ff81d65..40bfeef5 100644 --- a/fail2ban/server/filter.py +++ b/fail2ban/server/filter.py @@ -112,6 +112,8 @@ class Filter(JailThread): self.onIgnoreRegex = None ## if true ignores obsolete failures (failure time < now - findTime): self.checkFindTime = True + ## shows that filter is in operation mode (processing new messages): + self.inOperation = True ## if true prevents against retarded banning in case of RC by too many failures (disabled only for test purposes): self.banASAP = True ## Ticks counter @@ -586,16 +588,26 @@ class Filter(JailThread): if self.__ignoreCache: c.set(key, False) return False + def _logWarnOnce(self, nextLTM, *args): + """Log some issue as warning once per day, otherwise level 7""" + if MyTime.time() < getattr(self, nextLTM, 0): + if logSys.getEffectiveLevel() <= 7: logSys.log(7, *(args[0])) + else: + setattr(self, nextLTM, MyTime.time() + 24*60*60) + for args in args: + logSys.warning('[%s] ' + args[0], self.jailName, *args[1:]) + def processLine(self, line, date=None): """Split the time portion from log msg and return findFailures on them """ + logSys.log(7, "Working on line %r", line) + + noDate = False if date: tupleLine = line self.__lastTimeText = tupleLine[1] self.__lastDate = date else: - logSys.log(7, "Working on line %r", line) - # try to parse date: timeMatch = self.dateDetector.matchTime(line) m = timeMatch[0] @@ -606,22 +618,51 @@ class Filter(JailThread): tupleLine = (line[:s], m, line[e:]) if m: # found and not empty - retrive date: date = self.dateDetector.getTime(m, timeMatch) - - if date is None: - if m: logSys.error("findFailure failed to parse timeText: %s", m) + if date is not None: + # Lets get the time part + date = date[0] + self.__lastTimeText = m + self.__lastDate = date + else: + logSys.error("findFailure failed to parse timeText: %s", m) + else: + tupleLine = ("", "", line) + # still no date - try to use last known: + if date is None: + 
noDate = True + if self.__lastDate and self.__lastDate > MyTime.time() - 60: + tupleLine = ("", self.__lastTimeText, line) date = self.__lastDate - else: - # Lets get the time part - date = date[0] - self.__lastTimeText = m + + if self.checkFindTime: + # if in operation (modifications have been really found): + if self.inOperation: + # if weird date - we'd simulate now for timing issue (too large deviation from now): + if (date is None or date < MyTime.time() - 60 or date > MyTime.time() + 60): + # log time zone issue as warning once per day: + self._logWarnOnce("_next_simByTimeWarn", + ("Simulate NOW in operation since found time has too large deviation %s ~ %s +/- %s", + date, MyTime.time(), 60), + ("Please check jail has possibly a timezone issue. Line with odd timestamp: %s", + line)) + # simulate now as date: + date = MyTime.time() self.__lastDate = date else: - tupleLine = (line, self.__lastTimeText, "") - date = self.__lastDate + # in initialization (restore) phase, if too old - ignore: + if date is not None and date < MyTime.time() - self.getFindTime(): + # log time zone issue as warning once per day: + self._logWarnOnce("_next_ignByTimeWarn", + ("Ignore line since time %s < %s - %s", + date, MyTime.time(), self.getFindTime()), + ("Please check jail has possibly a timezone issue. Line with odd timestamp: %s", + line)) + # ignore - too old (obsolete) entry: + return [] # save last line (lazy convert of process line tuple to string on demand): self.processedLine = lambda: "".join(tupleLine[::2]) - return self.findFailure(tupleLine, date) + return self.findFailure(tupleLine, date, noDate=noDate) def processLineAndAdd(self, line, date=None): """Processes the line for failures and populates failManager @@ -755,7 +796,7 @@ class Filter(JailThread): # to find the logging time. # @return a dict with IP and timestamp. 
- def findFailure(self, tupleLine, date): + def findFailure(self, tupleLine, date, noDate=False): failList = list() ll = logSys.getEffectiveLevel() @@ -765,11 +806,6 @@ class Filter(JailThread): returnRawHost = True cidr = IPAddr.CIDR_RAW - if self.checkFindTime and date is not None and date < MyTime.time() - self.getFindTime(): - if ll <= 5: logSys.log(5, "Ignore line since time %s < %s - %s", - date, MyTime.time(), self.getFindTime()) - return failList - if self.__lineBufferSize > 1: self.__lineBuffer.append(tupleLine) orgBuffer = self.__lineBuffer = self.__lineBuffer[-self.__lineBufferSize:] @@ -820,17 +856,17 @@ class Filter(JailThread): if not self.checkAllRegex: break continue - if date is None: - logSys.warning( - "Found a match for %r but no valid date/time " - "found for %r. Please try setting a custom " + if noDate: + self._logWarnOnce("_next_noTimeWarn", + ("Found a match but no valid date/time found for %r.", tupleLine[1]), + ("Match without a timestamp: %s", "\n".join(failRegex.getMatchedLines())), + ("Please try setting a custom " "date pattern (see man page jail.conf(5)). " "If format is complex, please " "file a detailed issue on" " https://github.com/fail2ban/fail2ban/issues " - "in order to get support for this format.", - "\n".join(failRegex.getMatchedLines()), tupleLine[1]) - continue + "in order to get support for this format.",)) + if date is None and self.checkFindTime: continue # we should check all regex (bypass on multi-line, otherwise too complex): if not self.checkAllRegex or self.__lineBufferSize > 1: self.__lineBuffer, buf = failRegex.getUnmatchedTupleLines(), None @@ -1023,7 +1059,7 @@ class FileFilter(Filter): # MyTime.time()-self.findTime. When a failure is detected, a FailTicket # is created and is added to the FailManager. 
- def getFailures(self, filename): + def getFailures(self, filename, inOperation=None): log = self.getLog(filename) if log is None: logSys.error("Unable to get failures in %s", filename) @@ -1068,9 +1104,14 @@ class FileFilter(Filter): if has_content: while not self.idle: line = log.readline() - if not line or not self.active: - # The jail reached the bottom or has been stopped + if not self.active: break; # jail has been stopped + if not line: + # The jail reached the bottom, simply set in operation for this log + # (since we are first time at end of file, growing is only possible after modifications): + log.inOperation = True break + # acquire in operation from log and process: + self.inOperation = inOperation if inOperation is not None else log.inOperation self.processLineAndAdd(line.rstrip('\r\n')) finally: log.close() @@ -1230,6 +1271,8 @@ class FileContainer: self.__pos = 0 finally: handler.close() + ## shows that log is in operation mode (expecting new messages only from here): + self.inOperation = tail def getFileName(self): return self.__filename @@ -1303,16 +1346,17 @@ class FileContainer: return line.decode(enc, 'strict') except (UnicodeDecodeError, UnicodeEncodeError) as e: global _decode_line_warn - lev = logging.DEBUG - if _decode_line_warn.get(filename, 0) <= MyTime.time(): + lev = 7 + if not _decode_line_warn.get(filename, 0): lev = logging.WARNING - _decode_line_warn[filename] = MyTime.time() + 24*60*60 + _decode_line_warn.set(filename, 1) logSys.log(lev, - "Error decoding line from '%s' with '%s'." - " Consider setting logencoding=utf-8 (or another appropriate" - " encoding) for this jail. Continuing" - " to process line ignoring invalid characters: %r", - filename, enc, line) + "Error decoding line from '%s' with '%s'.", filename, enc) + if logSys.getEffectiveLevel() <= lev: + logSys.log(lev, "Consider setting logencoding=utf-8 (or another appropriate" + " encoding) for this jail. 
Continuing" + " to process line ignoring invalid characters: %r", + line) # decode with replacing error chars: line = line.decode(enc, 'replace') return line @@ -1333,7 +1377,7 @@ class FileContainer: ## print "D: Closed %s with pos %d" % (handler, self.__pos) ## sys.stdout.flush() -_decode_line_warn = {} +_decode_line_warn = Utils.Cache(maxCount=1000, maxTime=24*60*60); ## diff --git a/fail2ban/server/utils.py b/fail2ban/server/utils.py index 053aa04f..4e64ca0b 100644 --- a/fail2ban/server/utils.py +++ b/fail2ban/server/utils.py @@ -125,6 +125,10 @@ class Utils(): with self.__lock: self._cache.pop(k, None) + def clear(self): + with self.__lock: + self._cache.clear() + @staticmethod def setFBlockMode(fhandle, value): diff --git a/fail2ban/tests/fail2banregextestcase.py b/fail2ban/tests/fail2banregextestcase.py index 2da0098e..2e1d18db 100644 --- a/fail2ban/tests/fail2banregextestcase.py +++ b/fail2ban/tests/fail2banregextestcase.py @@ -172,7 +172,7 @@ class Fail2banRegexTest(LogCaptureTestCase): "--print-all-matched", FILENAME_01, RE_00 )) - self.assertLogged('Lines: 19 lines, 0 ignored, 13 matched, 6 missed') + self.assertLogged('Lines: 19 lines, 0 ignored, 16 matched, 3 missed') self.assertLogged('Error decoding line'); self.assertLogged('Continuing to process line ignoring invalid characters') @@ -186,7 +186,7 @@ class Fail2banRegexTest(LogCaptureTestCase): "--print-all-matched", "--raw", FILENAME_01, RE_00 )) - self.assertLogged('Lines: 19 lines, 0 ignored, 16 matched, 3 missed') + self.assertLogged('Lines: 19 lines, 0 ignored, 19 matched, 0 missed') def testDirectRE_1raw_noDns(self): self.assertTrue(_test_exec( @@ -194,7 +194,7 @@ class Fail2banRegexTest(LogCaptureTestCase): "--print-all-matched", "--raw", "--usedns=no", FILENAME_01, RE_00 )) - self.assertLogged('Lines: 19 lines, 0 ignored, 13 matched, 6 missed') + self.assertLogged('Lines: 19 lines, 0 ignored, 16 matched, 3 missed') # usage of \S+ causes raw handling automatically: self.pruneLog() 
self.assertTrue(_test_exec( diff --git a/fail2ban/tests/files/logs/zzz-generic-example b/fail2ban/tests/files/logs/zzz-generic-example index d0bd3322..118c7e12 100644 --- a/fail2ban/tests/files/logs/zzz-generic-example +++ b/fail2ban/tests/files/logs/zzz-generic-example @@ -30,8 +30,8 @@ Jun 21 16:55:02 machine kernel: [ 970.699396] @vserver_demo test- # failJSON: { "time": "2005-06-21T16:55:03", "match": true , "host": "192.0.2.3" } [Jun 21 16:55:03] machine kernel: [ 970.699396] @vserver_demo test-demo(pam_unix)[13709] [ID 255 test] F2B: failure from 192.0.2.3 -# -- wrong time direct in journal-line (used last known date): -# failJSON: { "time": "2005-06-21T16:55:03", "match": true , "host": "192.0.2.1" } +# -- wrong time direct in journal-line (used last known date or now, but null because no checkFindTime in samples test factory): +# failJSON: { "time": null, "match": true , "host": "192.0.2.1" } 0000-12-30 00:00:00 server test-demo[47831]: F2B: failure from 192.0.2.1 # -- wrong time after newline in message (plist without escaped newlines): # failJSON: { "match": false } @@ -42,8 +42,8 @@ Jun 22 20:37:04 server test-demo[402]: writeToStorage plist={ applicationDate = "0000-12-30 00:00:00 +0000"; # failJSON: { "match": false } } -# -- wrong time direct in journal-line (used last known date): -# failJSON: { "time": "2005-06-22T20:37:04", "match": true , "host": "192.0.2.2" } +# -- wrong time direct in journal-line (used last known date, but null because no checkFindTime in samples test factory): +# failJSON: { "time": null, "match": true , "host": "192.0.2.2" } 0000-12-30 00:00:00 server test-demo[47831]: F2B: failure from 192.0.2.2 # -- test no zone and UTC/GMT named zone "2005-06-21T14:55:10 UTC" == "2005-06-21T16:55:10 CEST" (diff +2h in CEST): diff --git a/fail2ban/tests/filtertestcase.py b/fail2ban/tests/filtertestcase.py index e8634929..63f43b21 100644 --- a/fail2ban/tests/filtertestcase.py +++ b/fail2ban/tests/filtertestcase.py @@ -215,7 +215,7 @@ def 
_copy_lines_between_files(in_, fout, n=None, skip=0, mode='a', terminal_line # Write: all at once and flush if isinstance(fout, str): fout = open(fout, mode) - fout.write('\n'.join(lines)) + fout.write('\n'.join(lines)+'\n') fout.flush() if isinstance(in_, str): # pragma: no branch - only used with str in test cases # Opened earlier, therefore must close it @@ -878,7 +878,7 @@ class LogFileMonitor(LogCaptureTestCase): self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan) # and it should have not been enough - _copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=5) + _copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=12, n=3) self.filter.getFailures(self.name) _assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01) @@ -897,7 +897,7 @@ class LogFileMonitor(LogCaptureTestCase): # filter "marked" as the known beginning, otherwise it # would not detect "rotation" self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name, - skip=3, mode='w') + skip=12, n=3, mode='w') self.filter.getFailures(self.name) #self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan) _assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01) @@ -916,7 +916,7 @@ class LogFileMonitor(LogCaptureTestCase): # move aside, but leaving the handle still open... 
os.rename(self.name, self.name + '.bak') - _copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=14).close() + _copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=14, n=1).close() self.filter.getFailures(self.name) _assert_correct_last_attempt(self, self.filter, GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 3) @@ -1027,13 +1027,13 @@ def get_monitor_failures_testcase(Filter_): self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan) # Now let's feed it with entries from the file - _copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=5) + _copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=12) self.assertRaises(FailManagerEmpty, self.filter.failManager.toBan) # and our dummy jail is empty as well self.assertFalse(len(self.jail)) # since it should have not been enough - _copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=5) + _copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=12, n=3) if idle: self.waitForTicks(1) self.assertTrue(self.isEmpty(1)) @@ -1052,7 +1052,7 @@ def get_monitor_failures_testcase(Filter_): #return # just for fun let's copy all of them again and see if that results # in a new ban - _copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100) + _copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=12, n=3) self.assert_correct_last_attempt(GetFailures.FAILURES_01) def test_rewrite_file(self): @@ -1066,7 +1066,7 @@ def get_monitor_failures_testcase(Filter_): # filter "marked" as the known beginning, otherwise it # would not detect "rotation" self.file = _copy_lines_between_files(GetFailures.FILENAME_01, self.name, - skip=3, mode='w') + skip=12, n=3, mode='w') self.assert_correct_last_attempt(GetFailures.FAILURES_01) def _wait4failures(self, count=2): @@ -1087,13 +1087,13 @@ def get_monitor_failures_testcase(Filter_): # move aside, but leaving the handle still open... 
os.rename(self.name, self.name + '.bak') - _copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=14).close() + _copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=14, n=1).close() self.assert_correct_last_attempt(GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 3) # now remove the moved file _killfile(None, self.name + '.bak') - _copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100).close() + _copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=12, n=3).close() self.assert_correct_last_attempt(GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 6) @@ -1169,8 +1169,7 @@ def get_monitor_failures_testcase(Filter_): def _test_move_into_file(self, interim_kill=False): # if we move a new file into the location of an old (monitored) file - _copy_lines_between_files(GetFailures.FILENAME_01, self.name, - n=100).close() + _copy_lines_between_files(GetFailures.FILENAME_01, self.name).close() # make sure that it is monitored first self.assert_correct_last_attempt(GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 3) @@ -1181,14 +1180,14 @@ def get_monitor_failures_testcase(Filter_): # now create a new one to override old one _copy_lines_between_files(GetFailures.FILENAME_01, self.name + '.new', - n=100).close() + skip=12, n=3).close() os.rename(self.name + '.new', self.name) self.assert_correct_last_attempt(GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 6) # and to make sure that it now monitored for changes _copy_lines_between_files(GetFailures.FILENAME_01, self.name, - n=100).close() + skip=12, n=3).close() self.assert_correct_last_attempt(GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 9) @@ -1207,7 +1206,7 @@ def get_monitor_failures_testcase(Filter_): # create a bogus file in the same directory and see if that doesn't affect open(self.name + '.bak2', 
'w').close() - _copy_lines_between_files(GetFailures.FILENAME_01, self.name, n=100).close() + _copy_lines_between_files(GetFailures.FILENAME_01, self.name, skip=12, n=3).close() self.assert_correct_last_attempt(GetFailures.FAILURES_01) self.assertEqual(self.filter.failManager.getFailTotal(), 6) _killfile(None, self.name + '.bak2') @@ -1239,8 +1238,8 @@ def get_monitor_failures_testcase(Filter_): self.assert_correct_last_attempt(GetFailures.FAILURES_01, count=6) # was needed if we write twice above # now copy and get even more - _copy_lines_between_files(GetFailures.FILENAME_01, self.file, n=100) - # check for 3 failures (not 9), because 6 already get above... + _copy_lines_between_files(GetFailures.FILENAME_01, self.file, skip=12, n=3) + # check for 3 failures (not 9), because 6 already get above... self.assert_correct_last_attempt(GetFailures.FAILURES_01) # total count in this test: self.assertEqual(self.filter.failManager.getFailTotal(), 12) @@ -1606,13 +1605,21 @@ class GetFailures(LogCaptureTestCase): _assert_correct_last_attempt(self, self.filter, output) def testGetFailures03(self): - output = ('203.162.223.135', 9, 1124013600.0) + output = ('203.162.223.135', 6, 1124013600.0) self.filter.addLogPath(GetFailures.FILENAME_03, autoSeek=0) self.filter.addFailRegex(r"error,relay=,.*550 User unknown") self.filter.getFailures(GetFailures.FILENAME_03) _assert_correct_last_attempt(self, self.filter, output) + def testGetFailures03_InOperation(self): + output = ('203.162.223.135', 9, 1124013600.0) + + self.filter.addLogPath(GetFailures.FILENAME_03, autoSeek=0) + self.filter.addFailRegex(r"error,relay=,.*550 User unknown") + self.filter.getFailures(GetFailures.FILENAME_03, inOperation=True) + _assert_correct_last_attempt(self, self.filter, output) + def testGetFailures03_Seek1(self): # same test as above but with seek to 'Aug 14 11:55:04' - so other output ... 
output = ('203.162.223.135', 3, 1124013600.0) @@ -1673,6 +1680,7 @@ class GetFailures(LogCaptureTestCase): for enc in (None, 'utf-8', 'ascii'): if enc is not None: self.tearDown();self.setUp(); + if DefLogSys.getEffectiveLevel() > 7: DefLogSys.setLevel(7); # ensure decode_line logs always self.filter.checkFindTime = False; self.filter.setLogEncoding(enc); # speedup search using exact date pattern: