Blacken
continuous-integration/drone/push: Build is failing

IamTheFij 2022-04-04 20:23:15 -07:00
parent 094c910cd4
commit e41d82f9d2
6 changed files with 207 additions and 207 deletions

View File

@@ -1,17 +1,15 @@
repos:
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v1.2.3
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: autopep8-wrapper
args:
- -i
- --ignore=E265,E309,E501
- id: debug-statements
language_version: python3
- id: flake8
language_version: python3
- id: check-yaml
args:
- --allow-multiple-documents
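
For context on the black hook pinned above: the same formatting check can be run by hand before committing. A minimal sketch, assuming Black 22.3.0 is installed in the active environment; the target directories are illustrative and not something this config specifies.

# Hypothetical pre-commit-style check: report files Black would reformat, without changing them.
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-m", "black", "--check", "--diff", "minitor", "tests"],
    text=True,
)
sys.exit(result.returncode)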

View File

@@ -16,15 +16,14 @@ from prometheus_client import start_http_server
DEFAULT_METRICS_PORT = 8080
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s %(levelname)s %(name)s %(message)s'
level=logging.ERROR, format="%(asctime)s %(levelname)s %(name)s %(message)s"
)
logging.getLogger(__name__).addHandler(logging.NullHandler())
def read_yaml(path):
"""Loads config from a YAML file with env interpolation"""
with open(path, 'r') as yaml:
with open(path, "r") as yaml:
contents = yaml.read()
return yamlenv.load(contents)
@@ -35,44 +34,40 @@ def validate_monitor_settings(settings):
Note: Cannot yet validate the Alerts exist from within this class.
That will be done by Minitor later
"""
name = settings.get('name')
name = settings.get("name")
if not name:
raise InvalidMonitorException('Invalid name for monitor')
if not settings.get('command'):
raise InvalidMonitorException(
'Invalid command for monitor {}'.format(name)
)
raise InvalidMonitorException("Invalid name for monitor")
if not settings.get("command"):
raise InvalidMonitorException("Invalid command for monitor {}".format(name))
type_assertions = (
('check_interval', int),
('alert_after', int),
('alert_every', int),
("check_interval", int),
("alert_after", int),
("alert_every", int),
)
for key, val_type in type_assertions:
val = settings.get(key)
if not isinstance(val, val_type):
raise InvalidMonitorException(
'Invalid type on {}: {}. Expected {} and found {}'.format(
"Invalid type on {}: {}. Expected {} and found {}".format(
name, key, val_type.__name__, type(val).__name__
)
)
non_zero = (
'check_interval',
'alert_after',
"check_interval",
"alert_after",
)
for key in non_zero:
if settings.get(key) == 0:
raise InvalidMonitorException(
'Invalid value for {}: {}. Value cannot be 0'.format(
name, key
)
"Invalid value for {}: {}. Value cannot be 0".format(name, key)
)
def maybe_decode(bstr, encoding='utf-8'):
def maybe_decode(bstr, encoding="utf-8"):
try:
return bstr.decode(encoding)
except TypeError:
@@ -82,14 +77,14 @@ def maybe_decode(bstr, encoding='utf-8'):
def call_output(*popenargs, **kwargs):
"""Similar to check_output, but instead returns output and exception"""
# So we can capture complete output, redirect stderr to stdout
kwargs.setdefault('stderr', subprocess.STDOUT)
kwargs.setdefault("stderr", subprocess.STDOUT)
output, ex = None, None
try:
output = check_output(*popenargs, **kwargs)
except CalledProcessError as e:
output, ex = e.output, e
output = output.rstrip(b'\n')
output = output.rstrip(b"\n")
return output, ex
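
A quick usage sketch of the call_output helper reformatted above (assuming it is importable from minitor.main, which the test module later in this diff relies on): it returns the combined stdout/stderr as bytes with the trailing newline stripped, plus the CalledProcessError if the command failed.

from minitor.main import call_output

output, ex = call_output(["echo", "test"])
assert output == b"test"   # trailing newline already stripped
assert ex is None          # no exception for a successful command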
@@ -113,23 +108,23 @@ class Monitor(object):
def __init__(self, config, counter=None, logger=None):
"""Accepts a dictionary of configuration items to override defaults"""
settings = {
'alerts': ['log'],
'check_interval': 30,
'alert_after': 4,
'alert_every': -1,
"alerts": ["log"],
"check_interval": 30,
"alert_after": 4,
"alert_every": -1,
}
settings.update(config)
validate_monitor_settings(settings)
self.name = settings['name']
self.command = settings['command']
self.alert_down = settings.get('alert_down', [])
self.name = settings["name"]
self.command = settings["command"]
self.alert_down = settings.get("alert_down", [])
if not self.alert_down:
self.alert_down = settings.get('alerts', [])
self.alert_up = settings.get('alert_up', [])
self.check_interval = settings.get('check_interval')
self.alert_after = settings.get('alert_after')
self.alert_every = settings.get('alert_every')
self.alert_down = settings.get("alerts", [])
self.alert_up = settings.get("alert_up", [])
self.check_interval = settings.get("check_interval")
self.alert_after = settings.get("alert_after")
self.alert_every = settings.get("alert_every")
self.alert_count = 0
self.last_check = None
@@ -140,18 +135,18 @@ class Monitor(object):
self._counter = counter
if logger is None:
self._logger = logging.getLogger(
'{}({})'.format(self.__class__.__name__, self.name)
"{}({})".format(self.__class__.__name__, self.name)
)
else:
self._logger = logger.getChild(
'{}({})'.format(self.__class__.__name__, self.name)
"{}({})".format(self.__class__.__name__, self.name)
)
def _count_check(self, is_success=True, is_alert=False):
if self._counter is not None:
self._counter.labels(
monitor=self.name,
status=('success' if is_success else 'failure'),
status=("success" if is_success else "failure"),
is_alert=is_alert,
).inc()
@@ -199,7 +194,7 @@ class Monitor(object):
back_up = None
if not self.is_up():
back_up = MinitorAlert(
'{} check is up again!'.format(self.name),
"{} check is up again!".format(self.name),
self,
)
self.total_failure_count = 0
@@ -215,7 +210,7 @@ class Monitor(object):
if self.total_failure_count < self.alert_after:
return
failure_count = (self.total_failure_count - self.alert_after)
failure_count = self.total_failure_count - self.alert_after
if self.alert_every > 0:
# Otherwise, we should check against our alert_every
should_alert = (failure_count % self.alert_every) == 0
@@ -223,15 +218,15 @@
# Only alert on the first failure
should_alert = failure_count == 1
else:
should_alert = (failure_count >= (2 ** self.alert_count) - 1)
should_alert = failure_count >= (2**self.alert_count) - 1
if should_alert:
self.alert_count += 1
raise MinitorAlert(
'{} check has failed {} times'.format(
"{} check has failed {} times".format(
self.name, self.total_failure_count
),
self
self,
)
def is_up(self):
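
An aside on the exponential back-off comparison reformatted above (2**self.alert_count, now spelled without spaces per Black): when alert_every is negative (the default shown earlier is -1), alerts are spaced further and further apart. A standalone sketch of that cadence, hypothetical code rather than part of the repository, replaying only the visible comparison:

alert_count = 0
fired = []
for failure_count in range(40):
    if failure_count >= (2 ** alert_count) - 1:
        alert_count += 1  # the monitor increments alert_count each time it alerts (per the hunk above)
        fired.append(failure_count)
print(fired)  # [0, 1, 3, 7, 15, 31]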
@@ -243,18 +238,18 @@ class Alert(object):
def __init__(self, name, config, counter=None, logger=None):
"""An alert must be named and have a config dict"""
self.name = name
self.command = config.get('command')
self.command = config.get("command")
if not self.command:
raise InvalidAlertException('Invalid alert {}'.format(self.name))
raise InvalidAlertException("Invalid alert {}".format(self.name))
self._counter = counter
if logger is None:
self._logger = logging.getLogger(
'{}({})'.format(self.__class__.__name__, self.name)
"{}({})".format(self.__class__.__name__, self.name)
)
else:
self._logger = logger.getChild(
'{}({})'.format(self.__class__.__name__, self.name)
"{}({})".format(self.__class__.__name__, self.name)
)
def _count_alert(self, monitor):
@@ -277,7 +272,7 @@ class Alert(object):
def _format_datetime(self, dt):
"""Formats a datetime for an alert"""
if dt is None:
return 'Never'
return "Never"
return dt.isoformat()
def alert(self, message, monitor):
@@ -313,64 +308,72 @@ class Minitor(object):
def _parse_args(self, args=None):
"""Parses command line arguments and returns them"""
parser = ArgumentParser(description='Minimal monitoring')
parser = ArgumentParser(description="Minimal monitoring")
parser.add_argument(
'--config', '-c',
dest='config_path',
default='config.yml',
help='Path to the config YAML file to use',
"--config",
"-c",
dest="config_path",
default="config.yml",
help="Path to the config YAML file to use",
)
parser.add_argument(
'--metrics', '-m',
dest='metrics',
action='store_true',
help='Start webserver with metrics',
"--metrics",
"-m",
dest="metrics",
action="store_true",
help="Start webserver with metrics",
)
parser.add_argument(
'--metrics-port', '-p',
dest='metrics_port',
"--metrics-port",
"-p",
dest="metrics_port",
type=int,
default=DEFAULT_METRICS_PORT,
help='Port to use when serving metrics',
help="Port to use when serving metrics",
)
parser.add_argument(
'--verbose', '-v',
action='count',
help=('Adjust log verbosity by increasing arg count. Default log',
'level is ERROR. Level increases with each `v`'),
"--verbose",
"-v",
action="count",
help=(
"Adjust log verbosity by increasing arg count. Default log",
"level is ERROR. Level increases with each `v`",
),
)
return parser.parse_args(args)
def _setup(self, config_path):
"""Load all setup from YAML file at provided path"""
config = read_yaml(config_path)
self.check_interval = config.get('check_interval', 30)
self.check_interval = config.get("check_interval", 30)
self.monitors = [
Monitor(
mon,
counter=self._monitor_counter,
logger=self._logger,
)
for mon in config.get('monitors', [])
for mon in config.get("monitors", [])
]
# Add default alert for logging
self.alerts = {
'log': Alert(
'log',
{'command': ['echo', '{alert_message}!']},
"log": Alert(
"log",
{"command": ["echo", "{alert_message}!"]},
counter=self._alert_counter,
logger=self._logger,
)
}
self.alerts.update({
self.alerts.update(
{
alert_name: Alert(
alert_name,
alert,
counter=self._alert_counter,
logger=self._logger,
)
for alert_name, alert in config.get('alerts', {}).items()
})
for alert_name, alert in config.get("alerts", {}).items()
}
)
def _validate_monitors(self):
"""Validates monitors are valid against other config values"""
@@ -378,7 +381,7 @@ class Minitor(object):
# Validate that the interval is valid
if monitor.check_interval < self.check_interval:
raise InvalidMonitorException(
'Monitor {} check interval is lower than the global value {}'.format(
"Monitor {} check interval is lower than the global value {}".format(
monitor.name, self.check_interval
)
)
@@ -386,26 +389,26 @@ class Minitor(object):
for alert in chain(monitor.alert_down, monitor.alert_up):
if alert not in self.alerts:
raise InvalidMonitorException(
'Monitor {} contains an unknown alert: {}'.format(
"Monitor {} contains an unknown alert: {}".format(
monitor.name, alert
)
)
def _init_metrics(self):
self._alert_counter = Counter(
'minitor_alert_total',
'Number of Minitor alerts',
['alert', 'monitor'],
"minitor_alert_total",
"Number of Minitor alerts",
["alert", "monitor"],
)
self._monitor_counter = Counter(
'minitor_check_total',
'Number of Minitor checks',
['monitor', 'status', 'is_alert'],
"minitor_check_total",
"Number of Minitor checks",
["monitor", "status", "is_alert"],
)
self._monitor_status_gauge = Gauge(
'minitor_monitor_up_count',
'Currently responsive monitors',
['monitor'],
"minitor_monitor_up_count",
"Currently responsive monitors",
["monitor"],
)
def _loop(self):
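
The metric definitions reformatted above follow the usual prometheus_client pattern: declare a labeled collector once, then increment it per observation while start_http_server exposes the metrics endpoint. A self-contained sketch of that pattern, outside the Minitor class (the port mirrors DEFAULT_METRICS_PORT defined earlier in this file):

from prometheus_client import Counter, start_http_server

checks = Counter(
    "minitor_check_total",
    "Number of Minitor checks",
    ["monitor", "status", "is_alert"],
)
start_http_server(8080)  # serves metrics over HTTP; the real program keeps looping after this
checks.labels(monitor="example", status="success", is_alert=False).inc()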
@@ -420,9 +423,7 @@ class Minitor(object):
result = monitor.check()
if result is not None:
self._logger.info(
'%s: %s',
monitor.name,
'SUCCESS' if result else 'FAILURE'
"%s: %s", monitor.name, "SUCCESS" if result else "FAILURE"
)
except MinitorAlert as minitor_alert:
self._logger.warning(minitor_alert)
@@ -475,5 +476,5 @@ def main(args=None):
return 0
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@@ -7,47 +7,50 @@ from setuptools import setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name='minitor',
version='1.0.3',
description='A minimal monitoring tool',
name="minitor",
version="1.0.3",
description="A minimal monitoring tool",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://git.iamthefij.com/iamthefij/minitor',
download_url=(
'https://git.iamthefij.com/iamthefij/minitor/archive/master.tar.gz'
),
author='Ian Fijolek',
author_email='ian@iamthefij.com',
long_description_content_type="text/markdown",
url="https://git.iamthefij.com/iamthefij/minitor",
download_url=("https://git.iamthefij.com/iamthefij/minitor/archive/master.tar.gz"),
author="Ian Fijolek",
author_email="ian@iamthefij.com",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Topic :: System :: Monitoring",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords='minitor monitoring alerting',
packages=find_packages(exclude=[
'contrib',
'docs',
'examples',
'scripts',
'tests',
]),
keywords="minitor monitoring alerting",
packages=find_packages(
exclude=[
"contrib",
"docs",
"examples",
"scripts",
"tests",
]
),
install_requires=[
'prometheus_client',
'yamlenv',
"prometheus_client",
"yamlenv",
],
scripts=["scripts/docker_check.sh"],
entry_points={
'console_scripts': [
'minitor=minitor.main:main',
"console_scripts": [
"minitor=minitor.main:main",
],
},
)
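
The console_scripts entry above is what creates the minitor command; it dispatches to minitor.main:main. A rough, hypothetical equivalent that skips the entry point (the sample config path comes from the test run later in this diff and is not installed by setup.py):

import sys
from minitor.main import main

sys.exit(main(["--config", "./sample-config.yml", "-vv"]))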

View File

@@ -9,54 +9,47 @@ from tests.util import assert_called_once_with
class TestAlert(object):
@pytest.fixture
def monitor(self):
return Monitor({
'name': 'Dummy Monitor',
'command': ['echo', 'foo'],
})
return Monitor(
{
"name": "Dummy Monitor",
"command": ["echo", "foo"],
}
)
@pytest.fixture
def echo_alert(self):
return Alert(
'log',
"log",
{
'command': [
'echo', (
'{monitor_name} has failed {failure_count} time(s)!\n'
'We have alerted {alert_count} time(s)\n'
'Last success was {last_success}\n'
'Last output was: {last_output}'
)
"command": [
"echo",
(
"{monitor_name} has failed {failure_count} time(s)!\n"
"We have alerted {alert_count} time(s)\n"
"Last success was {last_success}\n"
"Last output was: {last_output}"
),
]
}
},
)
@pytest.mark.parametrize(
'last_success,expected_success',
[
(None, 'Never'),
(datetime(2018, 4, 10), '2018-04-10T00:00:00')
]
"last_success,expected_success",
[(None, "Never"), (datetime(2018, 4, 10), "2018-04-10T00:00:00")],
)
def test_simple_alert(
self,
monitor,
echo_alert,
last_success,
expected_success
):
def test_simple_alert(self, monitor, echo_alert, last_success, expected_success):
monitor.alert_count = 1
monitor.last_output = 'beep boop'
monitor.last_output = "beep boop"
monitor.last_success = last_success
monitor.total_failure_count = 1
with patch.object(echo_alert._logger, 'error') as mock_error:
echo_alert.alert('Exception message', monitor)
with patch.object(echo_alert._logger, "error") as mock_error:
echo_alert.alert("Exception message", monitor)
assert_called_once_with(
mock_error,
'Dummy Monitor has failed 1 time(s)!\n'
'We have alerted 1 time(s)\n'
'Last success was ' + expected_success + '\n'
'Last output was: beep boop'
"Dummy Monitor has failed 1 time(s)!\n"
"We have alerted 1 time(s)\n"
"Last success was " + expected_success + "\n"
"Last output was: beep boop",
)

View File

@@ -6,30 +6,31 @@ from minitor.main import Minitor
class TestMinitor(object):
def test_call_output(self):
# valid command should have result and no exception
output, ex = call_output(['echo', 'test'])
assert output == b'test'
output, ex = call_output(["echo", "test"])
assert output == b"test"
assert ex is None
output, ex = call_output(['ls', '--not-a-real-flag'])
assert output.startswith(b'ls: ')
output, ex = call_output(["ls", "--not-a-real-flag"])
assert output.startswith(b"ls: ")
assert ex is not None
def test_run(self):
"""Doesn't really check much, but a simple integration sanity test"""
test_loop_count = 5
os.environ.update({
'MAILGUN_API_KEY': 'test-mg-key',
'AVAILABLE_NUMBER': '555-555-5050',
'MY_PHONE': '555-555-0505',
'ACCOUNT_SID': 'test-account-id',
'AUTH_TOKEN': 'test-account-token',
})
args = '--config ./sample-config.yml'.split(' ')
os.environ.update(
{
"MAILGUN_API_KEY": "test-mg-key",
"AVAILABLE_NUMBER": "555-555-5050",
"MY_PHONE": "555-555-0505",
"ACCOUNT_SID": "test-account-id",
"AUTH_TOKEN": "test-account-token",
}
)
args = "--config ./sample-config.yml".split(" ")
minitor = Minitor()
with patch.object(minitor, '_loop'):
with patch.object(minitor, "_loop"):
minitor.run(args)
# Skip the loop, but run a single check
for _ in range(test_loop_count):

View File

@@ -11,40 +11,44 @@ from tests.util import assert_called_once
class TestMonitor(object):
@pytest.fixture
def monitor(self):
return Monitor({
'name': 'Sample Monitor',
'command': ['echo', 'foo'],
'alert_down': ['log'],
'alert_up': ['log'],
'check_interval': 1,
'alert_after': 1,
'alert_every': 1,
})
return Monitor(
{
"name": "Sample Monitor",
"command": ["echo", "foo"],
"alert_down": ["log"],
"alert_up": ["log"],
"check_interval": 1,
"alert_after": 1,
"alert_every": 1,
}
)
@pytest.mark.parametrize('settings', [
{'alert_after': 0},
{'alert_every': 0},
{'check_interval': 0},
{'alert_after': 'invalid'},
{'alert_every': 'invalid'},
{'check_interval': 'invalid'},
])
@pytest.mark.parametrize(
"settings",
[
{"alert_after": 0},
{"alert_every": 0},
{"check_interval": 0},
{"alert_after": "invalid"},
{"alert_every": "invalid"},
{"check_interval": "invalid"},
],
)
def test_monitor_invalid_configuration(self, settings):
with pytest.raises(InvalidMonitorException):
validate_monitor_settings(settings)
@pytest.mark.parametrize(
'alert_after',
"alert_after",
[1, 20],
ids=lambda arg: 'alert_after({})'.format(arg),
ids=lambda arg: "alert_after({})".format(arg),
)
@pytest.mark.parametrize(
'alert_every',
"alert_every",
[-1, 1, 2, 1440],
ids=lambda arg: 'alert_every({})'.format(arg),
ids=lambda arg: "alert_every({})".format(arg),
)
def test_monitor_alert_after(self, monitor, alert_after, alert_every):
monitor.alert_after = alert_after
@@ -59,14 +63,14 @@ class TestMonitor(object):
monitor.failure()
@pytest.mark.parametrize(
'alert_after',
"alert_after",
[1, 20],
ids=lambda arg: 'alert_after({})'.format(arg),
ids=lambda arg: "alert_after({})".format(arg),
)
@pytest.mark.parametrize(
'alert_every',
"alert_every",
[1, 2, 1440],
ids=lambda arg: 'alert_every({})'.format(arg),
ids=lambda arg: "alert_every({})".format(arg),
)
def test_monitor_alert_every(self, monitor, alert_after, alert_every):
monitor.alert_after = alert_after
@@ -102,27 +106,27 @@ class TestMonitor(object):
else:
monitor.failure()
@pytest.mark.parametrize('last_check', [None, datetime(2018, 4, 10)])
@pytest.mark.parametrize("last_check", [None, datetime(2018, 4, 10)])
def test_monitor_should_check(self, monitor, last_check):
monitor.last_check = last_check
assert monitor.should_check()
def test_monitor_check_fail(self, monitor):
assert monitor.last_output is None
with patch.object(monitor, 'failure') as mock_failure:
monitor.command = ['ls', '--not-real']
with patch.object(monitor, "failure") as mock_failure:
monitor.command = ["ls", "--not-real"]
assert not monitor.check()
assert_called_once(mock_failure)
assert monitor.last_output is not None
def test_monitor_check_success(self, monitor):
assert monitor.last_output is None
with patch.object(monitor, 'success') as mock_success:
with patch.object(monitor, "success") as mock_success:
assert monitor.check()
assert_called_once(mock_success)
assert monitor.last_output is not None
@pytest.mark.parametrize('failure_count', [0, 1])
@pytest.mark.parametrize("failure_count", [0, 1])
def test_monitor_success(self, monitor, failure_count):
monitor.alert_count = 0
monitor.total_failure_count = failure_count