working on cleaning up the logs

forest 2021-01-04 17:20:03 -06:00
parent df7a750794
commit d82aa99c40
3 changed files with 87 additions and 17 deletions

View File

@@ -61,15 +61,45 @@ app.config.from_mapping(
app.config['HUB_URL'] = os.environ.get("HUB_URL", default=app.config['BASE_URL'])
class SetLogLevelToDebugForHeartbeatRelatedMessagesFilter(logging.Filter):
    def isHeartbeatRelatedString(self, thing):
        # thing_string = "<error>"
        is_in_string = False
        try:
            thing_string = "%s" % thing
            is_in_string = 'heartbeat-task' in thing_string or 'hub/heartbeat' in thing_string or 'spoke/heartbeat' in thing_string
        except:
            pass
        # self.warning("isHeartbeatRelatedString(%s): %s", thing_string, is_in_string )
        return is_in_string
    def filter(self, record):
        if app.config['LOG_LEVEL'] == "DEBUG":
            return True
        if self.isHeartbeatRelatedString(record.msg):
            return False
        for arg in record.args:
            if self.isHeartbeatRelatedString(arg):
                return False
        return True
logging_dict_config({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
    }},
    'filters': {
        'setLogLevelToDebugForHeartbeatRelatedMessages': {
            '()': SetLogLevelToDebugForHeartbeatRelatedMessagesFilter,
        }
    },
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default'
        'formatter': 'default',
        'filters': ['setLogLevelToDebugForHeartbeatRelatedMessages']
    }},
    'root': {
        'level': app.config['LOG_LEVEL'],
@@ -77,6 +107,45 @@ logging_dict_config({
    }
})
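For context: despite its name, the filter added above works by dropping heartbeat-related records from the wsgi handler entirely whenever LOG_LEVEL is not DEBUG, rather than re-emitting them at DEBUG. A minimal standalone sketch of the same pattern, with a renamed filter, a plain stderr StreamHandler instead of the Flask wsgi stream, and a hard-coded LOG_LEVEL standing in for app.config['LOG_LEVEL']:

import logging
from logging.config import dictConfig

LOG_LEVEL = "INFO"  # stands in for app.config['LOG_LEVEL']

class DropHeartbeatMessagesFilter(logging.Filter):
    # returning False from filter() drops the record from the handler it is attached to
    def filter(self, record):
        if LOG_LEVEL == "DEBUG":
            return True
        text = "%s %s" % (record.msg, record.args)
        return 'heartbeat' not in text

dictConfig({
    'version': 1,
    'filters': {'drop_heartbeat': {'()': DropHeartbeatMessagesFilter}},
    'handlers': {'console': {
        'class': 'logging.StreamHandler',
        'filters': ['drop_heartbeat'],
    }},
    'root': {'level': LOG_LEVEL, 'handlers': ['console']},
})

logging.getLogger(__name__).info("POST /hub/heartbeat-task 200")          # dropped at INFO
logging.getLogger(__name__).info("creating vm for someone@example.com")   # still logged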
# class LoggerWhichLogsHeartbeatInfoMessagesAtDebugLevel(logging.Logger):
#     def isHeartbeatRelatedString(self, thing):
#         # thing_string = "<error>"
#         is_in_string = False
#         try:
#             thing_string = "%s" % thing
#             is_in_string = 'heartbeat-task' in thing_string or 'hub/heartbeat' in thing_string or 'spoke/heartbeat' in thing_string
#         except:
#             pass
#         # self.warning("isHeartbeatRelatedString(%s): %s", thing_string, is_in_string )
#         return is_in_string
#     def info(self, msg, *args, **kwargs):
#         log_level_to_use = logging.INFO
#         if self.isHeartbeatRelatedString(msg):
#             log_level_to_use = logging.DEBUG
#         for arg in args:
#             if self.isHeartbeatRelatedString(arg):
#                 log_level_to_use = logging.DEBUG
#         self._log(log_level_to_use, msg, args, **kwargs)
# def monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel(name: str):
#     logger = logging.getLogger(name)
#     original_info = logger.info
#     logger.__class__ = LoggerWhichLogsHeartbeatInfoMessagesAtDebugLevel
#     logger.unfiltered_info = original_info
# monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel('flask')
# monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel('werkzeug')
# # monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel('apscheduler.scheduler')
# monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel('apscheduler.executors')
# monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel('apscheduler.executors.default')
# # monkeyPatchLoggerToLogHeartbeatInfoMessagesAtDebugLevel('apscheduler.jobstores.default')
#monkeyPatchLoggerToLogInfoMessagesAtDebugLevel('apscheduler.executors.asyncio')
# app.logger.critical("critical")
# app.logger.error("error")
# app.logger.warning("warning")
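The commented-out approach above (kept in the file for reference) took a different route: reassigning a logger's __class__ so that its info() calls are intercepted and demoted to DEBUG. A rough, self-contained sketch of that monkey-patching technique; the class name here is a placeholder:

import logging

class HeartbeatDemotingLogger(logging.Logger):
    # override info() so heartbeat-related messages are emitted at DEBUG instead
    def info(self, msg, *args, **kwargs):
        level = logging.DEBUG if 'heartbeat' in str(msg) else logging.INFO
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('werkzeug')
logger.__class__ = HeartbeatDemotingLogger  # swap the class of the existing logger instance
logger.info("GET /hub/heartbeat/host1 HTTP/1.1 200")  # now emitted at DEBUG, so hidden at INFO
logger.info("starting up")                            # still emitted at INFO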
@@ -102,7 +171,7 @@ if app.config['HUB_MODE_ENABLED']:
    heartbeat_task_url = f"{app.config['HUB_URL']}/hub/heartbeat-task"
    heartbeat_task_headers = {'Authorization': f"Bearer {app.config['HUB_TOKEN']}"}
    heartbeat_task = lambda: requests.post(heartbeat_task_url, headers=heartbeat_task_headers)
    scheduler.add_job(name="heartbeat_task", func=heartbeat_task, trigger="interval", seconds=5)
    scheduler.add_job(name="heartbeat-task", func=heartbeat_task, trigger="interval", seconds=5)
    scheduler.start()
    atexit.register(lambda: scheduler.shutdown())
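Renaming the job from "heartbeat_task" to "heartbeat-task" presumably lets the heartbeat filter above also match APScheduler's own "Running job ..." messages, since the job name appears in them. A self-contained sketch of the scheduling pattern, with a placeholder hub URL and token in place of the app.config values:

import atexit
import requests
from apscheduler.schedulers.background import BackgroundScheduler

HUB_URL = "http://localhost:5000"  # stands in for app.config['HUB_URL']
HUB_TOKEN = "changeme"             # stands in for app.config['HUB_TOKEN']

def heartbeat_task():
    requests.post(f"{HUB_URL}/hub/heartbeat-task",
                  headers={'Authorization': f"Bearer {HUB_TOKEN}"})

scheduler = BackgroundScheduler()
scheduler.add_job(name="heartbeat-task", func=heartbeat_task, trigger="interval", seconds=5)
scheduler.start()
atexit.register(scheduler.shutdown)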
@@ -183,4 +252,5 @@ def url_for_with_cache_bust(endpoint, **values):
    return url_for(endpoint, **values)

View File

@@ -19,19 +19,19 @@ def authorized_for_host(id):
def ping_all_hosts_task():
    if authorized_as_hub(request.headers):
        all_hosts = get_model().get_all_hosts()
        current_app.logger.info(f"pinging {len(all_hosts)} hosts...")
        current_app.logger.debug(f"pinging {len(all_hosts)} hosts...")
        authorization_header = f"Bearer {current_app.config['HUB_TOKEN']}"
        results = current_app.config["HTTP_CLIENT"].make_requests_sync(all_hosts, "/spoke/heartbeat", None, authorization_header=authorization_header)
        for i in range(len(all_hosts)):
            host = all_hosts[i]
            result = results[i]
            current_app.logger.info(f"response from {host.id} ({host.url}): {result.status_code} {result.body}")
            current_app.logger.debug(f"response from {host.id} ({host.url}): {result.status_code} {result.body}")
            if result.status_code == 200:
                get_model().host_heartbeat(host.id)
        return "ok"
    else:
        current_app.logger.info(f"/hub/heartbeat-task returned 401: invalid hub token")
        current_app.logger.warning(f"/hub/heartbeat-task returned 401: invalid hub token")
        return abort(401, "invalid hub token")
@@ -40,7 +40,7 @@ def heartbeat(host_id):
    if authorized_for_host(host_id):
        return "ok"
    else:
        current_app.logger.info(f"/hub/heartbeat/{host_id} returned 401: invalid token")
        current_app.logger.warning(f"/hub/heartbeat/{host_id} returned 401: invalid token")
        return abort(401, "invalid host id or token")
@bp.route("/claim-operation/<int:operation_id>/<string:host_id>", methods=("POST",))
@@ -55,6 +55,6 @@ def claim_operation(operation_id: int, host_id: str):
        else:
            return abort(409, "operation was already assigned to another host")
    else:
        current_app.logger.info(f"/hub/claim-operation/{operation_id}/{host_id} returned 401: invalid token")
        current_app.logger.warning(f"/hub/claim-operation/{operation_id}/{host_id} returned 401: invalid token")
        return abort(401, "invalid host id or token")
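Note that 'hub/heartbeat' and 'spoke/heartbeat' also show up in werkzeug's access-log records for these routes, where the request line is passed as a record argument rather than in the message string, which is why the filter checks record.args as well as record.msg. A self-contained sketch of that case, using a simplified stand-in for the filter and only an approximation of werkzeug's record format:

import logging

class DropHeartbeatRecords(logging.Filter):
    # simplified stand-in for SetLogLevelToDebugForHeartbeatRelatedMessagesFilter
    def filter(self, record):
        needles = ('heartbeat-task', 'hub/heartbeat', 'spoke/heartbeat')
        if any(n in str(record.msg) for n in needles):
            return False
        return not any(n in str(arg) for arg in (record.args or ()) for n in needles)

# the request path lives in args here, not in msg
record = logging.LogRecord(
    name='werkzeug', level=logging.INFO, pathname=__file__, lineno=0,
    msg='127.0.0.1 - - [04/Jan/2021 17:20:03] "%s" %s %s',
    args=('POST /hub/heartbeat/host1 HTTP/1.1', '200', '-'), exc_info=None)

print(DropHeartbeatRecords().filter(record))  # False: the record would be dropped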

View File

@@ -45,7 +45,7 @@ def operation_impl(operation_id: int):
    if authorized_as_hub(request.headers):
        request_body_json = request.json
        request_body = json.loads(request_body_json)
        current_app.logger.info(f"request.json: {request_body}")
        #current_app.logger.info(f"request.json: {request_body}")
        handlers = {
            "capacity_avaliable": handle_capacity_avaliable,
            "get": handle_get,
@@ -70,15 +70,15 @@ def operation_impl(operation_id: int):
            error_message = "'type' json property is required"
        if error_message != "":
            current_app.logger.info(f"/hosts/operation returned 400: {error_message}")
            current_app.logger.error(f"/hosts/operation returned 400: {error_message}")
            return abort(400, f"bad request; {error_message}")
    else:
        current_app.logger.info(f"/hosts/operation returned 401: invalid hub token")
        current_app.logger.warning(f"/hosts/operation returned 401: invalid hub token")
        return abort(401, "invalid hub token")
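operation_impl dispatches on the request body's 'type' field via the handlers dict shown above, returning 400 when the 'type' property is missing. A condensed sketch of that dispatch shape outside of Flask; the handlers and sample payload here are illustrative only:

import json

def handle_get(operation_id, request_body):
    return {"id": request_body["id"]}

def handle_destroy(operation_id, request_body):
    return {"destroyed": request_body["id"]}

handlers = {"get": handle_get, "destroy": handle_destroy}

def dispatch(operation_id, request_body_json):
    request_body = json.loads(request_body_json)
    operation_type = request_body.get("type")
    if operation_type not in handlers:
        # the route responds with abort(400, ...) at this point
        raise ValueError("'type' json property is required")
    return handlers[operation_type](operation_id, request_body)

print(dispatch(1, json.dumps({"type": "get", "id": "vm-abc"})))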
def handle_capacity_avaliable(operation_id, request_body):
    if 'additional_ram_bytes' not in request_body:
        current_app.logger.info(f"/hosts/operation returned 400: additional_ram_bytes is required for capacity_avaliable")
        current_app.logger.error(f"/hosts/operation returned 400: additional_ram_bytes is required for capacity_avaliable")
        return abort(400, f"bad request; additional_ram_bytes is required for capacity_avaliable")
    has_capacity = current_app.config['SPOKE_MODEL'].capacity_avaliable(request_body['additional_ram_bytes'])
@@ -86,7 +86,7 @@ def handle_capacity_avaliable(operation_id, request_body):
def handle_get(operation_id, request_body):
    if 'id' not in request_body:
        current_app.logger.info(f"/hosts/operation returned 400: id is required for get")
        current_app.logger.error(f"/hosts/operation returned 400: id is required for get")
        return abort(400, f"bad request; id is required for get")
    vm = current_app.config['SPOKE_MODEL'].get(request_body['id'])
@@ -98,7 +98,7 @@ def handle_list_ids(operation_id, request_body):
def handle_create(operation_id, request_body):
    if not operation_id:
        current_app.logger.info(f"/hosts/operation returned 400: operation_id is required for create ")
        current_app.logger.error(f"/hosts/operation returned 400: operation_id is required for create ")
        return abort(400, f"bad request; operation_id is required. try POST /spoke/operation/<id>")
    parameters = ["email", "id", "template_image_file_name", "vcpus", "memory_mb", "ssh_public_keys"]
@@ -108,7 +108,7 @@ def handle_create(operation_id, request_body):
            error_message = f"{error_message}\n{parameter} is required for create"
    if error_message != "":
        current_app.logger.info(f"/hosts/opasdascasderation returned 400: {error_message}")
        current_app.logger.error(f"/hosts/operation returned 400: {error_message}")
        return abort(400, f"bad request; {error_message}")
    # only one host should create the vm, so we first race to assign this create operation to ourselves.
@@ -123,7 +123,7 @@ def handle_create(operation_id, request_body):
    elif result.status_code == 409:
        assignment_status = "assigned_to_other_host"
    else:
        current_app.logger.info(f"{url} returned {result.status_code}: {result.body}")
        current_app.logger.error(f"{url} returned {result.status_code}: {result.body}")
        return abort(503, f"hub did not cleanly handle our request to claim the create operation")
    if assignment_status == "assigned":
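The status handling above implements the claim race described in the comment: each spoke that receives a create operation POSTs to the hub's /hub/claim-operation/<operation_id>/<host_id> endpoint, a 200 means this host won the claim and should create the VM, and a 409 means another host already claimed it. A rough sketch of the spoke side of that exchange using requests directly (placeholder URL and token; the real code goes through the app's HTTP client):

import requests

HUB_URL = "http://localhost:5000"  # stands in for app.config['HUB_URL']
HUB_TOKEN = "changeme"             # stands in for app.config['HUB_TOKEN']

def claim_create_operation(operation_id, host_id):
    url = f"{HUB_URL}/hub/claim-operation/{operation_id}/{host_id}"
    result = requests.post(url, headers={'Authorization': f"Bearer {HUB_TOKEN}"})
    if result.status_code == 200:
        return "assigned"
    elif result.status_code == 409:
        return "assigned_to_other_host"
    raise RuntimeError(f"{url} returned {result.status_code}: {result.text}")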
@@ -148,11 +148,11 @@ def handle_create(operation_id, request_body):
def handle_destroy(operation_id, request_body):
    if 'id' not in request_body:
        current_app.logger.info(f"/hosts/operation returned 400: id is required for destroy")
        current_app.logger.error(f"/hosts/operation returned 400: id is required for destroy")
        return abort(400, f"bad request; id is required for destroy")
    if 'email' not in request_body:
        current_app.logger.info(f"/hosts/operation returned 400: email is required for destroy")
        current_app.logger.error(f"/hosts/operation returned 400: email is required for destroy")
        return abort(400, f"bad request; email is required for destroy")
    try: