#!/usr/bin/env python3

import asyncio
import json
import os
import re
import sys
import time
import traceback
from datetime import datetime, timedelta

import discord
import requests

from bot_utils import setup, send_msg

"""
health-checker reads the /health-check endpoint of the portal and dispatches
messages to a Discord channel.
"""

# Get the container name as an argument or use "sia" as default.
CONTAINER_NAME = "sia"
if len(sys.argv) > 2:
    CONTAINER_NAME = sys.argv[2]

# Get the number of hours to look back in the logs or use 1 as default.
CHECK_HOURS = 1
if len(sys.argv) > 3:
    CHECK_HOURS = int(sys.argv[3])

# Discord messages have a length limit of 2000 characters. We use a lower
# limit in order to leave some space for additional message text.
DISCORD_MAX_MESSAGE_LENGTH = 1900

GB = 1 << 30  # 1 GiB in bytes

# Free disk space thresholds used for notices and for shutting down siad.
FREE_DISK_SPACE_THRESHOLD = 50 * GB
FREE_DISK_SPACE_THRESHOLD_CRITICAL = 20 * GB

bot_token = setup()
client = discord.Client()


# exit_after kills the script if it hasn't exited on its own after `delay` seconds.
async def exit_after(delay):
    await asyncio.sleep(delay)
    # os._exit terminates the process immediately, without waiting for the
    # Discord client to shut down cleanly.
    os._exit(0)


@client.event
async def on_ready():
    await run_checks()
    asyncio.create_task(exit_after(3))


async def run_checks():
    print("Running Skynet portal health checks")
    try:
        await check_load_average()
        await check_disk()
        await check_health()
        await check_alerts()
        await check_portal_size()
    except:
        trace = traceback.format_exc()
        print("[DEBUG] run_checks() failed.")
        await send_msg(
            client,
            "Failed to run the portal health checks!",
            file=trace,
            force_notify=True,
        )


# check_load_average monitors the system load average value and issues a
# warning message if it exceeds 10.
async def check_load_average():
    uptime_string = os.popen("uptime").read().strip()
    # sys.platform reports "darwin" on macOS, where `uptime` formats the load
    # averages slightly differently than on Linux.
    if sys.platform == "darwin":
        pattern = r"^.*load averages: \d*\.\d* \d*\.\d* (\d*\.\d*)$"
    else:
        pattern = r"^.*load average: \d*\.\d*, \d*\.\d*, (\d*\.\d*)$"
    load_av = re.match(pattern, uptime_string).group(1)
    if float(load_av) > 10:
        message = "High system load detected in uptime output: {}".format(uptime_string)
        await send_msg(client, message, force_notify=True)
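
# For reference, check_load_average parses `uptime` output that on Linux looks
# something like:
#   14:12:20 up 3 days,  2:01,  1 user,  load average: 0.42, 0.35, 0.30
# and the regex captures the last (15-minute) load average figure.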


# check_disk checks the amount of free space on the /home partition and issues
# a warning message if the available space drops under FREE_DISK_SPACE_THRESHOLD.
async def check_disk():
    # We check free disk space in 1024 byte units, so it's easy to convert.
    df = os.popen("df --block-size=1024").read().strip()
    volumes = {}
    # Iterate over the output, ignoring the header line.
    for line in df.split("\n")[1:]:
        fields = list(filter(None, line.split(" ")))
        # -1 is "mounted on", 3 is "available space" in KiB, which we convert to bytes.
        volumes[fields[-1]] = int(fields[3]) * 1024

    # List of mount points, longest to shortest. We'll use that to find the best
    # fit for the volume we want to check.
    mount_points = sorted(volumes.keys(), key=len, reverse=True)
    wd = os.popen("pwd").read().strip()
    vol = ""
    for mp in mount_points:
        if wd.startswith(mp):
            vol = mp
            break
    if vol == "":
        message = "Failed to check free disk space! Didn't find a suitable mount point to check."
        return await send_msg(client, message, file=df)

    # If we've reached the critical free disk space threshold we need to send a
    # proper notice and shut down the sia container so it doesn't get corrupted.
    if int(volumes[vol]) < FREE_DISK_SPACE_THRESHOLD_CRITICAL:
        free_space_gb = "{:.2f}".format(int(volumes[vol]) / GB)
        message = "CRITICAL! Very low disk space: {}GiB, **siad stopped**!".format(
            free_space_gb
        )
        inspect = os.popen("docker inspect sia").read().strip()
        inspect_json = json.loads(inspect)
        if inspect_json[0]["State"]["Running"]:
            # mark portal as unhealthy
            os.popen("docker exec health-check cli/disable")
            time.sleep(300)  # wait 5 minutes to propagate dns changes
            os.popen("docker stop sia")  # stop sia container
        return await send_msg(client, message, force_notify=True)

    # If we've reached the free disk space threshold we need to send a proper notice.
    if int(volumes[vol]) < FREE_DISK_SPACE_THRESHOLD:
        free_space_gb = "{:.2f}".format(int(volumes[vol]) / GB)
        message = "WARNING! Low disk space: {}GiB".format(free_space_gb)
        return await send_msg(client, message, force_notify=True)
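
# For reference, the `df --block-size=1024` output that check_disk parses looks
# roughly like:
#   Filesystem     1K-blocks      Used Available Use% Mounted on
#   /dev/sda1      491150336  41108480 425024768   9% /home
# where field 3 is the available space in KiB and the last field is the mount point.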


# check_health checks the /health-check endpoint and reports recent issues.
async def check_health():
    print("\nChecking portal health status...")

    try:
        res_check = requests.get("https://127.0.0.1/health-check", verify=False)
        json_check = res_check.json()
        json_critical = requests.get(
            "https://127.0.0.1/health-check/critical", verify=False
        ).json()
        json_extended = requests.get(
            "https://127.0.0.1/health-check/extended", verify=False
        ).json()
    except:
        trace = traceback.format_exc()
        print("[DEBUG] check_health() failed.")
        return await send_msg(
            client, "Failed to run the checks!", file=trace, force_notify=True
        )

    critical_checks_total = 0
    critical_checks_failed = 0

    extended_checks_total = 0
    extended_checks_failed = 0

    failed_records = []
    failed_records_file = None

    time_limit = datetime.utcnow() - timedelta(hours=CHECK_HOURS)

    for critical in json_critical:
        record_time = datetime.strptime(critical["date"], "%Y-%m-%dT%H:%M:%S.%fZ")
        if record_time < time_limit:
            continue
        bad = False
        for check in critical["checks"]:
            critical_checks_total += 1
            if not check["up"]:
                critical_checks_failed += 1
                bad = True
        if bad:
            failed_records.append(critical)

    for extended in json_extended:
        record_time = datetime.strptime(extended["date"], "%Y-%m-%dT%H:%M:%S.%fZ")
        if record_time < time_limit:
            continue
        bad = False
        for check in extended["checks"]:
            extended_checks_total += 1
            if not check["up"]:
                extended_checks_failed += 1
                bad = True
        if bad:
            failed_records.append(extended)

    ################################################################################
    # create a message
    ################################################################################

    message = ""
    force_notify = False

    if json_check["disabled"]:
        message += "__Portal manually disabled!__ "
    elif res_check.status_code != requests.codes["ok"]:
        message += "__Portal down!!!__ "
        force_notify = True

    if critical_checks_failed:
        message += "{}/{} CRITICAL checks failed over the last {} hours! ".format(
            critical_checks_failed, critical_checks_total, CHECK_HOURS
        )
        force_notify = True
    else:
        message += "All {} critical checks passed. ".format(critical_checks_total)

    if extended_checks_failed:
        message += "{}/{} extended checks failed over the last {} hours! ".format(
            extended_checks_failed, extended_checks_total, CHECK_HOURS
        )
        force_notify = True
    else:
        message += "All {} extended checks passed. ".format(extended_checks_total)

    if len(failed_records):
        failed_records_file = json.dumps(failed_records, indent=2)

    # Send a message if we force notification, there is a failures dump, or just
    # once daily (heartbeat) at 1 AM UTC.
    if (
        force_notify
        or json_check["disabled"]
        or failed_records_file
        or datetime.utcnow().hour == 1
    ):
        return await send_msg(
            client, message, file=failed_records_file, force_notify=force_notify
        )
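
# For reference, check_health assumes each record returned by the
# /health-check/critical and /health-check/extended endpoints has roughly this
# shape (inferred from the fields accessed above, not from the API docs):
#   {"date": "2020-01-01T01:00:00.000Z", "checks": [{"up": true, ...}, ...]}
# and that the top-level /health-check response includes a "disabled" flag.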


# contains_string is a simple helper to check whether a string contains another
# string. This is faster and easier than regex for simple word comparisons.
def contains_string(string_to_check, string_to_find):
    return string_to_find in string_to_check


# check_alerts checks the alerts returned from siad's daemon/alerts API.
async def check_alerts():
    print("\nChecking portal siad alerts...")

    ################################################################################
    # parse siac
    ################################################################################

    # Alerts
    # Execute 'siac alerts' and read the response.
    cmd_string = "docker exec {} siac alerts".format(CONTAINER_NAME)
    siac_alert_output = os.popen(cmd_string).read().strip()

    # Initialize variables.
    num_critical_alerts = 0
    num_error_alerts = 0
    num_warning_alerts = 0
    num_siafile_alerts = 0
    siafile_alerts = []

    # Pattern strings to search for.
    critical = "Severity: critical"
    error = "Severity: error"
    warning = "Severity: warning"
    health_of = "has a health of"
    siafile_alert_message = (
        "The SiaFile mentioned in the 'Cause' is below 75% redundancy"
    )

    # Split the output by line and check for the type of alert and for siafile alerts.
    for line in siac_alert_output.split("\n"):
        # Check for the type of alert.
        if contains_string(line, critical):
            num_critical_alerts += 1
        if contains_string(line, error):
            num_error_alerts += 1
        if contains_string(line, warning):
            num_warning_alerts += 1

        # Check for siafile alerts by their message text. This is so that the
        # alert severity can change and this code doesn't need to be updated.
        if contains_string(line, siafile_alert_message):
            num_siafile_alerts += 1
        if contains_string(line, health_of):
            siafile_alerts.append(line)

    # Repair Size
    # Execute 'siac renter' and read the response.
    cmd_string = "docker exec {} siac renter".format(CONTAINER_NAME)
    siac_renter_output = os.popen(cmd_string).read().strip()

    # Initialize variables.
    repair_remaining = ""

    # Pattern string to search for.
    repair_str = "Repair Data Remaining"

    # Split the output by line and check for the repair remaining.
    for line in siac_renter_output.split("\n"):
        if contains_string(line, repair_str):
            repair_remaining = line.split(":")[1].strip()

    ################################################################################
    # create a message
    ################################################################################

    message = ""
    force_notify = False

    if num_critical_alerts > 0:
        message += "{} CRITICAL Alerts found! ".format(num_critical_alerts)
        force_notify = True
    if num_error_alerts > 0:
        message += "{} Error Alerts found! ".format(num_error_alerts)

    # Subtract the siafile alerts from the warning alerts since we announce
    # them separately.
    num_warning_alerts -= num_siafile_alerts
    message += "{} Warning Alerts found. ".format(num_warning_alerts)
    message += "{} SiaFiles with bad health found. ".format(num_siafile_alerts)

    # Add repair size.
    message += "{} of repair remaining. ".format(repair_remaining)

    # Send a message if we force notification, or just once daily (heartbeat)
    # at 1 AM UTC.
    if force_notify or datetime.utcnow().hour == 1:
        return await send_msg(
            client, message, file=siac_alert_output, force_notify=force_notify
        )
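
# For reference, the string matching in check_alerts relies on `siac alerts`
# printing fields such as "Severity: warning" and siafile 'Cause' lines
# containing "has a health of" on their own lines; the exact layout depends on
# the siac version running in the container.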


# check_portal_size checks the number of files that the portal is managing to
# determine whether it is time to rotate it out.
async def check_portal_size():
    print("\nChecking portal size...")

    # Execute siac renter to check the size of the portal.
    #
    # NOTE: we should leave this always executing the docker command against the
    # sia container, as it will then simply fail on maintenance servers where we
    # don't care about this check.
    cmd_string = "docker exec sia siac renter"
    siac_renter_output = os.popen(cmd_string).read().strip()

    # Initialize variables.
    num_files = 0
    max_files = 1500000  # 1.5 million
    files_text = "Files:"
    for line in siac_renter_output.split("\n"):
        if line.strip().startswith(files_text):
            # The file count is the numeric token on the "Files:" line.
            for el in line.split():
                if el.isdigit():
                    num_files = int(el)

    ################################################################################
    # create a message
    ################################################################################

    message = ""
    force_notify = False

    if num_files > max_files:
        message += "Portal has {} files! Consider rotating! ".format(num_files)
        # Force a notification once we are more than 40% over the limit.
        force_notify = num_files > max_files * 1.4
    else:
        message += "Portal has {} files. ".format(num_files)

    # Send a message if we force notification, or just once daily (heartbeat) at 1 AM UTC.
    if force_notify or datetime.utcnow().hour == 1:
        return await send_msg(client, message, force_notify=force_notify)


client.run(bot_token)