Weekly Deploy for Dec 6, 2021. Including cache pruning fix

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEl73IdNcm91gzFlRkmt3TRPEwV/YFAmGuXlsACgkQmt3TRPEw
 V/b+dA//THc5/LWTRs8r7BAOQj8V9QzxJsuS7g+gV4zeDW5ymXlK0lSRSXvjNsOW
 mQ21Qx5fE05ztRlUNOUPKd1bZUgnL4O5AUBWUm87rq6NXHVop1JYaFGOpbfUpRZT
 HWE2SwGgwBRUjSgkwkz+jsg8SkajADwThLf8aSNAmKWienondtEFA8hMtBqUcyjK
 A7AsQbjjjS62lUsP78SJEPM5pG0vX+FxHzMAUtkqT94foJlI4RosyAp3yDPjrR/8
 eUn/CCci9p/ORzKnKos9DAo+1G5X0qgrP/9aGlnpDlkYZ2SQfJVMAKWaMpfIUsWi
 zihJYPogFAUHaDY1ddEnx47aYRN7hmumDGM+iUvbi39LshPBdBaeW56RRyA3qBel
 pD5eXr53C6kje5Wu6t9SbzILjh5cuQ8vd50hnrJ282vSMZbwqwwKjpliZu4FsrQC
 OiA1K7l7P8gAiL8d5YQOJ2uBTviAA0A8wNVPHx8qd/hWNOr7WxSM75POyTvRNtRq
 GdZko6/FDrK48Gw8LNxZMUa7x5LfjgtyiWfGsYomRG3GLBcy44neEhk3P37sELJv
 /jr0aOhLW+95r3OI7sUqnMppWdCKa5XFEqxTHc5Zax+NKzao3aSvRDNtujKxe+IC
 NfzJVXLtsde/Bho1B1q7GSG3YTINcJ422YwDNWfbCV0qajW40GU=
 =JEZB
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEWWr7bKPf/zmGZCrIBrk1SrCL6cYFAmGyAhsACgkQBrk1SrCL
 6ca/cwf/eXbHwojF27fYPoziWWskatsdCKJ2jnu1ovqVceVYztX9Kt+CQ++Ri4A9
 LqKvDPwLfKXF0W3x/nK6FtL2CSq81SPED/mLKMH/XTsbwtsnPFbnAzxdl7tqSA2Y
 6mqyBnyeyGby6tGW2ZE5EAYRv/dkKYzlTp7WMqoalleoRy6sbS6RZ8bJMA8xrbPo
 RwIAfvl4MVItt/qpws+J9g/qQ2QR8ZZKZtDxLxGNSFw9GDQBk/hKxRI9nP1WEsVp
 SEHCmfveOAdbzf8Cbr4j/m6wmoXPr+2qF5QIJ8eDiEAQ2YLPKbMpk6xO500geAFl
 k+6BHOQR2dOfsyyo33IFIcV9NyvDAQ==
 =F885
 -----END PGP SIGNATURE-----

Merge tag 'deploy-2021-12-06' into ivo/clamav

Weekly Deploy for Dec 6, 2021. Including cache pruning fix

# gpg: Signature made Mon Dec  6 20:02:51 2021 CET
# gpg:                using RSA key 97BDC874D726F758331654649ADDD344F13057F6
# gpg: key 9ADDD344F13057F6: public key "Matthew Sevey (Created on MacOS Big Sur for Yubi Key Nano 5) <mjsevey@gmail.com>" imported
# gpg: Total number processed: 1
# gpg:               imported: 1
# gpg: Good signature from "Matthew Sevey (Created on MacOS Big Sur for Yubi Key Nano 5) <mjsevey@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 97BD C874 D726 F758 3316  5464 9ADD D344 F130 57F6
This commit is contained in:
Ivaylo Novakov 2021-12-09 14:18:19 +01:00
commit 39c4790364
31 changed files with 353 additions and 57 deletions

4
.gitignore vendored
View File

@ -96,3 +96,7 @@ docker/kratos/cr_certs/*.key
# Oathkeeper JWKS signing token
docker/kratos/oathkeeper/id_token.jwks.json
/docker/kratos/config/kratos.yml
# Setup-script log files
/setup-scripts/serverload.log
/setup-scripts/serverload.json

View File

@ -0,0 +1 @@
- Add `/serverload` endpoint for CPU usage and free disk space

View File

@ -0,0 +1 @@
- Add abuse report configuration

View File

@ -0,0 +1,2 @@
- Remove hardcoded Airtable default values from blocklist script. Portal
operators need to define their own values in portal common config (LastPass).

View File

@ -0,0 +1,2 @@
- Dump disk space usage when health-checker script disables portal due to
critical free disk space.

View File

@ -0,0 +1 @@
- Set `min_free` parameter on the `proxy_cache_path` directive to `100g`

View File

@ -0,0 +1 @@
- Added script to prune nginx cache.

View File

@ -0,0 +1 @@
- Add trimming Airtable skylinks from Takedown Request table.

View File

@ -0,0 +1 @@
- Update handshake to use v3.0.1

5
dc
View File

@ -13,6 +13,11 @@ for i in $(seq 1 ${#PORTAL_MODULES}); do
COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.accounts.yml"
fi
# blocker module - alias "b"
if [[ ${PORTAL_MODULES:i-1:1} == "b" ]]; then
COMPOSE_FILES+=" -f docker-compose.blocker.yml"
fi
# jaeger module - alias "j"
if [[ ${PORTAL_MODULES:i-1:1} == "j" ]]; then
COMPOSE_FILES+=" -f docker-compose.jaeger.yml"

View File

@ -41,6 +41,8 @@ services:
- SKYNET_ACCOUNTS_LOG_LEVEL=${SKYNET_ACCOUNTS_LOG_LEVEL}
- KRATOS_ADDR=${KRATOS_ADDR}
- OATHKEEPER_ADDR=${OATHKEEPER_ADDR}
volumes:
- ./docker/accounts/conf:/accounts/conf
expose:
- 3000
networks:

View File

@ -0,0 +1,26 @@
# Compose overlay for the blocker module (enabled via the "b" alias in
# PORTAL_MODULES — see the `dc` wrapper script).
version: "3.7"
# Shared logging anchor: JSON file logs rotated at 10 MB, keeping 3 files.
x-logging: &default-logging
driver: json-file
options:
max-size: "10m"
max-file: "3"
services:
# Abuse-blocker service, built from ./docker/blocker/Dockerfile.
blocker:
build:
context: ./docker/blocker
dockerfile: Dockerfile
container_name: blocker
restart: unless-stopped
logging: *default-logging
env_file:
- .env
expose:
- 4000
networks:
shared:
ipv4_address: 10.10.10.102
depends_on:
- mongo
- sia

View File

@ -103,7 +103,7 @@ services:
build:
context: ./docker/handshake
dockerfile: Dockerfile
command: --chain-migrate=1 --wallet-migrate=1
command: --chain-migrate=2 --wallet-migrate=1
container_name: handshake
restart: unless-stopped
logging: *default-logging

16
docker/blocker/Dockerfile Normal file
View File

@ -0,0 +1,16 @@
FROM golang:1.16.7
LABEL maintainer="NebulousLabs <devs@nebulous.tech>"
# Cross-compile for linux/amd64 regardless of the build host.
ENV GOOS linux
ENV GOARCH amd64
# Branch of SkynetLabs/blocker to build; override with --build-arg branch=<name>.
ARG branch=main
WORKDIR /root
# Clone the requested branch, fetch Go module dependencies and build the
# release binary (build details live in the repo's Makefile).
RUN git clone --single-branch --branch ${branch} https://github.com/SkynetLabs/blocker.git && \
cd blocker && \
go mod download && \
make release
ENTRYPOINT ["blocker"]

View File

@ -3,13 +3,8 @@ FROM node:16.13.0-alpine
WORKDIR /opt/hsd
RUN apk update && apk add bash unbound-dev gmp-dev g++ gcc make python2 git
# Checkout a specific commit until Handshake releases the next release after
# 2.4.0 then we should switch to that tag.
#
# The commit we are targetting right now contains a fix for handling the chain
# migration code for new portals.
RUN git clone https://github.com/handshake-org/hsd.git /opt/hsd && \
cd /opt/hsd && git checkout 6f0927db32723d6320c8bff255a6ccf70b2ccd32 && cd -
cd /opt/hsd && git checkout v3.0.1 && cd -
RUN npm install --production
ENV PATH="${PATH}:/opt/hsd/bin:/opt/hsd/node_modules/.bin"

View File

@ -71,6 +71,21 @@ location /skynet/stats {
proxy_pass http://sia:9980/skynet/stats;
}
# Define path for server load endpoint
location /serverload {
# Define root directory in the nginx container to load file from
root /usr/local/share;
# including this because of peer pressure from the other routes
include /etc/nginx/conf.d/include/cors;
# tell nginx to expect json
default_type 'application/json';
# Allow for /serverload to load /serverload.json file
try_files $uri $uri.json =404;
}
location /skynet/health {
include /etc/nginx/conf.d/include/cors;
@ -90,6 +105,29 @@ location /health-check {
proxy_pass http://10.10.10.60:3100; # hardcoded ip because health-check waits for nginx
}
location /abuse/ {
if ($request_method = 'OPTIONS') {
add_header 'Access-Control-Allow-Origin' 'https://0404guluqu38oaqapku91ed11kbhkge55smh9lhjukmlrj37lfpm8no.siasky.net';
add_header 'Access-Control-Allow-Credentials' 'true';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
# pre-flight info is valid for 20 days
add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain charset=UTF-8';
add_header 'Content-Length' 0;
return 204;
}
proxy_pass http://10.10.10.102:4000/;
}
location /report-abuse {
# TODO: do a proxy_pass
return https://0404guluqu38oaqapku91ed11kbhkge55smh9lhjukmlrj37lfpm8no.siasky.net;
}
location /hns {
# match the request_uri and extract the hns domain and anything that is passed in the uri after it
# example: /hns/something/foo/bar matches:
@ -136,10 +174,10 @@ location /skynet/skyfile {
include /etc/nginx/conf.d/include/track-upload;
include /etc/nginx/conf.d/include/generate-siapath;
limit_req zone=uploads_by_ip burst=100 nodelay;
limit_req zone=uploads_by_ip burst=10 nodelay;
limit_req zone=uploads_by_ip_throttled;
limit_conn upload_conn 10;
limit_conn upload_conn 5;
limit_conn upload_conn_rl 1;
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
@ -174,6 +212,12 @@ location /skynet/tus {
include /etc/nginx/conf.d/include/cors-headers; # include cors headers but do not overwrite OPTIONS response
include /etc/nginx/conf.d/include/track-upload;
limit_req zone=uploads_by_ip burst=10 nodelay;
limit_req zone=uploads_by_ip_throttled;
limit_conn upload_conn 5;
limit_conn upload_conn_rl 1;
# TUS chunks size is 40M + leaving 10M of breathing room
client_max_body_size 50M;
@ -239,6 +283,12 @@ location /skynet/pin {
include /etc/nginx/conf.d/include/track-upload;
include /etc/nginx/conf.d/include/generate-siapath;
limit_req zone=uploads_by_ip burst=10 nodelay;
limit_req zone=uploads_by_ip_throttled;
limit_conn upload_conn 5;
limit_conn upload_conn_rl 1;
proxy_set_header User-Agent: Sia-Agent;
proxy_pass http://sia:9980$uri?siapath=$dir1/$dir2/$dir3&$args;
}

View File

@ -70,7 +70,7 @@ http {
proxy_http_version 1.1;
# proxy cache definition
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=skynet:10m max_size=50g inactive=48h use_temp_path=off;
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=skynet:10m max_size=50g min_free=100g inactive=48h use_temp_path=off;
# this runs before forking out nginx worker processes
init_by_lua_block {

View File

@ -13,7 +13,7 @@
"http-status-codes": "^2.1.2",
"lodash": "^4.17.21",
"lowdb": "^1.0.0",
"skynet-js": "^4.0.18-beta",
"skynet-js": "^4.0.19-beta",
"write-file-atomic": "^3.0.3",
"yargs": "^17.2.1"
},

View File

@ -78,12 +78,12 @@ asynckit@^0.4.0:
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
integrity sha1-x57Zf380y48robyXkLzDZkdLS3k=
axios@^0.21.1:
version "0.21.4"
resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575"
integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg==
axios@^0.24.0:
version "0.24.0"
resolved "https://registry.yarnpkg.com/axios/-/axios-0.24.0.tgz#804e6fa1e4b9c5288501dd9dff56a7a0940d20d6"
integrity sha512-Q6cWsys88HoPgAaFAVUb0WpPk0O8iTeisR9IMqy9G8AbO4NlpVknrnQS03zzF9PGAWgO3cgletO3VjV/P7VztA==
dependencies:
follow-redirects "^1.14.0"
follow-redirects "^1.14.4"
base32-decode@^1.0.0:
version "1.0.0"
@ -354,10 +354,10 @@ finalhandler@~1.1.2:
statuses "~1.5.0"
unpipe "~1.0.0"
follow-redirects@^1.14.0:
version "1.14.4"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.4.tgz#838fdf48a8bbdd79e52ee51fb1c94e3ed98b9379"
integrity sha512-zwGkiSXC1MUJG/qmeIFH2HBJx9u0V46QGUe3YR1fXG8bXQxq7fLj0RjLZQ5nubr9qNJUZrH+xUcwXEoXNpfS+g==
follow-redirects@^1.14.4:
version "1.14.5"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.5.tgz#f09a5848981d3c772b5392309778523f8d85c381"
integrity sha512-wtphSXy7d4/OR+MvIFbCVBDzZ5520qV8XfPklSN5QtxuMUJZ+b0Wnst1e1lCDocfzuCkHqj8k0FpZqO+UIaKNA==
form-data@^4.0.0:
version "4.0.0"
@ -863,12 +863,12 @@ sjcl@^1.0.8:
resolved "https://registry.yarnpkg.com/sjcl/-/sjcl-1.0.8.tgz#f2ec8d7dc1f0f21b069b8914a41a8f236b0e252a"
integrity sha512-LzIjEQ0S0DpIgnxMEayM1rq9aGwGRG4OnZhCdjx7glTaJtf4zRfpg87ImfjSJjoW9vKpagd82McDOwbRT5kQKQ==
skynet-js@^4.0.18-beta:
version "4.0.18-beta"
resolved "https://registry.yarnpkg.com/skynet-js/-/skynet-js-4.0.18-beta.tgz#4683f0837ae552802f39c0e7081a1b978b79ef4a"
integrity sha512-7mE9xrejTpRacZfhhCqx+dm7k1y6ITLZMWZnsPp13D2N9CNroyzB75Yi7033qwPtdo9i6BEzIDolZl66j+uALw==
skynet-js@^4.0.19-beta:
version "4.0.19-beta"
resolved "https://registry.yarnpkg.com/skynet-js/-/skynet-js-4.0.19-beta.tgz#d4c640898c79cf69e45aa1c3c1ed5c80aa1aeced"
integrity sha512-d8/q3E3OjUxgCCAW28gNFvbahj0ks8ym122XTopbRyvAZKk9+/Z4ians9v8Tov36Z4k/un+Ilw/0i6DtM8c8Dw==
dependencies:
axios "^0.21.1"
axios "^0.24.0"
base32-decode "^1.0.0"
base32-encode "^1.1.1"
base64-js "^1.3.1"

View File

@ -59,7 +59,7 @@
"react-svg-loader": "^3.0.3",
"react-syntax-highlighter": "^15.4.4",
"react-use": "^17.3.1",
"skynet-js": "^4.0.11-beta",
"skynet-js": "^4.0.19-beta",
"stream-browserify": "^3.0.0",
"swr": "^1.0.1",
"tailwindcss": "^2.2.19"

Binary file not shown.

View File

@ -2844,13 +2844,20 @@ axe-core@^4.0.2:
resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.3.3.tgz#b55cd8e8ddf659fe89b064680e1c6a4dceab0325"
integrity sha512-/lqqLAmuIPi79WYfRpy2i8z+x+vxU3zX2uAm0gs1q52qTuKwolOj1P8XbufpXcsydrpKx2yGn2wzAnxCMV86QA==
axios@^0.21.0, axios@^0.21.1, axios@^0.21.4:
axios@^0.21.1, axios@^0.21.4:
version "0.21.4"
resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575"
integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg==
dependencies:
follow-redirects "^1.14.0"
axios@^0.24.0:
version "0.24.0"
resolved "https://registry.yarnpkg.com/axios/-/axios-0.24.0.tgz#804e6fa1e4b9c5288501dd9dff56a7a0940d20d6"
integrity sha512-Q6cWsys88HoPgAaFAVUb0WpPk0O8iTeisR9IMqy9G8AbO4NlpVknrnQS03zzF9PGAWgO3cgletO3VjV/P7VztA==
dependencies:
follow-redirects "^1.14.4"
axobject-query@^2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/axobject-query/-/axobject-query-2.2.0.tgz#943d47e10c0b704aa42275e20edf3722648989be"
@ -5783,6 +5790,11 @@ follow-redirects@^1.0.0, follow-redirects@^1.14.0:
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.4.tgz#838fdf48a8bbdd79e52ee51fb1c94e3ed98b9379"
integrity sha512-zwGkiSXC1MUJG/qmeIFH2HBJx9u0V46QGUe3YR1fXG8bXQxq7fLj0RjLZQ5nubr9qNJUZrH+xUcwXEoXNpfS+g==
follow-redirects@^1.14.4:
version "1.14.5"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.5.tgz#f09a5848981d3c772b5392309778523f8d85c381"
integrity sha512-wtphSXy7d4/OR+MvIFbCVBDzZ5520qV8XfPklSN5QtxuMUJZ+b0Wnst1e1lCDocfzuCkHqj8k0FpZqO+UIaKNA==
for-in@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
@ -10318,17 +10330,7 @@ polished@^4.1.3:
dependencies:
"@babel/runtime" "^7.14.0"
popmotion@11.0.0:
version "11.0.0"
resolved "https://registry.yarnpkg.com/popmotion/-/popmotion-11.0.0.tgz#910e2e7077d9aeba520db8744d40bb5354992212"
integrity sha512-kJDyaG00TtcANP5JZ51od+DCqopxBm2a/Txh3Usu23L9qntjY5wumvcVf578N8qXEHR1a+jx9XCv8zOntdYalQ==
dependencies:
framesync "^6.0.1"
hey-listen "^1.0.8"
style-value-types "5.0.0"
tslib "^2.1.0"
popmotion@^11.0.0:
popmotion@11.0.0, popmotion@^11.0.0:
version "11.0.0"
resolved "https://registry.yarnpkg.com/popmotion/-/popmotion-11.0.0.tgz#910e2e7077d9aeba520db8744d40bb5354992212"
integrity sha512-kJDyaG00TtcANP5JZ51od+DCqopxBm2a/Txh3Usu23L9qntjY5wumvcVf578N8qXEHR1a+jx9XCv8zOntdYalQ==
@ -12064,12 +12066,12 @@ sjcl@^1.0.8:
resolved "https://registry.yarnpkg.com/sjcl/-/sjcl-1.0.8.tgz#f2ec8d7dc1f0f21b069b8914a41a8f236b0e252a"
integrity sha512-LzIjEQ0S0DpIgnxMEayM1rq9aGwGRG4OnZhCdjx7glTaJtf4zRfpg87ImfjSJjoW9vKpagd82McDOwbRT5kQKQ==
skynet-js@^4.0.11-beta:
version "4.0.11-beta"
resolved "https://registry.yarnpkg.com/skynet-js/-/skynet-js-4.0.11-beta.tgz#ec313d586f8e026e0b3b4b608f2f3b4a449e8a71"
integrity sha512-/fpxBeegGJeK+VWE8svUhBc2tVX0kJAZod5K172kKPO1g/GECeQLLSuWL2yvCMbSSTyH0dIFr5gemwGNsZHlMQ==
skynet-js@^4.0.19-beta:
version "4.0.19-beta"
resolved "https://registry.yarnpkg.com/skynet-js/-/skynet-js-4.0.19-beta.tgz#d4c640898c79cf69e45aa1c3c1ed5c80aa1aeced"
integrity sha512-d8/q3E3OjUxgCCAW28gNFvbahj0ks8ym122XTopbRyvAZKk9+/Z4ians9v8Tov36Z4k/un+Ilw/0i6DtM8c8Dw==
dependencies:
axios "^0.21.0"
axios "^0.24.0"
base32-decode "^1.0.0"
base32-encode "^1.1.1"
base64-js "^1.3.1"
@ -12080,16 +12082,16 @@ skynet-js@^4.0.11-beta:
post-me "^0.4.5"
randombytes "^2.1.0"
sjcl "^1.0.8"
skynet-mysky-utils "^0.2.2"
skynet-mysky-utils "^0.3.0"
tus-js-client "^2.2.0"
tweetnacl "^1.0.3"
url-join "^4.0.1"
url-parse "^1.5.1"
skynet-mysky-utils@^0.2.2:
version "0.2.3"
resolved "https://registry.yarnpkg.com/skynet-mysky-utils/-/skynet-mysky-utils-0.2.3.tgz#5007cf8f7599b665ccf016003b37a4ed0fb19abf"
integrity sha512-wRrAASn4haux2fu+2pJLv+uV/TGbBecXT1jaqD3/IQgqbEwZUpDNJJrYnYAfp/0cY5Xmuc2ZX90NNr34neAcWg==
skynet-mysky-utils@^0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/skynet-mysky-utils/-/skynet-mysky-utils-0.3.0.tgz#87fdc0a5f8547cf660280ef86b7a762269919bad"
integrity sha512-X9L6SrVTdwTUFook/E6zUWCOpXHdyspLAu0elQbbPkZCWeFpr/XXTMbiyPV3m1liYsesngAKxzaSqylaTWOGUA==
dependencies:
post-me "^0.4.5"

View File

@ -29,6 +29,12 @@ the health check.
The `portal-upgrade.sh` script upgrades the docker images for a portal and
clears any leftover images.
**nginx-prune.sh**\
The `nginx-prune.sh` script deletes all entries from nginx cache larger than
the given size and smaller entries until nginx cache disk size is smaller than
the given cache size limit. Both values are configured in
`lib/nginx-prune-cache-subscript.sh`. The script doesn't require `sudo`.
## Webportal Upgrade Procedures
TODO...

View File

@ -0,0 +1,30 @@
#!/usr/local/bin/bash

# Prunes the nginx on-disk cache.
#
# This subscript is expected to be run inside a docker container using the
# 'bash' image. The image is based on Alpine Linux. Its tools (find, stat,
# awk, sort) are non-standard versions from BusyBox.

# Once the cache grows past this size (bytes), even small entries are deleted.
MAX_CACHE_DIR_SIZE=20000000000
# Entries larger than this (bytes) are always deleted.
MAX_KEEP_FILE_SIZE=1000000000

total=0

# Walk the cache files sorted by modification time, newest first. stat emits:
# time (last modification as seconds since Epoch), filepath, size (bytes).
# NOTE(review): field-based 'read' assumes cache paths contain no whitespace
# (nginx cache entries are hex key names) — TODO confirm.
find /home/user/skynet-webportal/docker/data/nginx/cache -type f -exec stat -c "%Y %n %s" {} + \
  | sort -rgk1 \
  | while read -r mtime filepath size; do
      new_total=$((total + size))
      # We always delete all files larger than MAX_KEEP_FILE_SIZE.
      # We keep all files smaller than MAX_KEEP_FILE_SIZE while the running
      # total stays below MAX_CACHE_DIR_SIZE, then we delete smaller files too.
      if ((size <= MAX_KEEP_FILE_SIZE && new_total < MAX_CACHE_DIR_SIZE)); then
        total=$new_total
        continue
      fi
      rm -- "$filepath"
    done

6
scripts/nginx-prune.sh Executable file
View File

@ -0,0 +1,6 @@
#!/bin/bash
# Entry point for pruning the nginx cache (intended to be run from crontab).
#
# We execute the nginx cache pruning subscript from docker container so that we
# can run the pruning script in user crontab without sudo.
# The 'bash' argument is the Docker Hub bash image; the subscript path resolves
# inside the container through the /home/user bind mount.
docker run --rm -v /home/user:/home/user bash /home/user/skynet-webportal/scripts/lib/nginx-prune-cache-subscript.sh

View File

@ -6,6 +6,7 @@ from time import sleep
import traceback
import os
import sys
import re
import asyncio
import requests
@ -13,10 +14,16 @@ import json
setup()
AIRTABLE_API_KEY = os.getenv("AIRTABLE_API_KEY")
AIRTABLE_BASE = os.getenv("AIRTABLE_BASE", "app89plJvA9EqTJEc")
AIRTABLE_TABLE = os.getenv("AIRTABLE_TABLE", "Table%201")
AIRTABLE_FIELD = os.getenv("AIRTABLE_FIELD", "Link")
AIRTABLE_BASE = os.getenv("AIRTABLE_BASE")
AIRTABLE_TABLE = os.getenv("AIRTABLE_TABLE")
AIRTABLE_FIELD = os.getenv("AIRTABLE_FIELD")
# Check environment variables are defined
for value in [AIRTABLE_API_KEY, AIRTABLE_BASE, AIRTABLE_TABLE, AIRTABLE_FIELD]:
if not value:
sys.exit("Configuration error: Missing AirTable environment variable.")
async def run_checks():
@ -93,8 +100,8 @@ async def block_skylinks_from_airtable():
entry["fields"].get(AIRTABLE_FIELD, "") for entry in data["records"]
]
skylinks = [
skylink for skylink in skylinks if skylink
] # filter empty skylinks, most likely empty rows
skylink.strip() for skylink in skylinks if skylink
] # filter empty skylinks, most likely empty rows, trim whitespace
offset = data.get("offset")

View File

@ -0,0 +1,59 @@
#!/bin/bash

# Dumps disk usage to stdout or appends it to a file.
#
# Parameters:
#   $1 (optional): Filename to append the output to.
#
# Usage:
#   - Dump disk usage to stdout:
#       ./disk-usage-dump.sh
#   - Dump disk usage, appending to the file:
#       ./disk-usage-dump.sh my-log-file.log
#
# Docker containers are used to gain root access, so the script can be run
# under a regular user with no need for sudo.

# Print one timestamped disk-usage report to stdout.
dump () {
  echo
  echo "### Disk usage dump at $(date) ###"

  # Free disk space
  echo
  df -h /home/user

  # Home dirs
  echo
  echo "Home dirs:"
  docker run -v /home/user:/home/user alpine:3.15.0 du -hs /home/user/*

  # Docker data dirs
  echo
  echo "Docker data dirs:"
  docker run -v /home/user:/home/user alpine:3.15.0 du -hs /home/user/skynet-webportal/docker/data/*

  # Largest dirs/files
  echo
  echo "Dirs or files over 1GB (first 100):"
  docker run -v /home/user:/home/user alpine:3.15.0 du -h /home/user | grep -E "^[0-9]+\.?[0-9]*G" | sort -r -n | head -100
}

if [ -z "$1" ]; then
  # No log file given — dump straight to stdout.
  dump
else
  # Resolve the log path and make sure its directory exists.
  log_name=$(basename "$1")
  log_dir=$(realpath "$(dirname "$1")")
  mkdir -p "$log_dir"

  # Append the report, including stderr, to the log file.
  dump >> "$log_dir/$log_name" 2>&1
fi

View File

@ -37,6 +37,9 @@ GB = 1 << 30 # 1 GiB in bytes
FREE_DISK_SPACE_THRESHOLD = 100 * GB
FREE_DISK_SPACE_THRESHOLD_CRITICAL = 60 * GB
# Disk usage dump log file (relative to this .py script).
DISK_USAGE_DUMP_LOG = "../../devops/disk-monitor/disk-usage-dump.log"
setup()
@ -69,7 +72,9 @@ async def check_load_average():
load_av = re.match(pattern, uptime_string).group(1)
if float(load_av) > 10:
message = "High system load detected in uptime output: {}".format(uptime_string)
await send_msg(message, force_notify=True)
# Disabling pings until we have metrics solution and process to better
# address
await send_msg(message, force_notify=False)
# check_disk checks the amount of free space on the /home partition and issues
@ -103,11 +108,18 @@ async def check_disk():
message = "CRITICAL! Very low disk space: {}GiB, **siad stopped**!".format(
free_space_gb
)
# dump disk usage
script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
os.popen(
script_dir + "/disk-usage-dump.sh " + script_dir + "/" + DISK_USAGE_DUMP_LOG
)
inspect = os.popen("docker inspect sia").read().strip()
inspect_json = json.loads(inspect)
if inspect_json[0]["State"]["Running"] is True:
# mark portal as unhealthy
os.popen("docker exec health-check cli/disable")
os.popen("docker exec health-check cli disable 'critical free disk space'")
time.sleep(300) # wait 5 minutes to propagate dns changes
os.popen("docker stop sia") # stop sia container
return await send_msg(message, force_notify=True)
@ -214,7 +226,8 @@ async def check_health():
message += "{}/{} CRITICAL checks failed over the last {} hours! ".format(
critical_checks_failed, critical_checks_total, CHECK_HOURS
)
force_notify = True
# Disabling as it creates notification fatigue.
# force_notify = True
else:
message += "All {} critical checks passed. ".format(critical_checks_total)
@ -222,7 +235,8 @@ async def check_health():
message += "{}/{} extended checks failed over the last {} hours! ".format(
extended_checks_failed, extended_checks_total, CHECK_HOURS
)
force_notify = True
# Disabling as it creates notification fatigue.
# force_notify = True
else:
message += "All {} extended checks passed. ".format(extended_checks_total)

View File

@ -0,0 +1,8 @@
[Unit]
Description=Ensure serverload script is running to provide serverload stats.
[Service]
ExecStart=/bin/bash /home/user/skynet-webportal/serverload.sh
[Install]
WantedBy=multi-user.target

55
setup-scripts/serverload.sh Executable file
View File

@ -0,0 +1,55 @@
#!/bin/bash
: '
This script writes the CPU usage and the free disk space to a file in a loop.
The results are prepended to the file, so the most recent results are at the
top. This is so that the most recent information can easily be read from the
top of the file and the file can easily be truncated if needed.

This script is run by the serverload.service systemd process. The
serverload.service file should be copied to
/etc/systemd/system/serverload.service.

The systemd process can then be started with the following commands:
sudo systemctl start serverload.service

The status of the process can be checked with:
sudo systemctl is-active serverload.service
'

# Seconds to sleep between two measurements.
loop_interval=60
webportal_repo_setup_scripts="/home/user/skynet-webportal/setup-scripts"
logfile_name="serverload.log"
logfile="$webportal_repo_setup_scripts/$logfile_name"
jsonfile="serverload.json"
# Directory inside the nginx container from which /serverload is served.
nginx_docker_path="/usr/local/share"

# Create logfile if it doesn't exist
if [[ ! -e "$logfile" ]]; then
  echo "init" > "$logfile"
fi

# Write the output in an infinite loop.
while true; do
  # CPU usage: 100 minus the idle percentage reported by vmstat.
  # ($(( )) replaces the deprecated $[ ] form; no echo subshell needed.)
  cpu=$((100 - $(vmstat 1 2 | tail -1 | awk '{print $15}')))
  sed -i "1iCPU: ${cpu}" "$logfile"

  # Free disk space on the current partition.
  disk=$(df -Ph . | tail -1 | awk '{print $4}')
  sed -i "1iDISK: ${disk}" "$logfile"

  # Write the timestamp
  timestamp=$(date)
  sed -i "1iTIMESTAMP: ${timestamp}" "$logfile"

  # Write and copy a json file of the latest results to nginx docker container
  # to serve
  printf '{"cpu":"%s","disk":"%s","timestamp":"%s"}' "$cpu" "$disk" "$timestamp" > "$webportal_repo_setup_scripts/$jsonfile"
  docker cp "$webportal_repo_setup_scripts/$jsonfile" "nginx:$nginx_docker_path/$jsonfile"

  # Sleep
  sleep "$loop_interval"
done

View File

@ -4,3 +4,4 @@
30 */4 * * * /home/user/skynet-webportal/setup-scripts/blocklist-airtable.py /home/user/skynet-webportal/.env
# Fixed: '2 > &1' was a shell syntax error (job failed every run) and cron
# treats unescaped '%' as a newline, truncating the date format — see crontab(5).
0 4 * * * /home/user/skynet-webportal/scripts/db_backup.sh 1 >> /home/user/skynet-webportal/logs/db_backup_`date +"\%Y-\%m-\%d-\%H\%M"`.log 2>&1
0 5 * * * /home/user/skynet-webportal/scripts/es_cleaner.py 1 http://localhost:9200
15 * * * * /home/user/skynet-webportal/scripts/nginx-prune.sh