Merge branch 'master' into ivo/sia_docker

# Conflicts:
#	packages/website/src/pages/index.js
Ivaylo Novakov 2021-05-17 09:30:45 +02:00
commit ad79bb369c
52 changed files with 4666 additions and 6580 deletions


@ -8,10 +8,9 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Use Node.js
uses: actions/setup-node@v1
- uses: actions/setup-node@v2
with:
node-version: 14.x
node-version: 15.x
- name: Install dependencies
run: yarn
@ -22,24 +21,27 @@ jobs:
- name: "Static code analysis: health-check"
run: yarn workspace health-check prettier --check .
- name: "Static code analysis: webapp"
run: yarn workspace webapp prettier --check .
- name: "Static code analysis: website"
run: yarn workspace website prettier --check .
- name: "Build webapp"
run: yarn workspace webapp build
env:
GATSBY_API_URL: "https://siasky.net"
- name: "Static code analysis: dashboard"
run: yarn workspace dashboard prettier --check .
- name: Cypress run
uses: cypress-io/github-action@v2
env:
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
working-directory: packages/webapp
record: true
start: npx http-server public --port 8000
wait-on: "http://localhost:8000"
# - name: "Build webapp"
# run: yarn workspace webapp build
# env:
# GATSBY_API_URL: "https://siasky.net"
- name: Cypress cache prune
run: yarn cypress cache prune
# - name: Cypress run
# uses: cypress-io/github-action@v2
# env:
# CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# working-directory: packages/webapp
# record: true
# start: npx http-server public --port 8000
# wait-on: "http://localhost:8000"
# - name: Cypress cache prune
# run: yarn cypress cache prune


@ -169,7 +169,6 @@ Steps:
There is some configuration that needs to be added to your `.env` file, namely:
1. CR_NODE - the name of your node
1. CR_IP - the public IP of your node
1. CR_CLUSTER_NODES - a list of IPs and ports which make up your cluster, e.g.
`95.216.13.185:26257,147.135.37.21:26257,144.76.136.122:26257`. This will be the list of nodes that will make up your
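As a purely hypothetical illustration (the node name is invented; the IPs are the example values above), the corresponding `.env` fragment for one node of such a cluster could look like:

```
CR_NODE=eu-ger-1
CR_IP=95.216.13.185
CR_CLUSTER_NODES=95.216.13.185:26257,147.135.37.21:26257,144.76.136.122:26257
```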


@ -15,6 +15,10 @@ services:
depends_on:
- accounts
health-check:
environment:
- ACCOUNTS_ENABLED=1
accounts:
build:
context: ./docker/accounts
@ -121,8 +125,6 @@ services:
- NEXT_PUBLIC_KRATOS_BROWSER_URL=${SKYNET_DASHBOARD_URL}/.ory/kratos/public
- NEXT_PUBLIC_KRATOS_PUBLIC_URL=http://oathkeeper:4455/.ory/kratos/public
- NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=${STRIPE_PUBLISHABLE_KEY}
volumes:
- ./docker/data/dashboard/.next:/usr/app/.next
networks:
shared:
ipv4_address: 10.10.10.85

docker-compose.jaeger.yml Normal file

@ -0,0 +1,96 @@
version: '3.7'
services:
sia:
environment:
- JAEGER_DISABLED=${JAEGER_DISABLED:-true} # Enable/Disable tracing
- JAEGER_SERVICE_NAME=${PORTAL_NAME:-Skyd} # change to e.g. eu-ger-1
# Configuration
# See https://github.com/jaegertracing/jaeger-client-go#environment-variables
# for all options.
- JAEGER_SAMPLER_TYPE=probabilistic
- JAEGER_SAMPLER_PARAM=0.1
- JAEGER_AGENT_HOST=jaeger-agent
- JAEGER_AGENT_PORT=6831
- JAEGER_REPORTER_LOG_SPANS=false
jaeger-agent:
image: jaegertracing/jaeger-agent
command: [ "--reporter.grpc.host-port=jaeger-collector:14250", "--reporter.grpc.retry.max=1000" ]
container_name: jaeger-agent
restart: on-failure
expose:
- 6831
- 6832
- 5778
environment:
- LOG_LEVEL=debug
networks:
shared:
ipv4_address: 10.10.10.90
depends_on:
- jaeger-collector
jaeger-collector:
image: jaegertracing/jaeger-collector
entrypoint: /wait_to_start.sh
container_name: jaeger-collector
restart: on-failure
expose:
- 14269
- 14268
- 14250
environment:
- SPAN_STORAGE_TYPE=elasticsearch
- LOG_LEVEL=debug
- WAIT_START_CMD=/go/bin/collector-linux --es.num-shards=1 --es.num-replicas=0 --es.server-urls=http://elasticsearch:9200
- WAIT_COMMAND=wget -qO index.html http://elasticsearch:9200
- WAIT_SLEEP=1
- WAIT_LOOPS=600
volumes:
- ./scripts/wait_to_start.sh:/wait_to_start.sh:ro
networks:
shared:
ipv4_address: 10.10.10.91
depends_on:
- elasticsearch
jaeger-query:
image: jaegertracing/jaeger-query
entrypoint: /wait_to_start.sh
container_name: jaeger-query
restart: on-failure
ports:
- "127.0.0.1:16686:16686"
expose:
- 16687
environment:
- SPAN_STORAGE_TYPE=elasticsearch
- LOG_LEVEL=debug
- WAIT_START_CMD=/go/bin/query-linux --es.num-shards=1 --es.num-replicas=0 --es.server-urls=http://elasticsearch:9200
- WAIT_COMMAND=wget -qO index.html http://elasticsearch:9200
- WAIT_SLEEP=1
- WAIT_LOOPS=600
volumes:
- ./scripts/wait_to_start.sh:/wait_to_start.sh:ro
networks:
shared:
ipv4_address: 10.10.10.92
depends_on:
- elasticsearch
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.8.15
container_name: elasticsearch
restart: on-failure
environment:
- discovery.type=single-node
volumes:
# This dir needs to be chowned to 1000:1000
- ./docker/data/elasticsearch/data:/usr/share/elasticsearch/data
ports:
# We need to expose this port, so we can prune the indexes.
- "127.0.0.1:9200:9200"
networks:
shared:
ipv4_address: 10.10.10.93
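Since this is a standalone compose file, it can presumably be layered on top of the base stack with docker-compose's standard `-f` mechanism; a sketch of such an invocation (the exact command used for this repo is not shown in this diff):

```
# enable tracing by layering the jaeger override onto the base compose file
docker-compose -f docker-compose.yml -f docker-compose.jaeger.yml up -d
```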


@ -80,7 +80,6 @@ services:
- 80
depends_on:
- sia
- health-check
- handshake-api
- website
@ -155,12 +154,12 @@ services:
networks:
shared:
ipv4_address: 10.10.10.60
env_file:
- .env
environment:
- HOSTNAME=0.0.0.0
- PORTAL_URL=http://nginx
- STATE_DIR=/usr/app/state
expose:
- 3100
depends_on:
- handshake
- handshake-api
- caddy


@ -1,30 +1,41 @@
(custom.domain) {
{$DOMAIN_NAME} {
tls {$EMAIL_ADDRESS}
reverse_proxy nginx:80
# The block below is optional: it generates an internal certificate for the server ip address.
# It is useful in case you have services trying to reach the server through its ip and not the domain, like health checks.
# Since the certificate is internal, browsers will warn you when connecting, but that's not a problem.
:443 {
tls internal {
on_demand
}
reverse_proxy nginx:80
}
(siasky.net) {
siasky.net, *.siasky.net, *.hns.siasky.net {
tls {
dns route53 {
max_retries 50
}
# Make sure you have SSL_CERTIFICATE_STRING specified in your .env file, because it is needed to fetch the correct certificates.
# It needs to have at least 3 parts: the absolute part (ie. example.com), the wildcard part (ie. *.example.com) and
# the hns wildcard part (ie. *.hns.example.com). The resulting string should look like:
# example.com, *.example.com, *.hns.example.com
# In addition, if you are running multiple servers for a single portal like we do on siasky.net, you might want to
# add an aliased string that helps you access and distinguish between the servers; the result would look like:
# example.com, *.example.com, *.hns.example.com, *.germany.example.com, *.hns.germany.example.com
# Note that you don't need to specify the absolute part for the alias since it's already covered by the wildcard part
# of the original certificate string (*.example.com).
{$SSL_CERTIFICATE_STRING} {
# If you want to use the basic http-01 certificate challenge (simple, good for a one-server setup),
# uncomment the line below, make sure you have EMAIL_ADDRESS specified in your .env file,
# and comment out the tls block that contains the dns challenge configuration.
# tls {$EMAIL_ADDRESS}
tls {
# We are using route53 as our dns provider and it requires the additional AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# environment variables in the .env file. You can use other providers by using the respective package from
# https://github.com/caddy-dns in docker/caddy/Dockerfile instead of our route53 one.
dns route53 {
max_retries 50
}
reverse_proxy nginx:80
}
}
(localhost) {
:443 {
tls internal {
on_demand
}
reverse_proxy nginx:80
}
reverse_proxy nginx:80
}
import localhost
# import custom.domain
# import siasky.net


@ -1,4 +1,4 @@
FROM node:15.14.0-alpine
FROM node:16.1.0-alpine
WORKDIR /opt/hsd


@ -23,7 +23,7 @@
preserve_host: true
url: "http://dashboard:3000"
match:
url: "http://oathkeeper:4455/<{_next/*,auth/*,recovery,verify,error,favicon.ico}{/,}>"
url: "http://oathkeeper:4455/<{_next/**,auth/**,recovery,verify,error,favicon.ico}{/,}>"
methods:
- GET
authenticators:
@ -38,7 +38,7 @@
preserve_host: true
url: "http://dashboard:3000"
match:
url: "http://oathkeeper:4455/<{,api/*,settings,uploads,downloads,payments}>"
url: "http://oathkeeper:4455/<{,api/**,settings,uploads,downloads,payments}>"
methods:
- GET
- POST
@ -65,7 +65,7 @@
preserve_host: true
url: "http://accounts:3000"
match:
url: "http://oathkeeper<{,:4455}>/<{stripe/prices,stripe/webhook}>"
url: "http://oathkeeper<{,:4455}>/<{health,stripe/prices,stripe/webhook}>"
methods:
- GET
- POST


@ -1,4 +1,4 @@
FROM openresty/openresty:1.19.3.1-2-bionic
FROM openresty/openresty:1.19.3.1-8-bionic
# RUN apt-get update -qq && apt-get install cron logrotate -qq
RUN luarocks install luasocket


@ -39,6 +39,13 @@ set_real_ip_from 172.16.0.0/12;
set_real_ip_from 192.168.0.0/16;
real_ip_header X-Forwarded-For;
# skynet-jwt contains dash so we cannot use $cookie_skynet-jwt
# https://richardhart.me/2012/03/18/logging-nginx-cookies-with-dashes/
map $http_cookie $skynet_jwt {
default '';
~skynet-jwt=(?<match>[^\;]+) $match;
}
upstream siad {
server sia:9980;
}
@ -65,23 +72,23 @@ server {
rewrite ^/skynet/blacklist /skynet/blocklist permanent;
rewrite ^/account/(.*) https://account.$domain.$tld/$1 permanent;
# This is the only safe workaround to reroute based on some conditions
# See https://www.nginx.com/resources/wiki/start/topics/depth/ifisevil/
recursive_error_pages on;
# redirect links with base32 encoded skylink in subdomain
error_page 460 = @base32_subdomain;
if ($base32_subdomain != "") {
return 460;
}
# redirect links with handshake domain on hns subdomain
error_page 461 = @hns_domain;
if ($hns_domain != "") {
return 461;
}
location / {
# This is the only safe workaround to reroute based on some conditions
# See https://www.nginx.com/resources/wiki/start/topics/depth/ifisevil/
recursive_error_pages on;
# redirect links with base32 encoded skylink in subdomain
error_page 460 = @base32_subdomain;
if ($base32_subdomain != "") {
return 460;
}
# redirect links with handshake domain on hns subdomain
error_page 461 = @hns_domain;
if ($hns_domain != "") {
return 461;
}
include /etc/nginx/conf.d/include/cors;
proxy_pass http://website:9000;
@ -140,7 +147,7 @@ server {
}
proxy_cache skynet;
proxy_cache_valid any 10m; # cache stats for 10 minutes
proxy_cache_valid any 1m; # cache stats for 1 minute
proxy_set_header User-Agent: Sia-Agent;
proxy_read_timeout 5m; # extend the read timeout
proxy_pass http://siad/skynet/stats;
@ -151,7 +158,7 @@ server {
access_log off; # do not log traffic to health-check endpoint
proxy_pass http://health-check:3100;
proxy_pass http://10.10.10.60:3100; # hardcoded ip because health-check waits for nginx
}
location /hns {
@ -379,6 +386,26 @@ server {
}
}
# endpoint implementing the resumable file upload open protocol https://tus.io
location /skynet/tus {
include /etc/nginx/conf.d/include/cors;
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
proxy_read_timeout 600;
proxy_request_buffering off; # stream uploaded files through the proxy as they come in
proxy_set_header Expect $http_expect;
# proxy /skynet/tus requests to siad endpoint with all arguments
proxy_pass http://siad;
}
location /skynet/metadata {
include /etc/nginx/conf.d/include/cors;
proxy_set_header User-Agent: Sia-Agent;
proxy_pass http://siad;
}
location ~ "^/(([a-zA-Z0-9-_]{46}|[a-z0-9]{55})(/.*)?)$" {
include /etc/nginx/conf.d/include/cors;
include /etc/nginx/conf.d/include/proxy-buffer;
@ -496,8 +523,12 @@ server {
internal; # internal endpoint only
access_log off; # do not log traffic
proxy_cache skynet; # use general nginx cache
proxy_cache_key $uri+$skynet_jwt; # include skynet-jwt cookie (mapped to skynet_jwt)
proxy_cache_valid 200 401 1m; # cache success and unauthorized responses for 1 minute
rewrite /accounts(.*) $1 break; # drop the /accounts prefix from uri
proxy_pass http://accounts:3000;
proxy_pass http://10.10.10.70:3000; # hardcoded ip because accounts might not be available
}
# include custom locations, specific to the server


@ -1,8 +1,8 @@
if ($request_method = 'OPTIONS') {
more_set_headers 'Access-Control-Allow-Origin: $http_origin';
more_set_headers 'Access-Control-Allow-Credentials: true';
more_set_headers 'Access-Control-Allow-Methods: GET, POST, OPTIONS, PUT, DELETE';
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range';
more_set_headers 'Access-Control-Allow-Methods: GET, POST, HEAD, OPTIONS, PUT, PATCH, DELETE';
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,X-HTTP-Method-Override,upload-offset,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,location';
more_set_headers 'Access-Control-Max-Age: 1728000';
more_set_headers 'Content-Type: text/plain; charset=utf-8';
more_set_headers 'Content-Length: 0';
@ -11,6 +11,6 @@ if ($request_method = 'OPTIONS') {
more_set_headers 'Access-Control-Allow-Origin: $http_origin';
more_set_headers 'Access-Control-Allow-Credentials: true';
more_set_headers 'Access-Control-Allow-Methods: GET, POST, OPTIONS, PUT, DELETE';
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range';
more_set_headers 'Access-Control-Expose-Headers: Content-Length,Content-Range,Skynet-File-Metadata,Skynet-Skylink,Skynet-Portal-Api';
more_set_headers 'Access-Control-Allow-Methods: GET, POST, HEAD, OPTIONS, PUT, PATCH, DELETE';
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,X-HTTP-Method-Override,upload-offset,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,location';
more_set_headers 'Access-Control-Expose-Headers: Content-Length,Content-Range,Skynet-File-Metadata,Skynet-Skylink,Skynet-Portal-Api,upload-offset,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,location';
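A quick way to sanity-check the headers above is a browser-style preflight request; a minimal sketch, assuming a portal reachable at https://siasky.net (adjust the URL and origin for your own deployment):

```
# simulate a preflight for a tus upload and dump only the response headers
curl -s -X OPTIONS "https://siasky.net/skynet/tus" \
  -H "Origin: https://example.com" \
  -H "Access-Control-Request-Method: POST" \
  -H "Access-Control-Request-Headers: tus-resumable,upload-length" \
  -D - -o /dev/null
```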


@ -1,4 +1,5 @@
# if you are expecting large headers (ie. Skynet-Skyfile-Metadata), tune these values to your needs
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
# read more: https://www.getpagespeed.com/server-setup/nginx/tuning-proxy_buffer_size-in-nginx
proxy_buffer_size 4096k;
proxy_buffers 64 256k;
proxy_busy_buffers_size 4096k; # at least as high as proxy_buffer_size


@ -1,4 +1,4 @@
FROM node:15.14.0-alpine
FROM node:16.1.0-alpine
WORKDIR /usr/app


@ -8,36 +8,32 @@
"start": "next start"
},
"dependencies": {
"@fontsource/metropolis": "^4.1.0",
"@ory/kratos-client": "^0.5.4-alpha.1",
"@stripe/react-stripe-js": "^1.4.0",
"@stripe/stripe-js": "^1.13.0",
"@tailwindcss/forms": "^0.2.1",
"autoprefixer": "^10.2.5",
"classnames": "^2.2.6",
"clipboardy": "^2.3.0",
"dayjs": "^1.10.4",
"express-jwt": "^6.0.0",
"fast-levenshtein": "^3.0.0",
"formik": "^2.2.6",
"http-status-codes": "^2.1.4",
"jwks-rsa": "^1.12.2",
"@fontsource/metropolis": "4.3.0",
"@ory/kratos-client": "0.5.4-alpha.1",
"@stripe/react-stripe-js": "1.4.0",
"@stripe/stripe-js": "1.14.0",
"@tailwindcss/forms": "0.3.2",
"autoprefixer": "10.2.5",
"classnames": "2.3.1",
"clipboardy": "2.3.0",
"dayjs": "1.10.4",
"express-jwt": "6.0.0",
"fast-levenshtein": "3.0.0",
"formik": "2.2.6",
"http-status-codes": "2.1.4",
"ky": "0.25.1",
"next": "^10.0.8",
"postcss": "^8.2.8",
"prettier": "^2.2.1",
"pretty-bytes": "^5.5.0",
"react": "17.0.1",
"react-dom": "17.0.1",
"skynet-js": "^3.0.0",
"square": "^9.0.0",
"stripe": "^8.137.0",
"superagent": "^6.1.0",
"swr": "^0.5.0",
"tailwindcss": "^2.0.3",
"yup": "^0.32.9"
},
"devDependencies": {
"@tailwindcss/forms": "^0.2.1"
"next": "10.2.0",
"postcss": "8.2.14",
"prettier": "2.3.0",
"pretty-bytes": "5.6.0",
"react": "17.0.2",
"react-dom": "17.0.2",
"skynet-js": "3.0.2",
"square": "10.0.0",
"stripe": "8.148.0",
"superagent": "6.1.0",
"swr": "0.5.6",
"tailwindcss": "2.1.2",
"yup": "0.32.9"
}
}


@ -200,7 +200,9 @@ export default function Home({ plans }) {
<div className="ml-5 w-0 flex-1">
<dt className="text-sm font-medium text-gray-500 truncate">Storage used</dt>
<dd className="flex items-baseline">
<div className="text-2xl font-semibold text-grey-900">{prettyBytes(stats?.totalUploadsSize ?? 0)}</div>
<div className="text-2xl font-semibold text-grey-900">
{prettyBytes(stats?.totalUploadsSize ?? 0)}
</div>
</dd>
</div>
</div>


@ -1,4 +1,4 @@
FROM node:15.14.0-alpine
FROM node:16.1.0-alpine
WORKDIR /usr/app


@ -1,4 +1,4 @@
FROM node:15.14.0-alpine
FROM node:16.1.0-alpine
WORKDIR /usr/app
@ -7,9 +7,9 @@ RUN yarn --no-lockfile
COPY src src
COPY cli cli
RUN echo '*/5 * * * * /usr/app/cli/run critical' >> /etc/crontabs/root
RUN echo '0 * * * * /usr/app/cli/run verbose' >> /etc/crontabs/root
RUN echo '*/5 * * * * /usr/app/cli/run critical > /dev/stdout' >> /etc/crontabs/root
RUN echo '0 * * * * /usr/app/cli/run extended > /dev/stdout' >> /etc/crontabs/root
EXPOSE 3100
ENV NODE_ENV production
CMD [ "sh", "-c", "crond ; node --max-http-header-size=64000 src/index.js" ]
CMD [ "sh", "-c", "crond ; echo $(node src/whatismyip.js) siasky.net account.siasky.net >> /etc/hosts ; node --max-http-header-size=64000 src/index.js" ]


@ -6,13 +6,16 @@
"dependencies": {
"deep-object-diff": "^1.1.0",
"express": "^4.17.1",
"form-data": "^4.0.0",
"got": "^11.8.2",
"graceful-fs": "^4.2.6",
"hasha": "^5.2.2",
"http-status-codes": "^2.1.2",
"lodash": "^4.17.21",
"lowdb": "^1.0.0",
"object-hash": "^2.1.1",
"superagent": "^6.0.0",
"tmp": "^0.2.1",
"yargs": "^16.2.0"
"skynet-js": "^3.0.2",
"write-file-atomic": "^3.0.3",
"yargs": "^17.0.1"
},
"devDependencies": {
"prettier": "^2.2.1"


@ -0,0 +1,28 @@
const fs = require("graceful-fs");
const Base = require("lowdb/adapters/Base");
const { sync: writeFileAtomicSync } = require("write-file-atomic");
class FileSyncAtomic extends Base {
read() {
if (fs.existsSync(this.source)) {
try {
const data = fs.readFileSync(this.source, "utf-8").trim();
return data ? this.deserialize(data) : this.defaultValue;
} catch (e) {
if (e instanceof SyntaxError) {
e.message = `Malformed JSON in file: ${this.source}\n${e.message}`;
}
throw e;
}
} else {
writeFileAtomicSync(this.source, this.serialize(this.defaultValue));
return this.defaultValue;
}
}
write(data) {
return writeFileAtomicSync(this.source, this.serialize(data));
}
}
module.exports = FileSyncAtomic;


@ -1,11 +1,11 @@
const db = require("../db");
const { getYesterdayISOString } = require("../utils");
// returns all verbose health check entries
// returns all extended health check entries
module.exports = (req, res) => {
const yesterday = getYesterdayISOString();
const entries = db
.get("verbose")
.get("extended")
.orderBy("date", "desc")
.filter(({ date }) => date > yesterday)
.value();
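A minimal sketch of querying the renamed endpoint, assuming the defaults elsewhere in this diff (the service exposes port 3100 and the route is registered in `src/index.js` below):

```
# fetch the extended health check entries from the last 24 hours
curl -s http://localhost:3100/health-check/extended
```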


@ -1,56 +1,92 @@
const fs = require("fs");
const superagent = require("superagent");
const tmp = require("tmp");
const got = require("got");
const FormData = require("form-data");
const { StatusCodes } = require("http-status-codes");
const { calculateElapsedTime, getResponseContent } = require("../utils");
// uploadCheck returns the result of uploading a sample file
async function uploadCheck(done) {
const time = process.hrtime();
const file = tmp.fileSync();
const form = new FormData();
const payload = Buffer.from(new Date()); // current date to ensure data uniqueness
const data = { up: false };
fs.writeSync(file.fd, Buffer.from(new Date())); // write current date to temp file
form.append("file", payload, { filename: "time.txt", contentType: "text/plain" });
superagent
.post(`${process.env.PORTAL_URL}/skynet/skyfile`)
.attach("file", file.name, file.name)
.end((error, response) => {
file.removeCallback();
try {
const response = await got.post(`${process.env.SKYNET_PORTAL_API}/skynet/skyfile`, { body: form });
const statusCode = (response && response.statusCode) || (error && error.statusCode) || null;
data.statusCode = response.statusCode;
data.up = true;
data.ip = response.ip;
} catch (error) {
data.statusCode = error.response?.statusCode || error.statusCode || error.status;
data.errorMessage = error.message;
data.errorResponseContent = getResponseContent(error.response);
data.ip = error?.response?.ip ?? null;
}
done({
name: "upload_file",
up: statusCode === StatusCodes.OK,
statusCode,
errorResponseContent: getResponseContent(error?.response),
time: calculateElapsedTime(time),
});
});
done({
name: "upload_file",
time: calculateElapsedTime(time),
...data,
});
}
// downloadCheck returns the result of downloading the hard coded link
async function downloadCheck(done) {
const time = process.hrtime();
const skylink = "AACogzrAimYPG42tDOKhS3lXZD8YvlF8Q8R17afe95iV2Q";
let statusCode, errorResponseContent;
const data = { up: false };
try {
const response = await superagent.get(`${process.env.PORTAL_URL}/${skylink}?nocache=true`);
const response = await got(`${process.env.SKYNET_PORTAL_API}/${skylink}?nocache=true`);
statusCode = response.statusCode;
data.statusCode = response.statusCode;
data.up = true;
data.ip = response.ip;
} catch (error) {
statusCode = error.statusCode || error.status;
errorResponseContent = getResponseContent(error.response);
data.statusCode = error?.response?.statusCode || error.statusCode || error.status;
data.errorMessage = error.message;
data.errorResponseContent = getResponseContent(error.response);
data.ip = error?.response?.ip ?? null;
}
done({
name: "download_file",
up: statusCode === StatusCodes.OK,
statusCode,
errorResponseContent,
time: calculateElapsedTime(time),
...data,
});
}
module.exports = [uploadCheck, downloadCheck];
async function accountHealthCheck(done) {
const time = process.hrtime();
const data = { up: false };
try {
const response = await got(`${process.env.SKYNET_DASHBOARD_URL}/health`, { responseType: "json" });
data.statusCode = response.statusCode;
data.response = response.body;
data.up = response.body.dbAlive === true;
data.ip = response.ip;
} catch (error) {
data.statusCode = error?.response?.statusCode || error.statusCode || error.status;
data.errorMessage = error.message;
data.errorResponseContent = getResponseContent(error.response);
data.ip = error?.response?.ip ?? null;
}
done({
name: "account_health",
time: calculateElapsedTime(time),
...data,
});
}
const checks = [uploadCheck, downloadCheck];
if (process.env.ACCOUNTS_ENABLED) {
checks.push(accountHealthCheck);
}
module.exports = checks;

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,12 +1,12 @@
const fs = require("fs");
const fs = require("graceful-fs");
const low = require("lowdb");
const FileSync = require("lowdb/adapters/FileSync");
const FileSyncAtomic = require("./adapters/FileSyncAtomic");
if (!fs.existsSync(process.env.STATE_DIR)) fs.mkdirSync(process.env.STATE_DIR);
const adapter = new FileSync(`${process.env.STATE_DIR}/state.json`);
const adapter = new FileSyncAtomic(`${process.env.STATE_DIR}/state.json`);
const db = low(adapter);
db.defaults({ disabled: false, critical: [], verbose: [] }).write();
db.defaults({ disabled: false, critical: [], extended: [] }).write();
module.exports = db;


@ -1,7 +1,11 @@
process.env.NODE_ENV = process.env.NODE_ENV || "development";
if (!process.env.PORTAL_URL) {
throw new Error("You need to provide PORTAL_URL environment variable");
if (!process.env.SKYNET_PORTAL_API) {
throw new Error("You need to provide SKYNET_PORTAL_API environment variable");
}
if (process.env.ACCOUNTS_ENABLED && !process.env.SKYNET_DASHBOARD_URL) {
throw new Error("You need to provide SKYNET_DASHBOARD_URL environment variable when accounts are enabled");
}
const express = require("express");
@ -21,7 +25,7 @@ server.use((req, res, next) => {
server.get("/health-check", require("./api/index"));
server.get("/health-check/critical", require("./api/critical"));
server.get("/health-check/verbose", require("./api/verbose"));
server.get("/health-check/extended", require("./api/extended"));
server.get("/health-check/disabled", require("./api/disabled"));
server.listen(port, host, (error) => {


@ -8,11 +8,11 @@ require("yargs/yargs")(process.argv.slice(2)).command(
.positional("type", {
describe: "Type of checks to run",
type: "string",
choices: ["critical", "verbose"],
choices: ["critical", "extended"],
})
.option("portal-url", {
describe: "Skynet portal url",
default: process.env.PORTAL_URL || "https://siasky.net",
default: process.env.SKYNET_PORTAL_API || "https://siasky.net",
type: "string",
})
.option("state-dir", {
@ -22,7 +22,7 @@ require("yargs/yargs")(process.argv.slice(2)).command(
});
},
async ({ type, portalUrl, stateDir }) => {
process.env.PORTAL_URL = portalUrl;
process.env.SKYNET_PORTAL_API = portalUrl;
process.env.STATE_DIR = stateDir;
const db = require("../src/db");


@ -23,9 +23,9 @@ function getYesterdayISOString() {
*/
function getResponseContent(response) {
try {
return JSON.parse(response?.text);
return JSON.parse(response?.body || response?.text);
} catch {
return response?.text;
return response?.body || response?.text;
}
}


@ -0,0 +1,13 @@
const http = require("http");
const request = http.request({ host: "whatismyip.akamai.com" }, (response) => {
response.on("data", (data) => {
process.stdout.write(data);
});
});
request.on("error", (error) => {
console.error(error);
});
request.end();


@ -1,4 +1,4 @@
FROM node:15.14.0-alpine
FROM node:16.1.0-alpine
RUN apk update && apk add autoconf automake libtool gcc make g++ zlib-dev file nasm util-linux


@ -8,13 +8,13 @@
"axios": "0.21.1",
"boolean": "^3.0.2",
"bytes": "3.1.0",
"classnames": "2.2.6",
"classnames": "2.3.1",
"gatsby": "^3.0.4",
"gatsby-plugin-manifest": "^3.0.0",
"gatsby-plugin-matomo": "0.9.0",
"gatsby-plugin-react-helmet": "^4.0.0",
"gatsby-plugin-remove-serviceworker": "1.0.0",
"gatsby-plugin-robots-txt": "1.5.5",
"gatsby-plugin-robots-txt": "1.5.6",
"gatsby-plugin-sass": "^4.0.2",
"gatsby-source-filesystem": "^3.0.0",
"http-status-codes": "2.1.4",
@ -26,21 +26,21 @@
"react": "17.0.1",
"react-countup": "4.3.3",
"react-dom": "17.0.1",
"react-dropzone": "11.3.1",
"react-dropzone": "11.3.2",
"react-helmet": "6.1.0",
"react-mailchimp-form": "1.0.2",
"react-mailchimp-subscribe": "^2.1.3",
"react-syntax-highlighter": "15.4.3",
"react-visibility-sensor": "5.1.1",
"skynet-js": "3.0.0"
"skynet-js": "3.0.2"
},
"devDependencies": {
"cypress": "^6.6.0",
"cypress-file-upload": "5.0.3",
"eslint": "7.22.0",
"eslint-config-prettier": "8.1.0",
"cypress": "^7.1.0",
"cypress-file-upload": "5.0.7",
"eslint": "7.26.0",
"eslint-config-prettier": "8.2.0",
"eslint-plugin-cypress": "2.11.2",
"eslint-plugin-react": "7.23.0",
"eslint-plugin-react": "7.23.2",
"husky": "4.3.8",
"lint-staged": "10.5.4",
"prettier": "2.2.1"


@ -100,7 +100,7 @@ export default function Footer() {
</a>
</li>
<li>
<a href="https://jobs.lever.co/nebulous" target="_blank" rel="noopener noreferrer">
<a href="https://jobs.lever.co/SkynetLabs" target="_blank" rel="noopener noreferrer">
Jobs
</a>
</li>


@ -1,4 +1,4 @@
FROM node:15.14.0-alpine
FROM node:16.1.0-alpine
RUN apk update && apk add autoconf automake build-base libtool nasm pkgconfig


@ -19,4 +19,4 @@
module.exports = (on, config) => {
// `on` is used to hook into various events Cypress emits
// `config` is the resolved Cypress config
}
};


@ -14,7 +14,7 @@
// ***********************************************************
// Import commands.js using ES2015 syntax:
import './commands'
import "./commands";
// Alternatively you can use CommonJS syntax:
// require('./commands')


@ -5,7 +5,7 @@
- title: Brand Guidelines
href: https://support.siasky.net/key-concepts/skynet-brand-guidelines
- title: Careers
href: https://jobs.lever.co/nebulous
href: https://jobs.lever.co/SkynetLabs
- title: Terms of Use
href: /terms.pdf
- title: Privacy Policy


@ -1,6 +1,7 @@
const { defaultIcons } = require("gatsby-plugin-manifest/common");
module.exports = {
flags: { PRESERVE_WEBPACK_CACHE: true },
siteMetadata: {
title: `Skynet`,
description: `Skynet is a decentralized file sharing and content distribution protocol`,

File diff suppressed because it is too large Load Diff


@ -5,8 +5,8 @@
"version": "0.1.0",
"author": "Skynet Labs.",
"dependencies": {
"@fontsource/sora": "^4.2.2",
"@fontsource/source-sans-pro": "^4.2.2",
"@fontsource/sora": "^4.3.0",
"@fontsource/source-sans-pro": "^4.3.0",
"@svgr/webpack": "^5.5.0",
"@tailwindcss/typography": "^0.4.0",
"autoprefixer": "^10.2.5",
@ -14,40 +14,40 @@
"classnames": "^2.3.1",
"copy-text-to-clipboard": "^3.0.1",
"crypto-browserify": "^3.12.0",
"framer-motion": "^4.0.3",
"gatsby": "^3.2.1",
"gatsby-background-image": "^1.5.0",
"gatsby-image": "^3.2.0",
"gatsby-plugin-image": "^1.1.2",
"gatsby-plugin-manifest": "^3.0.0",
"framer-motion": "^4.1.15",
"gatsby": "^3.4.2",
"gatsby-background-image": "^1.5.3",
"gatsby-image": "^3.4.0",
"gatsby-plugin-image": "^1.4.1",
"gatsby-plugin-manifest": "^3.4.0",
"gatsby-plugin-matomo": "^0.9.0",
"gatsby-plugin-offline": "^4.0.0",
"gatsby-plugin-postcss": "^4.0.0",
"gatsby-plugin-offline": "^4.4.0",
"gatsby-plugin-postcss": "^4.4.0",
"gatsby-plugin-purgecss": "^6.0.0",
"gatsby-plugin-react-helmet": "^4.0.0",
"gatsby-plugin-react-helmet": "^4.4.0",
"gatsby-plugin-react-svg": "^3.0.0",
"gatsby-plugin-robots-txt": "^1.5.5",
"gatsby-plugin-sharp": "^3.1.2",
"gatsby-plugin-robots-txt": "^1.6.2",
"gatsby-plugin-sharp": "^3.4.2",
"gatsby-remark-classes": "^1.0.0",
"gatsby-remark-copy-linked-files": "^4.0.0",
"gatsby-remark-images": "^5.0.0",
"gatsby-remark-prismjs": "^5.0.0",
"gatsby-remark-responsive-iframe": "^4.0.0",
"gatsby-remark-smartypants": "^4.0.0",
"gatsby-source-filesystem": "^3.0.0",
"gatsby-transformer-json": "^3.1.0",
"gatsby-transformer-remark": "^4.0.0",
"gatsby-transformer-sharp": "^3.0.0",
"gatsby-transformer-yaml": "^3.2.0",
"gbimage-bridge": "^0.1.1",
"gatsby-remark-copy-linked-files": "^4.1.0",
"gatsby-remark-images": "^5.1.0",
"gatsby-remark-prismjs": "^5.1.0",
"gatsby-remark-responsive-iframe": "^4.1.0",
"gatsby-remark-smartypants": "^4.1.0",
"gatsby-source-filesystem": "^3.4.0",
"gatsby-transformer-json": "^3.4.0",
"gatsby-transformer-remark": "^4.1.0",
"gatsby-transformer-sharp": "^3.4.0",
"gatsby-transformer-yaml": "^3.4.0",
"gbimage-bridge": "^0.1.4",
"http-status-codes": "^2.1.4",
"jsonp": "^0.2.1",
"ms": "^2.1.2",
"normalize.css": "^8.0.1",
"path-browserify": "^1.0.1",
"polished": "^4.1.1",
"polished": "^4.1.2",
"popmotion": "^9.3.4",
"postcss": "^8.2.8",
"postcss": "^8.2.15",
"preact-svg-loader": "^0.2.1",
"prop-types": "^15.7.2",
"react": "^17.0.2",
@ -57,17 +57,17 @@
"react-share": "^4.4.0",
"react-svg-loader": "^3.0.3",
"react-syntax-highlighter": "^15.4.3",
"react-use": "^17.2.3",
"react-use": "^17.2.4",
"skynet-js": "^3.0.2",
"stream-browserify": "^3.0.0",
"swr": "^0.5.5",
"tailwindcss": "^2.1.1"
"swr": "^0.5.6",
"tailwindcss": "^2.1.2"
},
"devDependencies": {
"cross-env": "^7.0.3",
"cypress": "^7.1.0",
"cypress-file-upload": "^5.0.5",
"prettier": "^2.2.1"
"cypress": "^7.3.0",
"cypress-file-upload": "^5.0.7",
"prettier": "^2.3.0"
},
"keywords": [
"gatsby"


@ -54,7 +54,7 @@ export const Carousel = ({ Component, items }) => {
<div className="relative overflow-hidden">
<div className="opacity-0 flex flex-row">
{items.map((item, index) => (
<div key={index} className="flex-shrink-0 w-screen">
<div key={index} className="flex-shrink-0 w-full">
<Component {...item} />
</div>
))}


@ -24,45 +24,39 @@ const aboutCards = [
{
Icon: UserAtom,
title: "Own your data",
text:
"No one owns or controls your account data except for you. Ownership extends to original blogs, music, and videos too. This is all possible through decentralized apps built on decentralized storage.",
text: "No one owns or controls your account data except for you. Ownership extends to original blogs, music, and videos too. This is all possible through decentralized apps built on decentralized storage.",
},
{
Icon: Shield,
title: "Censorship-resistant content",
text:
"Today, censorship can come arbitrarily, top-down, and as a tool to silence expression. Post and share content on Skynet, or use Skynet as a fail-over for your website if a service provider goes down.",
text: "Today, censorship can come arbitrarily, top-down, and as a tool to silence expression. Post and share content on Skynet, or use Skynet as a fail-over for your website if a service provider goes down.",
},
{
Icon: Fingerprint,
title: "One universal digital identity",
text:
"Log into any Skynet app with just one ID. Once logged in, your storage and data can follow you across the ecosystem. Access your friend lists, followers, and content from any Skynet app.",
text: "Log into any Skynet app with just one ID. Once logged in, your storage and data can follow you across the ecosystem. Access your friend lists, followers, and content from any Skynet app.",
},
{
Icon: UserArrows,
title: "Innovation built for users",
text:
"All Skynet apps are open-source. If you dislike an apps feature or want to make your own improvements, youre welcome to do so. (We of course encourage collaboration and hope you chat with the developer first!) Existing users can then consent to the migration of all their account data to the latest version. ",
text: "All Skynet apps are open-source. If you dislike an apps feature or want to make your own improvements, youre welcome to do so. (We of course encourage collaboration and hope you chat with the developer first!) Existing users can then consent to the migration of all their account data to the latest version. ",
},
{
Icon: ComputerScreen,
label: "Coming soon",
title: "Control your content feed",
text:
"We believe that users, not tech platforms should fully control how content is moderated. A decentralized internet is not an information free-for-all. It means that the individual holds the power to personalize their online experiences. For example, users will decide what content appears in their social media feeds, not a corporate algorithm.",
text: "We believe that users, not tech platforms should fully control how content is moderated. A decentralized internet is not an information free-for-all. It means that the individual holds the power to personalize their online experiences. For example, users will decide what content appears in their social media feeds, not a corporate algorithm.",
},
{
Icon: Cogs,
label: "Coming soon",
title: "Developer and Creator-centric monetization",
text:
"As a content creator, set your own terms and price for your art. You and your collaborators can get paid directly, fairly, and automatically in crypto without relying on advertising as a sole source of income.",
text: "As a content creator, set your own terms and price for your art. You and your collaborators can get paid directly, fairly, and automatically in crypto without relying on advertising as a sole source of income.",
},
];
const showCareersCTA = false; // change to true to display the careers CTA section
const careers = { href: "https://jobs.lever.co/nebulous", target: "_blank", rel: "noopener noreferrer" };
const showCareersCTA = true; // switch to hide or display the careers CTA section
const careers = { href: "https://jobs.lever.co/SkynetLabs", target: "_blank", rel: "noopener noreferrer" };
const paginate = (array, size) =>
array.reduce((acc, item, index) => {


@ -19,32 +19,27 @@ const reasonCards = [
{
Icon: DataSwap,
title: "Immutable Data, Globally Available & Trustless",
text:
"Our immutable data layer means files are instantly accessible on any device, by any portal and are fully verifiable, by leveraging trustless, decentralized storage on the Sia blockchain.",
text: "Our immutable data layer means files are instantly accessible on any device, by any portal and are fully verifiable, by leveraging trustless, decentralized storage on the Sia blockchain.",
},
{
Icon: Encryption,
title: "Dynamic Content with a User-Focus",
text:
"SkyDB enables complex apps by providing a key-value store for mutable data secured by the private key of the user.",
text: "SkyDB enables complex apps by providing a key-value store for mutable data secured by the private key of the user.",
},
{
Icon: Layers,
title: "BYO Frontend Library",
text:
"Our SDKs are built with web2 developers in mind. Client-side web apps and static generators are perfect for using Skynet to deploy with.",
text: "Our SDKs are built with web2 developers in mind. Client-side web apps and static generators are perfect for using Skynet to deploy with.",
},
{
Icon: Mesh,
title: "Decentralized Stack-Friendly",
text:
"With integrations with HNS & ENS, along with easy-access for off-chain storage, Skynet is positioned to connect with the DWeb and web3 technologies you need.",
text: "With integrations with HNS & ENS, along with easy-access for off-chain storage, Skynet is positioned to connect with the DWeb and web3 technologies you need.",
},
{
Icon: Toolkit,
title: "Hack Today & Activate an Existing User Base",
text:
"Start building without worrying about server overhead costs or where users will come from. Bootstrap the user experience with interoperable storage and user-identity right out of the box.",
text: "Start building without worrying about server overhead costs or where users will come from. Bootstrap the user experience with interoperable storage and user-identity right out of the box.",
},
];


@ -36,14 +36,12 @@ const ecosystemCards = [
{
Icon: SkynetUsageSmall,
title: "Easy to use",
text:
"Decentralized storage without needing to run a node or wallet. Skynet also includes SDKs for popular programming languages and APIs that integrate seamlessly with your existing apps.",
text: "Decentralized storage without needing to run a node or wallet. Skynet also includes SDKs for popular programming languages and APIs that integrate seamlessly with your existing apps.",
},
{
Icon: SkynetSpeedSmall,
title: "Fast",
text:
"Skynet's speeds rival centralized providers and surpass all decentralized offerings. A typical Skynet download starts in under 500 ms and can stream at rates as high as 1 Gbps!",
text: "Skynet's speeds rival centralized providers and surpass all decentralized offerings. A typical Skynet download starts in under 500 ms and can stream at rates as high as 1 Gbps!",
},
{
Icon: SkynetSiaSmall,
@ -54,14 +52,12 @@ const ecosystemCards = [
{
Icon: SkynetMonetizationSmall,
title: "Monetization",
text:
"Profit directly from the success of your skapp. Now you can truly prioritize your users, instead of advertisers.",
text: "Profit directly from the success of your skapp. Now you can truly prioritize your users, instead of advertisers.",
},
{
Icon: SkynetPersistenceSmall,
title: "Persistence",
text:
"Your skapp and data stay live, even if corporations pull your access to their resources. You can also use Skynet as a failover site for when centralized providers go down.",
text: "Your skapp and data stay live, even if corporations pull your access to their resources. You can also use Skynet as a failover site for when centralized providers go down.",
},
];
@ -109,7 +105,7 @@ const IndexPage = () => {
<div className="flex flex-col items-center mt-16">
<p className="max-w-screen-md text-center text-base font-content text-palette-400">
Skynet apps pave the way for a new web that priorities the privacy, security, and experience of users. Join
Skynet apps pave the way for a new web that prioritizes the privacy, security, and experience of users. Join
our decentralized ecosystem and revolution.
</p>

scripts/README.md Normal file

@ -0,0 +1,39 @@
# Skynet Webportal Scripts
This package contains useful scripts for managing a Skynet Webportal.
## Available Scripts
**blocklist-skylink.sh**\
The `blocklist-skylink.sh` script adds a skylink to the blocklist on all
servers (see the usage sketch after this list).
**maintenance-upgrade.sh**\
The `maintenance-upgrade.sh` script upgrades the docker images for nodes on
a maintenance server.
**portal-down.sh**\
The `portal-down.sh` script takes a portal out of the load balancer by disabling
the health check.
**portal-restart.sh**\
The `portal-restart.sh` script restarts a portal by taking it out of the load
balancer, restarting the docker containers, and adding the portal back to the
load balancer.
**portal-up.sh**\
The `portal-up.sh` script puts a portal back into the load balancer by enabling
the health check.
**portal-upgrade.sh**\
The `portal-upgrade.sh` script upgrades the docker images for a portal and
clears any leftover images.
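A usage sketch, based on the argument handling in the `blocklist-skylink.sh` diff below (the script accepts either a single skylink or a file with one skylink per line; the skylink shown is just an example value):

```
# block a single skylink on all servers
./scripts/blocklist-skylink.sh AACogzrAimYPG42tDOKhS3lXZD8YvlF8Q8R17afe95iV2Q

# block every skylink listed in a file
./scripts/blocklist-skylink.sh skylinks.txt
```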
## Webportal Upgrade Procedures
TODO...
1. 1 server upgraded at a time
1. Clusters of servers upgraded at a time
1. How to safely revert to previous stable version. Document what those
versions were.
1. Upgrading single subsystem
1. Upgrading multiple subsystems


@ -7,7 +7,7 @@ if [ -z "$1" ]; then
fi
#########################################################
# read either a file containing skylinks separated by new
# lines or a single skylink and put them in an array
#########################################################
skylinks=()
@ -32,7 +32,13 @@ fi
#########################################################################
# iterate through all servers, block the skylinks and purge it from cache
#########################################################################
for server in "eu-ger-1.siasky.net" "eu-ger-2.siasky.net" "eu-fin-1.siasky.net" "us-or-1.siasky.net" "us-or-2.siasky.net" "us-va-1.siasky.net" "us-pa-1.siasky.net" "us-pa-2.siasky.net" "siasky.xyz";
declare -a servers=( "eu-ger-1.siasky.net" "eu-ger-2.siasky.net" "eu-ger-3.siasky.net" "eu-ger-4.siasky.net"
"eu-fin-1.siasky.net" "eu-fin-2.siasky.net"
"us-or-1.siasky.net" "us-or-2.siasky.net"
"us-pa-1.siasky.net" "us-pa-2.siasky.net"
"us-va-1.siasky.net"
"siasky.xyz" "siasky.dev")
for server in "${servers[@]}";
do
for skylink in "${skylinks[@]}";
do

scripts/es_cleaner.py Normal file

@ -0,0 +1,117 @@
#!/usr/bin/env python3
import curator
import elasticsearch
import os
import ssl
import sys
TIMEOUT=120
def main():
if len(sys.argv) != 3:
print('USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(sys.argv[0]))
print('NUM_OF_DAYS ... delete indices that are older than the given number of days.')
print('HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from.')
print('TIMEOUT ... number of seconds to wait for master node response (default {}).'.format(TIMEOUT))
print('INDEX_PREFIX ... specifies index prefix.')
print('INDEX_DATE_SEPARATOR ... specifies index date separator.')
print('ARCHIVE ... specifies whether to remove archive indices (only works for rollover) (default false).')
print('ROLLOVER ... specifies whether to remove indices created by rollover (default false).')
print('ES_USERNAME ... The username required by Elasticsearch.')
print('ES_PASSWORD ... The password required by Elasticsearch.')
print('ES_TLS ... enable TLS (default false).')
print('ES_TLS_CA ... Path to TLS CA file.')
print('ES_TLS_CERT ... Path to TLS certificate file.')
print('ES_TLS_KEY ... Path to TLS key file.')
print('ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server\'s certificate chain and host name verification.')
sys.exit(1)
client = create_client(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"), str2bool(os.getenv("ES_TLS", 'false')), os.getenv("ES_TLS_CA"), os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"), str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false')))
ilo = curator.IndexList(client)
empty_list(ilo, 'Elasticsearch has no indices')
prefix = os.getenv("INDEX_PREFIX", '')
if prefix != '':
prefix += '-'
separator = os.getenv("INDEX_DATE_SEPARATOR", '-')
if str2bool(os.getenv("ARCHIVE", 'false')):
filter_archive_indices_rollover(ilo, prefix)
else:
if str2bool(os.getenv("ROLLOVER", 'false')):
filter_main_indices_rollover(ilo, prefix)
else:
filter_main_indices(ilo, prefix, separator)
empty_list(ilo, 'No indices to delete')
for index in ilo.working_list():
print("Removing", index)
timeout = int(os.getenv("TIMEOUT", TIMEOUT))
delete_indices = curator.DeleteIndices(ilo, master_timeout=timeout)
delete_indices.do_action()
def filter_main_indices(ilo, prefix, separator):
date_regex = "\d{4}" + separator + "\d{2}" + separator + "\d{2}"
time_string = "%Y" + separator + "%m" + separator + "%d"
ilo.filter_by_regex(kind='regex', value=prefix + "jaeger-(span|service|dependencies)-" + date_regex)
empty_list(ilo, "No indices to delete")
# This excludes archive index as we use source='name'
# source `creation_date` would include archive index
ilo.filter_by_age(source='name', direction='older', timestring=time_string, unit='days', unit_count=int(sys.argv[1]))
def filter_main_indices_rollover(ilo, prefix):
ilo.filter_by_regex(kind='regex', value=prefix + "jaeger-(span|service)-\d{6}")
empty_list(ilo, "No indices to delete")
# do not remove active write indices
ilo.filter_by_alias(aliases=[prefix + 'jaeger-span-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_alias(aliases=[prefix + 'jaeger-service-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_age(source='creation_date', direction='older', unit='days', unit_count=int(sys.argv[1]))
def filter_archive_indices_rollover(ilo, prefix):
# Remove only rollover archive indices
# Do not remove active write archive index
ilo.filter_by_regex(kind='regex', value=prefix + "jaeger-span-archive-\d{6}")
empty_list(ilo, "No indices to delete")
ilo.filter_by_alias(aliases=[prefix + 'jaeger-span-archive-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_age(source='creation_date', direction='older', unit='days', unit_count=int(sys.argv[1]))
def empty_list(ilo, error_msg):
try:
ilo.empty_list_check()
except curator.NoIndices:
print(error_msg)
sys.exit(0)
def str2bool(v):
return v.lower() in ('true', '1')
def create_client(username, password, tls, ca, cert, key, skipHostVerify):
context = ssl.create_default_context()
if ca is not None:
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca)
elif skipHostVerify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
if username is not None and password is not None:
return elasticsearch.Elasticsearch(sys.argv[2:], http_auth=(username, password), ssl_context=context)
elif tls:
context.load_cert_chain(certfile=cert, keyfile=key)
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
else:
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
if __name__ == "__main__":
main()
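Going by the USAGE string above, a minimal invocation passes the retention in days and the Elasticsearch URL; note that `docker-compose.jaeger.yml` publishes Elasticsearch on `127.0.0.1:9200` precisely so the indices can be pruned from the host (the 7-day retention is just an example):

```
python3 scripts/es_cleaner.py 7 http://127.0.0.1:9200
```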

scripts/wait_to_start.sh Executable file

@ -0,0 +1,23 @@
#!/bin/sh
echo $WAIT_COMMAND
echo $WAIT_START_CMD
is_ready() {
eval "$WAIT_COMMAND"
}
# wait until it is ready
i=0
while ! is_ready; do
i=`expr $i + 1`
if [ $i -ge $WAIT_LOOPS ]; then
echo "$(date) - still not ready, giving up"
exit 1
fi
echo "$(date) - waiting to be ready"
sleep $WAIT_SLEEP
done
# start the script
exec $WAIT_START_CMD


@ -48,8 +48,8 @@ You can now ssh into your machine as the user `user`.
**The following steps will be executed on the remote host, logged in as `user`:**
1. `sudo apt-get install git -y` to install git
1. `git clone https://github.com/NebulousLabs/skynet-webportal`
1. `cd skynet-webportal`
1. `git clone https://github.com/SkynetLabs/skynet-webportal`
1. `cd skynet-webportal`
1. run setup scripts in the exact order and provide sudo password when asked (if one of them fails, you can retry just this one before proceeding further)
1. `/home/user/skynet-webportal/setup-scripts/setup-server.sh`
1. `/home/user/skynet-webportal/setup-scripts/setup-docker-services.sh`
@ -81,14 +81,15 @@ At this point we have almost everything running, we just need to set up your wal
### Step 4: configuring docker services
1. edit `/home/user/skynet-webportal/.env` and configure following environment variables
- `DOMAIN_NAME` (optional) is your domain name if you have it
- `EMAIL_ADDRESS` (required) is your email address used for communication regarding SSL certification (required)
- `SIA_WALLET_PASSWORD` (required) is your wallet password (or seed if you did not set a password)
- `HSD_API_KEY` (optional) this is a random security key for a handshake integration that gets generated automatically
- `SSL_CERTIFICATE_STRING` is a list of comma separated paths that caddy will generate ssl certificates for
- `EMAIL_ADDRESS` is your email address used for communication regarding SSL certification (required if you're using http-01 challenge)
- `SIA_WALLET_PASSWORD` is your wallet password (or seed if you did not set a password)
- `HSD_API_KEY` is a random security key for a handshake integration that gets generated automatically
- `CLOUDFLARE_AUTH_TOKEN` (optional) if using cloudflare as dns loadbalancer (need to change it in Caddyfile too)
- `AWS_ACCESS_KEY_ID` (optional) if using route53 as a dns loadbalancer
- `AWS_SECRET_ACCESS_KEY` (optional) if using route53 as a dns loadbalancer
- `PORTAL_NAME` (optional) e.g. `siasky.xyz`
- `PORTAL_NAME` is a string representing the name of your portal, e.g. `siasky.xyz` or `my skynet portal`
- `DISCORD_BOT_TOKEN` (optional) if you're using Discord notifications for health checks and such
- `SKYNET_DB_USER` (optional) if using `accounts` this is the MongoDB username
- `SKYNET_DB_PASS` (optional) if using `accounts` this is the MongoDB password
@ -100,8 +101,6 @@ At this point we have almost everything running, we just need to set up your wal
- `S3_BACKUP_PATH` (optional) if using `accounts` and backing up the databases to S3. This path should be an S3 bucket
with path to the location in the bucket where we want to store the daily backups.
1. if you have a custom domain and you configured it in `DOMAIN_NAME`, edit `/home/user/skynet-webportal/docker/caddy/Caddyfile` and uncomment `import custom.domain`
1. only for siasky.net domain instances: edit `/home/user/skynet-webportal/docker/caddy/Caddyfile`, uncomment `import siasky.net`
1. `docker-compose up -d` to restart the services so they pick up new env variables
1. `docker exec caddy caddy reload --config /etc/caddy/Caddyfile` to reload Caddyfile configuration
1. add your custom Kratos configuration to `/home/user/skynet-webportal/docker/kratos/config/kratos.yml` (in particular, the credentials for your mail server should be here, rather than in your source control). For a starting point you can take `docker/kratos/config/kratos.yml.sample`.
@ -120,16 +119,17 @@ To configure this on your portal, you have to make sure to configure the followi
We need to ensure SSL encryption for skapps that are accessed through their
subdomain, therefore we need to have a wildcard certificate. This is very easily
achieved using Caddy.
achieved using wildcard certificates in Caddy.
```
(siasky.net) {
siasky.net, *.siasky.net, *.hns.siasky.net {
...
}
{$SSL_CERTIFICATE_STRING} {
...
}
```
Where the `SSL_CERTIFICATE_STRING` environment variable should contain the wildcard for subdomains (ie. `*.example.com`) and
the wildcard for hns subdomains (ie. `*.hns.example.com`).
(see [docker/caddy/Caddyfile](../docker/caddy/Caddyfile))
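For example, a matching `.env` entry (using the same placeholder domain as the setup scripts) would be:

```
SSL_CERTIFICATE_STRING=example.com, *.example.com, *.hns.example.com
```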
### Nginx configuration


@ -144,8 +144,8 @@ async def check_health():
json_critical = requests.get(
"http://localhost/health-check/critical", verify=False
).json()
json_verbose = requests.get(
"http://localhost/health-check/verbose", verify=False
json_extended = requests.get(
"http://localhost/health-check/extended", verify=False
).json()
except:
trace = traceback.format_exc()
@ -157,8 +157,8 @@ async def check_health():
critical_checks_total = 0
critical_checks_failed = 0
verbose_checks_total = 0
verbose_checks_failed = 0
extended_checks_total = 0
extended_checks_failed = 0
failed_records = []
failed_records_file = None
@ -178,18 +178,18 @@ async def check_health():
if bad:
failed_records.append(critical)
for verbose in json_verbose:
time = datetime.strptime(verbose["date"], "%Y-%m-%dT%H:%M:%S.%fZ")
for extended in json_extended:
time = datetime.strptime(extended["date"], "%Y-%m-%dT%H:%M:%S.%fZ")
if time < time_limit:
continue
bad = False
for check in verbose["checks"]:
verbose_checks_total += 1
for check in extended["checks"]:
extended_checks_total += 1
if check["up"] == False:
verbose_checks_failed += 1
extended_checks_failed += 1
bad = True
if bad:
failed_records.append(verbose)
failed_records.append(extended)
################################################################################
# create a message
@ -213,14 +213,14 @@ async def check_health():
message += "All {} critical checks passed. ".format(
critical_checks_total)
if verbose_checks_failed:
message += "{}/{} verbose checks failed over the last {} hours! ".format(
verbose_checks_failed, verbose_checks_total, CHECK_HOURS
if extended_checks_failed:
message += "{}/{} extended checks failed over the last {} hours! ".format(
extended_checks_failed, extended_checks_total, CHECK_HOURS
)
force_notify = True
else:
message += "All {} verbose checks passed. ".format(
verbose_checks_total)
message += "All {} extended checks passed. ".format(
extended_checks_total)
if len(failed_records):
failed_records_file = json.dumps(failed_records, indent=2)


@ -21,7 +21,7 @@ sudo chmod +x /usr/local/bin/docker-compose
docker-compose --version # sanity check
# Create dummy .env file for docker-compose usage with variables
# * DOMAIN_NAME - the domain name your server is using ie. example.com
# * SSL_CERTIFICATE_STRING - certificate string that will be used to generate ssl certificates, read more in docker/caddy/Caddyfile
# * SKYNET_PORTAL_API - absolute url to the portal api ie. https://example.com
# * SKYNET_DASHBOARD_URL - (optional) absolute url to the portal dashboard ie. https://account.example.com
# * EMAIL_ADDRESS - this is the administrator contact email you need to supply for communication regarding SSL certification
@ -43,7 +43,7 @@ docker-compose --version # sanity check
# * CR_CLUSTER_NODES - (optional) if using `accounts` the list of servers (with ports) which make up your CockroachDB cluster, e.g. `helsinki.siasky.net:26257,germany.siasky.net:26257,us-east.siasky.net:26257`
if ! [ -f /home/user/skynet-webportal/.env ]; then
HSD_API_KEY=$(openssl rand -base64 32) # generate safe random key for handshake
printf "DOMAIN_NAME=example.com\nSKYNET_PORTAL_API=https://example.com\nSKYNET_DASHBOARD_URL=https://account.example.com\nEMAIL_ADDRESS=email@example.com\nSIA_WALLET_PASSWORD=\nHSD_API_KEY=${HSD_API_KEY}\nCLOUDFLARE_AUTH_TOKEN=\nAWS_ACCESS_KEY_ID=\nAWS_SECRET_ACCESS_KEY=\nPORTAL_NAME=\nDISCORD_BOT_TOKEN=\n" > /home/user/skynet-webportal/.env
printf "SSL_CERTIFICATE_STRING=example.com, *.example.com, *.hns.example.com\nSKYNET_PORTAL_API=https://example.com\nSKYNET_DASHBOARD_URL=https://account.example.com\nEMAIL_ADDRESS=email@example.com\nSIA_WALLET_PASSWORD=\nHSD_API_KEY=${HSD_API_KEY}\nCLOUDFLARE_AUTH_TOKEN=\nAWS_ACCESS_KEY_ID=\nAWS_SECRET_ACCESS_KEY=\nPORTAL_NAME=\nDISCORD_BOT_TOKEN=\n" > /home/user/skynet-webportal/.env
fi
# Start docker container with nginx and client


@ -5,7 +5,7 @@ set -e # exit on first error
sudo apt-get update
sudo apt-get -y install python3-pip
pip3 install discord.py python-dotenv requests
pip3 install discord.py python-dotenv requests elasticsearch-curator
# add cron entries to user crontab
crontab -u user /home/user/skynet-webportal/setup-scripts/support/crontab


@ -1,8 +1,9 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCpBsw5mPBVIvVd5GX43VXWHWuLeR2h0lfw8vRyDFgmV0TqC9r0POfmWOdSo/QlxHOeI+7S8Ahj/JdDarrx3vJ2vJQkK/tN2bPS30tR0pbCkr0vE/lUWsEuVxOXK132wtFQ/pF3CoVipI8THUS7/Dtap/9fujcEm59dIi3obYGc9F+UetmNtrc+mb6KJ6a1hkaXjD12qP03srSQSDBjch/7nbFFzrRwCZ9DJntMu6Ux6NZ7RcFuXPCg0dK0lggEX/Agzh3KHe69dgiMh8sG0WwCb9vWqd6dtapCt7XKZSnEvyFE1YVZgpsd7bCnGe4vPS3kLsvxeojruDo8Oj3b0exHL9+3Rr4ndVVNHkDxhvlQFbGrd5eiG/brvGjS+ibscTuNukLeiCmBrI5KULObynI2dEQVQKREVywU/qX+xm68noEGBbiRt2L2ImyJvgpNdlyCkDyFhBTo/HtH1WHP1WJijfCHM3jxigeLPRV0GChKK1RbYjZIi6JNsalW7yad/qzHDzht+jBHHAjD4qGlfuNtzP4hs3FErGiQMVZ8g9Tgq8SxPLNOULpcCSwsLLlzfrLYdv52IgkwTIAFR9W+xHGrWypCba9pfskXWXlRNM61qYf3//H0BGHxtuNAASkJrVWwcCuOVN6/EcJOTS9qkg3JiWqs79z0F2I14+AfPFgBKQ== david@nebulouslabs.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCr3nrSQ+ag5gEm9LBoiw68UKALboot+Eemv0TbP6BPnvB6bnSdDstb7Eu1Dkla8uiyw3w2ZYi5Pg4dS5W8vnxwXvey8gBd3GYLpjtnSp9ukeYjHK0J2aX4PBC4GXvRSRjKxYfHauUqm8PaA4uQ4sBkblfwWDEH94um1yyqIamTabH6mfsYiaiiwTNu7ldZOAIlKR/G7cXlLmFz46An7Mn2wwbuv2Khin/f2bLtUF/smOolI7pjOH6ifhHR9LxotcY/xL+E5jRbU1XxldFvVXkL5CU8tEinE6oigwMH9zsPZr+Z70Q/wm20cylxNJu8qdMGQW+WhDg3S70KpCmjYlWJ6bF1HL3z9UkN0lS1EM21n13RIx1iEO7SEC3YPl8VqZiZS7P9Uf5D5z/vTG+fWouCsCBMSbq3HUcNXlm5MLGSdBWPKzZsUaCkHkQks/sxHVy21YAM/3xgST1a05PbIJU1RsqJ0wh0J2gg7/fBUE0ljFyKZ36mvfg6BNlwCUydAiVaQt1geqh+8/VRwjTw/jtHb8G7QhSNwDNo1BcQPU3LkdKePqgldyP5EYGl9bI4E4sYc2DooeJ22fXpWfuClLB+JcHGuCJf/Hg6si9IeeXKm8PwaBdxIVytRPEeJR+q5uOwzI4XWNgERdGU/UVbgfnrAPMuVPa9Jhyl96U9uUl+Cw== peterjan.brone@gmail.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDMmZFdJsqig/zX7Ly7qJMMDomsdAKLgl7W7ET1w7xH9BBM48OCWmozuLCfCG8MUCEYSUD575hA028hNi6CAK40J3fF74IDDyc9DUb+le8Y8EuzHPKxYLE/gWsjr70XOcZcC4IxLcADQgpeLjrPZQs7A4EYfdxnTTLJVYrowZ9RR5ivcKBjyFOiQyCuFSIvtYMo11Xm2gU48SKYGJThhHUiE2kMOlH3notXJ+T81927IGJdza7J3DAyKtMGB2HEMA89ma3mvEvbPTDMggJFJ3VG7sukRLq6UmT7BT+f3BW+Nr87A1o4upkAuXdkL9cUrris7kQN61AcaCNFU/CuIJa4dUZ0nt+z5X7kWtc0zD75EPj3w6AjB+E1+MSPsqnxd5PnGtSCQqHoa5hg4hQMSweC2tQhSKoWDfx9W2fZiLpg1IL6QB5xCxjg+YKCXEJKxRwXDtbh1DHFdJ5N1kM7IDSeeblc80HNxYrJUPNH1ExWsPl11gmBEEWDAiRSet4bAnOmgDYcJ9Aw2KAndb01cNsw5RL0Dg/W63tb8S5Y9kz6spX6X91yz53JzrozZO7VFfKxa17nubPEeWPTqAQ3uRWPvpdbivVnOAoFCLacRvtTfvetuz/vGZ3JTpr6Ylb9Z76cIqpFe70+bnauZwmxjF+EEq2+u3gd2uewuV2//o+CYQ== kwypchlo@gmail.com
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDgiq1etF0aD94rG/UVmYEt4ij5K8MvHZwb4wIUi6Ihr david@nebulouslabs.com
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAyIT2HqzDhQs6jS89ZsnY6+GJEklVMqF6fXe/i5s8d7 chris@nebulous.tech
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxLuZzjmFN9CgVOI5vaiVhQgMwG9dLQJ688wrsbpHH/ ivaylo@nebulous.tech
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINbAhwjJNAud7YIJvLth2bmeUg3kO20xl7ZfqBTvoXn8 Filip Rysavy
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG67M3zC4eDJEjma0iKKksGclteKbB86ONQtBaWY93M6 mjsevey@gmail.com
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDgiq1etF0aD94rG/UVmYEt4ij5K8MvHZwb4wIUi6Ihr david@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAyIT2HqzDhQs6jS89ZsnY6+GJEklVMqF6fXe/i5s8d7 chris@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxLuZzjmFN9CgVOI5vaiVhQgMwG9dLQJ688wrsbpHH/ ivaylo@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINbAhwjJNAud7YIJvLth2bmeUg3kO20xl7ZfqBTvoXn8 filip@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG67M3zC4eDJEjma0iKKksGclteKbB86ONQtBaWY93M6 matt@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF+XC8f0dumhzDE93i9IIMsMp7/MJPwGH+Uc9JFKOvyw karol@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM43lzbKjFLChe5rKETxDpWpNlqXCGTBPiWlDN2vlLD pj@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN6Kcx8yetova4/ALUQHigo/PBMJO33ZTKOsg2jxSO2a user@deploy.siasky.dev

yarn.lock

File diff suppressed because it is too large