Improve server infrastructure setup and scripts (#231)
parent cd93302a46
commit f067c50648
@ -71,3 +71,9 @@ yarn-error.log

# Cypress
cypress/screenshots
cypress/videos

# Docker data
docker/data

# Cache files
__pycache__
README.md

@ -1,31 +1,20 @@
# Skynet Portal

## Setup Guide

A setup guide with scripts to install prerequisites can be found in the [setup-scripts](./setup-scripts) directory.

Once the setup guide is complete you will be running:

- `siad` configured as a Skynet Portal
- an nginx webserver serving webportal

## Web application

### Development

Use `yarn start` to start the development server.

### Production build

Use `yarn build` to compile the application to `/public` directory.

### Build parameters

You can use the below build parameters to customize your application. You can use them both on development and production builds.
You can use the below build parameters to customize your web application.

- development example `GATSBY_API_URL=https://siasky.dev yarn start`
- production example `GATSBY_API_URL=https://siasky.net yarn build`

#### List of available parameters
List of available parameters:

- `GATSBY_API_URL`: you can override the api url if it is different than the location origin
- `GATSBY_API_URL`: override api url (defaults to location origin)

## Setting up complete skynet server

A setup guide with installation scripts can be found in [setup-scripts/README.md](./setup-scripts/README.md).
@ -0,0 +1,73 @@
|
|||
version: "3.7"
|
||||
|
||||
networks:
|
||||
shared:
|
||||
driver: bridge
|
||||
|
||||
services:
|
||||
docker-host:
|
||||
image: qoomon/docker-host
|
||||
container_name: docker-host
|
||||
restart: on-failure
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
networks:
|
||||
- shared
|
||||
|
||||
caddy:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./docker/caddy/Dockerfile
|
||||
container_name: caddy
|
||||
restart: on-failure
|
||||
env_file:
|
||||
- .env
|
||||
volumes:
|
||||
- ./docker/data/caddy/data:/data
|
||||
- ./docker/data/caddy/config:/config
|
||||
- ./docker/caddy/Caddyfile:/etc/caddy/Caddyfile
|
||||
networks:
|
||||
- shared
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
depends_on:
|
||||
- docker-host
|
||||
- nginx
|
||||
|
||||
nginx:
|
||||
image: openresty/openresty:1.15.8.3-2-xenial
|
||||
container_name: nginx
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ./docker/nginx/conf.d:/etc/nginx/conf.d:ro
|
||||
- ./docker/data/nginx/cache:/data/nginx/cache
|
||||
- ./docker/data/nginx/logs:/usr/local/openresty/nginx/logs
|
||||
networks:
|
||||
- shared
|
||||
expose:
|
||||
- 80
|
||||
depends_on:
|
||||
- docker-host
|
||||
|
||||
health-check:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./docker/health-check/Dockerfile
|
||||
container_name: health-check
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ./health-check:/usr/app/health-check
|
||||
- ./docker/data/health-check/state:/usr/app/state
|
||||
networks:
|
||||
- shared
|
||||
environment:
|
||||
- PORTAL_URL=caddy
|
||||
- HOSTNAME=health-check
|
||||
- NODE_TLS_REJECT_UNAUTHORIZED=0
|
||||
expose:
|
||||
- 3100
|
||||
depends_on:
|
||||
- docker-host
|
||||
- caddy
|
|
@ -0,0 +1,87 @@
|
|||
(webserver) {
|
||||
root * /home/user/public_html
|
||||
file_server
|
||||
encode zstd gzip
|
||||
|
||||
@skylink {
|
||||
path_regexp skylink ^/([a-zA-Z0-9-_]{46}(/.*)?)$
|
||||
}
|
||||
|
||||
@skylink_file {
|
||||
path_regexp skylink_file ^/file/([a-zA-Z0-9-_]{46}(/.*)?)$
|
||||
}
|
||||
|
||||
@options {
|
||||
method OPTIONS
|
||||
}
|
||||
|
||||
# OPTIONS headers to allow CORS https://enable-cors.org
|
||||
handle @options {
|
||||
header {
|
||||
Access-Control-Allow-Origin *
|
||||
Access-Control-Allow-Methods GET,POST,OPTIONS
|
||||
Access-Control-Allow-Headers DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range
|
||||
Access-Control-Max-Age 1728000
|
||||
}
|
||||
respond 204
|
||||
}
|
||||
|
||||
reverse_proxy /health-check health-check:3100
|
||||
|
||||
reverse_proxy /portals nginx:80 {
|
||||
header_up User-Agent Sia-Agent
|
||||
header_down Access-Control-Allow-Origin *
|
||||
}
|
||||
reverse_proxy /stats nginx:80 {
|
||||
header_up User-Agent Sia-Agent
|
||||
header_down Access-Control-Allow-Origin *
|
||||
}
|
||||
reverse_proxy /statsdown nginx:80 {
|
||||
header_up User-Agent Sia-Agent
|
||||
header_down Access-Control-Allow-Origin *
|
||||
}
|
||||
reverse_proxy @skylink nginx:80 {
|
||||
header_up User-Agent Sia-Agent
|
||||
header_up Access-Control-Expose-Headers skynet-file-metadata
|
||||
header_down Access-Control-Allow-Origin *
|
||||
}
|
||||
reverse_proxy @skylink_file nginx:80 {
|
||||
header_up User-Agent Sia-Agent
|
||||
header_up Access-Control-Expose-Headers skynet-file-metadata
|
||||
header_down Access-Control-Allow-Origin *
|
||||
}
|
||||
reverse_proxy /skynet/skyfile* nginx:80 {
|
||||
header_up User-Agent Sia-Agent
|
||||
header_up Authorization "Basic {env.SIA_API_AUTHORIZATION}"
|
||||
header_down Access-Control-Allow-Origin *
|
||||
}
|
||||
}
|
||||
|
||||
(custom.domain) {
|
||||
{$DOMAIN_NAME} {
|
||||
tls {$EMAIL_ADDRESS}
|
||||
import webserver
|
||||
}
|
||||
}
|
||||
|
||||
(siasky.net) {
|
||||
siasky.net, *.siasky.net {
|
||||
tls {
|
||||
dns cloudflare {env.CLOUDFLARE_AUTH_TOKEN}
|
||||
}
|
||||
import webserver
|
||||
}
|
||||
}
|
||||
|
||||
(localhost) {
|
||||
:443 {
|
||||
tls internal {
|
||||
on_demand
|
||||
}
|
||||
import webserver
|
||||
}
|
||||
}
|
||||
|
||||
import localhost
|
||||
import custom.domain
|
||||
# import siasky.net
|
|
@ -0,0 +1,19 @@
|
|||
FROM node:12.18.0 AS client-builder
|
||||
|
||||
COPY src ./src
|
||||
COPY static ./static
|
||||
COPY gatsby-config.js .
|
||||
COPY package.json .
|
||||
COPY yarn.lock .
|
||||
|
||||
RUN yarn --frozen-lockfile
|
||||
RUN yarn build
|
||||
|
||||
FROM caddy:2.0.0-builder AS caddy-builder
|
||||
|
||||
RUN caddy-builder github.com/caddy-dns/cloudflare
|
||||
|
||||
FROM caddy:2.0.0
|
||||
|
||||
COPY --from=client-builder /public /home/user/public_html
|
||||
COPY --from=caddy-builder /usr/bin/caddy /usr/bin/caddy
|
|
@ -0,0 +1,11 @@
|
|||
FROM node:12.18.0
|
||||
|
||||
WORKDIR /usr/app
|
||||
|
||||
RUN yarn init -y
|
||||
RUN yarn add express body-parser lowdb node-schedule superagent lodash http-status-codes
|
||||
|
||||
EXPOSE 3100
|
||||
|
||||
ENV NODE_ENV production
|
||||
CMD [ "node", "health-check/index.js" ]
|
|
@ -0,0 +1,145 @@
|
|||
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=skynet:10m max_size=10g use_temp_path=off;
|
||||
limit_req_zone $binary_remote_addr zone=stats_by_ip:10m rate=10r/m;
|
||||
limit_conn_zone $binary_remote_addr zone=uploads_by_ip:10m;
|
||||
limit_conn_zone $binary_remote_addr zone=downloads_by_ip:10m;
|
||||
limit_req_status 429;
|
||||
limit_conn_status 429;
|
||||
|
||||
# since we are proxying requests to nginx from caddy, access logs would contain caddy's ip address
# as the request address, so we use the real_ip_header module to take the real ip address of the
# request from the X-Forwarded-For header
|
||||
set_real_ip_from 10.0.0.0/8;
|
||||
set_real_ip_from 172.16.0.0/12;
|
||||
set_real_ip_from 192.168.0.0/16;
|
||||
real_ip_header X-Forwarded-For;
|
||||
|
||||
# note that we point uploads to port '9970'. Do this when you want to
# run a configuration with two siad instances, one for downloads and one
# for uploads. This drastically improves the up- and download speed of
# your portal. When running your portal in this double siad setup, make
# sure only the download siad runs in 'portal mode'; the upload siad can
# be run in normal mode. Set the port to '9980' if you do not want to
# run your portal in the double siad setup.
|
||||
upstream siad-upload {
|
||||
server docker-host:9970;
|
||||
}
|
||||
|
||||
upstream siad-download {
|
||||
server docker-host:9980;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
|
||||
# ddos protection: closing slow connections
|
||||
client_body_timeout 5s;
|
||||
client_header_timeout 5s;
|
||||
|
||||
# Increase the body buffer size, to ensure the internal POSTs can always
|
||||
# parse the full POST contents into memory.
|
||||
client_body_buffer_size 128k;
|
||||
client_max_body_size 128k;
|
||||
|
||||
location /portals {
|
||||
proxy_cache skynet;
|
||||
proxy_cache_valid any 1m; # cache portals for 1 minute
|
||||
proxy_pass http://siad-download/skynet/portals;
|
||||
}
|
||||
|
||||
location /stats {
|
||||
proxy_cache skynet;
|
||||
proxy_cache_valid any 1m; # cache stats for 1 minute
|
||||
proxy_pass http://siad-upload/skynet/stats;
|
||||
}
|
||||
|
||||
location /statsdown {
|
||||
proxy_cache skynet;
|
||||
proxy_cache_valid any 1m; # cache stats for 1 minute
|
||||
proxy_pass http://siad-download/skynet/stats;
|
||||
}
|
||||
|
||||
location /skynet/skyfile {
|
||||
limit_conn uploads_by_ip 10; # ddos protection: max 10 uploads at a time
|
||||
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
|
||||
proxy_read_timeout 600;
|
||||
proxy_request_buffering off; # stream uploaded files through the proxy as it comes in
|
||||
proxy_set_header Expect $http_expect;
|
||||
|
||||
# Extract 3 sets of 2 characters from $request_id and assign to $dir1, $dir2, $dir3
# respectively. The rest of the $request_id is going to be assigned to $dir4.
# We use those variables to automatically generate a unique path for the uploaded file.
# This ensures that not all uploaded files end up in the same directory, which is something
# that causes performance issues in the renter.
# Example path result: /af/24/9b/c5ec894920ccc45634dc9a8065
|
||||
if ($request_id ~* "(\w{2})(\w{2})(\w{2})(\w+)") {
|
||||
set $dir1 $1;
|
||||
set $dir2 $2;
|
||||
set $dir3 $3;
|
||||
set $dir4 $4;
|
||||
}
|
||||
|
||||
# proxy this call to siad endpoint (make sure the ip is correct)
|
||||
proxy_pass http://siad-upload/skynet/skyfile/$dir1/$dir2/$dir3/$dir4$is_args$args;
|
||||
}
|
||||
|
||||
location ~ "/skynet/skyfile/(.*)" {
|
||||
limit_conn uploads_by_ip 10; # ddos protection: max 10 uploads at a time
|
||||
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
|
||||
proxy_read_timeout 600;
|
||||
proxy_request_buffering off; # stream uploaded files through the proxy as it comes in
|
||||
proxy_set_header Expect $http_expect;
|
||||
|
||||
# we need to explicitly use set directive here because $1 will contain the siapath with
|
||||
# decoded whitespaces and set will re-encode it for us before passing it to proxy_pass
|
||||
set $siapath $1;
|
||||
|
||||
# proxy this call to siad endpoint (make sure the ip is correct)
|
||||
proxy_pass http://siad-upload/skynet/skyfile/$siapath$is_args$args;
|
||||
}
|
||||
|
||||
location ~ "^/([a-zA-Z0-9-_]{46}(/.*)?)$" {
|
||||
limit_conn downloads_by_ip 10; # ddos protection: max 10 downloads at a time
|
||||
|
||||
# we need to explicitly use set directive here because $1 will contain the skylink with
|
||||
# decoded whitespaces and set will re-encode it for us before passing it to proxy_pass
|
||||
set $skylink $1;
|
||||
|
||||
proxy_read_timeout 600;
|
||||
# proxy this call to siad /skynet/skylink/ endpoint (make sure the ip is correct)
|
||||
proxy_pass http://siad-download/skynet/skylink/$skylink$is_args$args;
|
||||
|
||||
# if you are expecting large headers (ie. Skynet-Skyfile-Metadata), tune these values to your needs
|
||||
proxy_buffer_size 128k;
|
||||
proxy_buffers 4 128k;
|
||||
|
||||
# cache frequent (> 10) downloads for 24 hours
|
||||
proxy_cache skynet;
|
||||
proxy_cache_key $uri;
|
||||
proxy_cache_min_uses 10;
|
||||
proxy_cache_valid 200 1440m;
|
||||
}
|
||||
|
||||
location ~ "^/file/([a-zA-Z0-9-_]{46}(/.*)?)$" {
|
||||
limit_conn downloads_by_ip 10; # ddos protection: max 10 downloads at a time
|
||||
|
||||
# we need to explicitly use set directive here because $1 will contain the skylink with
|
||||
# decoded whitespaces and set will re-encode it for us before passing it to proxy_pass
|
||||
set $skylink $1;
|
||||
|
||||
proxy_read_timeout 600;
|
||||
# proxy this call to siad /skynet/skylink/ endpoint (make sure the ip is correct)
|
||||
# this alias also adds attachment=true url param to force download the file
|
||||
proxy_pass http://siad-download/skynet/skylink/$skylink?attachment=true&$args;
|
||||
|
||||
# if you are expecting large headers (ie. Skynet-Skyfile-Metadata), tune these values to your needs
|
||||
proxy_buffer_size 128k;
|
||||
proxy_buffers 4 128k;
|
||||
|
||||
# cache frequent (> 10) downloads for 24 hours
|
||||
proxy_cache skynet;
|
||||
proxy_cache_key $uri;
|
||||
proxy_cache_min_uses 10;
|
||||
proxy_cache_valid 200 1440m;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
const superagent = require("superagent");
|
||||
const HttpStatus = require("http-status-codes");
|
||||
|
||||
async function uploadCheck(done) {
|
||||
const time = process.hrtime();
|
||||
|
||||
superagent
|
||||
.post(`https://${process.env.PORTAL_URL}/skynet/skyfile`)
|
||||
.attach("file", "package.json", "package.json")
|
||||
.end((err, res) => {
|
||||
const statusCode = (res && res.statusCode) || (err && err.statusCode) || null;
|
||||
|
||||
done({
|
||||
name: "upload_file",
|
||||
up: statusCode === HttpStatus.OK,
|
||||
statusCode,
|
||||
time: catchRequestTime(time),
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function downloadCheck(done) {
|
||||
const time = process.hrtime();
|
||||
|
||||
superagent.get(`https://${process.env.PORTAL_URL}`).end((err, res) => {
|
||||
const statusCode = (res && res.statusCode) || (err && err.statusCode) || null;
|
||||
|
||||
done({
|
||||
name: "download_file",
|
||||
up: statusCode === HttpStatus.OK,
|
||||
statusCode,
|
||||
time: catchRequestTime(time),
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function catchRequestTime(start) {
|
||||
const diff = process.hrtime(start);
|
||||
|
||||
return Math.round((diff[0] * 1e9 + diff[1]) / 1e6); // msec
|
||||
}
|
||||
|
||||
module.exports.checks = [uploadCheck, downloadCheck];
|
|
@ -0,0 +1,10 @@
|
|||
const low = require("lowdb");
|
||||
const FileSync = require("lowdb/adapters/FileSync");
|
||||
const Memory = require("lowdb/adapters/Memory");
|
||||
|
||||
const adapter = process.env.NODE_ENV === "production" ? new FileSync("state/state.json") : new Memory();
|
||||
const db = low(adapter);
|
||||
|
||||
db.defaults({ entries: [] }).write();
|
||||
|
||||
module.exports = db;
|
|
@ -0,0 +1,46 @@
|
|||
const HttpStatus = require("http-status-codes");
|
||||
const { sum, sumBy } = require("lodash");
|
||||
const db = require("./db");
|
||||
|
||||
function getStatus() {
|
||||
const entry = db.get("entries").orderBy("date", "desc").head().value();
|
||||
|
||||
if (entry && entry.checks.every(({ up }) => up)) {
|
||||
return HttpStatus.OK;
|
||||
}
|
||||
|
||||
return HttpStatus.SERVICE_UNAVAILABLE;
|
||||
}
|
||||
|
||||
function getTimeout() {
|
||||
if (getStatus() === HttpStatus.SERVICE_UNAVAILABLE) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const sample = db
|
||||
.get("entries")
|
||||
.orderBy("date", "desc")
|
||||
.filter(({ checks }) => checks.every(({ up }) => up))
|
||||
.take(10)
|
||||
.value();
|
||||
|
||||
// average total check time (in ms) over the sampled healthy entries
return Math.round(sum(sample.map(({ checks }) => sumBy(checks, "time"))) / sample.length);
|
||||
}
|
||||
|
||||
function getEntriesSinceYesterday() {
|
||||
const yesterday = new Date();
|
||||
|
||||
yesterday.setDate(yesterday.getDate() - 1);
|
||||
|
||||
return db
|
||||
.get("entries")
|
||||
.orderBy("date", "desc")
|
||||
.filter(({ date }) => date >= yesterday.toISOString())
|
||||
.value();
|
||||
}
|
||||
|
||||
module.exports = (req, res) => {
|
||||
setTimeout(() => {
|
||||
res.status(getStatus()).send(getEntriesSinceYesterday());
|
||||
}, getTimeout());
|
||||
};
|
|
@ -0,0 +1,26 @@
|
|||
process.env.NODE_ENV = process.env.NODE_ENV || "development";
|
||||
|
||||
if (!process.env.PORTAL_URL) {
|
||||
throw new Error("You need to provide PORTAL_URL environment variable");
|
||||
}
|
||||
|
||||
const express = require("express");
|
||||
const bodyparser = require("body-parser");
|
||||
|
||||
require("./schedule");
|
||||
|
||||
const host = process.env.HOSTNAME || "localhost";
|
||||
const port = process.env.PORT || 3100;
|
||||
|
||||
const server = express();
|
||||
|
||||
server.use(bodyparser.urlencoded({ extended: false }));
|
||||
server.use(bodyparser.json());
|
||||
|
||||
server.get("/health-check", require("./endpointHealthCheck"));
|
||||
|
||||
server.listen(port, host, (error) => {
|
||||
if (error) throw error;
|
||||
|
||||
console.info(`Server listening at http://${host}:${port} (NODE_ENV: ${process.env.NODE_ENV})`);
|
||||
});
|
|
@ -0,0 +1,14 @@
|
|||
const schedule = require("node-schedule");
|
||||
const db = require("./db");
|
||||
const { checks } = require("./checks");
|
||||
|
||||
// execute the health-check script every 5 minutes
|
||||
const job = schedule.scheduleJob("*/5 * * * *", async () => {
|
||||
const entry = { date: new Date().toISOString(), checks: [] };
|
||||
|
||||
entry.checks = await Promise.all(checks.map((check) => new Promise(check)));
|
||||
|
||||
db.get("entries").push(entry).write();
|
||||
});
|
||||
|
||||
job.invoke();
|
|
@ -6,140 +6,123 @@ that we are working with a Debian Buster Minimal system or similar.
|
|||
|
||||
## Initial Setup
|
||||
|
||||
(Assumes we are logged in as root on a fresh installation of Debian)
|
||||
You may want to fork this repository and replace ssh keys in
|
||||
`setup-scripts/support/authorized_keys` and optionally edit the `setup-scripts/support/tmux.conf` and `setup-scripts/support/bashrc` configurations to fit your needs.
|
||||
|
||||
You may want to fork this repository and add your ssh pubkey to
|
||||
`authorized_keys` and optionally edit the `tmux` and `bash` configurations.
|
||||
### Step 0: stack overview
|
||||
|
||||
NOTE: nginx version 1.11.0 or higher is required.
|
||||
If you use the install script below, the correct version should be installed.
|
||||
- dockerized services inside `docker-compose.yml`
|
||||
- [docker-host](https://github.com/qoomon/docker-host) ([docker hub](https://hub.docker.com/r/qoomon/docker-host)): service that exposes the server ip to docker containers so we can access siad from within the nginx container
|
||||
- [caddy](https://caddyserver.com) ([docker hub](https://hub.docker.com/r/caddy/caddy)): reverse proxy (similar to nginx) that handles ssl out of the box and acts as an entry point
|
||||
- [openresty](https://openresty.org) ([docker hub](https://hub.docker.com/r/openresty/openresty)): nginx custom build, acts as a cached proxy to siad (we only use it because caddy doesn't support proxy caching, otherwise we could drop it)
|
||||
- health-check: a simple service that periodically collects health data about the server (status and response times) and exposes a `/health-check` api endpoint whose response is deliberately delayed based on the server's measured response times, so a load balancer can prioritize servers accordingly (we use it with cloudflare) - see the example request after this list
|
||||
- siad setup: we use a "double siad" setup with one node solely for downloads and one for uploads, to improve performance
|
||||
- we use systemd to manage siad services
|
||||
- siad is not installed as docker service for improved performance
|
||||
- discord integration
|
||||
- [funds-checker](funds-checker.py): script that checks wallet balance and sends status messages to discord periodically
|
||||
- [log-checker](log-checker.py): script that scans siad logs for critical errors and reports them to discord periodically
|
||||
- [blacklist-skylink](blacklist-skylink.sh): script that can be run locally from a machine with access to all your skynet portal servers; it blacklists the provided skylink and prunes the nginx cache to ensure it is no longer available (a bit heavy-handed, but the best we can do right now without the paid nginx version) - if you want to use it, make sure to adjust the server addresses
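For illustration of the health-check endpoint mentioned above, this is roughly what querying it through a running portal looks like (the hostname and all response values below are placeholders - the actual entries depend on your server's recent checks):

```
curl -i https://siasky.net/health-check
# HTTP/2 200    <- 503 if the most recent check failed
# [{"date":"2020-06-29T12:00:00.000Z","checks":[
#    {"name":"upload_file","up":true,"statusCode":200,"time":1423},
#    {"name":"download_file","up":true,"statusCode":200,"time":312}]}]
```

Healthy servers additionally delay this response by the average check time of their last 10 healthy entries, which is what a load balancer can use to prefer faster servers.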
|
||||
|
||||
0. SSH in a freshly installed Debian machine.
|
||||
1. `apt-get update && apt-get install sudo`
|
||||
1. `adduser user`
|
||||
1. `usermod -a -G sudo user`
|
||||
1. Quit the ssh session.
|
||||
### Step 1: setting up server user
|
||||
|
||||
1. SSH into a freshly installed Debian machine as a user with sudo access (can be root)
|
||||
1. `apt-get update && apt-get install sudo` to make sure `sudo` is available
|
||||
1. `adduser user` to create user called `user` (creates `/home/user` directory)
|
||||
1. `usermod -a -G sudo user` to add this new user to sudo group
|
||||
1. `usermod -a -G systemd-journal user` to add this new user to systemd-journal group
|
||||
1. Quit the ssh session with `exit` command
|
||||
|
||||
You can now ssh into your machine as the user `user`.
|
||||
|
||||
5. On your local machine: `ssh-copy-id user@ip-addr`
|
||||
6. On your local machine: `ssh user@ip-addr`
|
||||
7. Now logged in as `user`: `sudo apt-get install git`
|
||||
8. `git clone https://github.com/NebulousLabs/skynet-webportal`
|
||||
9. `cd skynet-webportal/setup-scripts`
|
||||
10. `./setup.sh`
|
||||
11. Once DNS records are set you can run: `./letsencrypt-setup.sh`
|
||||
12. This should edit your nginx configuration for you. If not, you should check
|
||||
that keys were created by letsencrypt in `/etc/letsencrypt/live/` and add
|
||||
the following lines into your nginx configuration. Make sure to replace
|
||||
`YOUR-DOMAIN` with your domain name.
|
||||
```
|
||||
ssl_certificate /etc/letsencrypt/live/YOUR-DOMAIN/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/YOUR-DOMAIN/privkey.pem;
|
||||
```
|
||||
13. Finally make sure to check your nginx conf and reload nginx:
|
||||
`sudo nginx -t`
|
||||
`sudo systemctl reload nginx`
|
||||
### Step 2: setting up environment
|
||||
|
||||
## Running siad
|
||||
1. On your local machine: `ssh-copy-id user@ip-addr` to copy over your ssh key to server
|
||||
1. On your local machine: `ssh user@ip-addr` to log in to server as user `user`
|
||||
1. You are now logged in as `user`
|
||||
|
||||
NOTE: You must be running `siad` and `siac` by building from a version at least
|
||||
as recent as `v1.4.4`.
|
||||
**The following steps are executed on the remote host, logged in as `user`:**
|
||||
|
||||
You still need to setup `siad` for the backend to be complete.
|
||||
1. `sudo apt-get install git` to install git
|
||||
1. `git clone https://github.com/NebulousLabs/skynet-webportal`
|
||||
1. run the setup scripts in this exact order, providing the sudo password when asked (if one of them fails, you can retry just that one before proceeding further)
|
||||
1. `/home/user/skynet-webportal/setup-scripts/setup-server.sh`
|
||||
1. `/home/user/skynet-webportal/setup-scripts/setup-siad.sh`
|
||||
1. `/home/user/skynet-webportal/setup-scripts/setup-docker-services.sh`
|
||||
1. `/home/user/skynet-webportal/setup-scripts/setup-health-check-scripts.sh` (optional)
|
||||
|
||||
The setup script creates a systemd user service that will run `siad` in the
|
||||
background and automatically restart upon failure. The `siad.service` file must
|
||||
be placed in `~/.config/systemd/user/`
|
||||
### Step 3: configuring siad
|
||||
|
||||
To use the `siad.service`, first fill out `~/.sia/sia.env` environment variables with the
|
||||
correct values. You will need to initialize your wallet if you have not already
|
||||
done so.
|
||||
At this point we have almost everything set up. We have 2 siad instances running as services and we need to set up the wallets and allowance on those.
|
||||
|
||||
To enable the service: `systemctl --user enable siad.service`
|
||||
1. Create new wallet for both siad instances (remember to save the seeds)
|
||||
1. `siac wallet init` to init download node wallet
|
||||
1. `siac-upload wallet init` to init upload node wallet
|
||||
1. Unlock both wallets
|
||||
1. `siac wallet unlock` to unlock download node wallet (use seed as password)
|
||||
1. `siac-upload wallet unlock` to unlock upload node wallet (use seed as password)
|
||||
1. Generate wallet addresses for both siad instances (save them for later to transfer the funds)
|
||||
1. `siac wallet address` to generate address for download node wallet
|
||||
1. `siac-upload wallet address` to generate address for upload node wallet
|
||||
1. Set up allowance on both siad instances
|
||||
1. `siac renter setallowance` to set allowance on download node
|
||||
1. 10 KS (keep 25 KS in your wallet)
|
||||
1. default period
|
||||
1. default number of hosts
|
||||
1. 8 week renewal time
|
||||
1. 500 GB expected storage
|
||||
1. 500 GB expected upload
|
||||
1. 5 TB expected download
|
||||
1. default redundancy
|
||||
1. `siac-upload renter setallowance` to set allowance on upload node
|
||||
1. use the same allowance settings as download node
|
||||
1. Run `siac renter setallowance --payment-contract-initial-funding 10SC` so that your download node will start making 10 contracts per block with many hosts to potentially view the whole network's files
|
||||
1. Copy over apipassword from `/home/user/.sia/apipassword` and save it for the next step
|
||||
1. Edit the environment files for both siad instances (an illustrative example follows this list)
|
||||
1. `/home/user/.sia/sia.env` for the download node
|
||||
1. `SIA_API_PASSWORD` to previously copied apipassword (same for both instances)
|
||||
1. `SIA_WALLET_PASSWORD` to be the wallet seed
|
||||
1. replace the XXXXX part of `PORTAL_NAME` with some meaningful name like `warsaw.siasky.net`
|
||||
1. `DISCORD_BOT_TOKEN` for discord health check scripts integration
|
||||
1. `/home/user/.sia/sia-upload.env` for the upload node
|
||||
1. `SIA_API_PASSWORD` to previously copied apipassword (same for both instances)
|
||||
1. `SIA_WALLET_PASSWORD` to be the wallet seed
|
||||
1. replace the XXXXX part of `PORTAL_NAME` with some meaningful name like `warsaw.siasky.net`
|
||||
1. `DISCORD_BOT_TOKEN` for discord health check scripts integration
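For illustration, a filled-out `/home/user/.sia/sia.env` for the download node might look like the sketch below - every value here is a placeholder, use your own apipassword, wallet seed, portal name and bot token:

```
# siad environment variables
SIA_API_PASSWORD="<contents of /home/user/.sia/apipassword>"
SIA_DATA_DIR="/home/user/.sia"
SIAD_DATA_DIR="/home/user/siad"
SIA_WALLET_PASSWORD="<download node wallet seed>"

# portal specific environment variables
API_PORT="9980"
PORTAL_NAME="warsaw.siasky.net | download"
DISCORD_BOT_TOKEN="<discord bot token>"
```

`/home/user/.sia/sia-upload.env` is analogous, with `SIAD_DATA_DIR="/home/user/siad-upload"`, `API_PORT="9970"` and the upload node's wallet seed.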
|
||||
|
||||
### Running 2 siad instances
|
||||
### Step 4: configuring docker services
|
||||
|
||||
It is recommended to run 2 `siad` nodes on the same server. One node to
|
||||
prioritize downloads and one to prioritize uploads. This will drastically improve
|
||||
performance of both up - and download. The setup scripts assume this double siad
|
||||
setup and perform the initial setup for a 2nd `siad` instance running as a
|
||||
systemd service `siad-upload.service` in the `~/siad-upload/` directory with
|
||||
environment variables in `sia-upload.env`. You must fill out the correct values
|
||||
for those environment variables.
|
||||
|
||||
Note that running 2 `siad` nodes is not obligatory. You can run a portal with
|
||||
just one `siad` node just fine. If you choose to do so, simply ignore the second
|
||||
`siad` node and point everything to your single node instead.
|
||||
|
||||
The `bashrc` file in this repository also provides an alias `siac-upload` that
|
||||
loads the correct environment variables and sets the correct ports to interact
|
||||
with the 2nd `siad` node.
|
||||
|
||||
`siac` is used to operate node 1, and `siac-upload` is used to operate node 2.
|
||||
|
||||
To enable the 2nd service: `systemctl --user enable siad-upload.service`
|
||||
1. generate and copy sia api token `printf ":$(cat /home/user/.sia/apipassword)" | base64`
|
||||
1. edit `/home/user/skynet-webportal/.env` and configure the following environment variables (an illustrative example follows this list)
|
||||
- `DOMAIN_NAME` is your domain name
|
||||
- `EMAIL_ADDRESS` is your email address used for communication regarding SSL certification
|
||||
- `SIA_API_AUTHORIZATION` is the token you generated in the previous step
|
||||
- `CLOUDFLARE_AUTH_TOKEN` if using cloudflare as a dns load balancer (only needed for siasky.net)
|
||||
1. only for siasky.net domain instances: edit `/home/user/skynet-webportal/docker/caddy/Caddyfile`, uncomment `import siasky.net` and comment out `import custom.domain`
|
||||
1. `sudo docker-compose up -d` to restart the services so they pick up new configuration
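As a sketch, a configured `/home/user/skynet-webportal/.env` could look like this - all values are placeholders, and the `SIA_API_AUTHORIZATION` value is whatever the `printf ... | base64` command above printed for your apipassword:

```
DOMAIN_NAME=example.com
EMAIL_ADDRESS=admin@example.com
# base64 of ":your-apipassword" - generated by the printf command above
SIA_API_AUTHORIZATION=OnlvdXItYXBpcGFzc3dvcmQ=
# leave empty unless you use the cloudflare dns setup
CLOUDFLARE_AUTH_TOKEN=
```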
|
||||
|
||||
### Useful Commands
|
||||
|
||||
To start the service: `systemctl --user start siad`
|
||||
|
||||
To stop it: `systemctl --user stop siad`
|
||||
|
||||
To check the status of it: `systemctl --user status siad`
|
||||
|
||||
To check standard err/standard out: `journalctl --user-unit siad`. In addition you can add:
|
||||
|
||||
- `-r` to view journal from the newest entry
|
||||
- `-f` to follow and `-n INTEGER` to specify number of lines
|
||||
|
||||
## Portal Setup
|
||||
|
||||
When `siad` is done syncing, create a new wallet and unlock the wallet.
|
||||
|
||||
Then set an allowance (`siac renter setallowance`), with the suggested values
|
||||
below:
|
||||
|
||||
- 10 KS (keep 25 KS in your wallet)
|
||||
- default period
|
||||
- default number of hosts
|
||||
- 8 week renewal time
|
||||
- 500 GB expected storage
|
||||
- 500 GB expected upload
|
||||
- 5 TB expected download
|
||||
- default redundancy
|
||||
|
||||
Once your allowance is set you need to set your node to be a viewnode with the
|
||||
following command:
|
||||
`siac renter setallowance --payment-contract-initial-funding 10SC`
|
||||
|
||||
Now your node will begin making 10 contracts per block with many hosts so it can
|
||||
potentially view the whole network's files.
|
||||
|
||||
## Running the Portal
|
||||
|
||||
Make sure you have [nodejs](https://nodejs.org/en/download/package-manager/) and [yarn](https://yarnpkg.com/getting-started/install) installed.
|
||||
You can check that with `node -v` and `yarn -v` commands respectively.
|
||||
|
||||
- run `cd /home/user/skynet-webportal`
|
||||
- run `yarn` to build dependencies
|
||||
- run `yarn build` to build the client package
|
||||
|
||||
Client package will be outputted to `/public` and nginx configuration will pick it up automatically.
|
||||
|
||||
## Health Check Scripts.
|
||||
|
||||
There are 2 optional health check scripts that can be set up using
`setup-health-check-scripts.sh`. That command will install the necessary Python
dependencies and set up 2 cronjobs for each script: one for the downloading `siad`
service and one for the uploading `siad` service.
|
||||
|
||||
To use the scripts you must set up a Discord bot and provide a bot token. The bot
scripts take 1 or more arguments, the first always being the path to an
`.env` file (see the example invocation after the descriptions below).
|
||||
|
||||
`funds-checker` checks that the wallet balance and allowance settings are
|
||||
sufficient for portal usage.
|
||||
|
||||
`log-checker` checks if there are any critical warnings in the journal for the
|
||||
running services.
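As a usage sketch, both scripts can also be run by hand with the `.env` file path as the first argument; the log-checker additionally takes the systemd unit name and (presumably) the number of hours of journal to scan, matching the cronjobs that `setup-health-check-scripts.sh` installs:

```
# check the download node's wallet balance/allowance and report to discord
python3 /home/user/skynet-webportal/setup-scripts/funds-checker.py /home/user/.sia/sia.env

# scan the siad journal of the last 8 hours for critical errors and report to discord
python3 /home/user/skynet-webportal/setup-scripts/log-checker.py /home/user/.sia/sia.env siad 8
```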
|
||||
- Accessing siac for both nodes
|
||||
- `siac` for download node
|
||||
- `siac-upload` for upload node
|
||||
- Checking status of siad service
|
||||
- `systemctl --user status siad` for download node
|
||||
- `systemctl --user status siad-upload` for upload node
|
||||
- Stopping siad service
|
||||
- `systemctl --user stop siad` for download node
|
||||
- `systemctl --user stop siad-upload` for upload node
|
||||
- Starting siad service
|
||||
- `systemctl --user start siad` for download node
|
||||
- `systemctl --user start siad-upload` for upload node
|
||||
- Restarting siad service
|
||||
- `systemctl --user restart siad` for download node
|
||||
- `systemctl --user restart siad-upload` for upload node
|
||||
- Checking siad service logs (follow last 50 lines)
|
||||
- `journalctl -f -n 50 --user-unit siad` for download node
|
||||
- `journalctl -f -n 50 --user-unit siad-upload` for upload node
|
||||
- Checking caddy logs (for example in case ssl certificate fails)
|
||||
- `sudo docker logs caddy -f`
|
||||
- Checking nginx logs (nginx handles all communication to siad instances)
|
||||
- `tail -n 50 docker/data/nginx/logs/access.log` to view the last 50 lines of the access log
- `tail -n 50 docker/data/nginx/logs/error.log` to view the last 50 lines of the error log
|
||||
|
|
|
@ -9,8 +9,8 @@ fi
|
|||
for server in "germany.siasky.net" "us-east.siasky.net" "us-west.siasky.net" "helsinki.siasky.net" "siasky.dev";
|
||||
do
|
||||
echo "⌁ Blacklisting on ${server}"
|
||||
ssh -q -t user@${server} 'curl -A Sia-Agent --user "":$(cat ~/.sia/apipassword) --data '"'"'{"add":["'$1'"]}'"'"' "localhost:9980/skynet/blacklist"'
|
||||
ssh -q -t user@${server} 'rm -rf ~/skynet_webportal/docker/data/nginx/cache' # remove cache from docker-managed portals
|
||||
ssh -q -t user@${server} 'curl -A Sia-Agent --user "":$(cat /home/user/.sia/apipassword) --data '"'"'{"add":["'$1'"]}'"'"' "localhost:9980/skynet/blacklist"'
|
||||
ssh -q -t user@${server} 'rm -rf /home/user/skynet_webportal/docker/data/nginx/cache' # remove cache from docker-managed portals
|
||||
ssh -q -t user@${server} 'sudo rm -rf /tmp/nginx' # remove cache from legacy non-docker portals
|
||||
done
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
#! /usr/bin/env bash
|
||||
set -e
|
||||
|
||||
domain="$1"
|
||||
if [[ -z $domain ]]; then
|
||||
echo "Usage $0 DOMAIN_NAME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo certbot --nginx -d "$domain" -d www."$domain"
|
||||
sudo certbot renew --dry-run
|
|
@ -0,0 +1,30 @@
|
|||
#! /usr/bin/env bash
|
||||
|
||||
set -e # exit on first error
|
||||
|
||||
# Install docker (cleans up old docker installation)
|
||||
# sudo apt-get remove -y docker docker-engine docker.io containerd runc # fails if it is the first installation
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
|
||||
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
||||
docker --version # sanity check
|
||||
|
||||
# Install docker-compose
|
||||
sudo curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||
sudo chmod +x /usr/local/bin/docker-compose
|
||||
docker-compose --version # sanity check
|
||||
|
||||
# Create dummy .env file for docker-compose usage with variables
# DOMAIN_NAME - the domain name your server is using i.e. example.com
# EMAIL_ADDRESS - this is the administrator contact email you need to supply for communication regarding SSL certification
# SIA_API_AUTHORIZATION - the base64 encoded :apipassword string
# CLOUDFLARE_AUTH_TOKEN - cloudflare auth token for ssl generation (just for siasky.net)
if ! [ -f /home/user/skynet-webportal/.env ]; then
    printf "DOMAIN_NAME=example.com\nEMAIL_ADDRESS=email@example.com\nSIA_API_AUTHORIZATION=\nCLOUDFLARE_AUTH_TOKEN=\n" > /home/user/skynet-webportal/.env
fi

# Start docker containers with nginx and client (the .env file created above must exist first)
sudo docker-compose -f docker-compose.yml up --build -d
|
|
@ -1,5 +1,6 @@
|
|||
#! /usr/bin/env bash
|
||||
set -e
|
||||
|
||||
set -e # exit on first error
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install python3-pip
|
||||
|
@ -7,11 +8,11 @@ sudo apt-get -y install python3-pip
|
|||
pip3 install discord.py
|
||||
pip3 install python-dotenv
|
||||
|
||||
downloadCheck="0 0,8,16 * * * ~/skynet-webportal/setup-scripts/funds-checker.py ~/.sia/sia.env"
|
||||
uploadCheck="0 0,8,16 * * * ~/skynet-webportal/setup-scripts/funds-checker.py ~/.sia/sia-upload.env"
|
||||
downloadCheck="0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/funds-checker.py /home/user/.sia/sia.env"
|
||||
uploadCheck="0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/funds-checker.py /home/user/.sia/sia-upload.env"
|
||||
|
||||
logCheck1="0 0,8,16 * * * ~/skynet-webportal/setup-scripts/log-checker.py ~/.sia/sia.env siad 8"
|
||||
logCheck2="0 0,8,16 * * * ~/skynet-webportal/setup-scripts/log-checker.py ~/.sia/sia-upload.env siad-upload 8"
|
||||
logCheck1="0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/log-checker.py /home/user/.sia/sia.env siad 8"
|
||||
logCheck2="0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/log-checker.py /home/user/.sia/sia-upload.env siad-upload 8"
|
||||
|
||||
(crontab -u user -l; echo "$downloadCheck" ) | crontab -u user -
|
||||
(crontab -u user -l; echo "$uploadCheck" ) | crontab -u user -
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
#! /usr/bin/env bash
|
||||
|
||||
set -e # exit on first error
|
||||
|
||||
# Copy over basic configuration files
|
||||
cp /home/user/skynet-webportal/setup-scripts/support/tmux.conf /home/user/.tmux.conf
|
||||
cp /home/user/skynet-webportal/setup-scripts/support/bashrc /home/user/.bashrc
|
||||
source /home/user/.bashrc
|
||||
|
||||
# Add SSH keys and set SSH configs
|
||||
sudo cp /home/user/skynet-webportal/setup-scripts/support/ssh_config /etc/ssh/ssh_config
|
||||
mkdir -p /home/user/.ssh
|
||||
cat /home/user/skynet-webportal/setup-scripts/support/authorized_keys >> /home/user/.ssh/authorized_keys
|
||||
|
||||
# Install apt packages
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install ufw tmux ranger htop nload gcc g++ make git vim unzip curl
|
||||
|
||||
# Setup GIT credentials (so commands like git stash would work)
|
||||
git config --global user.email "devs@nebulous.tech"
|
||||
git config --global user.name "Sia Dev"
|
||||
|
||||
# Setup firewall
|
||||
sudo ufw --force enable # --force to make it non-interactive
|
||||
sudo ufw logging low # enable logging for debugging purpose: tail -f /var/log/ufw.log
|
||||
sudo ufw allow ssh # allow ssh connection to server
|
||||
sudo ufw allow 80,443/tcp # allow http and https ports
|
||||
sudo ufw allow proto tcp from any to 172.16.0.0/12 port 9970,9980 # expose siad api ports to the local docker network
sudo ufw allow proto tcp from any to 192.168.0.0/16 port 9970,9980 # expose siad api ports to local network
|
||||
|
||||
# Setup periodical /tmp cleanup so we don't run out of disk space
|
||||
# - deletes anything older than 10 days from /tmp, crontab is set to run it every day at midnight
|
||||
# WARNING: if you run this job more than once, make sure to either comment this out or clean crontab from duplicates
|
||||
(sudo crontab -l 2>/dev/null; echo "0 0 * * * find /tmp -type f -atime +10 -delete >/dev/null 2>&1") | sudo crontab -
|
||||
|
||||
# OPTIONAL: terminfo for alacritty terminal via ssh
|
||||
# If you don't use the alacritty terminal you can remove this step.
|
||||
wget -c https://raw.githubusercontent.com/alacritty/alacritty/master/extra/alacritty.info
|
||||
sudo tic -xe alacritty,alacritty-direct alacritty.info
|
||||
rm alacritty.info
|
||||
|
||||
# Set up file limits - siad uses a lot of open files, so we raise the limits so it doesn't choke up
|
||||
sudo cp /home/user/skynet-webportal/setup-scripts/support/limits.conf /etc/security/limits.conf
|
||||
|
||||
# Enable lingering, which prevents the user services from being shut down when you log out of the server
|
||||
loginctl enable-linger user
|
|
@ -0,0 +1,64 @@
|
|||
#! /usr/bin/env bash
|
||||
|
||||
set -e # exit on first error
|
||||
|
||||
# Setup constants
|
||||
GO_VERSION=1.13.11
|
||||
SIA_VERSION=1.4.11
|
||||
|
||||
# Install Go
|
||||
wget -c https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz
|
||||
rm go${GO_VERSION}.linux-amd64.tar.gz
|
||||
|
||||
# add gopath to PATH and persist it in /etc/profile
|
||||
export PATH="${PATH}:/usr/local/go/bin:/home/user/go/bin"
|
||||
echo "export PATH=${PATH}" | sudo tee /etc/profile.d/go_path.sh
|
||||
|
||||
# Sanity check that will pass if go was installed correctly.
|
||||
go version
|
||||
|
||||
# Install Sia
|
||||
rm -rf /home/user/Sia
|
||||
git clone -b v${SIA_VERSION} https://gitlab.com/NebulousLabs/Sia.git /home/user/Sia
|
||||
make --directory /home/user/Sia
|
||||
|
||||
# Setup systemd files and restart daemon
|
||||
mkdir -p /home/user/.config/systemd/user
|
||||
cp /home/user/skynet-webportal/setup-scripts/support/siad.service /home/user/.config/systemd/user/siad.service
|
||||
cp /home/user/skynet-webportal/setup-scripts/support/siad-upload.service /home/user/.config/systemd/user/siad-upload.service
|
||||
|
||||
# Create siad data directories
|
||||
mkdir -p /home/user/siad
|
||||
mkdir -p /home/user/siad-upload
|
||||
|
||||
# Setup files for storing environment variables
|
||||
mkdir -p /home/user/.sia
|
||||
# use -n flag to not overwrite because these files store wallet information
|
||||
cp -n /home/user/skynet-webportal/setup-scripts/support/sia.env /home/user/.sia/sia.env
|
||||
cp -n /home/user/skynet-webportal/setup-scripts/support/sia-upload.env /home/user/.sia/sia-upload.env
|
||||
|
||||
# Setup persistent journal
|
||||
sudo mkdir -p /var/log/journal
|
||||
sudo cp /home/user/skynet-webportal/setup-scripts/support/journald.conf /etc/systemd/journald.conf
|
||||
sudo systemctl restart systemd-journald
|
||||
|
||||
# Restart a daemon and enable both siad nodes (don't start yet)
|
||||
systemctl --user daemon-reload
|
||||
systemctl --user enable siad
|
||||
systemctl --user enable siad-upload
|
||||
|
||||
# download siastats bootstrap (consensus and transactionpool) and apply it
|
||||
if ! [ -f /home/user/consensus.zip ]; then
|
||||
curl https://siastats.info/bootstrap/bootstrap.zip -o /home/user/consensus.zip
|
||||
fi
|
||||
if ! [ -f /home/user/siad/consensus/consensus.db ]; then
|
||||
unzip -o /home/user/consensus.zip -d /home/user/siad
|
||||
fi
|
||||
if ! [ -f /home/user/siad-upload/consensus/consensus.db ]; then
|
||||
unzip -o /home/user/consensus.zip -d /home/user/siad-upload
|
||||
fi
|
||||
|
||||
# start siad after the consensus has been bootstrapped
|
||||
systemctl --user start siad
|
||||
systemctl --user start siad-upload
|
|
@ -1,84 +0,0 @@
|
|||
#! /usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Copy over basic configuration files.
|
||||
cp ./tmux.conf ~/.tmux.conf
|
||||
cp ./bashrc ~/.bashrc
|
||||
source ~/.bashrc
|
||||
|
||||
# Add SSH keys and set SSH configs
|
||||
sudo cp ./ssh_config /etc/ssh/ssh_config
|
||||
mkdir -p ~/.ssh
|
||||
cat ./authorized_keys >> ~/.ssh/authorized_keys
|
||||
|
||||
# Nodejs install prerequisite https://nodejs.org/en/download/package-manager/
|
||||
curl -sL https://deb.nodesource.com/setup_13.x | sudo -E bash -
|
||||
|
||||
# Yarn install prerequisite https://classic.yarnpkg.com/en/docs/install
|
||||
curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
|
||||
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
|
||||
|
||||
# Apt installations.
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install ufw tmux ranger htop nload nginx certbot \
|
||||
python-certbot-nginx nodejs gcc g++ make yarn git vim
|
||||
|
||||
# terminfo for alacritty terminal via ssh
|
||||
# If you don't use the alacritty terminal you can remove this step.
|
||||
wget -c https://raw.githubusercontent.com/alacritty/alacritty/master/extra/alacritty.info
|
||||
sudo tic -xe alacritty,alacritty-direct alacritty.info
|
||||
rm alacritty.info
|
||||
|
||||
# Setup nginx config
|
||||
sudo cp ./skynet-nginx.conf /etc/nginx/sites-available/skynet
|
||||
sudo nginx -t
|
||||
sudo ln -sf /etc/nginx/sites-available/skynet /etc/nginx/sites-enabled/skynet
|
||||
sudo rm /etc/nginx/sites-enabled/default --force
|
||||
sudo systemctl reload nginx
|
||||
|
||||
# Setup firewall
|
||||
# TODO: disable plain HTTP eventually
|
||||
sudo ufw enable
|
||||
sudo ufw allow ssh
|
||||
sudo ufw allow 'Nginx Full'
|
||||
sudo ufw allow 'Nginx HTTP'
|
||||
|
||||
# Install Go 1.13.11.
|
||||
wget -c https://dl.google.com/go/go1.13.11.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.13.11.linux-amd64.tar.gz
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
rm go1.13.11.linux-amd64.tar.gz
|
||||
|
||||
# Sanity check that will pass if go was installed correctly.
|
||||
go version
|
||||
|
||||
# Install Sia
|
||||
git clone -b v1.4.8 https://gitlab.com/NebulousLabs/Sia ~/Sia
|
||||
make --directory ~/Sia
|
||||
|
||||
# Setup systemd files
|
||||
mkdir -p ~/.config/systemd/user
|
||||
cp siad.service ~/.config/systemd/user/siad.service
|
||||
cp siad-upload.service ~/.config/systemd/user/siad-upload.service
|
||||
|
||||
# Setup files for storing environment variables
|
||||
mkdir -p ~/.sia
|
||||
cp sia.env ~/.sia/
|
||||
cp sia.env ~/.sia/sia-upload.env
|
||||
|
||||
# Setup persistent journal
|
||||
sudo mkdir -p /var/log/journal
|
||||
sudo cp journald.conf /etc/systemd/journald.conf
|
||||
sudo systemctl restart systemd-journald
|
||||
|
||||
# Set up file limits.
|
||||
sudo cp limits.conf /etc/security/limits.conf
|
||||
|
||||
# Setup periodical /tmp cleanup so we don't run out of disk space
|
||||
# - deletes anything older than 10 days from /tmp, crontab is set to run it every day at midnight
|
||||
(sudo crontab -l 2>/dev/null; echo "0 0 * * * find /tmp -type f -atime +10 -delete >/dev/null 2>&1") | sudo crontab -
|
||||
|
||||
# Setup skynet frontend.
|
||||
cd ..
|
||||
yarn
|
||||
yarn build
|
|
@ -1,6 +0,0 @@
|
|||
SIA_DATA_DIR=""
|
||||
SIA_API_PASSWORD=""
|
||||
SIA_WALLET_PASSWORD=""
|
||||
API_PORT=""
|
||||
PORTAL_NAME=""
|
||||
DISCORD_BOT_TOKEN=""
|
|
@ -1,189 +0,0 @@
|
|||
limit_req_zone $binary_remote_addr zone=stats_by_ip:10m rate=10r/m;
|
||||
limit_conn_zone $binary_remote_addr zone=uploads_by_ip:10m;
|
||||
limit_conn_zone $binary_remote_addr zone=downloads_by_ip:10m;
|
||||
limit_req_status 429;
|
||||
limit_conn_status 429;
|
||||
|
||||
# max_size sets the upper limit of the size of the cache
|
||||
proxy_cache_path /tmp/nginx levels=1:2 keys_zone=skynet:10m inactive=1d max_size=10g use_temp_path=off;
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
server_name siasky.net www.siasky.net; # replace with actual server names
|
||||
|
||||
# ddos protection: closing slow connections
|
||||
client_body_timeout 5s;
|
||||
client_header_timeout 5s;
|
||||
|
||||
# NOTE: make sure to enable any additional configuration you might need like gzip
|
||||
|
||||
location / {
|
||||
root /home/user/skynet-webportal/public; # path to root of index.html
|
||||
}
|
||||
|
||||
location /stats {
|
||||
limit_req zone=stats_by_ip; # ddos protection: max 10 requests per minute
|
||||
|
||||
proxy_set_header Access-Control-Allow-Origin: *;
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# replace BASE64_AUTHENTICATION with base64 encoded <user>:<password>
|
||||
# for sia user is empty so it's just :<password>
|
||||
# to generate the passcode use https://www.base64encode.org or any other base64 encoder
|
||||
proxy_set_header Authorization "Basic BASE64_AUTHENTICATION";
|
||||
proxy_pass http://127.0.0.1:9970/skynet/stats;
|
||||
}
|
||||
|
||||
location /statsdown {
|
||||
limit_req zone=stats_by_ip; # ddos protection: max 10 requests per minute
|
||||
|
||||
proxy_set_header Access-Control-Allow-Origin: *;
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# replace BASE64_AUTHENTICATION with base64 encoded <user>:<password>
|
||||
# for sia user is empty so it's just :<password>
|
||||
# to generate the passcode use https://www.base64encode.org or any other base64 encoder
|
||||
proxy_set_header Authorization "Basic BASE64_AUTHENTICATION";
|
||||
proxy_pass http://127.0.0.1:9980/skynet/stats;
|
||||
}
|
||||
|
||||
location /skynet/skyfile {
|
||||
limit_conn uploads_by_ip 10; # ddos protection: max 10 uploads at a time
|
||||
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
|
||||
proxy_read_timeout 600;
|
||||
proxy_request_buffering off; # stream uploaded files through the proxy as it comes in
|
||||
proxy_set_header Expect $http_expect;
|
||||
|
||||
# Extract 3 sets of 2 characters from $request_id and assign to $dir1, $dir2, $dir3
|
||||
# respectively. The rest of the $request_id is going to be assigned to $dir4.
|
||||
# We use those variables to automatically generate a unique path for the uploaded file.
|
||||
# This ensures that not all uploaded files end up in the same directory, which is something
|
||||
# that causes performance issues in the renter.
|
||||
# Example path result: /af/24/9b/c5ec894920ccc45634dc9a8065
|
||||
if ($request_id ~* "(\w{2})(\w{2})(\w{2})(\w+)") {
|
||||
set $dir1 $1;
|
||||
set $dir2 $2;
|
||||
set $dir3 $3;
|
||||
set $dir4 $4;
|
||||
}
|
||||
|
||||
# proxy this call to siad endpoint (make sure the ip is correct)
|
||||
#
|
||||
# note that we point uploads to port '9970', do this when you want to
|
||||
# run in a configuration where you have two siad instances, one for
|
||||
# downloads and one for uploads. This drastically improves the up - and
|
||||
# download speed of your portal. When running your portal in this double
|
||||
# siad setup, make sure only the download portal runs in 'portal mode'.
|
||||
# The upload siad can be run in normal mode. Set the port to '9980' if
|
||||
# you do not want to run your portal in the double siad setup.
|
||||
proxy_pass http://127.0.0.1:9970/skynet/skyfile/$dir1/$dir2/$dir3/$dir4$is_args$args;
|
||||
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
# make sure to override user agent header - siad requirement
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
# replace BASE64_AUTHENTICATION with base64 encoded <user>:<password>
|
||||
# for sia user is empty so it's just :<password>
|
||||
# to generate the passcode use https://www.base64encode.org or any other base64 encoder
|
||||
proxy_set_header Authorization "Basic BASE64_AUTHENTICATION";
|
||||
}
|
||||
|
||||
location ~ "/skynet/skyfile/(.*)" {
|
||||
limit_conn uploads_by_ip 10; # ddos protection: max 10 uploads at a time
|
||||
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
|
||||
proxy_read_timeout 600;
|
||||
proxy_request_buffering off; # stream uploaded files through the proxy as it comes in
|
||||
proxy_set_header Expect $http_expect;
|
||||
|
||||
# we need to explicitly use set directive here because $1 will contain the siapath with
|
||||
# decoded whitespaces and set will re-encode it for us before passing it to proxy_pass
|
||||
set $siapath $1;
|
||||
|
||||
# proxy this call to siad endpoint (make sure the ip is correct)
|
||||
#
|
||||
# note that we point uploads to port '9970', do this when you want to
|
||||
# run in a configuration where you have two siad instances, one for
|
||||
# downloads and one for uploads. This drastically improves the up - and
|
||||
# download speed of your portal. When running your portal in this double
|
||||
# siad setup, make sure only the download portal runs in 'portal mode'.
|
||||
# The upload siad can be run in normal mode. Set the port to '9980' if
|
||||
# you do not want to run your portal in the double siad setup.
|
||||
proxy_pass http://127.0.0.1:9970/skynet/skyfile/$siapath$is_args$args;
|
||||
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
# make sure to override user agent header - siad requirement
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
# replace BASE64_AUTHENTICATION with base64 encoded <user>:<password>
|
||||
# for sia user is empty so it's just :<password>
|
||||
# to generate the passcode use https://www.base64encode.org or any other base64 encoder
|
||||
proxy_set_header Authorization "Basic BASE64_AUTHENTICATION";
|
||||
}
|
||||
|
||||
location ~ "^/([a-zA-Z0-9-_]{46}(/.*)?)$" {
|
||||
limit_conn downloads_by_ip 10; # ddos protection: max 10 downloads at a time
|
||||
|
||||
# we need to explicitly use set directive here because $1 will contain the skylink with
|
||||
# decoded whitespaces and set will re-encode it for us before passing it to proxy_pass
|
||||
set $skylink $1;
|
||||
|
||||
proxy_read_timeout 600;
|
||||
# proxy this call to siad /skynet/skylink/ endpoint (make sure the ip is correct)
|
||||
proxy_pass http://127.0.0.1:9980/skynet/skylink/$skylink$is_args$args;
|
||||
proxy_set_header Access-Control-Allow-Origin: *;
|
||||
# make sure to override user agent header - siad requirement
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# make sure the Skynet-File-Metadata header gets exposed in the response
|
||||
add_header Access-Control-Expose-Headers skynet-file-metadata;
|
||||
|
||||
# if you are expecting large headers (ie. Skynet-Skyfile-Metadata), tune these values to your needs
|
||||
#proxy_buffer_size 128k;
|
||||
#proxy_buffers 4 128k;
|
||||
|
||||
# cache frequent (> 10) downloads for 24 hours
|
||||
proxy_cache skynet;
|
||||
proxy_cache_key $uri;
|
||||
proxy_cache_min_uses 10;
|
||||
proxy_cache_valid 200 1440m;
|
||||
}
|
||||
|
||||
location ~ "^/file/([a-zA-Z0-9-_]{46}(/.*)?)$" {
|
||||
limit_conn downloads_by_ip 10; # ddos protection: max 10 downloads at a time
|
||||
|
||||
# we need to explicitly use set directive here because $1 will contain the skylink with
|
||||
# decoded whitespaces and set will re-encode it for us before passing it to proxy_pass
|
||||
set $skylink $1;
|
||||
|
||||
proxy_read_timeout 600;
|
||||
# proxy this call to siad /skynet/skylink/ endpoint (make sure the ip is correct)
|
||||
# this alias also adds attachment=true url param to force download the file
|
||||
proxy_pass http://127.0.0.1:9980/skynet/skylink/$skylink?attachment=true&$args;
|
||||
proxy_set_header Access-Control-Allow-Origin: *;
|
||||
# make sure to override user agent header - siad requirement
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# make sure the Skynet-File-Metadata header gets exposed in the response
|
||||
add_header Access-Control-Expose-Headers skynet-file-metadata;
|
||||
|
||||
# if you are expecting large headers (ie. Skynet-Skyfile-Metadata), tune these values to your needs
|
||||
#proxy_buffer_size 128k;
|
||||
#proxy_buffers 4 128k;
|
||||
|
||||
# cache frequent (> 10) downloads for 24 hours
|
||||
proxy_cache skynet;
|
||||
proxy_cache_key $uri;
|
||||
proxy_cache_min_uses 10;
|
||||
proxy_cache_valid 200 1440m;
|
||||
}
|
||||
|
||||
# SSL CERTIFICATES BELOW THIS LINE
|
||||
}
|
|
@ -7,10 +7,10 @@ SIA_PORT=${SIA_PORT:-9980}
|
|||
# You should probably run it using crontab, most likely as a root due to access_log read restrictions.
|
||||
#
|
||||
# basic usage:
|
||||
# setup-scripts/stats-logger.sh public/logs.json
|
||||
# /home/user/skynet-webportal/setup-scripts/stats-logger.sh public/logs.json
|
||||
#
|
||||
# usage with custom sia port:
|
||||
# SIA_PORT=9970 setup-scripts/stats-logger.sh public/logs.json
|
||||
# SIA_PORT=9970 /home/user/skynet-webportal/setup-scripts/stats-logger.sh public/logs.json
|
||||
#
|
||||
# configuring hourly logging with crontab (run crontab -e)
|
||||
# 0 * * * * /home/user/skynet-webportal/setup-scripts/stats-logger.sh /home/user/skynet-webportal/public/stats.json >/dev/null 2>&1
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGYvvU/12GHURRz1wg/8goacZbktAwI/288TlxnYJne3 marcin.jachymiak1@gmail.com
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCpBsw5mPBVIvVd5GX43VXWHWuLeR2h0lfw8vRyDFgmV0TqC9r0POfmWOdSo/QlxHOeI+7S8Ahj/JdDarrx3vJ2vJQkK/tN2bPS30tR0pbCkr0vE/lUWsEuVxOXK132wtFQ/pF3CoVipI8THUS7/Dtap/9fujcEm59dIi3obYGc9F+UetmNtrc+mb6KJ6a1hkaXjD12qP03srSQSDBjch/7nbFFzrRwCZ9DJntMu6Ux6NZ7RcFuXPCg0dK0lggEX/Agzh3KHe69dgiMh8sG0WwCb9vWqd6dtapCt7XKZSnEvyFE1YVZgpsd7bCnGe4vPS3kLsvxeojruDo8Oj3b0exHL9+3Rr4ndVVNHkDxhvlQFbGrd5eiG/brvGjS+ibscTuNukLeiCmBrI5KULObynI2dEQVQKREVywU/qX+xm68noEGBbiRt2L2ImyJvgpNdlyCkDyFhBTo/HtH1WHP1WJijfCHM3jxigeLPRV0GChKK1RbYjZIi6JNsalW7yad/qzHDzht+jBHHAjD4qGlfuNtzP4hs3FErGiQMVZ8g9Tgq8SxPLNOULpcCSwsLLlzfrLYdv52IgkwTIAFR9W+xHGrWypCba9pfskXWXlRNM61qYf3//H0BGHxtuNAASkJrVWwcCuOVN6/EcJOTS9qkg3JiWqs79z0F2I14+AfPFgBKQ== david@nebulouslabs.com
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCr3nrSQ+ag5gEm9LBoiw68UKALboot+Eemv0TbP6BPnvB6bnSdDstb7Eu1Dkla8uiyw3w2ZYi5Pg4dS5W8vnxwXvey8gBd3GYLpjtnSp9ukeYjHK0J2aX4PBC4GXvRSRjKxYfHauUqm8PaA4uQ4sBkblfwWDEH94um1yyqIamTabH6mfsYiaiiwTNu7ldZOAIlKR/G7cXlLmFz46An7Mn2wwbuv2Khin/f2bLtUF/smOolI7pjOH6ifhHR9LxotcY/xL+E5jRbU1XxldFvVXkL5CU8tEinE6oigwMH9zsPZr+Z70Q/wm20cylxNJu8qdMGQW+WhDg3S70KpCmjYlWJ6bF1HL3z9UkN0lS1EM21n13RIx1iEO7SEC3YPl8VqZiZS7P9Uf5D5z/vTG+fWouCsCBMSbq3HUcNXlm5MLGSdBWPKzZsUaCkHkQks/sxHVy21YAM/3xgST1a05PbIJU1RsqJ0wh0J2gg7/fBUE0ljFyKZ36mvfg6BNlwCUydAiVaQt1geqh+8/VRwjTw/jtHb8G7QhSNwDNo1BcQPU3LkdKePqgldyP5EYGl9bI4E4sYc2DooeJ22fXpWfuClLB+JcHGuCJf/Hg6si9IeeXKm8PwaBdxIVytRPEeJR+q5uOwzI4XWNgERdGU/UVbgfnrAPMuVPa9Jhyl96U9uUl+Cw== peterjan.brone@gmail.com
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7jGVK5wsmzOCHBXYxu1ihaCbOS3m71QZg/djDAQoZ546XV/TZnCuEEHcm3l5jujqQDKPrKBb8tRd3tTXGqygLUsPvwtUjQsfi4HTQFv0NwadydFZW05d8MI2s/mhJxyxOXedKiXOR6kO5lipvKCf2WVweByyrW47tgENWzzyqtHOSfkoLCVcJWTUqn4s56LBoDop3G79lUQY2IK1GcliFc0XLLis1GiH1k6TD7RWXWVgdG/uatyMJp0FvyEsas/53JaKDmVywki8EMOEsyWVqsj6fnioZsz1NGjuWe77CXsiHbC4EL5rfI5gcOtUH8ss7/fY2uCjm3TBD5dwomhWb karol@nebulous.tech
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDgiq1etF0aD94rG/UVmYEt4ij5K8MvHZwb4wIUi6Ihr david@nebulouslabs.com
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDFO8IxPO3CjfBzm3rAI0sof0BuEBVj6g1UY4hEm9Wn3PXx/iHn96ZP/nSh37X5e5KABCq7ob18T16B4U9JVlARvpozvCCUso28C/Vm44Vt/Q4xoQAYX4eLlRGkPJHhEtA+GhTt4HSE06IZkegAlZ6HVSpSxNiFmSWytIQIa2uTVDel16U+N0PiwQ/9ZS6c/MeC6ZebVEeyEBHNTOL3vkrtFzD/Iupi4QKASK8ejCKEnzCjwoWNyZPUJJLwyUC1ttZOH0cKQid9rcwQDqwM6clnJ5OAAdMkD9GbHs1ItyeC5M1m/KwunmlGSc1eIpIYLvp/0cHrh6/0j8utO3hkqDD3pTWP8TEzw2f5TQVlFHNotcNZimJz8XU8X2k2fHTgyoYKL12HjhokObqBHBUAXol9vCkw0z05U8wVcBemzHrI+6GHnn2pLETshd8Ar8bJ0wQ08+3Agf+KmJuVoHOFdc314AkUX/5QHMrws1/GeS8urLR9FciEiUks8X790LF/sP0= cschinnerl@Christophers-MacBook-Pro.local
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxLuZzjmFN9CgVOI5vaiVhQgMwG9dLQJ688wrsbpHH/ ivaylo@nebulous.tech
|
|
@ -1,4 +1,4 @@
|
|||
# ~/.bashrc: executed by bash(1) for non-login shells.
|
||||
# /home/user/.bashrc: executed by bash(1) for non-login shells.
|
||||
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
|
||||
# for examples
|
||||
|
||||
|
@ -74,7 +74,7 @@ esac
|
|||
|
||||
# enable color support of ls and also add handy aliases
|
||||
if [ -x /usr/bin/dircolors ]; then
|
||||
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
|
||||
test -r /home/user/.dircolors && eval "$(dircolors -b /home/user/.dircolors)" || eval "$(dircolors -b)"
|
||||
alias ls='ls --color=auto'
|
||||
#alias dir='dir --color=auto'
|
||||
#alias vdir='vdir --color=auto'
|
||||
|
@ -94,11 +94,11 @@ fi
|
|||
|
||||
# Alias definitions.
|
||||
# You may want to put all your additions into a separate file like
|
||||
# ~/.bash_aliases, instead of adding them here directly.
|
||||
# /home/user/.bash_aliases, instead of adding them here directly.
|
||||
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
|
||||
|
||||
if [ -f ~/.bash_aliases ]; then
|
||||
. ~/.bash_aliases
|
||||
if [ -f /home/user/.bash_aliases ]; then
|
||||
. /home/user/.bash_aliases
|
||||
fi
|
||||
|
||||
# enable programmable completion features (you don't need to enable
|
||||
|
@ -114,7 +114,7 @@ fi
|
|||
export PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/user/go/bin
|
||||
|
||||
set -o allexport
|
||||
source ~/.sia/sia.env
|
||||
source /home/user/.sia/sia.env
|
||||
set +o allexport
|
||||
|
||||
alias siac-upload="source ~/.sia/source-upload.sh; siac --addr 'localhost:9970'"
|
||||
alias siac-upload="source /home/user/.sia/source-upload.sh; siac --addr 'localhost:9970'"
|
|
@ -0,0 +1,10 @@
|
|||
# siad environment variables
|
||||
SIA_API_PASSWORD=""
|
||||
SIA_DATA_DIR="/home/user/.sia"
|
||||
SIAD_DATA_DIR="/home/user/siad-upload"
|
||||
SIA_WALLET_PASSWORD=""
|
||||
|
||||
# portal specific environment variables
|
||||
API_PORT="9970"
|
||||
PORTAL_NAME="XXXXX | upload"
|
||||
DISCORD_BOT_TOKEN=""
|
|
@ -0,0 +1,10 @@
|
|||
# siad environment variables
|
||||
SIA_API_PASSWORD=""
|
||||
SIA_DATA_DIR="/home/user/.sia"
|
||||
SIAD_DATA_DIR="/home/user/siad"
|
||||
SIA_WALLET_PASSWORD=""
|
||||
|
||||
# portal specific environment variables
|
||||
API_PORT="9980"
|
||||
PORTAL_NAME="XXXXX | download"
|
||||
DISCORD_BOT_TOKEN=""
|
|
@ -5,8 +5,8 @@ Description=siad-upload
|
|||
Type=simple
|
||||
WorkingDirectory=/home/user/siad-upload
|
||||
EnvironmentFile=/home/user/.sia/sia-upload.env
|
||||
ExecStart=/home/user/go/bin/siad --api-addr "localhost:9970" --rpc-addr ":9971" --host-addr ":9972" --siamux-addr ":9973" --siamux-addr-ws ":9974"
|
||||
ExecStop=/home/user/go/bin/siac --addr "localhost:9970" stop
|
||||
ExecStart=/home/user/go/bin/siad --modules cgtwrf --disable-api-security --api-addr :9970 --rpc-addr :9971 --host-addr :9972 --siamux-addr :9973 --siamux-addr-ws :9974
|
||||
ExecStop=/home/user/go/bin/siac --addr :9970 stop
|
||||
Restart=on-failure
|
||||
SyslogIdentifier=siad-upload
|
||||
LimitNOFILE=10000
|
|
@ -5,8 +5,8 @@ Description=siad
|
|||
Type=simple
|
||||
WorkingDirectory=/home/user/siad
|
||||
EnvironmentFile=/home/user/.sia/sia.env
|
||||
ExecStart=/home/user/go/bin/siad
|
||||
ExecStop=/home/user/go/bin/siac stop
|
||||
ExecStart=/home/user/go/bin/siad --modules cgtwrf --disable-api-security --api-addr :9980
|
||||
ExecStop=/home/user/go/bin/siac --addr :9980 stop
|
||||
Restart=on-failure
|
||||
SyslogIdentifier=siad
|
||||
LimitNOFILE=10000
|
|
@ -2,5 +2,5 @@
|
|||
set -e
|
||||
|
||||
set -o allexport
|
||||
source ~/.sia/sia-upload.env
|
||||
source /home/user/.sia/sia-upload.env
|
||||
set +o allexport
|
|
@ -10,7 +10,7 @@ unbind '"'
|
|||
unbind %
|
||||
|
||||
# reload config file (change file location to your the tmux.conf you want to use)
|
||||
bind r source-file ~/.tmux.conf
|
||||
bind r source-file /home/user/.tmux.conf
|
||||
|
||||
set -g visual-activity off
|
||||
set -g mouse on
|