Compare commits


4 Commits

Author          SHA1        Message                        Date
Ivaylo Novakov  ae524bc364  Limit downloads to 3 per IP.   2021-12-08 19:19:57 +01:00
Ivaylo Novakov  26831e7a3e  Limit downloads to 5 per IP.   2021-12-08 19:15:23 +01:00
Ivaylo Novakov  1e16caeaeb  Limit downloads to 10 per IP.  2021-12-08 19:15:00 +01:00
Ivaylo Novakov  9d8d31feff  Limit downloads to 20 per IP.  2021-12-08 19:08:18 +01:00
501 changed files with 38870 additions and 1389 deletions

.github/CODEOWNERS vendored

@@ -1 +0,0 @@
* @kwypchlo @meeh0w

.github/dependabot.yml

@@ -1,6 +1,62 @@
 version: 2
 updates:
+  - package-ecosystem: npm
+    directory: "/packages/dashboard"
+    schedule:
+      interval: weekly
+  - package-ecosystem: npm
+    directory: "/packages/dnslink-api"
+    schedule:
+      interval: weekly
+  - package-ecosystem: npm
+    directory: "/packages/handshake-api"
+    schedule:
+      interval: weekly
+  - package-ecosystem: npm
+    directory: "/packages/health-check"
+    schedule:
+      interval: weekly
+  - package-ecosystem: npm
+    directory: "/packages/website"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/docker/accounts"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/docker/caddy"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/docker/handshake"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/docker/nginx"
+    schedule:
+      interval: weekly
   - package-ecosystem: docker
     directory: "/docker/sia"
     schedule:
-      interval: monthly
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/packages/dashboard"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/packages/dnslink-api"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/packages/handshake-api"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/packages/health-check"
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: "/packages/website"
+    schedule:
+      interval: weekly

.github/workflows/codeql-analysis.yml vendored Normal file

@@ -0,0 +1,71 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ main ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ main ]
  schedule:
    - cron: '32 21 * * 0'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'javascript', 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl
      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language
      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1

.github/workflows/deploy-website.yml vendored Normal file

@@ -0,0 +1,47 @@
name: Deploy website to Skynet

on:
  push:
    branches:
      - master
    paths:
      - "packages/website/**"
  pull_request:
    paths:
      - "packages/website/**"

defaults:
  run:
    working-directory: packages/website

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: 16.x
      - run: yarn
      - run: yarn build
      - name: "Integration tests"
        uses: cypress-io/github-action@v2
        env:
          CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          working-directory: packages/website
          install: false
          record: true
          start: yarn serve
          wait-on: "http://127.0.0.1:9000"
      - name: "Deploy to Skynet"
        uses: skynetlabs/deploy-to-skynet-action@v2
        with:
          upload-dir: packages/website/public
          github-token: ${{ secrets.GITHUB_TOKEN }}
          registry-seed: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && secrets.WEBSITE_REGISTRY_SEED || '' }}
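
The build portion of this workflow can be reproduced locally before pushing (a sketch; it mirrors the workflow steps above, minus the Cypress recording and the deploy action):

```
cd packages/website
yarn
yarn build
yarn serve   # serves the build on http://127.0.0.1:9000, the same URL the workflow waits on
```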

@@ -0,0 +1,32 @@
# Install and run unit tests with busted
# Docs: http://olivinelabs.com/busted/

name: Nginx Lua Unit Tests

on:
  pull_request:
    paths:
      - "docker/nginx/libs/**.lua"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
          architecture: "x64"
      - name: Install Dependencies
        run: |
          pip install hererocks
          hererocks env --lua=5.1 -rlatest
          source env/bin/activate
          luarocks install busted
      - name: Unit Tests
        run: |
          source env/bin/activate
          busted --verbose --pattern=spec --directory=docker/nginx/libs .
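
The same Lua test run can be reproduced locally with the exact commands the workflow uses (assuming Python and pip are available):

```
pip install hererocks
hererocks env --lua=5.1 -rlatest
source env/bin/activate
luarocks install busted
busted --verbose --pattern=spec --directory=docker/nginx/libs .
```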

@@ -1,4 +1,4 @@
-name: Lint - Python Scripts
+name: Python Lint

on:
  push:

@@ -0,0 +1,25 @@
name: Static Code Analysis

on: [pull_request]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        package: [dashboard, dnslink-api, handshake-api, health-check, website]
      fail-fast: false
    defaults:
      run:
        working-directory: packages/${{ matrix.package }}
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: 16.x
      - run: yarn
      - run: yarn prettier --check .

.gitignore vendored

@@ -53,6 +53,7 @@ typings/
 # dotenv environment variable files
 .env*
+./docker/kratos/config/kratos.yml

 # Mac files
 .DS_Store

@@ -67,6 +68,10 @@ yarn-error.log
 # Yarn Integrity file
 .yarn-integrity

+# Cypress
+packages/website/cypress/screenshots
+packages/website/cypress/videos

 # Docker data
 docker/data

@@ -82,10 +87,16 @@ __pycache__
 /.idea/
 /venv*

-# Luacov file
-luacov.stats.out
-luacov.report.out
+# CockroachDB certificates
+docker/cockroach/certs/*.crt
+docker/cockroach/certs/*.key
+docker/kratos/cr_certs/*.crt
+docker/kratos/cr_certs/*.key

+# Oathkeeper JWKS signing token
+docker/kratos/oathkeeper/id_token.jwks.json
+/docker/kratos/config/kratos.yml

 # Setup-script log files
-setup-scripts/serverload.log
-setup-scripts/serverload.json
+/setup-scripts/serverload.log
+/setup-scripts/serverload.json

@@ -10,46 +10,6 @@ Version History

 Latest:

-## Mar 8, 2022:
-### v0.1.4
-**Key Updates**
-- expose generic skylink serving endpoint on domain aliases
-- Add abuse scanner service, activated by adding `u` to `PORTAL_MODULES`
-- Add malware scanner service, activated by adding `s` to `PORTAL_MODULES`
-- Remove ORY Kratos, ORY Oathkeeper, CockroachDB.
-- Add `/serverload` endpoint for CPU usage and free disk space
-**Bugs Fixed**
-- Add missing servers and blocklist command to the manual blocklist script.
-- fixed a bug when accessing a file from a skylink via subdomain with a filename that had escaped characters
-- Fix `blocklist-skylink.sh` script that didn't remove blocked skylinks from
-  nginx cache.
-- fixed uploaded directory name (was "undefined" before)
-- fixed empty directory upload progress (size was not calculated for directories)
-**Other**
-- add new critical health check that scans config and makes sure that all relevant configurations are set
-- Add abuse report configuration
-- Remove hardcoded Airtable default values from blocklist script. Portal
-  operators need to define their own values in portal common config (LastPass).
-- Add health check for the blocker container
-- Drop `Skynet-Requested-Skylink` header
-- Dump disk space usage when health-checker script disables portal due to
-  critical free disk space.
-- Enable the accounting module for skyd
-- Add link to supported setup process in Gitbook.
-- Set `min_free` parameter on the `proxy_cache_path` directive to `100g`
-- Parameterize MongoDB replicaset in `docker-compose.mongodb.yml` via
-  `SKYNET_DB_REPLICASET` from `.env` file.
-- Hot reload Nginx after pruning cache files.
-- Added script to prune nginx cache.
-- Remove hardcoded server list from `blocklist-skylink.sh` so it removes server
-  list duplication and can also be called from Ansible.
-- Remove outdated portal setup documentation and point to developer docs.
-- Block skylinks in batches to improve performance.
-- Add trimming Airtable skylinks from Takedown Request table.
-- Update handshake to use v3.0.1

 ## Oct 18, 2021:
 ### v0.1.3
 **Key Updates**

README.md

@@ -1,12 +1,21 @@
 # Skynet Portal

-## Latest Setup Documentation
+## Web application

-Latest Skynet Webportal setup documentation and the setup process Skynet Labs
-supports is located at https://portal-docs.skynetlabs.com/.
+Change current directory with `cd packages/website`.

-Some scripts and setup documentation contained in this repository
-(`skynet-webportal`) may be outdated and generally should not be used.
+Use `yarn start` to start the development server.
+
+Use `yarn build` to compile the application to `/public` directory.
+
+You can use the below build parameters to customize your web application.
+
+- development example `GATSBY_API_URL=https://siasky.dev yarn start`
+- production example `GATSBY_API_URL=https://siasky.net yarn build`
+
+List of available parameters:
+
+- `GATSBY_API_URL`: override api url (defaults to location origin)

 ## License

@@ -16,3 +25,171 @@ and distribute the software, but you must preserve the payment mechanism in the

For the purposes of complying with our code license, you can use the following Siacoin address:

`fb6c9320bc7e01fbb9cd8d8c3caaa371386928793c736837832e634aaaa484650a3177d6714a`
### MongoDB Setup
Mongo needs a couple of extra steps in order to start a secure cluster.
- Open port 27017 on all nodes that will take part in the cluster. Ideally, you would only open the port for the other
nodes in the cluster.
- Manually add a `mgkey` file under `./docker/data/mongo` with the respective secret (
see [Mongo's keyfile access control](https://docs.mongodb.com/manual/tutorial/enforce-keyfile-access-control-in-existing-replica-set/)
for details).
- Manually run an initialisation `docker run` with extra environment variables that will initialise the admin user with
a password (example below).
- During the initialisation run mentioned above, we need to perform two extra steps within the container (see the
  sketch after this list):
  - Change the ownership of `mgkey` to `mongodb:mongodb`
  - Change its permissions to 400
- After these steps are done we can open a mongo shell on the primary node and run `rs.add()` in order to add the new
  node to the cluster. If you don't know which node is primary, log onto any server, jump into the Mongo container
  (`docker exec -it mongo mongo -u admin -p`) and then get the status of the replica set (`rs.status()`).
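
A minimal sketch of those two keyfile steps, assuming the container is named `mg` and the keyfile is mounted at `/data/mgkey` as in the example runs below:

```
# fix keyfile ownership and permissions inside the running container
docker exec -it mg chown mongodb:mongodb /data/mgkey
docker exec -it mg chmod 400 /data/mgkey
```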
Example initialisation docker run command:
```
docker run \
--rm \
--name mg \
-p 27017:27017 \
-e MONGO_INITDB_ROOT_USERNAME=<admin username> \
-e MONGO_INITDB_ROOT_PASSWORD=<admin password> \
-v /home/user/skynet-webportal/docker/data/mongo/db:/data/db \
-v /home/user/skynet-webportal/docker/data/mongo/mgkey:/data/mgkey \
mongo --keyFile=/data/mgkey --replSet=skynet
```
Regular docker run command:
```
docker run \
--rm \
--name mg \
-p 27017:27017 \
-v /home/user/skynet-webportal/docker/data/mongo/db:/data/db \
-v /home/user/skynet-webportal/docker/data/mongo/mgkey:/data/mgkey \
mongo --keyFile=/data/mgkey --replSet=skynet
```
Cluster initialisation mongo command:
```
rs.initiate(
{
_id : "skynet",
members: [
{ _id : 0, host : "mongo:27017" }
]
}
)
```
Add more nodes when they are ready:
```
rs.add("second.node.net:27017")
```
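
If you are unsure which node is currently primary, a quick check looks like this (assumes the admin user created during the initialisation run):

```
docker exec -it mongo mongo -u admin -p
> rs.status()   # the member with stateStr "PRIMARY" is the one to run rs.add() on
```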
### Kratos & Oathkeeper Setup
[Kratos](https://www.ory.sh/kratos) is our user management system of choice and
[Oathkeeper](https://www.ory.sh/oathkeeper) is the identity and access proxy.
Most of the needed config is already under `docker/kratos`. The only two things that need to be changed are the config
for Kratos, which might contain your email server password, and the JWKS Oathkeeper uses to sign its JWT tokens.

Make sure to create your own `docker/kratos/config/kratos.yml` by copying the `kratos.yml.sample` in the same directory.
Also make sure to never add that file to source control, because it will most probably contain your email password in
plain text!
To override the JWKS you will need to directly edit
`docker/kratos/oathkeeper/id_token.jwks.json` and replace it with your generated key set. If you don't know how to
generate a key set you can use this code:
```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/ory/hydra/jwk"
)

func main() {
	gen := jwk.RS256Generator{
		KeyLength: 2048,
	}
	jwks, err := gen.Generate("", "sig")
	if err != nil {
		log.Fatal(err)
	}
	jsonbuf, err := json.MarshalIndent(jwks, "", " ")
	if err != nil {
		log.Fatalf("failed to generate JSON: %s", err)
	}
	os.Stdout.Write(jsonbuf)
}
```
While you can directly put the output of this programme into the file mentioned above, you can also remove the public
key from the set and change the `kid` of the private key to not include the prefix `private:`.
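
Assuming you saved the snippet above as `main.go` (a hypothetical filename; the original doesn't name it), you could write the key set straight into place:

```
go run main.go > docker/kratos/oathkeeper/id_token.jwks.json
```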
### CockroachDB Setup
Kratos uses CockroachDB to store its data. For that data to be shared across all nodes that comprise your portal cluster
setup, we need to set up a CockroachDB cluster, complete with secure communication.
#### Generate the certificates for secure communication
For a detailed walk-through, please check out [this guide](https://www.cockroachlabs.com/docs/v20.2/secure-a-cluster.html).
Steps:
1. Start a local cockroach docker instance:
`docker run -d -v "<local dir>:/cockroach/cockroach-secure" --name=crdb cockroachdb/cockroach start --insecure`
1. Get a shell into that instance: `docker exec -it crdb /bin/bash`
1. Go to the directory which we mapped to a local dir: `cd /cockroach/cockroach-secure`
1. Create the subdirectories in which to create certificates and keys: `mkdir certs my-safe-directory`
1. Create the CA (Certificate Authority) certificate and key
pair: `cockroach cert create-ca --certs-dir=certs --ca-key=my-safe-directory/ca.key`
1. Create a client certificate and key pair for the root
user: `cockroach cert create-client root --certs-dir=certs --ca-key=my-safe-directory/ca.key`
1. Create the certificate and key pair for your
nodes: `cockroach cert create-node cockroach mynode.siasky.net --certs-dir=certs --ca-key=my-safe-directory/ca.key`.
Don't forget the `cockroach` node name - it's needed by our docker-compose setup. If you want to create certificates
for more nodes, just delete the `node.*` files (after you've finished the next steps for this node!) and re-run the
above command with the new node name.
1. Put the contents of the `certs` folder into `docker/cockroach/certs` under your portal's root dir and store the
   content of `my-safe-directory` somewhere safe.
1. Put _another copy_ of those certificates under `docker/kratos/cr_certs` and change permissions of the `*.key` files,
so they can be read by anyone (644).
#### Configure your CockroachDB node
Open port 26257 on all nodes that will take part in the cluster. Ideally, you would only open the port for the other
nodes in the cluster.
There is some configuration that needs to be added to your `.env` file, namely:

1. `CR_IP` - the public IP of your node
1. `CR_CLUSTER_NODES` - a list of IPs and ports which make up your cluster, e.g.
   `95.216.13.185:26257,147.135.37.21:26257,144.76.136.122:26257`. This will be the list of nodes that will make up your
   cluster, so make sure those are accurate.
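
Put together, the relevant `.env` entries might look like this (the IPs are the example values from above):

```
CR_IP=95.216.13.185
CR_CLUSTER_NODES=95.216.13.185:26257,147.135.37.21:26257,144.76.136.122:26257
```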
## Contributing
### Testing Your Code
Before pushing your code, you should verify that it will pass our online test suite.
**Cypress Tests**
Verify the Cypress test suite by doing the following:
1. In one terminal screen run `GATSBY_API_URL=https://siasky.net website serve`
1. In a second terminal screen run `yarn cypress run`
## Setting up a complete Skynet server
A setup guide with installation scripts can be found in [setup-scripts/README.md](./setup-scripts/README.md).

@@ -1,43 +1,3 @@
-## Mar 8, 2022:
-### v0.1.4
-**Key Updates**
-- expose generic skylink serving endpoint on domain aliases
-- Add abuse scanner service, activated by adding `u` to `PORTAL_MODULES`
-- Add malware scanner service, activated by adding `s` to `PORTAL_MODULES`
-- Remove ORY Kratos, ORY Oathkeeper, CockroachDB.
-- Add `/serverload` endpoint for CPU usage and free disk space
-**Bugs Fixed**
-- Add missing servers and blocklist command to the manual blocklist script.
-- fixed a bug when accessing a file from a skylink via subdomain with a filename that had escaped characters
-- Fix `blocklist-skylink.sh` script that didn't remove blocked skylinks from
-  nginx cache.
-- fixed uploaded directory name (was "undefined" before)
-- fixed empty directory upload progress (size was not calculated for directories)
-**Other**
-- add new critical health check that scans config and makes sure that all relevant configurations are set
-- Add abuse report configuration
-- Remove hardcoded Airtable default values from blocklist script. Portal
-  operators need to define their own values in portal common config (LastPass).
-- Add health check for the blocker container
-- Drop `Skynet-Requested-Skylink` header
-- Dump disk space usage when health-checker script disables portal due to
-  critical free disk space.
-- Enable the accounting module for skyd
-- Add link to supported setup process in Gitbook.
-- Set `min_free` parameter on the `proxy_cache_path` directive to `100g`
-- Parameterize MongoDB replicaset in `docker-compose.mongodb.yml` via
-  `SKYNET_DB_REPLICASET` from `.env` file.
-- Hot reload Nginx after pruning cache files.
-- Added script to prune nginx cache.
-- Remove hardcoded server list from `blocklist-skylink.sh` so it removes server
-  list duplication and can also be called from Ansible.
-- Remove outdated portal setup documentation and point to developer docs.
-- Block skylinks in batches to improve performance.
-- Add trimming Airtable skylinks from Takedown Request table.
-- Update handshake to use v3.0.1

 ## Oct 18, 2021:
 ### v0.1.3
 **Key Updates**

@@ -0,0 +1 @@
- Add missing servers and blocklist command to the manual blocklist script.

@@ -0,0 +1,2 @@
- Fix `blocklist-skylink.sh` script that didn't remove blocked skylinks from
  nginx cache.

@@ -1,2 +0,0 @@
- Fix `dashboard-v2` Dockerfile context in `docker-compose.accounts.yml` to
  avoid Ansible deploy (docker compose build) `permission denied` issues.

@@ -1 +0,0 @@
- Fix missing `logs` dir that is required for backup scripts (cron jobs).

@@ -0,0 +1,2 @@
- fixed uploaded directory name (was "undefined" before)
- fixed empty directory upload progress (size was not calculated for directories)

@@ -0,0 +1 @@
- expose generic skylink serving endpoint on domain aliases

@@ -1 +0,0 @@
- Add Pinner service to the portal stack. Activate it by selecting the 'p' module.

@@ -0,0 +1 @@
- Add `/serverload` endpoint for CPU usage and free disk space

@@ -0,0 +1 @@
- add new critical health check that scans config and makes sure that all relevant configurations are set

@@ -0,0 +1 @@
- Add abuse report configuration

@@ -0,0 +1,2 @@
- Remove hardcoded Airtable default values from blocklist script. Portal
  operators need to define their own values in portal common config (LastPass).

@@ -0,0 +1 @@
- Drop `Skynet-Requested-Skylink` header

@@ -0,0 +1,2 @@
- Dump disk space usage when health-checker script disables portal due to
  critical free disk space.

@@ -0,0 +1 @@
- Enable the accounting module for skyd

@@ -0,0 +1 @@
- Set `min_free` parameter on the `proxy_cache_path` directive to `100g`

@@ -0,0 +1,2 @@
- Parameterize MongoDB replicaset in `docker-compose.mongodb.yml` via
  `SKYNET_DB_REPLICASET` from `.env` file.

@@ -0,0 +1 @@
- Added script to prune nginx cache.

@@ -0,0 +1,2 @@
- Remove hardcoded server list from `blocklist-skylink.sh` so it removes server
  list duplication and can also be called from Ansible.

@@ -0,0 +1 @@
- Add trimming Airtable skylinks from Takedown Request table.

@@ -0,0 +1 @@
- Update handshake to use v3.0.1

dc

@@ -1,63 +1,37 @@
 #!/bin/bash

-# The dc command is an alias to docker-compose which also scans the current portal configuration (as defined in .env)
-# and selects the right docker-compose files to include in the operation. You can use the command in the same way you
-# would use docker-compose with the only difference being that you don't need to specify compose files. For more
-# information you can run `./dc` or `./dc help`.
-
-# get current working directory of this script and prefix all files with it to
-# be able to call this script from anywhere and not only root directory of
-# skynet-webportal project
-cwd="$(dirname -- "$0";)";
-
-# get portal modules configuration from .env file (if defined more than once, the last one is used)
-if [[ -f "${cwd}/.env" ]]; then
-  PORTAL_MODULES=$(grep -e "^PORTAL_MODULES=" ${cwd}/.env | tail -1 | sed "s/PORTAL_MODULES=//")
+if [ -f .env ]; then
+  OLD_IFS=$IFS; IFS=$'\n'; for x in `grep -v '^#.*' .env`; do export $x; done; IFS=$OLD_IFS
 fi

 # include base docker compose file
-COMPOSE_FILES="-f ${cwd}/docker-compose.yml"
+COMPOSE_FILES="-f docker-compose.yml"

 for i in $(seq 1 ${#PORTAL_MODULES}); do
   # accounts module - alias "a"
   if [[ ${PORTAL_MODULES:i-1:1} == "a" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.accounts.yml"
+    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.accounts.yml"
   fi
   # blocker module - alias "b"
   if [[ ${PORTAL_MODULES:i-1:1} == "b" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml"
+    COMPOSE_FILES+=" -f docker-compose.blocker.yml"
   fi
   # jaeger module - alias "j"
   if [[ ${PORTAL_MODULES:i-1:1} == "j" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.jaeger.yml"
+    COMPOSE_FILES+=" -f docker-compose.jaeger.yml"
   fi
-  # malware-scanner module - alias "s"
-  if [[ ${PORTAL_MODULES:i-1:1} == "s" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.malware-scanner.yml"
-  fi
-  # mongodb module - alias "m"
+  # mongodb module - alias "m". implied by "a"
   if [[ ${PORTAL_MODULES:i-1:1} == "m" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml"
+    COMPOSE_FILES+=" -f docker-compose.mongodb.yml"
   fi
-  # abuse-scanner module - alias "u"
-  if [[ ${PORTAL_MODULES:i-1:1} == "u" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.abuse-scanner.yml"
-  fi
-  # pinner module - alias "p"
-  if [[ ${PORTAL_MODULES:i-1:1} == "p" ]]; then
-    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.pinner.yml"
-  fi
 done

 # override file if exists
 if [[ -f docker-compose.override.yml ]]; then
-  COMPOSE_FILES+=" -f ${cwd}/docker-compose.override.yml"
+  COMPOSE_FILES+=" -f docker-compose.override.yml"
 fi

 docker-compose $COMPOSE_FILES $@
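
For example, taking the `+` side of the diff above with `PORTAL_MODULES=aj` in `.env`, `./dc up -d` would expand to roughly:

```
docker-compose -f docker-compose.yml \
  -f docker-compose.mongodb.yml -f docker-compose.accounts.yml \
  -f docker-compose.jaeger.yml \
  up -d
```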

docker-compose.abuse-scanner.yml

@@ -1,41 +0,0 @@
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  abuse-scanner:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/abuse-scanner.git#main
    image: skynetlabs/abuse-scanner:0.4.0
    container_name: abuse-scanner
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    environment:
      - ABUSE_LOG_LEVEL=${ABUSE_LOG_LEVEL}
      - ABUSE_MAILADDRESS=${ABUSE_MAILADDRESS}
      - ABUSE_MAILBOX=${ABUSE_MAILBOX}
      - ABUSE_SPONSOR=${ABUSE_SPONSOR}
      - BLOCKER_HOST=10.10.10.110
      - BLOCKER_PORT=4000
      - EMAIL_SERVER=${EMAIL_SERVER}
      - EMAIL_USERNAME=${EMAIL_USERNAME}
      - EMAIL_PASSWORD=${EMAIL_PASSWORD}
      - SKYNET_DB_HOST=${SKYNET_DB_HOST}
      - SKYNET_DB_PORT=${SKYNET_DB_PORT}
      - SKYNET_DB_USER=${SKYNET_DB_USER}
      - SKYNET_DB_PASS=${SKYNET_DB_PASS}
    networks:
      shared:
        ipv4_address: 10.10.10.120
    depends_on:
      - mongo
      - blocker
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp:/tmp

docker-compose.accounts.yml

@@ -1,4 +1,4 @@
-version: "3.8"
+version: "3.7"

 x-logging: &default-logging
   driver: json-file

@@ -10,42 +10,39 @@ services:
   nginx:
     environment:
       - ACCOUNTS_ENABLED=true
-      - ACCOUNTS_LIMIT_ACCESS=${ACCOUNTS_LIMIT_ACCESS:-authenticated} # default to authenticated access only
     depends_on:
       - accounts

   health-check:
     environment:
       - ACCOUNTS_ENABLED=true
-      - ACCOUNTS_LIMIT_ACCESS=${ACCOUNTS_LIMIT_ACCESS:-authenticated} # default to authenticated access only

   accounts:
-    # uncomment "build" and comment out "image" to build from sources
-    # build: https://github.com/SkynetLabs/skynet-accounts.git#main
-    image: skynetlabs/skynet-accounts:1.3.0
+    build:
+      context: ./docker/accounts
+      dockerfile: Dockerfile
+      args:
+        branch: main
     container_name: accounts
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
     environment:
-      - ACCOUNTS_EMAIL_URI=${ACCOUNTS_EMAIL_URI}
-      - ACCOUNTS_JWKS_FILE=/conf/jwks.json
+      - SKYNET_DB_HOST=${SKYNET_DB_HOST}
+      - SKYNET_DB_PORT=${SKYNET_DB_PORT}
+      - SKYNET_DB_USER=${SKYNET_DB_USER}
+      - SKYNET_DB_PASS=${SKYNET_DB_PASS}
       - COOKIE_DOMAIN=${COOKIE_DOMAIN}
       - COOKIE_HASH_KEY=${COOKIE_HASH_KEY}
       - COOKIE_ENC_KEY=${COOKIE_ENC_KEY}
-      - PORTAL_DOMAIN=${PORTAL_DOMAIN}
-      - SERVER_DOMAIN=${SERVER_DOMAIN}
-      - SKYNET_DB_HOST=${SKYNET_DB_HOST:-mongo}
-      - SKYNET_DB_PORT=${SKYNET_DB_PORT:-27017}
-      - SKYNET_DB_USER=${SKYNET_DB_USER}
-      - SKYNET_DB_PASS=${SKYNET_DB_PASS}
       - STRIPE_API_KEY=${STRIPE_API_KEY}
       - STRIPE_WEBHOOK_SECRET=${STRIPE_WEBHOOK_SECRET}
-      - SKYNET_ACCOUNTS_LOG_LEVEL=${SKYNET_ACCOUNTS_LOG_LEVEL:-info}
+      - SKYNET_ACCOUNTS_LOG_LEVEL=${SKYNET_ACCOUNTS_LOG_LEVEL}
+      - KRATOS_ADDR=${KRATOS_ADDR}
+      - OATHKEEPER_ADDR=${OATHKEEPER_ADDR}
     volumes:
-      - ./docker/data/accounts:/data
-      - ./docker/accounts/conf:/conf
+      - ./docker/accounts/conf:/accounts/conf
     expose:
       - 3000
     networks:

@@ -53,25 +50,107 @@ services:
         ipv4_address: 10.10.10.70
     depends_on:
       - mongo
+      - oathkeeper
+
+  kratos-migrate:
+    image: oryd/kratos:v0.5.5-alpha.1
+    container_name: kratos-migrate
+    restart: "no"
+    logging: *default-logging
+    environment:
+      - DSN=cockroach://root@cockroach:26257/defaultdb?max_conns=20&max_idle_conns=4&sslmode=verify-full&sslcert=/certs/node.crt&sslkey=/certs/node.key&sslrootcert=/certs/ca.crt
+      - SQA_OPT_OUT=true
+    volumes:
+      - ./docker/kratos/config:/etc/config/kratos
+      - ./docker/data/cockroach/sqlite:/var/lib/sqlite
+      - ./docker/kratos/cr_certs:/certs
+    command: -c /etc/config/kratos/kratos.yml migrate sql -e --yes
+    networks:
+      shared:
+        ipv4_address: 10.10.10.80
+    depends_on:
+      - cockroach
+
+  kratos:
+    image: oryd/kratos:v0.5.5-alpha.1
+    container_name: kratos
+    restart: unless-stopped
+    logging: *default-logging
+    expose:
+      - 4433 # public
+      - 4434 # admin
+    environment:
+      - DSN=cockroach://root@cockroach:26257/defaultdb?max_conns=20&max_idle_conns=4&sslmode=verify-full&sslcert=/certs/node.crt&sslkey=/certs/node.key&sslrootcert=/certs/ca.crt
+      - LOG_LEVEL=trace
+      - SERVE_PUBLIC_BASE_URL=${SKYNET_DASHBOARD_URL}/.ory/kratos/public/
+      - SQA_OPT_OUT=true
+    command: serve -c /etc/config/kratos/kratos.yml
+    volumes:
+      - ./docker/kratos/config:/etc/config/kratos
+      - ./docker/data/cockroach/sqlite:/var/lib/sqlite
+      - ./docker/kratos/cr_certs:/certs
+    networks:
+      shared:
+        ipv4_address: 10.10.10.81
+    depends_on:
+      - kratos-migrate

   dashboard:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/webportal-accounts-dashboard.git#main
-    #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-accounts-dashboard:2.1.1
+    build:
+      context: ./packages/dashboard
+      dockerfile: Dockerfile
     container_name: dashboard
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
-    volumes:
-      - ./docker/data/dashboard/.cache:/usr/app/.cache
-      - ./docker/data/dashboard/public:/usr/app/public
+    environment:
+      - NEXT_PUBLIC_SKYNET_PORTAL_API=${SKYNET_PORTAL_API}
+      - NEXT_PUBLIC_SKYNET_DASHBOARD_URL=${SKYNET_DASHBOARD_URL}
+      - NEXT_PUBLIC_KRATOS_BROWSER_URL=${SKYNET_DASHBOARD_URL}/.ory/kratos/public
+      - NEXT_PUBLIC_KRATOS_PUBLIC_URL=http://oathkeeper:4455/.ory/kratos/public
+      - NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=${STRIPE_PUBLISHABLE_KEY}
     networks:
       shared:
         ipv4_address: 10.10.10.85
     expose:
-      - 9000
+      - 3000
     depends_on:
-      - mongo
+      - oathkeeper
+
+  oathkeeper:
+    image: oryd/oathkeeper:v0.38
+    container_name: oathkeeper
+    expose:
+      - 4455
+      - 4456
+    command: serve proxy -c "/etc/config/oathkeeper/oathkeeper.yml"
+    environment:
+      - LOG_LEVEL=debug
+    volumes:
+      - ./docker/kratos/oathkeeper:/etc/config/oathkeeper
+    restart: on-failure
+    logging: *default-logging
+    networks:
+      shared:
+        ipv4_address: 10.10.10.83
+    depends_on:
+      - kratos
+
+  cockroach:
+    image: cockroachdb/cockroach:v20.2.3
+    container_name: cockroach
+    restart: unless-stopped
+    logging: *default-logging
+    env_file:
+      - .env
+    command: start --advertise-addr=${CR_IP} --join=${CR_CLUSTER_NODES} --certs-dir=/certs --listen-addr=0.0.0.0:26257 --http-addr=0.0.0.0:8080
+    volumes:
+      - ./docker/data/cockroach/sqlite:/cockroach/cockroach-data
+      - ./docker/cockroach/certs:/certs
+    ports:
+      - "4080:8080"
+      - "26257:26257"
+    networks:
+      shared:
+        ipv4_address: 10.10.10.84

docker-compose.blocker.yml

@@ -1,4 +1,4 @@
-version: "3.8"
+version: "3.7"

 x-logging: &default-logging
   driver: json-file

@@ -7,27 +7,20 @@ x-logging: &default-logging
   max-file: "3"

 services:
-  health-check:
-    environment:
-      - BLOCKER_HOST=10.10.10.110
-      - BLOCKER_PORT=4000
   blocker:
-    # uncomment "build" and comment out "image" to build from sources
-    # build: https://github.com/SkynetLabs/blocker.git#main
-    image: skynetlabs/blocker:0.1.2
+    build:
+      context: ./docker/blocker
+      dockerfile: Dockerfile
     container_name: blocker
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
-    volumes:
-      - ./docker/data/nginx/blocker:/data/nginx/blocker
     expose:
       - 4000
     networks:
       shared:
-        ipv4_address: 10.10.10.110
+        ipv4_address: 10.10.10.102
     depends_on:
       - mongo
       - sia

docker-compose.jaeger.yml

@@ -1,4 +1,4 @@
-version: "3.8"
+version: "3.7"

 x-logging: &default-logging
   driver: json-file

@@ -10,7 +10,7 @@ services:
   sia:
     environment:
       - JAEGER_DISABLED=${JAEGER_DISABLED:-false} # Enable/Disable tracing
-      - JAEGER_SERVICE_NAME=${SERVER_DOMAIN:-Skyd} # change to e.g. eu-ger-1
+      - JAEGER_SERVICE_NAME=${PORTAL_NAME:-Skyd} # change to e.g. eu-ger-1
       # Configuration
       # See https://github.com/jaegertracing/jaeger-client-go#environment-variables
       # for all options.

@@ -21,7 +21,7 @@
       - JAEGER_REPORTER_LOG_SPANS=false

   jaeger-agent:
-    image: jaegertracing/jaeger-agent:1.38.1
+    image: jaegertracing/jaeger-agent
     command:
       [
         "--reporter.grpc.host-port=jaeger-collector:14250",

@@ -43,7 +43,7 @@
       - jaeger-collector

   jaeger-collector:
-    image: jaegertracing/jaeger-collector:1.38.1
+    image: jaegertracing/jaeger-collector
     entrypoint: /wait_to_start.sh
     container_name: jaeger-collector
     restart: on-failure

@@ -68,7 +68,7 @@
       - elasticsearch

   jaeger-query:
-    image: jaegertracing/jaeger-query:1.38.1
+    image: jaegertracing/jaeger-query
     entrypoint: /wait_to_start.sh
     container_name: jaeger-query
     restart: on-failure

@@ -93,7 +93,7 @@
       - elasticsearch

   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.6
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
     container_name: elasticsearch
     restart: on-failure
     logging: *default-logging

docker-compose.malware-scanner.yml

@@ -1,46 +0,0 @@
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  clamav:
    image: clamav/clamav:stable_base
    container_name: clamav
    restart: on-failure
    logging: *default-logging
    volumes:
      - ./docker/data/clamav/clamav/defs:/var/lib/clamav
      - ./docker/clamav/clamd.conf:/etc/clamav/clamd.conf:ro
    expose:
      - 3310 # NEVER expose this outside of the local network!
    networks:
      shared:
        ipv4_address: 10.10.10.100

  malware-scanner:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/malware-scanner.git#main
    image: skynetlabs/malware-scanner:0.1.0
    container_name: malware-scanner
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    environment:
      - CLAMAV_IP=10.10.10.100
      - CLAMAV_PORT=3310
      - BLOCKER_IP=10.10.10.110
      - BLOCKER_PORT=4000
    expose:
      - 4000
    networks:
      shared:
        ipv4_address: 10.10.10.101
    depends_on:
      - mongo
      - clamav
      - blocker

docker-compose.mongodb.yml

@@ -1,4 +1,4 @@
-version: "3.8"
+version: "3.7"

 x-logging: &default-logging
   driver: json-file

@@ -7,15 +7,9 @@ x-logging: &default-logging
   max-file: "3"

 services:
-  sia:
-    environment:
-      - MONGODB_URI=mongodb://${SKYNET_DB_HOST}:${SKYNET_DB_PORT}
-      - MONGODB_USER=${SKYNET_DB_USER}
-      - MONGODB_PASSWORD=${SKYNET_DB_PASS}
   mongo:
-    image: mongo:4.4.17
-    command: --keyFile=/data/mgkey --replSet=${SKYNET_DB_REPLICASET:-skynet} --setParameter ShardingTaskExecutorPoolMinSize=10
+    image: mongo:4.4.1
+    command: --keyFile=/data/mgkey --replSet=${SKYNET_DB_REPLICASET:-skynet}
     container_name: mongo
     restart: unless-stopped
     logging: *default-logging

docker-compose.pinner.yml

@@ -1,30 +0,0 @@
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  pinner:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/pinner.git#main
    image: skynetlabs/pinner:0.7.8
    container_name: pinner
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    volumes:
      - ./docker/data/pinner/logs:/logs
    environment:
      - PINNER_LOG_LEVEL=${PINNER_LOG_LEVEL:-info}
    expose:
      - 4000
    networks:
      shared:
        ipv4_address: 10.10.10.130
    depends_on:
      - mongo
      - sia

@@ -0,0 +1,12 @@
version: "3.7"

services:
  nginx:
    build:
      context: ./docker/nginx
      dockerfile: Dockerfile.bionic
      args:
        RESTY_ADD_PACKAGE_BUILDDEPS: git
        RESTY_EVAL_PRE_CONFIGURE: git clone https://github.com/fdintino/nginx-upload-module /tmp/nginx-upload-module
        RESTY_CONFIG_OPTIONS_MORE: --add-module=/tmp/nginx-upload-module
        RESTY_EVAL_POST_MAKE: /usr/local/openresty/luajit/bin/luarocks install luasocket

docker-compose.yml

@@ -1,4 +1,4 @@
-version: "3.8"
+version: "3.7"

 x-logging: &default-logging
   driver: json-file

@@ -15,23 +15,20 @@ networks:
 services:
   sia:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/docker-skyd.git#main
-    #   dockerfile: scratch/Dockerfile
-    #   args:
-    #     branch: master
-    image: skynetlabs/skyd:1.6.9
-    command: --disable-api-security --api-addr :9980 --modules gctwra
+    build:
+      context: ./docker/sia
+      dockerfile: Dockerfile
+      args:
+        branch: portal-latest
     container_name: sia
     restart: unless-stopped
-    stop_grace_period: 5m
     logging: *default-logging
     environment:
-      - SKYD_DISK_CACHE_ENABLED=${SKYD_DISK_CACHE_ENABLED:-true}
-      - SKYD_DISK_CACHE_SIZE=${SKYD_DISK_CACHE_SIZE:-53690000000} # 50GB
-      - SKYD_DISK_CACHE_MIN_HITS=${SKYD_DISK_CACHE_MIN_HITS:-3}
-      - SKYD_DISK_CACHE_HIT_PERIOD=${SKYD_DISK_CACHE_HIT_PERIOD:-3600} # 1h
+      - SIA_MODULES=gctwra
+      - MONGODB_URI=mongodb://${SKYNET_DB_HOST}:${SKYNET_DB_PORT}
+      - MONGODB_USER=${SKYNET_DB_USER}
+      - MONGODB_PASSWORD=${SKYNET_DB_PASS}
     env_file:
       - .env
     volumes:

@@ -42,55 +39,38 @@ services:
     expose:
       - 9980

-  certbot:
-    # replace this image with the image supporting your dns provider from
-    # https://hub.docker.com/r/certbot/certbot and adjust CERTBOT_ARGS env variable
-    # note: you will need to authenticate your dns request so consult the plugin docs
-    # configuration https://eff-certbot.readthedocs.io/en/stable/using.html#dns-plugins
-    #
-    # =================================================================================
-    # example docker-compose.yml changes required for Cloudflare dns provider:
-    #
-    # image: certbot/dns-cloudflare
-    # environment:
-    #   - CERTBOT_ARGS=--dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini
-    #
-    # create ./docker/data/certbot/cloudflare.ini file with the following content:
-    # dns_cloudflare_api_token = <api key generated at https://dash.cloudflare.com/profile/api-tokens>
-    #
-    # make sure that the file has 0400 permissions with:
-    # chmod 0400 ./docker/data/certbot/cloudflare.ini
-    image: certbot/dns-route53:v1.31.0
-    entrypoint: sh /entrypoint.sh
-    container_name: certbot
+  caddy:
+    build:
+      context: ./docker/caddy
+      dockerfile: Dockerfile
+    container_name: caddy
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
-    environment:
-      - CERTBOT_ARGS=--dns-route53
     volumes:
-      - ./docker/certbot/entrypoint.sh:/entrypoint.sh
-      - ./docker/data/certbot:/etc/letsencrypt
+      - ./docker/data/caddy/data:/data
+      - ./docker/data/caddy/config:/config
+    networks:
+      shared:
+        ipv4_address: 10.10.10.20

   nginx:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/webportal-nginx.git#main
-    #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-nginx:1.0.0
+    build:
+      context: ./docker/nginx
+      dockerfile: Dockerfile
     container_name: nginx
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
     volumes:
+      - ./docker/nginx/nginx.conf:/usr/local/openresty/nginx/conf/nginx.conf:ro
       - ./docker/data/nginx/cache:/data/nginx/cache
-      - ./docker/data/nginx/blocker:/data/nginx/blocker
       - ./docker/data/nginx/logs:/usr/local/openresty/nginx/logs
       - ./docker/data/nginx/skynet:/data/nginx/skynet:ro
       - ./docker/data/sia/apipassword:/data/sia/apipassword:ro
-      - ./docker/data/certbot:/etc/letsencrypt
+      - ./docker/data/caddy/data:/data/caddy:ro
     networks:
       shared:
         ipv4_address: 10.10.10.30

@@ -99,22 +79,18 @@ services:
       - "80:80"
     depends_on:
       - sia
+      - caddy
       - handshake-api
       - dnslink-api
       - website

   website:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/webportal-website.git#main
-    #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-website:0.2.3
+    build:
+      context: ./packages/website
+      dockerfile: Dockerfile
     container_name: website
     restart: unless-stopped
     logging: *default-logging
-    volumes:
-      - ./docker/data/website/.cache:/usr/app/.cache
-      - ./docker/data/website/.public:/usr/app/public
     env_file:
       - .env
     networks:

@@ -124,11 +100,20 @@ services:
       - 9000

   handshake:
-    image: handshakeorg/hsd:4.0.2
-    command: --chain-migrate=3 --no-wallet --no-auth --compact-tree-on-init --network=main --http-host=0.0.0.0
+    build:
+      context: ./docker/handshake
+      dockerfile: Dockerfile
+    command: --chain-migrate=2 --wallet-migrate=1
     container_name: handshake
     restart: unless-stopped
     logging: *default-logging
+    environment:
+      - HSD_LOG_CONSOLE=false
+      - HSD_HTTP_HOST=0.0.0.0
+      - HSD_NETWORK=main
+      - HSD_PORT=12037
+    env_file:
+      - .env
     volumes:
       - ./docker/data/handshake/.hsd:/root/.hsd
     networks:

@@ -138,11 +123,9 @@ services:
       - 12037

   handshake-api:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/webportal-handshake-api.git#main
-    #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-handshake-api:0.1.3
+    build:
+      context: ./packages/handshake-api
+      dockerfile: Dockerfile
     container_name: handshake-api
     restart: unless-stopped
     logging: *default-logging

@@ -162,11 +145,9 @@ services:
       - handshake

   dnslink-api:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/webportal-dnslink-api.git#main
-    #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-dnslink-api:0.2.1
+    build:
+      context: ./packages/dnslink-api
+      dockerfile: Dockerfile
     container_name: dnslink-api
     restart: unless-stopped
     logging: *default-logging

@@ -177,11 +158,9 @@ services:
       - 3100

   health-check:
-    # uncomment "build" and comment out "image" to build from sources
-    # build:
-    #   context: https://github.com/SkynetLabs/webportal-health-check.git#main
-    #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-health-check:1.0.0
+    build:
+      context: ./packages/health-check
+      dockerfile: Dockerfile
     container_name: health-check
     restart: unless-stopped
     logging: *default-logging

@@ -197,3 +176,5 @@ services:
       - STATE_DIR=/usr/app/state
     expose:
       - 3100
+    depends_on:
+      - caddy

docker/accounts/Dockerfile

@@ -0,0 +1,22 @@
FROM golang:1.16.7

LABEL maintainer="NebulousLabs <devs@nebulous.tech>"

ENV GOOS linux
ENV GOARCH amd64

ARG branch=main

WORKDIR /root

RUN git clone --single-branch --branch ${branch} https://github.com/SkynetLabs/skynet-accounts.git && \
    cd skynet-accounts && \
    go mod download && \
    make release

ENV SKYNET_DB_HOST="localhost"
ENV SKYNET_DB_PORT="27017"
ENV SKYNET_DB_USER="username"
ENV SKYNET_DB_PASS="password"
ENV SKYNET_ACCOUNTS_PORT=3000

ENTRYPOINT ["skynet-accounts"]

docker/blocker/Dockerfile Normal file

@@ -0,0 +1,16 @@
FROM golang:1.16.7

LABEL maintainer="NebulousLabs <devs@nebulous.tech>"

ENV GOOS linux
ENV GOARCH amd64

ARG branch=main

WORKDIR /root

RUN git clone --single-branch --branch ${branch} https://github.com/SkynetLabs/blocker.git && \
    cd blocker && \
    go mod download && \
    make release

ENTRYPOINT ["blocker"]

docker/caddy/Dockerfile Normal file

@@ -0,0 +1,18 @@
FROM caddy:2.4.6-builder AS caddy-builder

# available dns resolvers: https://github.com/caddy-dns
RUN xcaddy build --with github.com/caddy-dns/route53

FROM caddy:2.4.6-alpine

COPY --from=caddy-builder /usr/bin/caddy /usr/bin/caddy

# bash required for mo to work (mo is mustache templating engine - https://github.com/tests-always-included/mo)
RUN apk add --no-cache bash

COPY caddy.json.template mo /etc/caddy/

CMD [ "sh", "-c", \
  "/etc/caddy/mo < /etc/caddy/caddy.json.template > /etc/caddy/caddy.json ; \
  caddy run --config /etc/caddy/caddy.json" \
]

docker/caddy/caddy.json.template

@@ -0,0 +1,38 @@
{
  "apps": {
    "tls": {
      "certificates": {
        "automate": [
          {{#PORTAL_DOMAIN}}
          "{{PORTAL_DOMAIN}}", "*.{{PORTAL_DOMAIN}}", "*.hns.{{PORTAL_DOMAIN}}"
          {{/PORTAL_DOMAIN}}
          {{#PORTAL_DOMAIN}}{{#SERVER_DOMAIN}},{{/SERVER_DOMAIN}}{{/PORTAL_DOMAIN}}
          {{#SERVER_DOMAIN}}
          "{{SERVER_DOMAIN}}", "*.{{SERVER_DOMAIN}}", "*.hns.{{SERVER_DOMAIN}}"
          {{/SERVER_DOMAIN}}
        ]
      },
      "automation": {
        "policies": [
          {
            "issuers": [
              {
                "module": "acme",
                "email": "{{EMAIL_ADDRESS}}",
                "challenges": {
                  "dns": {
                    "provider": {
                      "name": "route53"
                    }
                  }
                }
              }
            ]
          }
        ]
      }
    }
  }
}
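
The container's CMD (in the Dockerfile above) renders this template with `mo`, which substitutes environment variables. The same render can be done by hand for inspection (a sketch; the domain and email values here are hypothetical examples):

```
PORTAL_DOMAIN=example.com EMAIL_ADDRESS=ops@example.com \
  ./docker/caddy/mo < docker/caddy/caddy.json.template > /tmp/caddy.json
```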

docker/caddy/mo Executable file (diff suppressed because it is too large)

docker/certbot/entrypoint.sh

@@ -1,55 +0,0 @@
#!/bin/bash

# Portal domain requires 3 domain certificates:
# - exact portal domain, ie. example.com
# - wildcard subdomain on portal domain, ie. *.example.com
#   used for skylinks served from portal subdomain
# - wildcard subdomain on hns portal domain subdomain, ie. *.hns.example.com
#   used for resolving handshake domains
DOMAINS=${PORTAL_DOMAIN},*.${PORTAL_DOMAIN},*.hns.${PORTAL_DOMAIN}

# Add server domain when it is not empty and different from portal domain
if [ ! -z "${SERVER_DOMAIN}" ] && [ "${PORTAL_DOMAIN}" != "${SERVER_DOMAIN}" ]; then
  # In case where server domain is not covered by portal domain's
  # wildcard certificate, add server domain name to domains list.
  # - server-001.example.com is covered by *.example.com
  # - server-001.servers.example.com or server-001.example-severs.com
  #   are not covered by any already requested wildcard certificates
  #
  # The condition checks whether server domain does not match portal domain
  # with exactly one level of subdomain (portal domain wildcard cert):
  # (start) [anything but the dot] + [dot] + [portal domain] (end)
  if ! printf "${SERVER_DOMAIN}" | grep -q -E "^[^\.]+\.${PORTAL_DOMAIN}$"; then
    DOMAINS=${DOMAINS},${SERVER_DOMAIN}
  fi

  # Server domain requires the same set of domain certificates as portal domain.
  # Exact server domain case is handled above.
  DOMAINS=${DOMAINS},*.${SERVER_DOMAIN},*.hns.${SERVER_DOMAIN}
fi

# The "wait" will prevent an exit from the script while background tasks are
# still active, so we are adding the line below as a method to prevent orphaning
# the background child process. The trap fires when docker terminates the container.
trap exit TERM

while :; do
  # Execute certbot and generate or maintain certificates for given domain string.
  # --non-interactive: we are running this as an automation so we cannot be prompted
  # --agree-tos: required flag marking agreement with letsencrypt tos
  # --cert-name: output directory name
  # --email: required for generating certificates, used for communication with CA
  # --domains: comma separated list of domains (will generate one bundled SAN cert)
  # Use CERTBOT_ARGS env variable to pass any additional arguments, ie --dns-route53
  certbot certonly \
    --non-interactive --agree-tos --cert-name skynet-portal \
    --email ${EMAIL_ADDRESS} --domains ${DOMAINS} ${CERTBOT_ARGS}

  # Run a background sleep process that counts down given time
  # Certbot docs advise running maintenance process every 12 hours
  sleep 12h &

  # Await execution until sleep process is finished (it's a background process)
  # Syntax explanation: ${!} expands to a pid of last ran process
  wait ${!}
done

docker/clamav/clamd.conf

@@ -1,794 +0,0 @@
##
## Example config file for the Clam AV daemon
## Please read the clamd.conf(5) manual before editing this file.
##
# Comment or remove the line below.
# Example
# Uncomment this option to enable logging.
# LogFile must be writable for the user running daemon.
# A full path is required.
# Default: disabled
LogFile /var/log/clamav/clamd.log
# By default the log file is locked for writing - the lock protects against
# running clamd multiple times (if want to run another clamd, please
# copy the configuration file, change the LogFile variable, and run
# the daemon with --config-file option).
# This option disables log file locking.
# Default: no
#LogFileUnlock yes
# Maximum size of the log file.
# Value of 0 disables the limit.
# You may use 'M' or 'm' for megabytes (1M = 1m = 1048576 bytes)
# and 'K' or 'k' for kilobytes (1K = 1k = 1024 bytes). To specify the size
# in bytes just don't use modifiers. If LogFileMaxSize is enabled, log
# rotation (the LogRotate option) will always be enabled.
# Default: 1M
LogFileMaxSize 50M
# Log time with each message.
# Default: no
LogTime yes
# Also log clean files. Useful in debugging but drastically increases the
# log size.
# Default: no
#LogClean yes
# Use system logger (can work together with LogFile).
# Default: no
#LogSyslog yes
# Specify the type of syslog messages - please refer to 'man syslog'
# for facility names.
# Default: LOG_LOCAL6
#LogFacility LOG_MAIL
# Enable verbose logging.
# Default: no
#LogVerbose yes
# Enable log rotation. Always enabled when LogFileMaxSize is enabled.
# Default: no
#LogRotate yes
# Enable Prelude output.
# Default: no
#PreludeEnable yes
#
# Set the name of the analyzer used by prelude-admin.
# Default: ClamAV
#PreludeAnalyzerName ClamAV
# Log additional information about the infected file, such as its
# size and hash, together with the virus name.
#ExtendedDetectionInfo yes
# This option allows you to save a process identifier of the listening
# daemon (main thread).
# This file will be owned by root, as long as clamd was started by root.
# It is recommended that the directory where this file is stored is
# also owned by root to keep other users from tampering with it.
# Default: disabled
PidFile /run/lock/clamd.pid
# Optional path to the global temporary directory.
# Default: system specific (usually /tmp or /var/tmp).
#TemporaryDirectory /var/tmp
# Path to the database directory.
# Default: hardcoded (depends on installation options)
#DatabaseDirectory /var/lib/clamav
# Only load the official signatures published by the ClamAV project.
# Default: no
#OfficialDatabaseOnly no
# The daemon can work in local mode, network mode or both.
# Due to security reasons we recommend the local mode.
# Path to a local socket file the daemon will listen on.
# Default: disabled (must be specified by a user)
LocalSocket /run/clamav/clamd.sock
# Sets the group ownership on the unix socket.
# Default: disabled (the primary group of the user running clamd)
#LocalSocketGroup virusgroup
# Sets the permissions on the unix socket to the specified mode.
# Default: disabled (socket is world accessible)
#LocalSocketMode 660
# Remove stale socket after unclean shutdown.
# Default: yes
#FixStaleSocket yes
# TCP port address.
# Default: no
TCPSocket 3310
# TCP address.
# By default we bind to INADDR_ANY, probably not wise.
# Enable the following to provide some degree of protection
# from the outside world. This option can be specified multiple
# times if you want to listen on multiple IPs. IPv6 is now supported.
# Default: no
TCPAddr 0.0.0.0
# Maximum length the queue of pending connections may grow to.
# Default: 200
#MaxConnectionQueueLength 30
# Clamd uses FTP-like protocol to receive data from remote clients.
# If you are using clamav-milter to balance load between remote clamd daemons
# on firewall servers you may need to tune the options below.
# Close the connection when the data size limit is exceeded.
# The value should match your MTA's limit for a maximum attachment size.
# Default: 25M
StreamMaxLength 100M
# Limit port range.
# Default: 1024
#StreamMinPort 30000
# Default: 2048
#StreamMaxPort 32000
# Maximum number of threads running at the same time.
# Default: 10
#MaxThreads 20
# Waiting for data from a client socket will timeout after this time (seconds).
# Default: 120
#ReadTimeout 300
# This option specifies the time (in seconds) after which clamd should
# timeout if a client doesn't provide any initial command after connecting.
# Default: 30
#CommandReadTimeout 30
# This option specifies how long to wait (in milliseconds) if the send buffer
# is full.
# Keep this value low to prevent clamd hanging.
#
# Default: 500
#SendBufTimeout 200
# Maximum number of queued items (including those being processed by
# MaxThreads threads).
# It is recommended to have this value at least twice MaxThreads if possible.
# WARNING: you shouldn't increase this too much to avoid running out of file
# descriptors, the following condition should hold:
# MaxThreads*MaxRecursion + (MaxQueue - MaxThreads) + 6< RLIMIT_NOFILE (usual
# max is 1024).
#
# Default: 100
#MaxQueue 200
# Waiting for a new job will timeout after this time (seconds).
# Default: 30
#IdleTimeout 60
# Don't scan files and directories matching regex
# This directive can be used multiple times
# Default: scan all
#ExcludePath ^/proc/
#ExcludePath ^/sys/
# Maximum depth directories are scanned at.
# Default: 15
#MaxDirectoryRecursion 20
# Follow directory symlinks.
# Default: no
#FollowDirectorySymlinks yes
# Follow regular file symlinks.
# Default: no
#FollowFileSymlinks yes
# Scan files and directories on other filesystems.
# Default: yes
#CrossFilesystems yes
# Perform a database check.
# Default: 600 (10 min)
#SelfCheck 600
# Enable non-blocking (multi-threaded/concurrent) database reloads.
# This feature will temporarily load a second scanning engine while scanning
# continues using the first engine. Once loaded, the new engine takes over.
# The old engine is removed as soon as all scans using the old engine have
# completed.
# This feature requires more RAM, so this option is provided in case users are
# willing to block scans during reload in exchange for lower RAM requirements.
# Default: yes
ConcurrentDatabaseReload no
# Execute a command when virus is found. In the command string %v will
# be replaced with the virus name and %f will be replaced with the file name.
# Additionally, two environment variables will be defined: $CLAM_VIRUSEVENT_FILENAME
# and $CLAM_VIRUSEVENT_VIRUSNAME.
# Default: no
#VirusEvent /usr/local/bin/send_sms 123456789 "VIRUS ALERT: %v in %f"
# Run as another user (clamd must be started by root for this option to work)
# Default: don't drop privileges
User clamav
# Stop daemon when libclamav reports out of memory condition.
#ExitOnOOM yes
# Don't fork into background.
# Default: no
#Foreground yes
# Enable debug messages in libclamav.
# Default: no
#Debug yes
# Do not remove temporary files (for debug purposes).
# Default: no
#LeaveTemporaryFiles yes
# Permit use of the ALLMATCHSCAN command. If set to no, clamd will reject
# any ALLMATCHSCAN command as invalid.
# Default: yes
#AllowAllMatchScan no
# Detect Possibly Unwanted Applications.
# Default: no
#DetectPUA yes
# Exclude a specific PUA category. This directive can be used multiple times.
# See https://github.com/vrtadmin/clamav-faq/blob/master/faq/faq-pua.md for
# the complete list of PUA categories.
# Default: Load all categories (if DetectPUA is activated)
#ExcludePUA NetTool
#ExcludePUA PWTool
# Only include a specific PUA category. This directive can be used multiple
# times.
# Default: Load all categories (if DetectPUA is activated)
#IncludePUA Spy
#IncludePUA Scanner
#IncludePUA RAT
# This option causes memory or nested map scans to dump the content to disk.
# If you turn on this option, more data is written to disk and is available
# when the LeaveTemporaryFiles option is enabled.
#ForceToDisk yes
# This option allows you to disable the caching feature of the engine. By
# default, the engine will store an MD5 in a cache of any files that are
# not flagged as virus or that hit limits checks. Disabling the cache will
# have a negative performance impact on large scans.
# Default: no
#DisableCache yes
# In some cases (eg. complex malware, exploits in graphic files, and others),
# ClamAV uses special algorithms to detect abnormal patterns and behaviors that
# may be malicious. This option enables alerting on such heuristically
# detected potential threats.
# Default: yes
#HeuristicAlerts yes
# Allow heuristic alerts to take precedence.
# When enabled, if a heuristic scan (such as phishingScan) detects
# a possible virus/phish, it will stop the scan immediately. Recommended, as it
# saves CPU scan-time.
# When disabled, virus/phish detected by heuristic scans will be reported only
# at the end of a scan. If an archive contains both a heuristically detected
# virus/phish and real malware, the real malware will be reported.
#
# Keep this disabled if you intend to handle "Heuristics.*" viruses
# differently from "real" malware.
# If a non-heuristically-detected virus (signature-based) is found first,
# the scan is interrupted immediately, regardless of this config option.
#
# Default: no
#HeuristicScanPrecedence yes
##
## Heuristic Alerts
##
# With this option clamav will try to detect broken executables (both PE and
# ELF) and alert on them with the Broken.Executable heuristic signature.
# Default: no
#AlertBrokenExecutables yes
# With this option clamav will try to detect broken media files (JPEG,
# TIFF, PNG, GIF) and alert on them with a Broken.Media heuristic signature.
# Default: no
#AlertBrokenMedia yes
# Alert on encrypted archives _and_ documents with heuristic signature
# (encrypted .zip, .7zip, .rar, .pdf).
# Default: no
#AlertEncrypted yes
# Alert on encrypted archives with heuristic signature (encrypted .zip, .7zip,
# .rar).
# Default: no
#AlertEncryptedArchive yes
# Alert on encrypted documents with heuristic signature (encrypted .pdf).
# Default: no
#AlertEncryptedDoc yes
# With this option enabled, OLE2 files containing VBA macros that were not
# detected by signatures will be marked as "Heuristics.OLE2.ContainsMacros".
# Default: no
#AlertOLE2Macros yes
# Alert on SSL mismatches in URLs, even if the URL isn't in the database.
# This can lead to false positives.
# Default: no
#AlertPhishingSSLMismatch yes
# Alert on cloaked URLs, even if URL isn't in database.
# This can lead to false positives.
# Default: no
#AlertPhishingCloak yes
# Alert on raw DMG image files containing partition intersections
# Default: no
#AlertPartitionIntersection yes
##
## Executable files
##
# PE stands for Portable Executable - it's an executable file format used
# in all 32 and 64-bit versions of Windows operating systems. This option
# allows ClamAV to perform a deeper analysis of executable files and it's also
# required for decompression of popular executable packers such as UPX, FSG,
# and Petite. If you turn off this option, the original files will still be
# scanned, but without additional processing.
# Default: yes
#ScanPE yes
# Certain PE files contain an authenticode signature. By default, we check
# the signature chain in the PE file against a database of trusted and
# revoked certificates if the file being scanned is marked as a virus.
# If any certificate in the chain validates against any trusted root, but
# does not match any revoked certificate, the file is marked as trusted.
# If the file does match a revoked certificate, the file is marked as virus.
# The following setting completely turns off authenticode verification.
# Default: no
#DisableCertCheck yes
# Executable and Linkable Format (ELF) is a standard format for UN*X executables.
# This option allows you to control the scanning of ELF files.
# If you turn off this option, the original files will still be scanned, but
# without additional processing.
# Default: yes
#ScanELF yes
##
## Documents
##
# This option enables scanning of OLE2 files, such as Microsoft Office
# documents and .msi files.
# If you turn off this option, the original files will still be scanned, but
# without additional processing.
# Default: yes
#ScanOLE2 yes
# This option enables scanning within PDF files.
# If you turn off this option, the original files will still be scanned, but
# without decoding and additional processing.
# Default: yes
#ScanPDF yes
# This option enables scanning within SWF files.
# If you turn off this option, the original files will still be scanned, but
# without decoding and additional processing.
# Default: yes
#ScanSWF yes
# This option enables scanning xml-based document files supported by libclamav.
# If you turn off this option, the original files will still be scanned, but
# without additional processing.
# Default: yes
#ScanXMLDOCS yes
# This option enables scanning of HWP3 files.
# If you turn off this option, the original files will still be scanned, but
# without additional processing.
# Default: yes
#ScanHWP3 yes
##
## Mail files
##
# Enable internal e-mail scanner.
# If you turn off this option, the original files will still be scanned, but
# without parsing individual messages/attachments.
# Default: yes
#ScanMail yes
# Scan RFC1341 messages split over many emails.
# You will need to periodically clean up $TemporaryDirectory/clamav-partial
# directory.
# WARNING: This option may open your system to a DoS attack.
# Never use it on loaded servers.
# Default: no
#ScanPartialMessages yes
# With this option enabled ClamAV will try to detect phishing attempts by using
# HTML.Phishing and Email.Phishing NDB signatures.
# Default: yes
#PhishingSignatures no
# With this option enabled ClamAV will try to detect phishing attempts by
# analyzing URLs found in emails using WDB and PDB signature databases.
# Default: yes
#PhishingScanURLs no
##
## Data Loss Prevention (DLP)
##
# Enable the DLP module
# Default: No
#StructuredDataDetection yes
# This option sets the lowest number of Credit Card numbers found in a file
# to generate a detect.
# Default: 3
#StructuredMinCreditCardCount 5
# With this option enabled the DLP module will search for valid Credit Card
# numbers only. Debit and Private Label cards will not be searched.
# Default: no
#StructuredCCOnly yes
# This option sets the lowest number of Social Security Numbers found
# in a file to generate a detect.
# Default: 3
#StructuredMinSSNCount 5
# With this option enabled the DLP module will search for valid
# SSNs formatted as xxx-yy-zzzz
# Default: yes
#StructuredSSNFormatNormal yes
# With this option enabled the DLP module will search for valid
# SSNs formatted as xxxyyzzzz
# Default: no
#StructuredSSNFormatStripped yes
##
## HTML
##
# Perform HTML normalisation and decryption of MS Script Encoder code.
# Default: yes
# If you turn off this option, the original files will still be scanned, but
# without additional processing.
#ScanHTML yes
##
## Archives
##
# ClamAV can scan within archives and compressed files.
# If you turn off this option, the original files will still be scanned, but
# without unpacking and additional processing.
# Default: yes
#ScanArchive yes
##
## Limits
##
# The options below protect your system against Denial of Service attacks
# using archive bombs.
# This option sets the maximum amount of time a scan may take.
# In this version, this field only affects the scan time of ZIP archives.
# Value of 0 disables the limit.
# Note: disabling this limit or setting it too high may allow scanning of
# certain files to lock up the scanning process/threads, resulting in a
# Denial of Service.
# Time is in milliseconds.
# Default: 120000
MaxScanTime 300000
# This option sets the maximum amount of data to be scanned for each input
# file. Archives and other containers are recursively extracted and scanned
# up to this value.
# Value of 0 disables the limit
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 100M
MaxScanSize 1024M
# Files larger than this limit won't be scanned. Affects the input file itself
# as well as files contained inside it (when the input file is an archive, a
# document or some other kind of container).
# Value of 0 disables the limit.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Technical design limitations prevent ClamAV from scanning files greater than
# 2 GB at this time.
# Default: 25M
MaxFileSize 1024M
# Nested archives are scanned recursively, e.g. if a Zip archive contains a RAR
# file, all files within it will also be scanned. This option specifies how
# deeply the process should be continued.
# Note: setting this limit too high may result in severe damage to the system.
# Default: 17
#MaxRecursion 10
# Number of files to be scanned within an archive, a document, or any other
# container file.
# Value of 0 disables the limit.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 10000
#MaxFiles 15000
# Maximum size of a file to check for embedded PE. Files larger than this value
# will skip the additional analysis step.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 10M
#MaxEmbeddedPE 10M
# Maximum size of a HTML file to normalize. HTML files larger than this value
# will not be normalized or scanned.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 10M
#MaxHTMLNormalize 10M
# Maximum size of a normalized HTML file to scan. HTML files larger than this
# value after normalization will not be scanned.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 2M
#MaxHTMLNoTags 2M
# Maximum size of a script file to normalize. Script content larger than this
# value will not be normalized or scanned.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 5M
#MaxScriptNormalize 5M
# Maximum size of a ZIP file to reanalyze type recognition. ZIP files larger
# than this value will skip the step to potentially reanalyze as PE.
# Note: disabling this limit or setting it too high may result in severe damage
# to the system.
# Default: 1M
#MaxZipTypeRcg 1M
# This option sets the maximum number of partitions of a raw disk image to be
# scanned.
# Raw disk images with more partitions than this value will have up to
# this number of partitions scanned. Negative values are not allowed.
# Note: setting this limit too high may result in severe damage or impact
# performance.
# Default: 50
#MaxPartitions 128
# This option sets the maximum number of icons within a PE to be scanned.
# PE files with more icons than this value will have up to this number of
# icons scanned.
# Negative values are not allowed.
# WARNING: setting this limit too high may result in severe damage or impact
# performance.
# Default: 100
#MaxIconsPE 200
# This option sets the maximum recursive calls for HWP3 parsing during
# scanning. Parsing of HWP3 files that exceed this limit will be terminated
# and the user alerted.
# The scanner will be unable to scan any HWP3 attachments if the recursion
# limit is reached.
# Negative values are not allowed.
# WARNING: setting this limit too high may result in severe damage or impact
# performance.
# Default: 16
#MaxRecHWP3 16
# This option sets the maximum calls to the PCRE match function during
# an instance of regex matching.
# Instances exceeding this limit will be terminated and the user alerted,
# but the scan will continue.
# For more information on match_limit, see the PCRE documentation.
# Negative values are not allowed.
# WARNING: setting this limit too high may severely impact performance.
# Default: 100000
#PCREMatchLimit 20000
# This option sets the maximum recursive calls to the PCRE match function
# during an instance of regex matching.
# Instances exceeding this limit will be terminated and the user alerted,
# but the scan will continue.
# For more information on match_limit_recursion, see the PCRE documentation.
# Negative values are not allowed and values > PCREMatchLimit are superfluous.
# WARNING: setting this limit too high may severely impact performance.
# Default: 2000
#PCRERecMatchLimit 10000
# This option sets the maximum filesize for which PCRE subsigs will be
# executed. Files exceeding this limit will not have PCRE subsigs executed
# unless a subsig is encompassed to a smaller buffer.
# Negative values are not allowed.
# Setting this value to zero disables the limit.
# WARNING: setting this limit too high or disabling it may severely impact
# performance.
# Default: 25M
#PCREMaxFileSize 100M
# When AlertExceedsMax is set, files exceeding the MaxFileSize, MaxScanSize, or
# MaxRecursion limit will be flagged with the virus name starting with
# "Heuristics.Limits.Exceeded".
# Default: no
#AlertExceedsMax yes
##
## On-access Scan Settings
##
# Don't scan files larger than OnAccessMaxFileSize
# Value of 0 disables the limit.
# Default: 5M
#OnAccessMaxFileSize 10M
# Max number of scanning threads to allocate to the OnAccess thread pool at
# startup. These threads are the ones responsible for creating a connection
# with the daemon and kicking off scanning after an event has been processed.
# To prevent clamonacc from consuming all clamd's resources keep this lower
# than clamd's max threads.
# Default: 5
#OnAccessMaxThreads 10
# Max amount of time (in milliseconds) that the OnAccess client should spend
# for every connect, send, and receive attempt when communicating with clamd
# via curl.
# Default: 5000 (5 seconds)
# OnAccessCurlTimeout 10000
# Toggles dynamic directory determination. Allows for recursively watching
# include paths.
# Default: no
#OnAccessDisableDDD yes
# Set the include paths (all files inside them will be scanned). You can have
# multiple OnAccessIncludePath directives but each directory must be added
# in a separate line.
# Default: disabled
#OnAccessIncludePath /home
#OnAccessIncludePath /students
# Set the exclude paths. All subdirectories are also excluded.
# Default: disabled
#OnAccessExcludePath /home/user
# Modifies fanotify blocking behaviour when handling permission events.
# If off, fanotify will only notify if the file scanned is a virus,
# and not perform any blocking.
# Default: no
#OnAccessPrevention yes
# When using prevention, if this option is turned on, any errors that occur
# during scanning will result in the event attempt being denied. This could
# potentially lead to unwanted system behaviour with certain configurations,
# so the client defaults this to off and prefers allowing access events in
# case of scan or connection error.
# Default: no
#OnAccessDenyOnError yes
# Toggles extra scanning and notifications when a file or directory is
# created or moved.
# Requires the DDD system to kick-off extra scans.
# Default: no
#OnAccessExtraScanning yes
# Set the mount point to be scanned. The mount point specified, or the mount
# point containing the specified directory will be watched. If any directories
# are specified, this option will preempt (disable and ignore all options
# related to) the DDD system. This option will result in verdicts only.
# Note that prevention is explicitly disallowed to prevent common, fatal
# misconfigurations. (e.g. watching "/" with prevention on and no exclusions
# made on vital system directories)
# It can be used multiple times.
# Default: disabled
#OnAccessMountPath /
#OnAccessMountPath /home/user
# With this option you can exclude the root UID (0). Processes run under
# root will be able to access all files without triggering scans or
# permission denied events.
# Note that if clamd cannot check the uid of the process that generated an
# on-access scan event (e.g., because OnAccessPrevention was not enabled, and
# the process already exited), clamd will perform a scan. Thus, setting
# OnAccessExcludeRootUID is not *guaranteed* to prevent every access by the
# root user from triggering a scan (unless OnAccessPrevention is enabled).
# Default: no
#OnAccessExcludeRootUID no
# With this option you can exclude specific UIDs. Processes with these UIDs
# will be able to access all files without triggering scans or permission
# denied events.
# This option can be used multiple times (one per line).
# Using a value of 0 on any line will disable this option entirely.
# To exclude the root UID (0) please enable the OnAccessExcludeRootUID
# option.
# Also note that if clamd cannot check the uid of the process that generated an
# on-access scan event (e.g., because OnAccessPrevention was not enabled, and
# the process already exited), clamd will perform a scan. Thus, setting
# OnAccessExcludeUID is not *guaranteed* to prevent every access by the
# specified uid from triggering a scan (unless OnAccessPrevention is enabled).
# Default: disabled
#OnAccessExcludeUID -1
# This option allows exclusions via user names when using the on-access
# scanning client. It can be used multiple times.
# It has the same potential race condition limitations of the
# OnAccessExcludeUID option.
# Default: disabled
#OnAccessExcludeUname clamav
# Number of times the OnAccess client will retry a failed scan due to
# connection problems (or other issues).
# Default: 0
#OnAccessRetryAttempts 3
##
## Bytecode
##
# With this option enabled ClamAV will load bytecode from the database.
# It is highly recommended you keep this option on, otherwise you'll miss
# detections for many new viruses.
# Default: yes
#Bytecode yes
# Set bytecode security level.
# Possible values:
# None - No security at all, meant for debugging.
# DO NOT USE THIS ON PRODUCTION SYSTEMS.
# This value is only available if clamav was built
# with --enable-debug!
# TrustSigned - Trust bytecode loaded from signed .c[lv]d files, insert
# runtime safety checks for bytecode loaded from other sources.
# Paranoid - Don't trust any bytecode, insert runtime checks for all.
# Recommended: TrustSigned, because bytecode in .cvd files already has these
# checks.
# Note that by default only signed bytecode is loaded, currently you can only
# load unsigned bytecode in --enable-debug mode.
#
# Default: TrustSigned
#BytecodeSecurity TrustSigned
# Allow loading bytecode from outside digitally signed .c[lv]d files.
# **Caution**: You should NEVER run bytecode signatures from untrusted sources.
# Doing so may result in arbitrary code execution.
# Default: no
#BytecodeUnsigned yes
# Set bytecode timeout in milliseconds.
#
# Default: 5000
# BytecodeTimeout 1000


@ -0,0 +1,2 @@
This directory needs to contain all certificates needed by this cockroachdb node. Those can be generated by the steps
outlined in the README in the root directory, under "Setting up CockroachDB".
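Assuming the README follows the standard CockroachDB secure-setup flow, the generation is roughly
(directory and host names below are placeholders, not taken from this repo):

    cockroach cert create-ca --certs-dir=certs --ca-key=safe-dir/ca.key
    cockroach cert create-node <node-host> localhost --certs-dir=certs --ca-key=safe-dir/ca.key
    cockroach cert create-client root --certs-dir=certs --ca-key=safe-dir/ca.key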


@ -0,0 +1,12 @@
FROM node:16.13.0-alpine
WORKDIR /opt/hsd
RUN apk update && apk add bash unbound-dev gmp-dev g++ gcc make python2 git
RUN git clone https://github.com/handshake-org/hsd.git /opt/hsd && \
cd /opt/hsd && git checkout v3.0.1 && cd -
RUN npm install --production
ENV PATH="${PATH}:/opt/hsd/bin:/opt/hsd/node_modules/.bin"
ENTRYPOINT ["hsd"]


@ -0,0 +1,31 @@
{
"$id": "https://schemas.ory.sh/presets/kratos/quickstart/email-password/identity.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Person",
"type": "object",
"properties": {
"traits": {
"type": "object",
"properties": {
"email": {
"type": "string",
"format": "email",
"title": "E-Mail",
"minLength": 3,
"ory.sh/kratos": {
"credentials": {
"password": {
"identifier": true
}
},
"recovery": {
"via": "email"
}
}
}
},
"required": ["email"],
"additionalProperties": true
}
}
}


@ -0,0 +1,86 @@
version: v0.5.5-alpha.1
dsn: memory
serve:
public:
base_url: http://127.0.0.1/
cors:
enabled: true
admin:
base_url: http://127.0.0.1/admin/
selfservice:
default_browser_return_url: http://127.0.0.1/
whitelisted_return_urls:
- http://127.0.0.1/
methods:
password:
enabled: true
flows:
error:
ui_url: http://127.0.0.1/error
settings:
ui_url: http://127.0.0.1/settings
privileged_session_max_age: 15m
recovery:
enabled: true
ui_url: http://127.0.0.1/recovery
verification:
enabled: true
ui_url: http://127.0.0.1/verify
after:
default_browser_return_url: http://127.0.0.1/
logout:
after:
default_browser_return_url: http://127.0.0.1/auth/login
login:
ui_url: http://127.0.0.1/auth/login
lifespan: 10m
registration:
lifespan: 10m
ui_url: http://127.0.0.1/auth/registration
after:
password:
hooks:
- hook: session
log:
level: debug
format: text
leak_sensitive_values: true
password:
max_breaches: 100
secrets:
cookie:
- PLEASE-CHANGE-ME-I-AM-VERY-INSECURE
session:
cookie:
domain: account.siasky.net
lifespan: "720h"
hashers:
argon2:
parallelism: 1
memory: 131072
iterations: 2
salt_length: 16
key_length: 16
identity:
default_schema_url: file:///etc/config/kratos/identity.schema.json
courier:
smtp:
connection_uri: smtps://test:test@mailslurper:1025/?skip_ssl_verify=true


@ -0,0 +1,37 @@
{
"$id": "https://schemas.ory.sh/presets/kratos/quickstart/email-password/identity.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Person",
"type": "object",
"properties": {
"traits": {
"type": "object",
"properties": {
"email": {
"type": "string",
"format": "email",
"title": "E-Mail",
"minLength": 3,
"ory.sh/kratos": {
"credentials": {
"password": {
"identifier": true
}
},
"verification": {
"via": "email"
},
"recovery": {
"via": "email"
}
}
},
"website": {
"type": "object"
}
},
"required": ["website", "email"],
"additionalProperties": false
}
}
}


@ -0,0 +1,17 @@
local claims = {
email_verified: false
} + std.extVar('claims');
{
identity: {
traits: {
// Allowing unverified email addresses enables account
// enumeration attacks, especially if the value is used for
// e.g. verification or as a password login identifier.
//
// Therefore we only return the email if it (a) exists and (b) is marked verified
// by GitHub.
[if "email" in claims && claims.email_verified then "email" else null]: claims.email,
},
},
}


@ -0,0 +1,7 @@
This directory needs to contain all certificates needed by this cockroachdb node. Those can be generated by the steps
outlined in the README in the root directory, under "Setting up CockroachDB".
The only difference between the files here and those under
`docker/cockroach/certs` is that the files here need to be readable by anyone, while the files under `cockroach` need to
have their original access rights
(all *.key files should be 600 instead of 644 there).
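A hedged sketch of preparing this directory from the generated certificates (the relative path is an
assumption; run from this directory):

    cp ../cockroach/certs/* .
    chmod 644 *.key   # the originals under docker/cockroach/certs keep mode 600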


@ -0,0 +1,116 @@
- id: "ory:kratos:public"
upstream:
preserve_host: true
url: "http://kratos:4433"
strip_path: /.ory/kratos/public
match:
url: "http://oathkeeper:4455/.ory/kratos/public/<**>"
methods:
- GET
- POST
- PUT
- DELETE
- PATCH
authenticators:
- handler: noop
authorizer:
handler: allow
mutators:
- handler: noop
- id: "dashboard:anonymous"
upstream:
preserve_host: true
url: "http://dashboard:3000"
match:
url: "http://oathkeeper:4455/<{_next/**,auth/**,recovery,verify,error,favicon.ico}{/,}>"
methods:
- GET
authenticators:
- handler: anonymous
authorizer:
handler: allow
mutators:
- handler: noop
- id: "dashboard:protected"
upstream:
preserve_host: true
url: "http://dashboard:3000"
match:
url: "http://oathkeeper:4455/<{,api/**,settings,uploads,downloads,payments}>"
methods:
- GET
- POST
- PUT
- DELETE
- PATCH
authenticators:
- handler: cookie_session
authorizer:
handler: allow
mutators:
- handler: id_token
- handler: header
config:
headers:
X-User: "{{ print .Subject }}"
errors:
- handler: redirect
config:
to: http://127.0.0.1/auth/login
- id: "accounts:anonymous"
upstream:
preserve_host: true
url: "http://accounts:3000"
match:
url: "http://oathkeeper<{,:4455}>/<{health,stripe/prices,stripe/webhook}>"
methods:
- GET
- POST
authenticators:
- handler: anonymous
authorizer:
handler: allow
mutators:
- handler: noop
- id: "accounts:public"
upstream:
preserve_host: true
url: "http://accounts:3000"
match:
url: "http://oathkeeper<{,:4455}>/<{user/limits}>"
methods:
- GET
authenticators:
- handler: cookie_session
- handler: noop
authorizer:
handler: allow
mutators:
- handler: id_token
- id: "accounts:protected"
upstream:
preserve_host: true
url: "http://accounts:3000"
match:
url: "http://oathkeeper<{,:4455}>/<{login,logout,user,user/uploads,user/uploads/*,user/downloads,user/stats}>"
methods:
- GET
- POST
- PUT
- DELETE
- PATCH
authenticators:
- handler: cookie_session
authorizer:
handler: allow
mutators:
- handler: id_token
errors:
- handler: redirect
config:
to: http://127.0.0.1/auth/login


@ -0,0 +1,94 @@
log:
level: debug
format: json
serve:
proxy:
cors:
enabled: true
allowed_origins:
- "*"
allowed_methods:
- POST
- GET
- PUT
- PATCH
- DELETE
allowed_headers:
- Authorization
- Content-Type
exposed_headers:
- Content-Type
allow_credentials: true
debug: true
errors:
fallback:
- json
handlers:
redirect:
enabled: true
config:
to: http://127.0.0.1/auth/login
when:
- error:
- unauthorized
- forbidden
request:
header:
accept:
- text/html
json:
enabled: true
config:
verbose: true
access_rules:
matching_strategy: glob
repositories:
- file:///etc/config/oathkeeper/access-rules.yml
authenticators:
anonymous:
enabled: true
config:
subject: guest
cookie_session:
enabled: true
config:
check_session_url: http://kratos:4433/sessions/whoami
preserve_path: true
extra_from: "@this"
subject_from: "identity.id"
only:
- ory_kratos_session
noop:
enabled: true
authorizers:
allow:
enabled: true
mutators:
noop:
enabled: true
header:
enabled: true
config:
headers:
X-User: "{{ print .Subject }}"
id_token:
enabled: true
config:
issuer_url: http://oathkeeper:4455/
jwks_url: file:///etc/config/oathkeeper/id_token.jwks.json
ttl: 720h
claims: |
{
"session": {{ .Extra | toJson }}
}

docker/nginx/Dockerfile

@ -0,0 +1,20 @@
FROM openresty/openresty:1.19.9.1-bionic
RUN luarocks install lua-resty-http && \
openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 \
-subj '/CN=local-certificate' \
-keyout /etc/ssl/local-certificate.key \
-out /etc/ssl/local-certificate.crt
COPY mo ./
COPY libs /etc/nginx/libs
COPY conf.d /etc/nginx/conf.d
COPY conf.d.templates /etc/nginx/conf.d.templates
CMD [ "bash", "-c", \
"./mo < /etc/nginx/conf.d.templates/server.account.conf > /etc/nginx/conf.d/server.account.conf ; \
./mo < /etc/nginx/conf.d.templates/server.api.conf > /etc/nginx/conf.d/server.api.conf; \
./mo < /etc/nginx/conf.d.templates/server.hns.conf > /etc/nginx/conf.d/server.hns.conf; \
./mo < /etc/nginx/conf.d.templates/server.skylink.conf > /etc/nginx/conf.d/server.skylink.conf ; \
/usr/local/openresty/bin/openresty '-g daemon off;'" \
]
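# A hypothetical local build/run; PORTAL_DOMAIN, SERVER_DOMAIN and ACCOUNTS_ENABLED
# are the variables the templates above substitute via mo:
#   docker build -t portal-nginx docker/nginx
#   docker run -e PORTAL_DOMAIN=siasky.net -e ACCOUNTS_ENABLED=true -p 80:80 -p 443:443 portal-nginx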


@ -0,0 +1,39 @@
{{#ACCOUNTS_ENABLED}}
{{#PORTAL_DOMAIN}}
server {
server_name account.{{PORTAL_DOMAIN}}; # example: account.siasky.net
include /etc/nginx/conf.d/server/server.http;
}
server {
server_name account.{{PORTAL_DOMAIN}}; # example: account.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{PORTAL_DOMAIN}}/wildcard_.{{PORTAL_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{PORTAL_DOMAIN}}/wildcard_.{{PORTAL_DOMAIN}}.key;
include /etc/nginx/conf.d/server/server.account;
}
{{/PORTAL_DOMAIN}}
{{#SERVER_DOMAIN}}
server {
server_name account.{{SERVER_DOMAIN}}; # example: account.eu-ger-1.siasky.net
include /etc/nginx/conf.d/server/server.http;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
server {
server_name account.{{SERVER_DOMAIN}}; # example: account.eu-ger-1.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{SERVER_DOMAIN}}/wildcard_.{{SERVER_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{SERVER_DOMAIN}}/wildcard_.{{SERVER_DOMAIN}}.key;
include /etc/nginx/conf.d/server/server.account;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
{{/SERVER_DOMAIN}}
{{/ACCOUNTS_ENABLED}}


@ -0,0 +1,37 @@
{{#PORTAL_DOMAIN}}
server {
server_name {{PORTAL_DOMAIN}}; # example: siasky.net
include /etc/nginx/conf.d/server/server.http;
}
server {
server_name {{PORTAL_DOMAIN}}; # example: siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{{PORTAL_DOMAIN}}/{{PORTAL_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{{PORTAL_DOMAIN}}/{{PORTAL_DOMAIN}}.key;
include /etc/nginx/conf.d/server/server.api;
}
{{/PORTAL_DOMAIN}}
{{#SERVER_DOMAIN}}
server {
server_name {{SERVER_DOMAIN}}; # example: eu-ger-1.siasky.net
include /etc/nginx/conf.d/server/server.http;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
server {
server_name {{SERVER_DOMAIN}}; # example: eu-ger-1.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{{SERVER_DOMAIN}}/{{SERVER_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{{SERVER_DOMAIN}}/{{SERVER_DOMAIN}}.key;
include /etc/nginx/conf.d/server/server.api;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
{{/SERVER_DOMAIN}}


@ -0,0 +1,39 @@
{{#PORTAL_DOMAIN}}
server {
server_name *.hns.{{PORTAL_DOMAIN}}; # example: *.hns.siasky.net
include /etc/nginx/conf.d/server/server.http;
}
server {
server_name *.hns.{{PORTAL_DOMAIN}}; # example: *.hns.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.hns.{{PORTAL_DOMAIN}}/wildcard_.hns.{{PORTAL_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.hns.{{PORTAL_DOMAIN}}/wildcard_.hns.{{PORTAL_DOMAIN}}.key;
proxy_set_header Host {{PORTAL_DOMAIN}};
include /etc/nginx/conf.d/server/server.hns;
}
{{/PORTAL_DOMAIN}}
{{#SERVER_DOMAIN}}
server {
server_name *.hns.{{SERVER_DOMAIN}}; # example: *.hns.eu-ger-1.siasky.net
include /etc/nginx/conf.d/server/server.http;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
server {
server_name *.hns.{{SERVER_DOMAIN}}; # example: *.hns.eu-ger-1.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.hns.{{SERVER_DOMAIN}}/wildcard_.hns.{{SERVER_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.hns.{{SERVER_DOMAIN}}/wildcard_.hns.{{SERVER_DOMAIN}}.key;
proxy_set_header Host {{SERVER_DOMAIN}};
include /etc/nginx/conf.d/server/server.hns;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
{{/SERVER_DOMAIN}}


@ -0,0 +1,37 @@
{{#PORTAL_DOMAIN}}
server {
server_name *.{{PORTAL_DOMAIN}}; # example: *.siasky.net
include /etc/nginx/conf.d/server/server.http;
}
server {
server_name *.{{PORTAL_DOMAIN}}; # example: *.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{PORTAL_DOMAIN}}/wildcard_.{{PORTAL_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{PORTAL_DOMAIN}}/wildcard_.{{PORTAL_DOMAIN}}.key;
include /etc/nginx/conf.d/server/server.skylink;
}
{{/PORTAL_DOMAIN}}
{{#SERVER_DOMAIN}}
server {
server_name *.{{SERVER_DOMAIN}}; # example: *.eu-ger-1.siasky.net
include /etc/nginx/conf.d/server/server.http;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
server {
server_name *.{{SERVER_DOMAIN}}; # example: *.eu-ger-1.siasky.net
ssl_certificate /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{SERVER_DOMAIN}}/wildcard_.{{SERVER_DOMAIN}}.crt;
ssl_certificate_key /data/caddy/caddy/certificates/acme-v02.api.letsencrypt.org-directory/wildcard_.{{SERVER_DOMAIN}}/wildcard_.{{SERVER_DOMAIN}}.key;
include /etc/nginx/conf.d/server/server.skylink;
set_by_lua_block $server_alias { return string.match("{{SERVER_DOMAIN}}", "^([^.]+)") }
}
{{/SERVER_DOMAIN}}


@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----


@ -0,0 +1,18 @@
# enables gzip compression
gzip on;
# set the gzip compression level (1-9)
gzip_comp_level 6;
# tells proxies to cache both gzipped and regular versions of a resource
gzip_vary on;
# informs NGINX to not compress anything smaller than the defined size
gzip_min_length 256;
# compress data even for clients that are connecting via proxies if a response header includes
# the "expired", "no-cache", "no-store", "private", and "Authorization" parameters
gzip_proxied expired no-cache no-store private auth;
# enables the types of files that can be compressed
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;


@ -0,0 +1,9 @@
if ($request_method = 'OPTIONS') {
include /etc/nginx/conf.d/include/cors-headers;
more_set_headers 'Access-Control-Max-Age: 1728000';
more_set_headers 'Content-Type: text/plain; charset=utf-8';
more_set_headers 'Content-Length: 0';
return 204;
}
include /etc/nginx/conf.d/include/cors-headers;


@ -0,0 +1,5 @@
more_set_headers 'Access-Control-Allow-Origin: $http_origin';
more_set_headers 'Access-Control-Allow-Credentials: true';
more_set_headers 'Access-Control-Allow-Methods: GET, POST, HEAD, OPTIONS, PUT, PATCH, DELETE';
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,If-None-Match,Cache-Control,Content-Type,Range,X-HTTP-Method-Override,upload-offset,upload-metadata,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,upload-concat,location';
more_set_headers 'Access-Control-Expose-Headers: Content-Length,Content-Range,ETag,Skynet-File-Metadata,Skynet-Skylink,Skynet-Proof,Skynet-Portal-Api,Skynet-Server-Api,upload-offset,upload-metadata,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,upload-concat,location';


@ -0,0 +1,11 @@
# Extract 2 sets of 2 characters from $request_id and assign to $dir1, $dir2
# respectively. The rest of the $request_id is going to be assigned to $dir3.
# We use those variables to automatically generate a unique path for the uploaded file.
# This ensures that not all uploaded files end up in the same directory, which is something
# that causes performance issues in the renter.
# Example path result: /af/24/9bc5ec894920ccc45634dc9a8065
if ($request_id ~* "(\w{2})(\w{2})(\w+)") {
set $dir1 $1;
set $dir2 $2;
set $dir3 $3;
}


@ -0,0 +1,12 @@
# optional variables initialisation - those variables are used in log_format
# but are not set on every route, so we need to initialise them with an empty
# value, because otherwise the logger will throw an error
# set only on hns routes
set $hns_domain '';
# set only if server has been access through SERVER_DOMAIN
set $server_alias '';
# expose skylink variable so we can use it in access log
set $skylink '';


@ -0,0 +1,94 @@
include /etc/nginx/conf.d/include/proxy-buffer;
include /etc/nginx/conf.d/include/proxy-pass-internal;
# variable definitions - we need to define a variable to be able to access it in lua by ngx.var.something
set $skylink ''; # placeholder for the raw 46-character skylink
# resolve handshake domain by requesting to /hnsres endpoint and assign correct values to $skylink and $rest
access_by_lua_block {
local json = require('cjson')
local httpc = require("resty.http").new()
-- make a get request to /hnsres endpoint with the domain name from request_uri
-- 10.10.10.50 points to handshake-api service (alias not available when using resty-http)
local hnsres_res, hnsres_err = httpc:request_uri("http://10.10.10.50:3100/hnsres/" .. ngx.var.hns_domain)
-- print error and exit with 500 or exit with response if status is not 200
if hnsres_err or (hnsres_res and hnsres_res.status ~= ngx.HTTP_OK) then
ngx.status = (hnsres_err and ngx.HTTP_INTERNAL_SERVER_ERROR) or hnsres_res.status
ngx.header["content-type"] = "text/plain"
ngx.say(hnsres_err or hnsres_res.body)
return ngx.exit(ngx.status)
end
-- since /hnsres endpoint response is a json, we need to decode it before we access it
-- example response: '{"skylink":"sia://XABvi7JtJbQSMAcDwnUnmp2FKDPjg8_tTTFP4BwMSxVdEg"}'
local hnsres_json = json.decode(hnsres_res.body)
-- define local variable containing rest of the skylink if provided
local skylink_rest
if hnsres_json.skylink then
-- try to match the skylink with sia:// prefix
skylink, skylink_rest = string.match(hnsres_json.skylink, "sia://([^/?]+)(.*)")
-- in case the skylink did not match, assume that there is no sia:// prefix and try to match again
if skylink == nil then
skylink, skylink_rest = string.match(hnsres_json.skylink, "/?([^/?]+)(.*)")
end
elseif hnsres_json.registry then
local publickey = hnsres_json.registry.publickey
local datakey = hnsres_json.registry.datakey
-- make a get request to /skynet/registry endpoint with the credentials from text record
-- 10.10.10.10 points to sia service (alias not available when using resty-http)
local registry_res, registry_err = httpc:request_uri("http://10.10.10.10:9980/skynet/registry?publickey=" .. publickey .. "&datakey=" .. datakey, {
headers = { ["User-Agent"] = "Sia-Agent" }
})
-- print error and exit with 500 or exit with response if status is not 200
if registry_err or (registry_res and registry_res.status ~= ngx.HTTP_OK) then
ngx.status = (registry_err and ngx.HTTP_INTERNAL_SERVER_ERROR) or registry_res.status
ngx.header["content-type"] = "text/plain"
ngx.say(registry_err or registry_res.body)
return ngx.exit(ngx.status)
end
-- since /skynet/registry endpoint response is a json, we need to decode it before we access it
local registry_json = json.decode(registry_res.body)
-- response will contain a hex encoded skylink, we need to decode it
local data = (registry_json.data:gsub('..', function (cc)
return string.char(tonumber(cc, 16))
end))
skylink = data
end
-- fail with a generic 404 if skylink has not been extracted from a valid /hnsres response for some reason
if not skylink then
return ngx.exit(ngx.HTTP_NOT_FOUND)
end
ngx.var.skylink = skylink
if ngx.var.path == "/" and skylink_rest ~= nil and skylink_rest ~= "" and skylink_rest ~= "/" then
ngx.var.path = skylink_rest
end
}
# we proxy to another nginx location rather than directly to siad because we do not want to deal with caching here
proxy_pass https://127.0.0.1/$skylink$path$is_args$args;
# in case siad returns location header, we need to replace the skylink with the domain name
header_filter_by_lua_block {
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
if ngx.header.location then
-- match location redirect part after the skylink
local path = string.match(ngx.header.location, "[^/?]+(.*)");
-- because siad will set the location header to e.g. XABvi7JtJbQSMAcDwnUnmp2FKDPjg8_tTTFP4BwMSxVdEg/index.html
-- we need to replace the skylink with the hns domain so we are not redirected to the skylink
ngx.header.location = ngx.var.hns_domain .. path
end
}


@ -0,0 +1,102 @@
include /etc/nginx/conf.d/include/cors;
include /etc/nginx/conf.d/include/proxy-buffer;
include /etc/nginx/conf.d/include/proxy-cache-downloads;
include /etc/nginx/conf.d/include/track-download;
# redirect purge calls to separate location
error_page 462 = @purge;
if ($request_method = PURGE) {
return 462;
}
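# e.g. a hypothetical client call that triggers the purge flow:
#   curl -X PURGE https://siasky.net/<skylink>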
limit_conn downloads_by_ip 3; # ddos protection: max 3 downloads per IP at a time
# ensure that skylink that we pass around is base64 encoded (transform base32 encoded ones)
# this is important because we want only one format in cache keys and logs
set_by_lua_block $skylink { return require("skynet.skylink").parse(ngx.var.skylink) }
# $skylink_v1 and $skylink_v2 variables default to the same value but in case the requested skylink was:
# a) skylink v1 - it would not matter, no additional logic is executed
# b) skylink v2 - in a lua block below we will resolve the skylink v2 into skylink v1 and update
# $skylink_v1 variable so then the proxy request to skyd can be cached in nginx (proxy_cache_key
# in proxy-cache-downloads includes $skylink_v1 as a part of the cache key)
set $skylink_v1 $skylink;
set $skylink_v2 $skylink;
# variable for Skynet-Proof header that we need to inject
# into a response if the request was for skylink v2
set $skynet_proof '';
# default download rate to unlimited
set $limit_rate 0;
access_by_lua_block {
local httpc = require("resty.http").new()
-- detect whether requested skylink is v2
local isBase32v2 = string.len(ngx.var.skylink) == 55 and string.sub(ngx.var.skylink, 0, 2) == "04"
local isBase64v2 = string.len(ngx.var.skylink) == 46 and string.sub(ngx.var.skylink, 0, 2) == "AQ"
if isBase32v2 or isBase64v2 then
-- 10.10.10.10 points to sia service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.10:9980/skynet/resolve/" .. ngx.var.skylink_v2, {
headers = { ["User-Agent"] = "Sia-Agent" }
})
-- print error and exit with 500 or exit with response if status is not 200
if err or (res and res.status ~= ngx.HTTP_OK) then
ngx.status = (err and ngx.HTTP_INTERNAL_SERVER_ERROR) or res.status
ngx.header["content-type"] = "text/plain"
ngx.say(err or res.body)
return ngx.exit(ngx.status)
end
local json = require('cjson')
local resolve = json.decode(res.body)
ngx.var.skylink_v1 = resolve.skylink
ngx.var.skynet_proof = res.headers["Skynet-Proof"]
end
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
})
-- fail gracefully in case /user/limits failed
if err or (res and res.status ~= ngx.HTTP_OK) then
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
ngx.var.limit_rate = 2621440 -- (20 * 1024 * 1024 / 8) conservative fallback to 20 mbps in case accounts failed to return limits
elseif res and res.status == ngx.HTTP_OK then
local json = require('cjson')
local limits = json.decode(res.body)
ngx.var.limit_rate = limits.download
end
}
header_filter_by_lua_block {
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
-- not empty skynet_proof means this is a skylink v2 request
-- so we should replace the Skynet-Proof header with the one
-- we got from /skynet/resolve/ endpoint, otherwise we would
-- be serving cached empty v1 skylink Skynet-Proof header
if ngx.var.skynet_proof and ngx.var.skynet_proof ~= "" then
ngx.header["Skynet-Proof"] = ngx.var.skynet_proof
end
}
limit_rate_after 512k;
limit_rate $limit_rate;
proxy_read_timeout 600;
proxy_set_header User-Agent: Sia-Agent;
# in case the requested skylink was v2 and we already resolved it to skylink v1, we pass the resolved
# skylink v1 to skyd to save the extra skylink v2 lookup in skyd; in turn, in case skyd returns a redirect,
# we need to rewrite the skylink v1 back to skylink v2 in the location header with proxy_redirect
proxy_redirect $skylink_v1 $skylink_v2;
proxy_pass http://sia:9980/skynet/skylink/$skylink_v1$path$is_args$args;


@ -0,0 +1,33 @@
include /etc/nginx/conf.d/include/cors;
include /etc/nginx/conf.d/include/sia-auth;
include /etc/nginx/conf.d/include/track-registry;
limit_req zone=registry_access_by_ip burst=600 nodelay;
limit_req zone=registry_access_by_ip_throttled burst=200 nodelay;
proxy_set_header User-Agent: Sia-Agent;
proxy_read_timeout 600; # siad should timeout with 404 after 5 minutes
proxy_pass http://sia:9980/skynet/registry;
access_by_lua_block {
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
local httpc = require("resty.http").new()
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
})
-- fail gracefully in case /user/limits failed
if err or (res and res.status ~= ngx.HTTP_OK) then
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
elseif res and res.status == ngx.HTTP_OK then
local json = require('cjson')
local limits = json.decode(res.body)
if limits.registry > 0 then
ngx.sleep(limits.registry / 1000)
end
end
}


@ -0,0 +1,5 @@
# if you are expecting large headers (e.g. Skynet-Skyfile-Metadata), tune these values to your needs
# read more: https://www.getpagespeed.com/server-setup/nginx/tuning-proxy_buffer_size-in-nginx
proxy_buffer_size 4096k;
proxy_buffers 64 256k;
proxy_busy_buffers_size 4096k; # at least as high as proxy_buffer_size


@ -0,0 +1,7 @@
set $nocache 0; # internal variable for bypassing the cache, nginx expects 0/1 for boolean
proxy_cache skynet; # cache name
proxy_cache_key $skylink_v1$path$arg_format$arg_attachment$arg_start$arg_end$http_range; # unique cache key
proxy_cache_min_uses 3; # cache after 3 uses
proxy_cache_valid 200 206 307 308 48h; # keep 200, 206, 307 and 308 responses valid for up to 2 days
proxy_cache_bypass $nocache $cookie_nocache $arg_nocache; # add cache bypass option
add_header X-Proxy-Cache $upstream_cache_status; # add response header to indicate cache hits and misses
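# a hypothetical way to observe the cache from a client (MISS until the
# third request for a given key, HIT afterwards):
#   curl -sI https://siasky.net/<skylink> | grep -i x-proxy-cache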


@ -0,0 +1,10 @@
# ----------------------------------------------------------------
# this file should be included on all locations that proxy_pass to
# another nginx location - internal nginx traffic
# ----------------------------------------------------------------
# increase the timeout on internal nginx proxy_pass locations to a
# value that is significantly higher than expected and let the end
# location handle correct timeout
proxy_read_timeout 30m;
proxy_send_timeout 30m;


@ -0,0 +1,6 @@
# Add a list of IPs here that should be severely rate limited on upload.
# Note that it is possible to add IP ranges as well as the full IP address.
#
# Examples:
# 192.168.0.0/24 1;
# 79.85.222.247 1;


@ -0,0 +1,15 @@
rewrite_by_lua_block {
local b64 = require("ngx.base64")
-- open apipassword file for reading (b flag is required for some reason)
-- (file /data/sia/apipassword has to be mounted from the host system)
local apipassword_file = io.open("/data/sia/apipassword", "rb")
-- read apipassword file contents and trim newline (important)
local apipassword = apipassword_file:read("*all"):gsub("%s+", "")
-- make sure to close file after reading the password
apipassword_file:close()
-- encode the user:password authorization string
-- (in our case user is empty so it is just :password)
local content = b64.encode_base64url(":" .. apipassword)
-- set authorization header with proper base64 encoded string
ngx.req.set_header("Authorization", "Basic " .. content)
}
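# worked example: for an apipassword of "password", the string ":password"
# encodes to "OnBhc3N3b3Jk", so the resulting request header is
# "Authorization: Basic OnBhc3N3b3Jk"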


@ -0,0 +1,13 @@
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1d&hsts=false&ocsp=false&guideline=5.6
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
ssl_dhparam /etc/nginx/conf.d/dhparam.pem;
# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;


@ -0,0 +1,27 @@
# register the download in accounts service (cookies should contain jwt)
log_by_lua_block {
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
local function track(premature, skylink, status, body_bytes_sent, jwt)
if premature then return end
local httpc = require("resty.http").new()
local query = table.concat({ "status=" .. status, "bytes=" .. body_bytes_sent }, "&")
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.70:3000/track/download/" .. skylink .. "?" .. query, {
method = "POST",
headers = { ["Cookie"] = "skynet-jwt=" .. jwt },
})
if err or (res and res.status ~= ngx.HTTP_NO_CONTENT) then
ngx.log(ngx.ERR, "Failed accounts service request /track/download/" .. skylink .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
end
end
if ngx.header["Skynet-Skylink"] and ngx.var.skynet_jwt ~= "" and ngx.status >= ngx.HTTP_OK and ngx.status < ngx.HTTP_SPECIAL_RESPONSE then
local ok, err = ngx.timer.at(0, track, ngx.header["Skynet-Skylink"], ngx.status, ngx.var.body_bytes_sent, ngx.var.skynet_jwt)
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
end
}


@ -0,0 +1,27 @@
# register the registry access in accounts service (cookies should contain jwt)
log_by_lua_block {
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
local function track(premature, request_method, jwt)
if premature then return end
local httpc = require("resty.http").new()
local method = request_method == "GET" and "read" or "write"
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.70:3000/track/registry/" .. method, {
method = "POST",
headers = { ["Cookie"] = "skynet-jwt=" .. jwt },
})
if err or (res and res.status ~= ngx.HTTP_NO_CONTENT) then
ngx.log(ngx.ERR, "Failed accounts service request /track/registry/" .. method .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
end
end
if ngx.var.skynet_jwt ~= "" and (ngx.status == ngx.HTTP_OK or ngx.status == ngx.HTTP_NOT_FOUND) then
local ok, err = ngx.timer.at(0, track, ngx.req.get_method(), ngx.var.skynet_jwt)
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
end
}


@ -0,0 +1,26 @@
# register the upload in accounts service (cookies should contain jwt)
log_by_lua_block {
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
local function track(premature, skylink, jwt)
if premature then return end
local httpc = require("resty.http").new()
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.70:3000/track/upload/" .. skylink, {
method = "POST",
headers = { ["Cookie"] = "skynet-jwt=" .. jwt },
})
if err or (res and res.status ~= ngx.HTTP_NO_CONTENT) then
ngx.log(ngx.ERR, "Failed accounts service request /track/upload/" .. skylink .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
end
end
if ngx.header["Skynet-Skylink"] and ngx.var.skynet_jwt ~= "" then
local ok, err = ngx.timer.at(0, track, ngx.header["Skynet-Skylink"], ngx.var.skynet_jwt)
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
end
}


@ -0,0 +1,68 @@
-- Tit Petric, Monotek d.o.o., Tue 03 Jan 2017 06:54:56 PM CET
--
-- Delete nginx cached assets with a PURGE request against an endpoint
-- supports extended regular expression PURGE requests (/upload/.*)
--
-- https://scene-si.org/2017/01/08/improving-nginx-lua-cache-purge/
--
function file_exists(name)
local f = io.open(name, "r")
if f~=nil then io.close(f) return true else return false end
end
function explode(d, p)
local t, ll, l
t={}
ll=0
if(#p == 1) then return {p} end
while true do
l=string.find(p, d, ll, true) -- find the next d in the string
if l~=nil then -- if "not not" found then..
table.insert(t, string.sub(p, ll, l-1)) -- Save it in our array.
ll=l+1 -- save just after where we found it for searching next time.
else
table.insert(t, string.sub(p, ll)) -- Save what's left in our array.
break -- Break at end, as it should be, according to the lua manual.
end
end
return t
end
function purge(filename)
if (file_exists(filename)) then
os.remove(filename)
end
end
function trim(s)
return (string.gsub(s, "^%s*(.-)%s*$", "%1"))
end
function exec(cmd)
local handle = io.popen(cmd)
local result = handle:read("*all")
handle:close()
return trim(result)
end
function list_files(cache_path, purge_pattern)
local result = exec("/usr/bin/find " .. cache_path .. " -type f | /usr/bin/xargs --no-run-if-empty -n1000 /bin/grep -El -m 1 '^KEY: " .. purge_pattern .. "' 2>&1")
if result == "" then
return {}
end
return explode("\n", result)
end
if ngx ~= nil then
-- list all cached items matching uri
local files = list_files(ngx.var.lua_purge_path, ngx.var.uri)
ngx.header["Content-type"] = "text/plain; charset=utf-8"
ngx.header["X-Purged-Count"] = table.getn(files)
for k, v in pairs(files) do
purge(v)
end
ngx.say("OK")
ngx.exit(ngx.OK)
end


@ -0,0 +1,7 @@
# Every file from within this directory will be included in the server block
# of the nginx configuration, at the very end. See client.conf.
#
# Example:
# location /blog {
# root /var/www/blog;
# }


@ -0,0 +1,18 @@
lua_shared_dict dnslink 10m;
server {
listen 80 default_server;
listen [::]:80 default_server;
include /etc/nginx/conf.d/server/server.dnslink;
}
server {
listen 443 default_server;
listen [::]:443 default_server;
ssl_certificate /etc/ssl/local-certificate.crt;
ssl_certificate_key /etc/ssl/local-certificate.key;
include /etc/nginx/conf.d/server/server.dnslink;
}


@ -0,0 +1,10 @@
listen 443 ssl http2;
listen [::]:443 ssl http2;
include /etc/nginx/conf.d/include/ssl-settings;
include /etc/nginx/conf.d/include/init-optional-variables;
location / {
proxy_redirect http://127.0.0.1/ https://$host/;
proxy_pass http://oathkeeper:4455;
}


@ -0,0 +1,382 @@
listen 443 ssl http2;
listen [::]:443 ssl http2;
include /etc/nginx/conf.d/include/ssl-settings;
include /etc/nginx/conf.d/include/init-optional-variables;
# ddos protection: closing slow connections
client_body_timeout 1h;
client_header_timeout 1h;
send_timeout 1h;
proxy_connect_timeout 1h;
proxy_read_timeout 1h;
proxy_send_timeout 1h;
# Increase the body buffer size, to ensure the internal POSTs can always
# parse the full POST contents into memory.
client_body_buffer_size 128k;
client_max_body_size 128k;
# legacy endpoint rewrite
rewrite ^/portals /skynet/portals permanent;
rewrite ^/stats /skynet/stats permanent;
rewrite ^/skynet/blacklist /skynet/blocklist permanent;
location / {
include /etc/nginx/conf.d/include/cors;
set $skylink "0404dsjvti046fsua4ktor9grrpe76erq9jot9cvopbhsvsu76r4r30";
set $path $uri;
include /etc/nginx/conf.d/include/location-skylink;
proxy_intercept_errors on;
error_page 400 404 490 500 502 503 504 =200 @fallback;
}
location @fallback {
proxy_pass http://website:9000;
}
location /docs {
proxy_pass https://skynetlabs.github.io/skynet-docs;
}
location /skynet/blocklist {
include /etc/nginx/conf.d/include/cors;
proxy_cache skynet;
proxy_cache_valid any 1m; # cache blocklist for 1 minute
proxy_set_header User-Agent: Sia-Agent;
proxy_pass http://sia:9980/skynet/blocklist;
}
location /skynet/portals {
include /etc/nginx/conf.d/include/cors;
proxy_cache skynet;
proxy_cache_valid any 1m; # cache portals for 1 minute
proxy_set_header User-Agent: Sia-Agent;
proxy_pass http://sia:9980/skynet/portals;
}
location /skynet/stats {
include /etc/nginx/conf.d/include/cors;
proxy_cache skynet;
proxy_cache_valid any 1m; # cache stats for 1 minute
proxy_set_header User-Agent: Sia-Agent;
proxy_read_timeout 5m; # extend the read timeout
proxy_pass http://sia:9980/skynet/stats;
}
# Define path for server load endpoint
location /serverload {
# Define root directory in the nginx container to load file from
root /usr/local/share;
# including this because of peer pressure from the other routes
include /etc/nginx/conf.d/include/cors;
# tell nginx to expect json
default_type 'application/json';
# Allow for /serverload to load /serverload.json file
try_files $uri $uri.json =404;
}
location /skynet/health {
include /etc/nginx/conf.d/include/cors;
proxy_cache skynet;
proxy_cache_key $request_uri; # use whole request uri (uri + args) as cache key
proxy_cache_valid any 1m; # cache responses for 1 minute
proxy_set_header User-Agent Sia-Agent;
proxy_read_timeout 5m; # extend the read timeout
proxy_pass http://sia:9980;
}
location /health-check {
include /etc/nginx/conf.d/include/cors;
access_log off; # do not log traffic to health-check endpoint
proxy_pass http://10.10.10.60:3100; # hardcoded ip because health-check waits for nginx
}
location /abuse/ {
if ($request_method = 'OPTIONS') {
add_header 'Access-Control-Allow-Origin' 'https://0404guluqu38oaqapku91ed11kbhkge55smh9lhjukmlrj37lfpm8no.siasky.net';
add_header 'Access-Control-Allow-Credentials' 'true';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
# pre-flight info is valid for 20 days
add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain charset=UTF-8';
add_header 'Content-Length' 0;
return 204;
}
proxy_pass http://10.10.10.102:4000/;
}
location /report-abuse {
# TODO: do a proxy_pass
return https://0404guluqu38oaqapku91ed11kbhkge55smh9lhjukmlrj37lfpm8no.siasky.net;
}
location /hns {
# match the request_uri and extract the hns domain plus anything passed in the uri after it
# example: /hns/something/foo/bar matches:
# > hns_domain: something
# > path: /foo/bar (see the standalone sketch after this file)
set_by_lua_block $hns_domain { return string.match(ngx.var.uri, "/hns/([^/?]+)") }
set_by_lua_block $path { return string.match(ngx.var.uri, "/hns/[^/?]+(.*)") }
proxy_set_header Host $host;
include /etc/nginx/conf.d/include/location-hns;
}
location /hnsres {
include /etc/nginx/conf.d/include/cors;
proxy_pass http://handshake-api:3100;
}
location /skynet/registry {
include /etc/nginx/conf.d/include/location-skynet-registry;
}
location /skynet/restore {
include /etc/nginx/conf.d/include/cors;
include /etc/nginx/conf.d/include/sia-auth;
client_max_body_size 5M;
# increase request timeouts
proxy_read_timeout 600;
proxy_send_timeout 600;
proxy_request_buffering off; # stream uploaded files through the proxy as they come in
proxy_set_header Expect $http_expect;
proxy_set_header User-Agent Sia-Agent;
# proxy this call to siad endpoint (make sure the ip is correct)
proxy_pass http://sia:9980;
}
location /skynet/skyfile {
include /etc/nginx/conf.d/include/cors;
include /etc/nginx/conf.d/include/sia-auth;
include /etc/nginx/conf.d/include/track-upload;
include /etc/nginx/conf.d/include/generate-siapath;
limit_req zone=uploads_by_ip burst=10 nodelay;
limit_req zone=uploads_by_ip_throttled;
limit_conn upload_conn 5;
limit_conn upload_conn_rl 1;
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
# increase request timeouts
proxy_read_timeout 600;
proxy_send_timeout 600;
proxy_request_buffering off; # stream uploaded files through the proxy as they come in
proxy_set_header Expect $http_expect;
proxy_set_header User-Agent Sia-Agent;
# access_by_lua_block {
# -- this block runs only when accounts are enabled
# if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
# ngx.var.upload_limit_rate = 5 * 1024 * 1024
# local res = ngx.location.capture("/accounts/user", { copy_all_vars = true })
# if res.status == ngx.HTTP_OK then
# local json = require('cjson')
# local user = json.decode(res.body)
# ngx.var.upload_limit_rate = ngx.var.upload_limit_rate * (user.tier + 1)
# end
# }
# proxy this call to siad endpoint (make sure the ip is correct)
proxy_pass http://sia:9980/skynet/skyfile/$dir1/$dir2/$dir3$is_args$args;
}
# endpoint implementing the tus open protocol for resumable file uploads: https://tus.io
location /skynet/tus {
include /etc/nginx/conf.d/include/cors-headers; # include cors headers but do not overwrite OPTIONS response
include /etc/nginx/conf.d/include/track-upload;
limit_req zone=uploads_by_ip burst=10 nodelay;
limit_req zone=uploads_by_ip_throttled;
limit_conn upload_conn 5;
limit_conn upload_conn_rl 1;
# TUS chunk size is 40M, plus 10M of headroom
client_max_body_size 50M;
# These timeouts need to be raised since skyd can stall reading
# data for a while when overloaded, which would otherwise terminate the connection
client_body_timeout 1h;
proxy_send_timeout 1h;
# Add X-Forwarded-* headers
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $scheme;
# rewrite the Location header of proxied responses to use the host from the env variable (required so clients receive the correct upload location)
set_by_lua $SKYNET_SERVER_API 'return os.getenv("SKYNET_SERVER_API")';
proxy_redirect $scheme://$host $SKYNET_SERVER_API;
# proxy /skynet/tus requests to siad endpoint with all arguments
proxy_pass http://sia:9980;
# set max upload size dynamically based on account limits
rewrite_by_lua_block {
-- set default limit value to 1 GB
ngx.req.set_header("SkynetMaxUploadSize", 1073741824)
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
local httpc = require("resty.http").new()
-- fetch account limits and set max upload size accordingly
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
})
-- fail gracefully in case /user/limits failed
if err or (res and res.status ~= ngx.HTTP_OK) then
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
elseif res and res.status == ngx.HTTP_OK then
local json = require('cjson')
local limits = json.decode(res.body)
ngx.req.set_header("SkynetMaxUploadSize", limits.maxUploadSize)
end
}
# extract the skylink from the base64-encoded upload metadata and assign it to a proper header (see the standalone sketch after this file)
header_filter_by_lua_block {
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
if ngx.header["Upload-Metadata"] then
local encodedSkylink = string.match(ngx.header["Upload-Metadata"], "Skylink ([^,?]+)")
if encodedSkylink then
ngx.header["Skynet-Skylink"] = ngx.decode_base64(encodedSkylink)
end
end
}
}
location /skynet/pin {
include /etc/nginx/conf.d/include/cors;
include /etc/nginx/conf.d/include/sia-auth;
include /etc/nginx/conf.d/include/track-upload;
include /etc/nginx/conf.d/include/generate-siapath;
limit_req zone=uploads_by_ip burst=10 nodelay;
limit_req zone=uploads_by_ip_throttled;
limit_conn upload_conn 5;
limit_conn upload_conn_rl 1;
proxy_set_header User-Agent Sia-Agent;
proxy_pass http://sia:9980$uri?siapath=$dir1/$dir2/$dir3&$args;
}
location /skynet/metadata {
include /etc/nginx/conf.d/include/cors;
header_filter_by_lua_block {
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
}
proxy_set_header User-Agent Sia-Agent;
proxy_pass http://sia:9980;
}
location /skynet/resolve {
include /etc/nginx/conf.d/include/cors;
header_filter_by_lua_block {
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
}
proxy_set_header User-Agent Sia-Agent;
proxy_pass http://sia:9980;
}
location ~ "^/(([a-zA-Z0-9-_]{46}|[a-z0-9]{55})(/.*)?)$" {
set $skylink $2;
set $path $3;
include /etc/nginx/conf.d/include/location-skylink;
}
location ~ "^/file/(([a-zA-Z0-9-_]{46}|[a-z0-9]{55})(/.*)?)$" {
set $skylink $2;
set $path $3;
set $args attachment=true&$args;
#set $is_args ?;
include /etc/nginx/conf.d/include/location-skylink;
}
location @purge {
allow 10.0.0.0/8;
allow 127.0.0.1/32;
allow 172.16.0.0/12;
allow 192.168.0.0/16;
deny all;
set $lua_purge_path "/data/nginx/cache/";
content_by_lua_file /etc/nginx/conf.d/scripts/purge-multi.lua;
}
location /__internal/do/not/use/authenticated {
include /etc/nginx/conf.d/include/cors;
charset utf-8;
charset_types application/json;
default_type application/json;
content_by_lua_block {
local json = require('cjson')
-- this block runs only when accounts are enabled
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then
ngx.say(json.encode{authenticated = false})
return ngx.exit(ngx.HTTP_OK)
end
local httpc = require("resty.http").new()
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.70:3000/user", {
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
})
-- endpoint /user should return HTTP_OK for authenticated and HTTP_UNAUTHORIZED for not authenticated
if res and (res.status == ngx.HTTP_OK or res.status == ngx.HTTP_UNAUTHORIZED) then
ngx.say(json.encode{authenticated = res.status == ngx.HTTP_OK})
return ngx.exit(ngx.HTTP_OK)
else
ngx.log(ngx.ERR, "Failed accounts service request /user: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
ngx.say(json.encode{authenticated = false})
return ngx.exit(ngx.HTTP_OK)
end
}
}
include /etc/nginx/conf.d/server-override/*;
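Two of the Lua extractions above are easy to sanity-check outside nginx. A standalone sketch in plain Lua; the basexx require assumes the vendored library added later in this diff is on package.path, standing in for ngx.decode_base64:

-- /hns path extraction, as described in the comments above
local uri = "/hns/something/foo/bar"
print(string.match(uri, "/hns/([^/?]+)"))   --> something
print(string.match(uri, "/hns/[^/?]+(.*)")) --> /foo/bar

-- tus Upload-Metadata skylink extraction: the metadata is a comma-separated
-- list of "key base64value" pairs, so the skylink arrives base64-encoded
local basexx = require("basexx")
local metadata = "Skylink QVFCRzhuX3NnRU1fbmxFcDNHMHczdkxqbWR2U1o0NmxuOFpYSG4tZU9iWk5qQQ=="
local encoded = string.match(metadata, "Skylink ([^,?]+)")
print(basexx.from_base64(encoded)) --> AQBG8n_sgEM_nlEp3G0w3vLjmdvSZ46ln8ZXHn-eObZNjA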


@ -0,0 +1,46 @@
include /etc/nginx/conf.d/include/init-optional-variables;
location / {
set $skylink "";
set $path $uri;
rewrite_by_lua_block {
local cache = ngx.shared.dnslink
local cache_value = cache:get(ngx.var.host)
if cache_value == nil then
local httpc = require("resty.http").new()
-- 10.10.10.55 points to dnslink-api service (alias not available when using resty-http)
local res, err = httpc:request_uri("http://10.10.10.55:3100/dnslink/" .. ngx.var.host)
if err or (res and res.status ~= ngx.HTTP_OK) then
-- check whether we can fallback to regular skylink request
local match_skylink = ngx.re.match(ngx.var.uri, "^/([a-zA-Z0-9-_]{46}|[a-z0-9]{55})(/.*)?")
if match_skylink then
ngx.var.skylink = match_skylink[1]
ngx.var.path = match_skylink[2] or "/"
else
ngx.status = (err and ngx.HTTP_INTERNAL_SERVER_ERROR) or res.status
ngx.header["content-type"] = "text/plain"
ngx.say(err or res.body)
ngx.exit(ngx.status)
end
else
ngx.var.skylink = res.body
local cache_ttl = 300 -- 5 minutes cache expire time
cache:set(ngx.var.host, ngx.var.skylink, cache_ttl)
end
else
ngx.var.skylink = cache_value
end
ngx.var.skylink = require("skynet.skylink").parse(ngx.var.skylink)
ngx.var.skylink_v1 = ngx.var.skylink
ngx.var.skylink_v2 = ngx.var.skylink
}
include /etc/nginx/conf.d/include/location-skylink;
}
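The rewrite block above is a cache-aside lookup: consult the shared dict, fall back to the dnslink-api service, then cache the answer for five minutes. A minimal sketch of the same pattern in plain Lua, with an ordinary table standing in for ngx.shared.dnslink and an injected lookup function standing in for the resty.http call:

local cache = {}

local function resolve_dnslink(host, lookup)
  local skylink = cache[host]
  if skylink == nil then
    -- e.g. GET http://10.10.10.55:3100/dnslink/<host>, returning nil on failure
    skylink = lookup(host)
    if skylink then
      cache[host] = skylink -- the real config also sets a 300s expiry here
    end
  end
  return skylink
end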


@ -0,0 +1,12 @@
listen 443 ssl http2;
listen [::]:443 ssl http2;
include /etc/nginx/conf.d/include/ssl-settings;
include /etc/nginx/conf.d/include/init-optional-variables;
location / {
set_by_lua_block $hns_domain { return string.match(ngx.var.host, "[^%.]+") }
set $path $uri;
include /etc/nginx/conf.d/include/location-hns;
}


@ -0,0 +1,8 @@
listen 80;
listen [::]:80;
include /etc/nginx/conf.d/include/init-optional-variables;
location / {
return 301 https://$host$request_uri;
}


@ -0,0 +1,12 @@
listen 443 ssl http2;
listen [::]:443 ssl http2;
include /etc/nginx/conf.d/include/ssl-settings;
include /etc/nginx/conf.d/include/init-optional-variables;
location / {
set_by_lua_block $skylink { return string.match(ngx.var.host, "%w+") }
set $path $uri;
include /etc/nginx/conf.d/include/location-skylink;
}
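Both host matches above (this skylink server and the hns-domain server before it) can be checked in plain Lua. Note that %w+ stops at the first dot, which is sufficient here because base32 skylinks are purely alphanumeric:

print(string.match("foo.hns.siasky.net", "[^%.]+")) --> foo
print(string.match("0404dsjvti046fsua4ktor9grrpe76erq9jot9cvopbhsvsu76r4r30.siasky.net", "%w+"))
--> 0404dsjvti046fsua4ktor9grrpe76erq9jot9cvopbhsvsu76r4r30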


@ -0,0 +1,301 @@
-- source: https://github.com/aiq/basexx
-- license: MIT
-- modified: exposed from_basexx and to_basexx generic functions
--------------------------------------------------------------------------------
-- util functions
--------------------------------------------------------------------------------
local function divide_string( str, max )
local result = {}
local start = 1
for i = 1, #str do
if i % max == 0 then
table.insert( result, str:sub( start, i ) )
start = i + 1
elseif i == #str then
table.insert( result, str:sub( start, i ) )
end
end
return result
end
local function number_to_bit( num, length )
local bits = {}
while num > 0 do
local rest = math.floor( math.fmod( num, 2 ) )
table.insert( bits, rest )
num = ( num - rest ) / 2
end
while #bits < length do
table.insert( bits, "0" )
end
return string.reverse( table.concat( bits ) )
end
local function ignore_set( str, set )
if set then
str = str:gsub( "["..set.."]", "" )
end
return str
end
local function pure_from_bit( str )
return ( str:gsub( '........', function ( cc )
return string.char( tonumber( cc, 2 ) )
end ) )
end
local function unexpected_char_error( str, pos )
local c = string.sub( str, pos, pos )
return string.format( "unexpected character at position %d: '%s'", pos, c )
end
--------------------------------------------------------------------------------
local basexx = {}
--------------------------------------------------------------------------------
-- base2(bitfield) decode and encode function
--------------------------------------------------------------------------------
local bitMap = { o = "0", i = "1", l = "1" }
function basexx.from_bit( str, ignore )
str = ignore_set( str, ignore )
str = string.lower( str )
str = str:gsub( '[ilo]', function( c ) return bitMap[ c ] end )
local pos = string.find( str, "[^01]" )
if pos then return nil, unexpected_char_error( str, pos ) end
return pure_from_bit( str )
end
function basexx.to_bit( str )
return ( str:gsub( '.', function ( c )
local byte = string.byte( c )
local bits = {}
for _ = 1,8 do
table.insert( bits, byte % 2 )
byte = math.floor( byte / 2 )
end
return table.concat( bits ):reverse()
end ) )
end
--------------------------------------------------------------------------------
-- base16(hex) decode and encode function
--------------------------------------------------------------------------------
function basexx.from_hex( str, ignore )
str = ignore_set( str, ignore )
local pos = string.find( str, "[^%x]" )
if pos then return nil, unexpected_char_error( str, pos ) end
return ( str:gsub( '..', function ( cc )
return string.char( tonumber( cc, 16 ) )
end ) )
end
function basexx.to_hex( str )
return ( str:gsub( '.', function ( c )
return string.format('%02X', string.byte( c ) )
end ) )
end
--------------------------------------------------------------------------------
-- generic function to decode and encode base32/base64
--------------------------------------------------------------------------------
function basexx.from_basexx( str, alphabet, bits )
local result = {}
for i = 1, #str do
local c = string.sub( str, i, i )
if c ~= '=' then
local index = string.find( alphabet, c, 1, true )
if not index then
return nil, unexpected_char_error( str, i )
end
table.insert( result, number_to_bit( index - 1, bits ) )
end
end
local value = table.concat( result )
local pad = #value % 8
return pure_from_bit( string.sub( value, 1, #value - pad ) )
end
function basexx.to_basexx( str, alphabet, bits, pad )
local bitString = basexx.to_bit( str )
local chunks = divide_string( bitString, bits )
local result = {}
for _,value in ipairs( chunks ) do
if ( #value < bits ) then
value = value .. string.rep( '0', bits - #value )
end
local pos = tonumber( value, 2 ) + 1
table.insert( result, alphabet:sub( pos, pos ) )
end
table.insert( result, pad )
return table.concat( result )
end
--------------------------------------------------------------------------------
-- rfc 3548: http://www.rfc-editor.org/rfc/rfc3548.txt
--------------------------------------------------------------------------------
local base32Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
local base32PadMap = { "", "======", "====", "===", "=" }
function basexx.from_base32( str, ignore )
str = ignore_set( str, ignore )
return basexx.from_basexx( string.upper( str ), base32Alphabet, 5 )
end
function basexx.to_base32( str )
return basexx.to_basexx( str, base32Alphabet, 5, base32PadMap[ #str % 5 + 1 ] )
end
--------------------------------------------------------------------------------
-- crockford: http://www.crockford.com/wrmg/base32.html
--------------------------------------------------------------------------------
local crockfordAlphabet = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
local crockfordMap = { O = "0", I = "1", L = "1" }
function basexx.from_crockford( str, ignore )
str = ignore_set( str, ignore )
str = string.upper( str )
str = str:gsub( '[ILOU]', function( c ) return crockfordMap[ c ] end )
return basexx.from_basexx( str, crockfordAlphabet, 5 )
end
function basexx.to_crockford( str )
return basexx.to_basexx( str, crockfordAlphabet, 5, "" )
end
--------------------------------------------------------------------------------
-- base64 decode and encode function
--------------------------------------------------------------------------------
local base64Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"..
"abcdefghijklmnopqrstuvwxyz"..
"0123456789+/"
local base64PadMap = { "", "==", "=" }
function basexx.from_base64( str, ignore )
str = ignore_set( str, ignore )
return basexx.from_basexx( str, base64Alphabet, 6 )
end
function basexx.to_base64( str )
return basexx.to_basexx( str, base64Alphabet, 6, base64PadMap[ #str % 3 + 1 ] )
end
--------------------------------------------------------------------------------
-- URL safe base64 decode and encode function
--------------------------------------------------------------------------------
local url64Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"..
"abcdefghijklmnopqrstuvwxyz"..
"0123456789-_"
function basexx.from_url64( str, ignore )
str = ignore_set( str, ignore )
return basexx.from_basexx( str, url64Alphabet, 6 )
end
function basexx.to_url64( str )
return basexx.to_basexx( str, url64Alphabet, 6, "" )
end
--------------------------------------------------------------------------------
-- z85 decode and encode function
--------------------------------------------------------------------------------
local function length_error( len, d )
return string.format( "invalid length: %d - must be a multiple of %d", len, d )
end
local z85Decoder = { 0x00, 0x44, 0x00, 0x54, 0x53, 0x52, 0x48, 0x00,
0x4B, 0x4C, 0x46, 0x41, 0x00, 0x3F, 0x3E, 0x45,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x40, 0x00, 0x49, 0x42, 0x4A, 0x47,
0x51, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32,
0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A,
0x3B, 0x3C, 0x3D, 0x4D, 0x00, 0x4E, 0x43, 0x00,
0x00, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
0x21, 0x22, 0x23, 0x4F, 0x00, 0x50, 0x00, 0x00 }
function basexx.from_z85( str, ignore )
str = ignore_set( str, ignore )
if ( #str % 5 ) ~= 0 then
return nil, length_error( #str, 5 )
end
local result = {}
local value = 0
for i = 1, #str do
local index = string.byte( str, i ) - 31
if index < 1 or index >= #z85Decoder then
return nil, unexpected_char_error( str, i )
end
value = ( value * 85 ) + z85Decoder[ index ]
if ( i % 5 ) == 0 then
local divisor = 256 * 256 * 256
while divisor ~= 0 do
local b = math.floor( value / divisor ) % 256
table.insert( result, string.char( b ) )
divisor = math.floor( divisor / 256 )
end
value = 0
end
end
return table.concat( result )
end
local z85Encoder = "0123456789"..
"abcdefghijklmnopqrstuvwxyz"..
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"..
".-:+=^!/*?&<>()[]{}@%$#"
function basexx.to_z85( str )
if ( #str % 4 ) ~= 0 then
return nil, length_error( #str, 4 )
end
local result = {}
local value = 0
for i = 1, #str do
local b = string.byte( str, i )
value = ( value * 256 ) + b
if ( i % 4 ) == 0 then
local divisor = 85 * 85 * 85 * 85
while divisor ~= 0 do
local index = ( math.floor( value / divisor ) % 85 ) + 1
table.insert( result, z85Encoder:sub( index, index ) )
divisor = math.floor( divisor / 85 )
end
value = 0
end
end
return table.concat( result )
end
--------------------------------------------------------------------------------
return basexx
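A quick usage sketch of the vendored library, assuming this file is reachable as "basexx" on lua_package_path (as configured in nginx.conf below):

local basexx = require("basexx")
print(basexx.to_base64("hello"))      --> aGVsbG8=
print(basexx.from_base64("aGVsbG8=")) --> hello
print(basexx.to_url64("hello"))       --> aGVsbG8 (url-safe alphabet, no padding)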


@ -0,0 +1,16 @@
local _M = {}
local basexx = require("basexx")
-- parse any skylink and return base64 version
function _M.parse(skylink)
if string.len(skylink) == 55 then
local decoded = basexx.from_basexx(string.upper(skylink), "0123456789ABCDEFGHIJKLMNOPQRSTUV", 5)
return basexx.to_url64(decoded)
end
return skylink
end
return _M


@ -0,0 +1,14 @@
skylink = require("skynet/skylink")
describe("parse", function()
local base32 = "0404dsjvti046fsua4ktor9grrpe76erq9jot9cvopbhsvsu76r4r30"
local base64 = "AQBG8n_sgEM_nlEp3G0w3vLjmdvSZ46ln8ZXHn-eObZNjA"
it("should return unchanged base64 skylink", function()
assert.is.same(skylink.parse(base64), base64)
end)
it("should transform base32 skylink into base64", function()
assert.is.same(skylink.parse(base32), base64)
end)
end)
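Since parse only rewrites 55-character base32 input, any other string passes through unchanged; a possible extra case for this suite (not in the original) could read:

it("should pass through other strings unchanged", function()
  assert.is.same(skylink.parse("foo"), "foo")
end)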

1106
docker/nginx/mo Executable file

File diff suppressed because it is too large

126
docker/nginx/nginx.conf Normal file

@ -0,0 +1,126 @@
# nginx.conf -- docker-openresty
#
# This file is installed to:
# `/usr/local/openresty/nginx/conf/nginx.conf`
# and is the file loaded by nginx at startup,
# unless the user specifies otherwise.
#
# It tracks the upstream OpenResty's `nginx.conf`, but removes the `server`
# section and adds this directive:
# `include /etc/nginx/conf.d/*.conf;`
#
# The `docker-openresty` file `nginx.vh.default.conf` is copied to
# `/etc/nginx/conf.d/default.conf`. It contains the `server` section
# of the upstream `nginx.conf`.
#
# See https://github.com/openresty/docker-openresty/blob/master/README.md#nginx-config-files
#
user root;
worker_processes auto;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
# declare env variables so they can be used in the config
env SKYNET_PORTAL_API;
env SKYNET_SERVER_API;
env ACCOUNTS_ENABLED;
events {
worker_connections 8192;
}
http {
include mime.types;
default_type application/octet-stream;
lua_package_path "/etc/nginx/libs/?.lua;;";
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" $upstream_response_time '
'$upstream_bytes_sent $upstream_bytes_received '
'"$upstream_http_content_type" "$upstream_cache_status" '
'"$server_alias" "$sent_http_skynet_skylink" '
'$upstream_connect_time $upstream_header_time '
'$request_time "$hns_domain" "$skylink"';
access_log logs/access.log main;
# See Move default writable paths to a dedicated directory (#119)
# https://github.com/openresty/docker-openresty/issues/119
client_body_temp_path /var/run/openresty/nginx-client-body;
proxy_temp_path /var/run/openresty/nginx-proxy;
fastcgi_temp_path /var/run/openresty/nginx-fastcgi;
uwsgi_temp_path /var/run/openresty/nginx-uwsgi;
scgi_temp_path /var/run/openresty/nginx-scgi;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
# globally enable http 1.1 on all proxied requests
# http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version
proxy_http_version 1.1;
# proxy cache definition
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=skynet:10m max_size=50g min_free=100g inactive=48h use_temp_path=off;
# this runs before forking out nginx worker processes
init_by_lua_block {
require "cjson"
require "resty.http"
require "skynet.skylink"
}
# include skynet-portal-api and skynet-server-api header on every request
header_filter_by_lua_block {
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
}
# rate-limit specified IPs
geo $limit {
default 0;
include /etc/nginx/conf.d/include/ratelimited;
}
map $limit $limit_key {
0 "";
1 $binary_remote_addr;
}
limit_req_zone $binary_remote_addr zone=uploads_by_ip:10m rate=10r/s;
limit_req_zone $limit_key zone=uploads_by_ip_throttled:10m rate=10r/m;
limit_req_zone $binary_remote_addr zone=registry_access_by_ip:10m rate=60r/m;
limit_req_zone $limit_key zone=registry_access_by_ip_throttled:10m rate=20r/m;
limit_conn_zone $binary_remote_addr zone=upload_conn:10m;
limit_conn_zone $limit_key zone=upload_conn_rl:10m;
limit_conn_zone $binary_remote_addr zone=downloads_by_ip:10m;
limit_req_status 429;
limit_conn_status 429;
# Add X-Forwarded-* headers
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $scheme;
# skynet-jwt contains a dash, so we cannot use $cookie_skynet-jwt
# https://richardhart.me/2012/03/18/logging-nginx-cookies-with-dashes/
map $http_cookie $skynet_jwt {
default '';
~skynet-jwt=(?<match>[^\;]+) $match;
}
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/conf.extra.d/*.conf;
}
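The skynet-jwt map above uses a PCRE named capture. An equivalent extraction in plain Lua patterns, handy for testing the cookie parsing outside nginx:

local cookie = "foo=bar; skynet-jwt=abc.def.ghi; other=1"
print(string.match(cookie, "skynet%-jwt=([^;]+)")) --> abc.def.ghi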

16
docker/sia/Dockerfile Normal file

@ -0,0 +1,16 @@
FROM golang:1.16.7 AS sia-builder
ENV GOOS linux
ENV GOARCH amd64
ARG branch=portal-latest
RUN git clone https://gitlab.com/SkynetLabs/skyd.git Sia --single-branch --branch ${branch}
RUN make release --directory Sia
FROM nebulouslabs/sia:latest
COPY --from=sia-builder /go/bin/ /usr/bin/
RUN mv /usr/bin/skyd /usr/bin/siad || true && \
mv /usr/bin/skyc /usr/bin/siac || true


@ -1,2 +0,0 @@
The purpose of this file is that the `logs` dir can be committed to git and
will be present on portal servers. The rest of the files in the `logs` dir are git-ignored.

4
packages/dashboard/.env Normal file

@ -0,0 +1,4 @@
NEXT_PUBLIC_SKYNET_PORTAL_API=https://siasky.net
NEXT_PUBLIC_SKYNET_DASHBOARD_URL=https://account.siasky.net
NEXT_PUBLIC_KRATOS_BROWSER_URL=https://account.siasky.net/.ory/kratos/public
NEXT_PUBLIC_KRATOS_PUBLIC_URL=https://account.siasky.net/.ory/kratos/public

38
packages/dashboard/.gitignore vendored Normal file

@ -0,0 +1,38 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# env defaults
!.env
# local env files
.env.local
.env.development.local
.env.test.local
.env.production.local
# vercel
.vercel
.next


@ -0,0 +1,3 @@
.next
package.json
package-lock.json


@ -0,0 +1,3 @@
{
"printWidth": 120
}

Some files were not shown because too many files have changed in this diff