Merge branch 'master' into ipfs
This commit is contained in:
commit
193b1b1b05
|
@ -40,7 +40,7 @@ jobs:
|
|||
wait-on: "http://127.0.0.1:9000"
|
||||
|
||||
- name: "Deploy to Skynet"
|
||||
uses: kwypchlo/deploy-to-skynet-action@main
|
||||
uses: skynetlabs/deploy-to-skynet-action@v2
|
||||
with:
|
||||
upload-dir: packages/website/public
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
|
|
@ -0,0 +1,24 @@
|
|||
name: Lint - packages/dashboard
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- packages/dashboard/**
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/dashboard
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn prettier --check .
|
||||
- run: yarn next lint
|
|
@ -0,0 +1,23 @@
|
|||
name: Lint - packages/dnslink-api
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- packages/dnslink-api/**
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/dnslink-api
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn prettier --check .
|
|
@ -0,0 +1,23 @@
|
|||
name: Lint - packages/handshake-api
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- packages/handshake-api/**
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/handshake-api
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn prettier --check .
|
|
@ -0,0 +1,23 @@
|
|||
name: Lint - packages/health-check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- packages/health-check/**
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/health-check
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn prettier --check .
|
|
@ -0,0 +1,23 @@
|
|||
name: Lint - packages/website
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- packages/website/**
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/website
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn prettier --check .
|
|
@ -0,0 +1,37 @@
|
|||
name: Lint - Python Scripts
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "**.py"
|
||||
|
||||
jobs:
|
||||
black:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
architecture: x64
|
||||
|
||||
- run: pip install black
|
||||
- run: black --check .
|
||||
|
||||
flake8:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
architecture: x64
|
||||
|
||||
- run: pip install flake8
|
||||
|
||||
# E203: https://www.flake8rules.com/rules/E203.html - Whitespace before ':'
|
||||
# E501: https://www.flake8rules.com/rules/E501.html - Line too long
|
||||
# W503: https://www.flake8rules.com/rules/W503.html - Line break occurred before a binary operator
|
||||
# W605: https://www.flake8rules.com/rules/W605.html - Invalid escape sequence
|
||||
# E722: https://www.flake8rules.com/rules/E722.html - Do not use bare except, specify exception instead
|
||||
- run: flake8 --max-line-length 88 --ignore E203,E501,W503,W605,E722
|
|
@ -0,0 +1,33 @@
|
|||
# Install and run unit tests with busted
|
||||
# Docs: http://olivinelabs.com/busted/
|
||||
|
||||
name: Nginx Lua Unit Tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "docker/nginx/libs/**.lua"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
architecture: "x64"
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
pip install hererocks
|
||||
hererocks env --lua=5.1 -rlatest
|
||||
source env/bin/activate
|
||||
luarocks install busted
|
||||
luarocks install hasher
|
||||
|
||||
- name: Unit Tests
|
||||
run: |
|
||||
source env/bin/activate
|
||||
busted --verbose --pattern=spec --directory=docker/nginx/libs .
|
|
@ -1,25 +0,0 @@
|
|||
name: Static Code Analysis
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
package: [dashboard, dnslink-api, handshake-api, health-check, website]
|
||||
fail-fast: false
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/${{ matrix.package }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn prettier --check .
|
|
@ -0,0 +1,23 @@
|
|||
name: Test - packages/health-check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- packages/health-check/**
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: packages/health-check
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 16.x
|
||||
|
||||
- run: yarn
|
||||
- run: yarn jest
|
|
@ -53,7 +53,6 @@ typings/
|
|||
|
||||
# dotenv environment variable files
|
||||
.env*
|
||||
./docker/kratos/config/kratos.yml
|
||||
|
||||
# Mac files
|
||||
.DS_Store
|
||||
|
@ -87,12 +86,6 @@ __pycache__
|
|||
/.idea/
|
||||
/venv*
|
||||
|
||||
# CockroachDB certificates
|
||||
docker/cockroach/certs/*.crt
|
||||
docker/cockroach/certs/*.key
|
||||
docker/kratos/cr_certs/*.crt
|
||||
docker/kratos/cr_certs/*.key
|
||||
|
||||
# Oathkeeper JWKS signing token
|
||||
docker/kratos/oathkeeper/id_token.jwks.json
|
||||
/docker/kratos/config/kratos.yml
|
||||
# Setup-script log files
|
||||
setup-scripts/serverload.log
|
||||
setup-scripts/serverload.json
|
||||
|
|
22
CHANGELOG.md
22
CHANGELOG.md
|
@ -10,6 +10,28 @@ Version History
|
|||
|
||||
Latest:
|
||||
|
||||
## Oct 18, 2021:
|
||||
### v0.1.3
|
||||
**Key Updates**
|
||||
- Change skyd 307 redirect code to 308
|
||||
- Set caddy dns entry ttl limit to 15 minutes to remove stranded entries.
|
||||
- Set skyd up to connect to the local mongodb cluster for storing TUS metadata
|
||||
- Update health check disable command to require reason.
|
||||
- Move MongoDB to a separate service (use `PORTAL_MODULES=m` to use it without accounts)
|
||||
- Add proper handling for options response on /skynet/tus endpoint
|
||||
- added unpinning skylinks from account dashboard
|
||||
|
||||
**Bugs Fixed**
|
||||
- include tus header upload-concat in cors requests
|
||||
- fixed issue with caddy requesting new certificates instead of using existing ones from file storage
|
||||
- fixed the latest news link redirect in the news header
|
||||
- Fix extended checks error by rounding the reported datetime.
|
||||
|
||||
**Other**
|
||||
- Remove outdated references to NebulousLabs
|
||||
|
||||
|
||||
|
||||
## August 9th, 2021:
|
||||
### v0.1.1
|
||||
Monthly release
|
||||
|
|
164
README.md
164
README.md
|
@ -1,5 +1,13 @@
|
|||
# Skynet Portal
|
||||
|
||||
## Latest Setup Documentation
|
||||
|
||||
Latest Skynet Webportal setup documentation and the setup process Skynet Labs
|
||||
supports is located at https://docs.siasky.net/webportal-management/overview.
|
||||
|
||||
Some of the scripts and setup documentation contained in this repository
|
||||
(`skynet-webportal`) can be outdated and generally should not be used.
|
||||
|
||||
## Web application
|
||||
|
||||
Change current directory with `cd packages/website`.
|
||||
|
@ -26,157 +34,8 @@ For the purposes of complying with our code license, you can use the following S
|
|||
|
||||
`fb6c9320bc7e01fbb9cd8d8c3caaa371386928793c736837832e634aaaa484650a3177d6714a`
|
||||
|
||||
### MongoDB Setup
|
||||
|
||||
Mongo needs a couple of extra steps in order to start a secure cluster.
|
||||
|
||||
- Open port 27017 on all nodes that will take part in the cluster. Ideally, you would only open the port for the other
|
||||
nodes in the cluster.
|
||||
- Manually add a `mgkey` file under `./docker/data/mongo` with the respective secret (
|
||||
see [Mongo's keyfile access control](https://docs.mongodb.com/manual/tutorial/enforce-keyfile-access-control-in-existing-replica-set/)
|
||||
for details).
|
||||
- Manually run an initialisation `docker run` with extra environment variables that will initialise the admin user with
|
||||
a password (example below).
|
||||
- During the initialisation run mentioned above, we need to make two extra steps within the container:
|
||||
- Change the ownership of `mgkey` to `mongodb:mongodb`
|
||||
- Change its permissions to 400
|
||||
- After these steps are done we can open a mongo shell on the primary node and run `rs.add()` in order to add the new
|
||||
node to the cluster. If you don't know which node is primary, log onto any server and jump into the Mongo's container
|
||||
(`docker exec -it mongo mongo -u admin -p`) and then get the status of the replica set (`rs.status()`).
|
||||
|
||||
Example initialisation docker run command:
|
||||
|
||||
```
|
||||
docker run \
|
||||
--rm \
|
||||
--name mg \
|
||||
-p 27017:27017 \
|
||||
-e MONGO_INITDB_ROOT_USERNAME=<admin username> \
|
||||
-e MONGO_INITDB_ROOT_PASSWORD=<admin password> \
|
||||
-v /home/user/skynet-webportal/docker/data/mongo/db:/data/db \
|
||||
-v /home/user/skynet-webportal/docker/data/mongo/mgkey:/data/mgkey \
|
||||
mongo --keyFile=/data/mgkey --replSet=skynet
|
||||
```
|
||||
|
||||
Regular docker run command:
|
||||
|
||||
```
|
||||
docker run \
|
||||
--rm \
|
||||
--name mg \
|
||||
-p 27017:27017 \
|
||||
-v /home/user/skynet-webportal/docker/data/mongo/db:/data/db \
|
||||
-v /home/user/skynet-webportal/docker/data/mongo/mgkey:/data/mgkey \
|
||||
mongo --keyFile=/data/mgkey --replSet=skynet
|
||||
```
|
||||
|
||||
Cluster initialisation mongo command:
|
||||
|
||||
```
|
||||
rs.initiate(
|
||||
{
|
||||
_id : "skynet",
|
||||
members: [
|
||||
{ _id : 0, host : "mongo:27017" }
|
||||
]
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
Add more nodes when they are ready:
|
||||
|
||||
```
|
||||
rs.add("second.node.net:27017")
|
||||
```
|
||||
|
||||
### Kratos & Oathkeeper Setup
|
||||
|
||||
[Kratos](https://www.ory.sh/kratos) is our user management system of choice and
|
||||
[Oathkeeper](https://www.ory.sh/oathkeeper) is the identity and access proxy.
|
||||
|
||||
Most of the needed config is already under `docker/kratos`. The only two things that need to be changed are the config
|
||||
for Kratos that might contain you email server password, and the JWKS Oathkeeper uses to sign its JWT tokens.
|
||||
|
||||
Make sure to create your own`docker/kratos/config/kratos.yml` by copying the `kratos.yml.sample` in the same directory.
|
||||
Also make sure to never add that file to source control because it will most probably contain your email password in
|
||||
plain text!
|
||||
|
||||
To override the JWKS you will need to directly edit
|
||||
`docker/kratos/oathkeeper/id_token.jwks.json` and replace it with your generated key set. If you don't know how to
|
||||
generate a key set you can use this code:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ory/hydra/jwk"
|
||||
)
|
||||
|
||||
func main() {
|
||||
gen := jwk.RS256Generator{
|
||||
KeyLength: 2048,
|
||||
}
|
||||
jwks, err := gen.Generate("", "sig")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
jsonbuf, err := json.MarshalIndent(jwks, "", " ")
|
||||
if err != nil {
|
||||
log.Fatal("failed to generate JSON: %s", err)
|
||||
}
|
||||
os.Stdout.Write(jsonbuf)
|
||||
}
|
||||
```
|
||||
|
||||
While you can directly put the output of this programme into the file mentioned above, you can also remove the public
|
||||
key from the set and change the `kid` of the private key to not include the prefix `private:`.
|
||||
|
||||
### CockroachDB Setup
|
||||
|
||||
Kratos uses CockroachDB to store its data. For that data to be shared across all nodes that comprise your portal cluster
|
||||
setup, we need to set up a CockroachDB cluster, complete with secure communication.
|
||||
|
||||
#### Generate the certificates for secure communication
|
||||
|
||||
For a detailed walk-through, please check [this guide](https://www.cockroachlabs.com/docs/v20.2/secure-a-cluster.html)
|
||||
out.
|
||||
|
||||
Steps:
|
||||
|
||||
1. Start a local cockroach docker instance:
|
||||
`docker run -d -v "<local dir>:/cockroach/cockroach-secure" --name=crdb cockroachdb/cockroach start --insecure`
|
||||
1. Get a shall into that instance: `docker exec -it crdb /bin/bash`
|
||||
1. Go to the directory we which we mapped to a local dir: `cd /cockroach/cockroach-secure`
|
||||
1. Create the subdirectories in which to create certificates and keys: `mkdir certs my-safe-directory`
|
||||
1. Create the CA (Certificate Authority) certificate and key
|
||||
pair: `cockroach cert create-ca --certs-dir=certs --ca-key=my-safe-directory/ca.key`
|
||||
1. Create a client certificate and key pair for the root
|
||||
user: `cockroach cert create-client root --certs-dir=certs --ca-key=my-safe-directory/ca.key`
|
||||
1. Create the certificate and key pair for your
|
||||
nodes: `cockroach cert create-node cockroach mynode.siasky.net --certs-dir=certs --ca-key=my-safe-directory/ca.key`.
|
||||
Don't forget the `cockroach` node name - it's needed by our docker-compose setup. If you want to create certificates
|
||||
for more nodes, just delete the `node.*` files (after you've finished the next steps for this node!) and re-run the
|
||||
above command with the new node name.
|
||||
1. Put the contents of the `certs` folder under `docker/cockroach/certs/*` under your portal's root dir and store the
|
||||
content of `my-safe-directory` somewhere safe.
|
||||
1. Put _another copy_ of those certificates under `docker/kratos/cr_certs` and change permissions of the `*.key` files,
|
||||
so they can be read by anyone (644).
|
||||
|
||||
#### Configure your CockroachDB node
|
||||
|
||||
Open port 26257 on all nodes that will take part in the cluster. Ideally, you would only open the port for the other
|
||||
nodes in the cluster.
|
||||
|
||||
There is some configuration that needs to be added to your `.env`file, namely:
|
||||
|
||||
1. CR_IP - the public IP of your node
|
||||
1. CR_CLUSTER_NODES - a list of IPs and ports which make up your cluster, e.g.
|
||||
`95.216.13.185:26257,147.135.37.21:26257,144.76.136.122:26257`. This will be the list of nodes that will make up your
|
||||
cluster, so make sure those are accurate.
|
||||
## Running a Portal
|
||||
For those interested in running a Webportal, head over to our developer docs [here](https://docs.siasky.net/webportal-management/overview.) to learn more.
|
||||
|
||||
## Contributing
|
||||
|
||||
|
@ -190,6 +49,3 @@ Verify the Cypress test suite by doing the following:
|
|||
1. In one terminal screen run `GATSBY_API_URL=https://siasky.net website serve`
|
||||
1. In a second terminal screen run `yarn cypress run`
|
||||
|
||||
## Setting up complete skynet server
|
||||
|
||||
A setup guide with installation scripts can be found in [setup-scripts/README.md](./setup-scripts/README.md).
|
||||
|
|
|
@ -1,3 +1,23 @@
|
|||
## Oct 18, 2021:
|
||||
### v0.1.3
|
||||
**Key Updates**
|
||||
- Change skyd 307 redirect code to 308
|
||||
- Set caddy dns entry ttl limit to 15 minutes to remove stranded entries.
|
||||
- Set skyd up to connect to the local mongodb cluster for storing TUS metadata
|
||||
- Update health check disable command to require reason.
|
||||
- Move MongoDB to a separate service (use `PORTAL_MODULES=m` to use it without accounts)
|
||||
- Add proper handling for options response on /skynet/tus endpoint
|
||||
- added unpinning skylinks from account dashboard
|
||||
|
||||
**Bugs Fixed**
|
||||
- include tus header upload-concat in cors requests
|
||||
- fixed issue with caddy requesting new certificates instead of using existing ones from file storage
|
||||
- fixed the latest news link redirect in the news header
|
||||
- Fix extended checks error by rounding the reported datetime.
|
||||
|
||||
**Other**
|
||||
- Remove outdated references to NebulousLabs
|
||||
|
||||
|
||||
|
||||
## August 9th, 2021:
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
- Add missing servers and blocklist command to the manual blocklist script.
|
|
@ -0,0 +1 @@
|
|||
- fixed a bug when accessing file from skylink via subdomain with a filename that had escaped characters
|
|
@ -0,0 +1,2 @@
|
|||
- Fix `blocklist-skylink.sh` script that didn't removed blocked skylink from
|
||||
nginx cache.
|
|
@ -1 +0,0 @@
|
|||
- fixed issue with caddy requesting new certificates instead of using existing ones from file storage
|
|
@ -1 +0,0 @@
|
|||
- Fix extended checks error by rounding the reported datetime.
|
|
@ -0,0 +1,2 @@
|
|||
- fixed uploaded directory name (was "undefined" before)
|
||||
- fixed empty directory upload progress (size was not calculated for directories)
|
|
@ -0,0 +1 @@
|
|||
- expose generic skylink serving endpoint on domain aliases
|
|
@ -1 +0,0 @@
|
|||
- Change skyd 307 redirect code to 308
|
|
@ -0,0 +1 @@
|
|||
- Add abuse scanner service, activated by adding `u` to `PORTAL_MODULES`
|
|
@ -1 +0,0 @@
|
|||
- Set caddy dns entry ttl limit to 15 minutes to remove stranded entries.
|
|
@ -1 +0,0 @@
|
|||
- Update health check disable command to require reason.
|
|
@ -0,0 +1 @@
|
|||
- Add malware scanner service, activated by adding `s` to `PORTAL_MODULES`
|
|
@ -0,0 +1 @@
|
|||
- Remove ORY Kratos, ORY Oathkeeper, CockroachDB.
|
|
@ -0,0 +1 @@
|
|||
- Add `/serverload` endpoint for CPU usage and free disk space
|
|
@ -1 +0,0 @@
|
|||
- Add proper handling for options response on /skynet/tus endpoint
|
|
@ -1 +0,0 @@
|
|||
- added unpinning skylinks from account dashboard
|
|
@ -0,0 +1 @@
|
|||
- add new critical health check that scans config and makes sure that all relevant configurations are set
|
|
@ -0,0 +1 @@
|
|||
- Add abuse report configuration
|
|
@ -0,0 +1,2 @@
|
|||
- Remove hardcoded Airtable default values from blocklist script. Portal
|
||||
operators need to define their own values in portal common config (LastPass).
|
|
@ -0,0 +1 @@
|
|||
- Add health check for the blocker container
|
|
@ -0,0 +1 @@
|
|||
- Drop `Skynet-Requested-Skylink` header
|
|
@ -0,0 +1,2 @@
|
|||
- Dump disk space usage when health-checker script disables portal due to
|
||||
critical free disk space.
|
|
@ -0,0 +1 @@
|
|||
- Enable the accounting module for skyd
|
|
@ -0,0 +1 @@
|
|||
- Add link to supported setup process in Gitbook.
|
|
@ -0,0 +1 @@
|
|||
- Set `min_free` parameter on the `proxy_cache_path` directive to `100g`
|
|
@ -0,0 +1,2 @@
|
|||
- Parameterize MongoDB replicaset in `docker-compose.mongodb.yml` via
|
||||
`SKYNET_DB_REPLICASET` from `.env` file.
|
|
@ -0,0 +1 @@
|
|||
- Hot reload Nginx after pruning cache files.
|
|
@ -0,0 +1 @@
|
|||
- Added script to prune nginx cache.
|
|
@ -0,0 +1,2 @@
|
|||
- Remove hardcoded server list from `blocklist-skylink.sh` so it removes server
|
||||
list duplication and can also be called from Ansible.
|
|
@ -1 +0,0 @@
|
|||
- Remove outdated references to NebulousLabs
|
|
@ -0,0 +1 @@
|
|||
- Remove outdated portal setup documentation and point to developer docs.
|
|
@ -0,0 +1 @@
|
|||
- Block skylinks in batches to improve performance.
|
|
@ -0,0 +1 @@
|
|||
- Add trimming Airtable skylinks from Takedown Request table.
|
|
@ -0,0 +1 @@
|
|||
- Update handshake to use v3.0.1
|
32
dc
32
dc
|
@ -1,7 +1,15 @@
|
|||
#!/bin/bash
|
||||
|
||||
# The dc command is an alias to docker-compose which also scans the current portal configuration (as defined in .env)
|
||||
# and selects the right docker-compose files to include in the operation. You can use the command in the same way you
|
||||
# would use docker-compose with the only difference being that you don't need to specify compose files. For more
|
||||
# information you can run `./dc` or `./dc help`.
|
||||
|
||||
if [ -f .env ]; then
|
||||
OLD_IFS=$IFS; IFS=$'\n'; for x in `grep -v '^#.*' .env`; do export $x; done; IFS=$OLD_IFS
|
||||
OLD_IFS=$IFS
|
||||
IFS=$'\n'
|
||||
for x in $(grep -v '^#.*' .env); do export $x; done
|
||||
IFS=$OLD_IFS
|
||||
fi
|
||||
|
||||
# include base docker compose file
|
||||
|
@ -10,13 +18,33 @@ COMPOSE_FILES="-f docker-compose.yml"
|
|||
for i in $(seq 1 ${#PORTAL_MODULES}); do
|
||||
# accounts module - alias "a"
|
||||
if [[ ${PORTAL_MODULES:i-1:1} == "a" ]]; then
|
||||
COMPOSE_FILES+=" -f docker-compose.accounts.yml"
|
||||
COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.accounts.yml"
|
||||
fi
|
||||
|
||||
# blocker module - alias "b"
|
||||
if [[ ${PORTAL_MODULES:i-1:1} == "b" ]]; then
|
||||
COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.blocker.yml"
|
||||
fi
|
||||
|
||||
# jaeger module - alias "j"
|
||||
if [[ ${PORTAL_MODULES:i-1:1} == "j" ]]; then
|
||||
COMPOSE_FILES+=" -f docker-compose.jaeger.yml"
|
||||
fi
|
||||
|
||||
# malware-scanner module - alias "s"
|
||||
if [[ ${PORTAL_MODULES:i-1:1} == "s" ]]; then
|
||||
COMPOSE_FILES+=" -f docker-compose.blocker.yml -f docker-compose.mongodb.yml -f docker-compose.malware-scanner.yml"
|
||||
fi
|
||||
|
||||
# mongodb module - alias "m"
|
||||
if [[ ${PORTAL_MODULES:i-1:1} == "m" ]]; then
|
||||
COMPOSE_FILES+=" -f docker-compose.mongodb.yml"
|
||||
fi
|
||||
|
||||
# abuse module - alias "u"
|
||||
if [[ ${PORTAL_MODULES:i-1:1} == "u" ]]; then
|
||||
COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.blocker.yml -f docker-compose.abuse.yml"
|
||||
fi
|
||||
done
|
||||
|
||||
# override file if exists
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
version: "3.7"
|
||||
|
||||
x-logging: &default-logging
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
|
||||
services:
|
||||
abuse:
|
||||
build:
|
||||
context: ./docker/abuse
|
||||
dockerfile: Dockerfile
|
||||
container_name: abuse
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- ABUSE_LOG_LEVEL=${ABUSE_LOG_LEVEL}
|
||||
- ABUSE_MAILADDRESS=${ABUSE_MAILADDRESS}
|
||||
- ABUSE_MAILBOX=${ABUSE_MAILBOX}
|
||||
- ABUSE_SPONSOR=${ABUSE_SPONSOR}
|
||||
- BLOCKER_HOST=10.10.10.110
|
||||
- BLOCKER_PORT=4000
|
||||
- EMAIL_SERVER=${EMAIL_SERVER}
|
||||
- EMAIL_USERNAME=${EMAIL_USERNAME}
|
||||
- EMAIL_PASSWORD=${EMAIL_PASSWORD}
|
||||
- SKYNET_DB_HOST=${SKYNET_DB_HOST}
|
||||
- SKYNET_DB_PORT=${SKYNET_DB_PORT}
|
||||
- SKYNET_DB_USER=${SKYNET_DB_USER}
|
||||
- SKYNET_DB_PASS=${SKYNET_DB_PASS}
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.120
|
||||
depends_on:
|
||||
- mongo
|
||||
- blocker
|
|
@ -10,12 +10,14 @@ services:
|
|||
nginx:
|
||||
environment:
|
||||
- ACCOUNTS_ENABLED=true
|
||||
- ACCOUNTS_LIMIT_ACCESS=${ACCOUNTS_LIMIT_ACCESS:-authenticated} # default to authenticated access only
|
||||
depends_on:
|
||||
- accounts
|
||||
|
||||
health-check:
|
||||
environment:
|
||||
- ACCOUNTS_ENABLED=true
|
||||
- ACCOUNTS_LIMIT_ACCESS=${ACCOUNTS_LIMIT_ACCESS:-authenticated} # default to authenticated access only
|
||||
|
||||
accounts:
|
||||
build:
|
||||
|
@ -29,18 +31,23 @@ services:
|
|||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- SKYNET_DB_HOST=${SKYNET_DB_HOST}
|
||||
- SKYNET_DB_PORT=${SKYNET_DB_PORT}
|
||||
- SKYNET_DB_USER=${SKYNET_DB_USER}
|
||||
- SKYNET_DB_PASS=${SKYNET_DB_PASS}
|
||||
- ACCOUNTS_EMAIL_URI=${ACCOUNTS_EMAIL_URI}
|
||||
- ACCOUNTS_JWKS_FILE=/conf/jwks.json
|
||||
- COOKIE_DOMAIN=${COOKIE_DOMAIN}
|
||||
- COOKIE_HASH_KEY=${COOKIE_HASH_KEY}
|
||||
- COOKIE_ENC_KEY=${COOKIE_ENC_KEY}
|
||||
- PORTAL_DOMAIN=${PORTAL_DOMAIN}
|
||||
- SERVER_DOMAIN=${SERVER_DOMAIN}
|
||||
- SKYNET_DB_HOST=${SKYNET_DB_HOST:-mongo}
|
||||
- SKYNET_DB_PORT=${SKYNET_DB_PORT:-27017}
|
||||
- SKYNET_DB_USER=${SKYNET_DB_USER}
|
||||
- SKYNET_DB_PASS=${SKYNET_DB_PASS}
|
||||
- STRIPE_API_KEY=${STRIPE_API_KEY}
|
||||
- STRIPE_WEBHOOK_SECRET=${STRIPE_WEBHOOK_SECRET}
|
||||
- SKYNET_ACCOUNTS_LOG_LEVEL=${SKYNET_ACCOUNTS_LOG_LEVEL}
|
||||
- KRATOS_ADDR=${KRATOS_ADDR}
|
||||
- OATHKEEPER_ADDR=${OATHKEEPER_ADDR}
|
||||
- SKYNET_ACCOUNTS_LOG_LEVEL=${SKYNET_ACCOUNTS_LOG_LEVEL:-info}
|
||||
volumes:
|
||||
- ./docker/data/accounts:/data
|
||||
- ./docker/accounts/conf:/conf
|
||||
expose:
|
||||
- 3000
|
||||
networks:
|
||||
|
@ -48,65 +55,6 @@ services:
|
|||
ipv4_address: 10.10.10.70
|
||||
depends_on:
|
||||
- mongo
|
||||
- oathkeeper
|
||||
|
||||
mongo:
|
||||
image: mongo:4.4.1
|
||||
command: --keyFile=/data/mgkey --replSet=skynet
|
||||
container_name: mongo
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
volumes:
|
||||
- ./docker/data/mongo/db:/data/db
|
||||
- ./docker/data/mongo/mgkey:/data/mgkey:rw
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.71
|
||||
ports:
|
||||
- "27017:27017"
|
||||
|
||||
kratos-migrate:
|
||||
image: oryd/kratos:v0.5.5-alpha.1
|
||||
container_name: kratos-migrate
|
||||
restart: "no"
|
||||
logging: *default-logging
|
||||
environment:
|
||||
- DSN=cockroach://root@cockroach:26257/defaultdb?max_conns=20&max_idle_conns=4&sslmode=verify-full&sslcert=/certs/node.crt&sslkey=/certs/node.key&sslrootcert=/certs/ca.crt
|
||||
- SQA_OPT_OUT=true
|
||||
volumes:
|
||||
- ./docker/kratos/config:/etc/config/kratos
|
||||
- ./docker/data/cockroach/sqlite:/var/lib/sqlite
|
||||
- ./docker/kratos/cr_certs:/certs
|
||||
command: -c /etc/config/kratos/kratos.yml migrate sql -e --yes
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.80
|
||||
depends_on:
|
||||
- cockroach
|
||||
|
||||
kratos:
|
||||
image: oryd/kratos:v0.5.5-alpha.1
|
||||
container_name: kratos
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
expose:
|
||||
- 4433 # public
|
||||
- 4434 # admin
|
||||
environment:
|
||||
- DSN=cockroach://root@cockroach:26257/defaultdb?max_conns=20&max_idle_conns=4&sslmode=verify-full&sslcert=/certs/node.crt&sslkey=/certs/node.key&sslrootcert=/certs/ca.crt
|
||||
- LOG_LEVEL=trace
|
||||
- SERVE_PUBLIC_BASE_URL=${SKYNET_DASHBOARD_URL}/.ory/kratos/public/
|
||||
- SQA_OPT_OUT=true
|
||||
command: serve -c /etc/config/kratos/kratos.yml
|
||||
volumes:
|
||||
- ./docker/kratos/config:/etc/config/kratos
|
||||
- ./docker/data/cockroach/sqlite:/var/lib/sqlite
|
||||
- ./docker/kratos/cr_certs:/certs
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.81
|
||||
depends_on:
|
||||
- kratos-migrate
|
||||
|
||||
dashboard:
|
||||
build:
|
||||
|
@ -119,51 +67,14 @@ services:
|
|||
- .env
|
||||
environment:
|
||||
- NEXT_PUBLIC_SKYNET_PORTAL_API=${SKYNET_PORTAL_API}
|
||||
- NEXT_PUBLIC_SKYNET_DASHBOARD_URL=${SKYNET_DASHBOARD_URL}
|
||||
- NEXT_PUBLIC_KRATOS_BROWSER_URL=${SKYNET_DASHBOARD_URL}/.ory/kratos/public
|
||||
- NEXT_PUBLIC_KRATOS_PUBLIC_URL=http://oathkeeper:4455/.ory/kratos/public
|
||||
- NEXT_PUBLIC_PORTAL_DOMAIN=${PORTAL_DOMAIN}
|
||||
- NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=${STRIPE_PUBLISHABLE_KEY}
|
||||
volumes:
|
||||
- ./docker/data/dashboard/.next:/usr/app/.next
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.85
|
||||
expose:
|
||||
- 3000
|
||||
depends_on:
|
||||
- oathkeeper
|
||||
|
||||
oathkeeper:
|
||||
image: oryd/oathkeeper:v0.38
|
||||
container_name: oathkeeper
|
||||
expose:
|
||||
- 4455
|
||||
- 4456
|
||||
command: serve proxy -c "/etc/config/oathkeeper/oathkeeper.yml"
|
||||
environment:
|
||||
- LOG_LEVEL=debug
|
||||
volumes:
|
||||
- ./docker/kratos/oathkeeper:/etc/config/oathkeeper
|
||||
restart: on-failure
|
||||
logging: *default-logging
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.83
|
||||
depends_on:
|
||||
- kratos
|
||||
|
||||
cockroach:
|
||||
image: cockroachdb/cockroach:v20.2.3
|
||||
container_name: cockroach
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
env_file:
|
||||
- .env
|
||||
command: start --advertise-addr=${CR_IP} --join=${CR_CLUSTER_NODES} --certs-dir=/certs --listen-addr=0.0.0.0:26257 --http-addr=0.0.0.0:8080
|
||||
volumes:
|
||||
- ./docker/data/cockroach/sqlite:/cockroach/cockroach-data
|
||||
- ./docker/cockroach/certs:/certs
|
||||
ports:
|
||||
- "4080:8080"
|
||||
- "26257:26257"
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.84
|
||||
- mongo
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
version: "3.7"
|
||||
|
||||
x-logging: &default-logging
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
|
||||
services:
|
||||
health-check:
|
||||
environment:
|
||||
- BLOCKER_HOST=10.10.10.110
|
||||
- BLOCKER_PORT=4000
|
||||
|
||||
blocker:
|
||||
build:
|
||||
context: ./docker/blocker
|
||||
dockerfile: Dockerfile
|
||||
container_name: blocker
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
env_file:
|
||||
- .env
|
||||
volumes:
|
||||
- ./docker/data/nginx/blocker:/data/nginx/blocker
|
||||
expose:
|
||||
- 4000
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.110
|
||||
depends_on:
|
||||
- mongo
|
||||
- sia
|
|
@ -0,0 +1,52 @@
|
|||
version: "3.7"
|
||||
|
||||
x-logging: &default-logging
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
|
||||
services:
|
||||
clamav:
|
||||
image: clamav/clamav:stable_base
|
||||
container_name: clamav
|
||||
restart: on-failure
|
||||
logging: *default-logging
|
||||
volumes:
|
||||
- ./docker/data/clamav/clamav/defs:/var/lib/clamav
|
||||
- ./docker/clamav/clamd.conf:/etc/clamav/clamd.conf:ro
|
||||
expose:
|
||||
- 3310 # NEVER expose this outside of the local network!
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: "${CLAMAV_CPU:-0.50}"
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.100
|
||||
|
||||
malware-scanner:
|
||||
build:
|
||||
context: ./docker/malware-scanner
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
branch: main
|
||||
container_name: malware-scanner
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- CLAMAV_IP=${CLAMAV_IP:-10.10.10.100}
|
||||
- CLAMAV_PORT=${CLAMAV_PORT:-3310}
|
||||
- BLOCKER_IP=10.10.10.110
|
||||
- BLOCKER_PORT=4000
|
||||
expose:
|
||||
- 4000
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.101
|
||||
depends_on:
|
||||
- mongo
|
||||
- clamav
|
||||
- blocker
|
|
@ -0,0 +1,29 @@
|
|||
version: "3.7"
|
||||
|
||||
x-logging: &default-logging
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
|
||||
services:
|
||||
sia:
|
||||
environment:
|
||||
- MONGODB_URI=mongodb://${SKYNET_DB_HOST}:${SKYNET_DB_PORT}
|
||||
- MONGODB_USER=${SKYNET_DB_USER}
|
||||
- MONGODB_PASSWORD=${SKYNET_DB_PASS}
|
||||
|
||||
mongo:
|
||||
image: mongo:4.4.1
|
||||
command: --keyFile=/data/mgkey --replSet=${SKYNET_DB_REPLICASET:-skynet}
|
||||
container_name: mongo
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
volumes:
|
||||
- ./docker/data/mongo/db:/data/db
|
||||
- ./docker/data/mongo/mgkey:/data/mgkey:rw
|
||||
networks:
|
||||
shared:
|
||||
ipv4_address: 10.10.10.71
|
||||
ports:
|
||||
- "${SKYNET_DB_PORT}:27017"
|
|
@ -24,7 +24,11 @@ services:
|
|||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
environment:
|
||||
- SIA_MODULES=gctwr
|
||||
- SIA_MODULES=gctwra
|
||||
- SKYD_DISK_CACHE_ENABLED=${SKYD_DISK_CACHE_ENABLED:-false}
|
||||
- SKYD_DISK_CACHE_SIZE=${SKYD_DISK_CACHE_SIZE:-53690000000} # 50GB
|
||||
- SKYD_DISK_CACHE_MIN_HITS=${SKYD_DISK_CACHE_MIN_HITS:-3}
|
||||
- SKYD_DISK_CACHE_HIT_PERIOD=${SKYD_DISK_CACHE_HIT_PERIOD:-3600} # 1h
|
||||
env_file:
|
||||
- .env
|
||||
volumes:
|
||||
|
@ -60,9 +64,11 @@ services:
|
|||
logging: *default-logging
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- SKYD_DISK_CACHE_ENABLED=${SKYD_DISK_CACHE_ENABLED:-false}
|
||||
volumes:
|
||||
- ./docker/nginx/nginx.conf:/usr/local/openresty/nginx/conf/nginx.conf:ro
|
||||
- ./docker/data/nginx/cache:/data/nginx/cache
|
||||
- ./docker/data/nginx/blocker:/data/nginx/blocker
|
||||
- ./docker/data/nginx/logs:/usr/local/openresty/nginx/logs
|
||||
- ./docker/data/nginx/skynet:/data/nginx/skynet:ro
|
||||
- ./docker/data/sia/apipassword:/data/sia/apipassword:ro
|
||||
|
@ -100,7 +106,7 @@ services:
|
|||
build:
|
||||
context: ./docker/handshake
|
||||
dockerfile: Dockerfile
|
||||
command: --chain-migrate=1 --wallet-migrate=1
|
||||
command: --chain-migrate=2 --wallet-migrate=1
|
||||
container_name: handshake
|
||||
restart: unless-stopped
|
||||
logging: *default-logging
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
FROM golang:1.16.7
|
||||
LABEL maintainer="SkynetLabs <devs@siasky.net>"
|
||||
|
||||
ENV GOOS linux
|
||||
ENV GOARCH amd64
|
||||
|
||||
ARG branch=main
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
RUN git clone --single-branch --branch ${branch} https://github.com/SkynetLabs/abuse-scanner.git && \
|
||||
cd abuse-scanner && \
|
||||
go mod download && \
|
||||
make release
|
||||
|
||||
ENTRYPOINT ["abuse-scanner"]
|
|
@ -1,5 +1,5 @@
|
|||
FROM golang:1.16.7
|
||||
LABEL maintainer="NebulousLabs <devs@nebulous.tech>"
|
||||
LABEL maintainer="SkynetLabs <devs@siasky.net>"
|
||||
|
||||
ENV GOOS linux
|
||||
ENV GOARCH amd64
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
FROM golang:1.16.7
|
||||
LABEL maintainer="SkynetLabs <devs@siasky.net>"
|
||||
|
||||
ENV GOOS linux
|
||||
ENV GOARCH amd64
|
||||
|
||||
ARG branch=main
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
RUN git clone --single-branch --branch ${branch} https://github.com/SkynetLabs/blocker.git && \
|
||||
cd blocker && \
|
||||
go mod download && \
|
||||
make release
|
||||
|
||||
ENTRYPOINT ["blocker"]
|
|
@ -1,9 +1,9 @@
|
|||
FROM caddy:2.4.5-builder AS caddy-builder
|
||||
FROM caddy:2.4.6-builder AS caddy-builder
|
||||
|
||||
# available dns resolvers: https://github.com/caddy-dns
|
||||
RUN xcaddy build --with github.com/caddy-dns/route53
|
||||
|
||||
FROM caddy:2.4.5-alpine
|
||||
FROM caddy:2.4.6-alpine
|
||||
|
||||
COPY --from=caddy-builder /usr/bin/caddy /usr/bin/caddy
|
||||
|
||||
|
|
|
@ -25,7 +25,8 @@
|
|||
"dns": {
|
||||
"provider": {
|
||||
"name": "route53"
|
||||
}
|
||||
},
|
||||
"ttl": "30m"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,794 @@
|
|||
##
|
||||
## Example config file for the Clam AV daemon
|
||||
## Please read the clamd.conf(5) manual before editing this file.
|
||||
##
|
||||
|
||||
|
||||
# Comment or remove the line below.
|
||||
# Example
|
||||
|
||||
# Uncomment this option to enable logging.
|
||||
# LogFile must be writable for the user running daemon.
|
||||
# A full path is required.
|
||||
# Default: disabled
|
||||
LogFile /var/log/clamav/clamd.log
|
||||
|
||||
# By default the log file is locked for writing - the lock protects against
|
||||
# running clamd multiple times (if want to run another clamd, please
|
||||
# copy the configuration file, change the LogFile variable, and run
|
||||
# the daemon with --config-file option).
|
||||
# This option disables log file locking.
|
||||
# Default: no
|
||||
#LogFileUnlock yes
|
||||
|
||||
# Maximum size of the log file.
|
||||
# Value of 0 disables the limit.
|
||||
# You may use 'M' or 'm' for megabytes (1M = 1m = 1048576 bytes)
|
||||
# and 'K' or 'k' for kilobytes (1K = 1k = 1024 bytes). To specify the size
|
||||
# in bytes just don't use modifiers. If LogFileMaxSize is enabled, log
|
||||
# rotation (the LogRotate option) will always be enabled.
|
||||
# Default: 1M
|
||||
LogFileMaxSize 50M
|
||||
|
||||
# Log time with each message.
|
||||
# Default: no
|
||||
LogTime yes
|
||||
|
||||
# Also log clean files. Useful in debugging but drastically increases the
|
||||
# log size.
|
||||
# Default: no
|
||||
#LogClean yes
|
||||
|
||||
# Use system logger (can work together with LogFile).
|
||||
# Default: no
|
||||
#LogSyslog yes
|
||||
|
||||
# Specify the type of syslog messages - please refer to 'man syslog'
|
||||
# for facility names.
|
||||
# Default: LOG_LOCAL6
|
||||
#LogFacility LOG_MAIL
|
||||
|
||||
# Enable verbose logging.
|
||||
# Default: no
|
||||
#LogVerbose yes
|
||||
|
||||
# Enable log rotation. Always enabled when LogFileMaxSize is enabled.
|
||||
# Default: no
|
||||
#LogRotate yes
|
||||
|
||||
# Enable Prelude output.
|
||||
# Default: no
|
||||
#PreludeEnable yes
|
||||
#
|
||||
# Set the name of the analyzer used by prelude-admin.
|
||||
# Default: ClamAV
|
||||
#PreludeAnalyzerName ClamAV
|
||||
|
||||
# Log additional information about the infected file, such as its
|
||||
# size and hash, together with the virus name.
|
||||
#ExtendedDetectionInfo yes
|
||||
|
||||
# This option allows you to save a process identifier of the listening
|
||||
# daemon (main thread).
|
||||
# This file will be owned by root, as long as clamd was started by root.
|
||||
# It is recommended that the directory where this file is stored is
|
||||
# also owned by root to keep other users from tampering with it.
|
||||
# Default: disabled
|
||||
PidFile /run/lock/clamd.pid
|
||||
|
||||
# Optional path to the global temporary directory.
|
||||
# Default: system specific (usually /tmp or /var/tmp).
|
||||
#TemporaryDirectory /var/tmp
|
||||
|
||||
# Path to the database directory.
|
||||
# Default: hardcoded (depends on installation options)
|
||||
#DatabaseDirectory /var/lib/clamav
|
||||
|
||||
# Only load the official signatures published by the ClamAV project.
|
||||
# Default: no
|
||||
#OfficialDatabaseOnly no
|
||||
|
||||
# The daemon can work in local mode, network mode or both.
|
||||
# Due to security reasons we recommend the local mode.
|
||||
|
||||
# Path to a local socket file the daemon will listen on.
|
||||
# Default: disabled (must be specified by a user)
|
||||
LocalSocket /run/clamav/clamd.sock
|
||||
|
||||
# Sets the group ownership on the unix socket.
|
||||
# Default: disabled (the primary group of the user running clamd)
|
||||
#LocalSocketGroup virusgroup
|
||||
|
||||
# Sets the permissions on the unix socket to the specified mode.
|
||||
# Default: disabled (socket is world accessible)
|
||||
#LocalSocketMode 660
|
||||
|
||||
# Remove stale socket after unclean shutdown.
|
||||
# Default: yes
|
||||
#FixStaleSocket yes
|
||||
|
||||
# TCP port address.
|
||||
# Default: no
|
||||
TCPSocket 3310
|
||||
|
||||
# TCP address.
|
||||
# By default we bind to INADDR_ANY, probably not wise.
|
||||
# Enable the following to provide some degree of protection
|
||||
# from the outside world. This option can be specified multiple
|
||||
# times if you want to listen on multiple IPs. IPv6 is now supported.
|
||||
# Default: no
|
||||
TCPAddr 0.0.0.0
|
||||
|
||||
# Maximum length the queue of pending connections may grow to.
|
||||
# Default: 200
|
||||
#MaxConnectionQueueLength 30
|
||||
|
||||
# Clamd uses FTP-like protocol to receive data from remote clients.
|
||||
# If you are using clamav-milter to balance load between remote clamd daemons
|
||||
# on firewall servers you may need to tune the options below.
|
||||
|
||||
# Close the connection when the data size limit is exceeded.
|
||||
# The value should match your MTA's limit for a maximum attachment size.
|
||||
# Default: 25M
|
||||
StreamMaxLength 100M
|
||||
|
||||
# Limit port range.
|
||||
# Default: 1024
|
||||
#StreamMinPort 30000
|
||||
# Default: 2048
|
||||
#StreamMaxPort 32000
|
||||
|
||||
# Maximum number of threads running at the same time.
|
||||
# Default: 10
|
||||
#MaxThreads 20
|
||||
|
||||
# Waiting for data from a client socket will timeout after this time (seconds).
|
||||
# Default: 120
|
||||
#ReadTimeout 300
|
||||
|
||||
# This option specifies the time (in seconds) after which clamd should
|
||||
# timeout if a client doesn't provide any initial command after connecting.
|
||||
# Default: 30
|
||||
#CommandReadTimeout 30
|
||||
|
||||
# This option specifies how long to wait (in milliseconds) if the send buffer
|
||||
# is full.
|
||||
# Keep this value low to prevent clamd hanging.
|
||||
#
|
||||
# Default: 500
|
||||
#SendBufTimeout 200
|
||||
|
||||
# Maximum number of queued items (including those being processed by
|
||||
# MaxThreads threads).
|
||||
# It is recommended to have this value at least twice MaxThreads if possible.
|
||||
# WARNING: you shouldn't increase this too much to avoid running out of file
|
||||
# descriptors, the following condition should hold:
|
||||
# MaxThreads*MaxRecursion + (MaxQueue - MaxThreads) + 6< RLIMIT_NOFILE (usual
|
||||
# max is 1024).
|
||||
#
|
||||
# Default: 100
|
||||
#MaxQueue 200
|
||||
|
||||
# Waiting for a new job will timeout after this time (seconds).
|
||||
# Default: 30
|
||||
#IdleTimeout 60
|
||||
|
||||
# Don't scan files and directories matching regex
|
||||
# This directive can be used multiple times
|
||||
# Default: scan all
|
||||
#ExcludePath ^/proc/
|
||||
#ExcludePath ^/sys/
|
||||
|
||||
# Maximum depth directories are scanned at.
|
||||
# Default: 15
|
||||
#MaxDirectoryRecursion 20
|
||||
|
||||
# Follow directory symlinks.
|
||||
# Default: no
|
||||
#FollowDirectorySymlinks yes
|
||||
|
||||
# Follow regular file symlinks.
|
||||
# Default: no
|
||||
#FollowFileSymlinks yes
|
||||
|
||||
# Scan files and directories on other filesystems.
|
||||
# Default: yes
|
||||
#CrossFilesystems yes
|
||||
|
||||
# Perform a database check.
|
||||
# Default: 600 (10 min)
|
||||
#SelfCheck 600
|
||||
|
||||
# Enable non-blocking (multi-threaded/concurrent) database reloads.
|
||||
# This feature will temporarily load a second scanning engine while scanning
|
||||
# continues using the first engine. Once loaded, the new engine takes over.
|
||||
# The old engine is removed as soon as all scans using the old engine have
|
||||
# completed.
|
||||
# This feature requires more RAM, so this option is provided in case users are
|
||||
# willing to block scans during reload in exchange for lower RAM requirements.
|
||||
# Default: yes
|
||||
ConcurrentDatabaseReload no
|
||||
|
||||
# Execute a command when virus is found. In the command string %v will
|
||||
# be replaced with the virus name and %f will be replaced with the file name.
|
||||
# Additionally, two environment variables will be defined: $CLAM_VIRUSEVENT_FILENAME
|
||||
# and $CLAM_VIRUSEVENT_VIRUSNAME.
|
||||
# Default: no
|
||||
#VirusEvent /usr/local/bin/send_sms 123456789 "VIRUS ALERT: %v in %f"
|
||||
|
||||
# Run as another user (clamd must be started by root for this option to work)
|
||||
# Default: don't drop privileges
|
||||
User clamav
|
||||
|
||||
# Stop daemon when libclamav reports out of memory condition.
|
||||
#ExitOnOOM yes
|
||||
|
||||
# Don't fork into background.
|
||||
# Default: no
|
||||
#Foreground yes
|
||||
|
||||
# Enable debug messages in libclamav.
|
||||
# Default: no
|
||||
#Debug yes
|
||||
|
||||
# Do not remove temporary files (for debug purposes).
|
||||
# Default: no
|
||||
#LeaveTemporaryFiles yes
|
||||
|
||||
# Permit use of the ALLMATCHSCAN command. If set to no, clamd will reject
|
||||
# any ALLMATCHSCAN command as invalid.
|
||||
# Default: yes
|
||||
#AllowAllMatchScan no
|
||||
|
||||
# Detect Possibly Unwanted Applications.
|
||||
# Default: no
|
||||
#DetectPUA yes
|
||||
|
||||
# Exclude a specific PUA category. This directive can be used multiple times.
|
||||
# See https://github.com/vrtadmin/clamav-faq/blob/master/faq/faq-pua.md for
|
||||
# the complete list of PUA categories.
|
||||
# Default: Load all categories (if DetectPUA is activated)
|
||||
#ExcludePUA NetTool
|
||||
#ExcludePUA PWTool
|
||||
|
||||
# Only include a specific PUA category. This directive can be used multiple
|
||||
# times.
|
||||
# Default: Load all categories (if DetectPUA is activated)
|
||||
#IncludePUA Spy
|
||||
#IncludePUA Scanner
|
||||
#IncludePUA RAT
|
||||
|
||||
# This option causes memory or nested map scans to dump the content to disk.
|
||||
# If you turn on this option, more data is written to disk and is available
|
||||
# when the LeaveTemporaryFiles option is enabled.
|
||||
#ForceToDisk yes
|
||||
|
||||
# This option allows you to disable the caching feature of the engine. By
|
||||
# default, the engine will store an MD5 in a cache of any files that are
|
||||
# not flagged as virus or that hit limits checks. Disabling the cache will
|
||||
# have a negative performance impact on large scans.
|
||||
# Default: no
|
||||
#DisableCache yes
|
||||
|
||||
# In some cases (eg. complex malware, exploits in graphic files, and others),
|
||||
# ClamAV uses special algorithms to detect abnormal patterns and behaviors that
|
||||
# may be malicious. This option enables alerting on such heuristically
|
||||
# detected potential threats.
|
||||
# Default: yes
|
||||
#HeuristicAlerts yes
|
||||
|
||||
# Allow heuristic alerts to take precedence.
|
||||
# When enabled, if a heuristic scan (such as phishingScan) detects
|
||||
# a possible virus/phish it will stop scan immediately. Recommended, saves CPU
|
||||
# scan-time.
|
||||
# When disabled, virus/phish detected by heuristic scans will be reported only
|
||||
# at the end of a scan. If an archive contains both a heuristically detected
|
||||
# virus/phish, and a real malware, the real malware will be reported
|
||||
#
|
||||
# Keep this disabled if you intend to handle "Heuristics.*" viruses
|
||||
# differently from "real" malware.
|
||||
# If a non-heuristically-detected virus (signature-based) is found first,
|
||||
# the scan is interrupted immediately, regardless of this config option.
|
||||
#
|
||||
# Default: no
|
||||
#HeuristicScanPrecedence yes
|
||||
|
||||
|
||||
##
|
||||
## Heuristic Alerts
|
||||
##
|
||||
|
||||
# With this option clamav will try to detect broken executables (both PE and
|
||||
# ELF) and alert on them with the Broken.Executable heuristic signature.
|
||||
# Default: no
|
||||
#AlertBrokenExecutables yes
|
||||
|
||||
# With this option clamav will try to detect broken media file (JPEG,
|
||||
# TIFF, PNG, GIF) and alert on them with a Broken.Media heuristic signature.
|
||||
# Default: no
|
||||
#AlertBrokenMedia yes
|
||||
|
||||
# Alert on encrypted archives _and_ documents with heuristic signature
|
||||
# (encrypted .zip, .7zip, .rar, .pdf).
|
||||
# Default: no
|
||||
#AlertEncrypted yes
|
||||
|
||||
# Alert on encrypted archives with heuristic signature (encrypted .zip, .7zip,
|
||||
# .rar).
|
||||
# Default: no
|
||||
#AlertEncryptedArchive yes
|
||||
|
||||
# Alert on encrypted archives with heuristic signature (encrypted .pdf).
|
||||
# Default: no
|
||||
#AlertEncryptedDoc yes
|
||||
|
||||
# With this option enabled OLE2 files containing VBA macros, which were not
|
||||
# detected by signatures will be marked as "Heuristics.OLE2.ContainsMacros".
|
||||
# Default: no
|
||||
#AlertOLE2Macros yes
|
||||
|
||||
# Alert on SSL mismatches in URLs, even if the URL isn't in the database.
|
||||
# This can lead to false positives.
|
||||
# Default: no
|
||||
#AlertPhishingSSLMismatch yes
|
||||
|
||||
# Alert on cloaked URLs, even if URL isn't in database.
|
||||
# This can lead to false positives.
|
||||
# Default: no
|
||||
#AlertPhishingCloak yes
|
||||
|
||||
# Alert on raw DMG image files containing partition intersections
|
||||
# Default: no
|
||||
#AlertPartitionIntersection yes
|
||||
|
||||
|
||||
##
|
||||
## Executable files
|
||||
##
|
||||
|
||||
# PE stands for Portable Executable - it's an executable file format used
|
||||
# in all 32 and 64-bit versions of Windows operating systems. This option
|
||||
# allows ClamAV to perform a deeper analysis of executable files and it's also
|
||||
# required for decompression of popular executable packers such as UPX, FSG,
|
||||
# and Petite. If you turn off this option, the original files will still be
|
||||
# scanned, but without additional processing.
|
||||
# Default: yes
|
||||
#ScanPE yes
|
||||
|
||||
# Certain PE files contain an authenticode signature. By default, we check
|
||||
# the signature chain in the PE file against a database of trusted and
|
||||
# revoked certificates if the file being scanned is marked as a virus.
|
||||
# If any certificate in the chain validates against any trusted root, but
|
||||
# does not match any revoked certificate, the file is marked as trusted.
|
||||
# If the file does match a revoked certificate, the file is marked as virus.
|
||||
# The following setting completely turns off authenticode verification.
|
||||
# Default: no
|
||||
#DisableCertCheck yes
|
||||
|
||||
# Executable and Linking Format is a standard format for UN*X executables.
|
||||
# This option allows you to control the scanning of ELF files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanELF yes
|
||||
|
||||
|
||||
##
|
||||
## Documents
|
||||
##
|
||||
|
||||
# This option enables scanning of OLE2 files, such as Microsoft Office
|
||||
# documents and .msi files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanOLE2 yes
|
||||
|
||||
# This option enables scanning within PDF files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without decoding and additional processing.
|
||||
# Default: yes
|
||||
#ScanPDF yes
|
||||
|
||||
# This option enables scanning within SWF files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without decoding and additional processing.
|
||||
# Default: yes
|
||||
#ScanSWF yes
|
||||
|
||||
# This option enables scanning xml-based document files supported by libclamav.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanXMLDOCS yes
|
||||
|
||||
# This option enables scanning of HWP3 files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanHWP3 yes
|
||||
|
||||
|
||||
##
|
||||
## Mail files
|
||||
##
|
||||
|
||||
# Enable internal e-mail scanner.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without parsing individual messages/attachments.
|
||||
# Default: yes
|
||||
#ScanMail yes
|
||||
|
||||
# Scan RFC1341 messages split over many emails.
|
||||
# You will need to periodically clean up $TemporaryDirectory/clamav-partial
|
||||
# directory.
|
||||
# WARNING: This option may open your system to a DoS attack.
|
||||
# Never use it on loaded servers.
|
||||
# Default: no
|
||||
#ScanPartialMessages yes
|
||||
|
||||
# With this option enabled ClamAV will try to detect phishing attempts by using
|
||||
# HTML.Phishing and Email.Phishing NDB signatures.
|
||||
# Default: yes
|
||||
#PhishingSignatures no
|
||||
|
||||
# With this option enabled ClamAV will try to detect phishing attempts by
|
||||
# analyzing URLs found in emails using WDB and PDB signature databases.
|
||||
# Default: yes
|
||||
#PhishingScanURLs no
|
||||
|
||||
|
||||
##
|
||||
## Data Loss Prevention (DLP)
|
||||
##
|
||||
|
||||
# Enable the DLP module
|
||||
# Default: No
|
||||
#StructuredDataDetection yes
|
||||
|
||||
# This option sets the lowest number of Credit Card numbers found in a file
|
||||
# to generate a detect.
|
||||
# Default: 3
|
||||
#StructuredMinCreditCardCount 5
|
||||
|
||||
# With this option enabled the DLP module will search for valid Credit Card
|
||||
# numbers only. Debit and Private Label cards will not be searched.
|
||||
# Default: no
|
||||
#StructuredCCOnly yes
|
||||
|
||||
# This option sets the lowest number of Social Security Numbers found
|
||||
# in a file to generate a detect.
|
||||
# Default: 3
|
||||
#StructuredMinSSNCount 5
|
||||
|
||||
# With this option enabled the DLP module will search for valid
|
||||
# SSNs formatted as xxx-yy-zzzz
|
||||
# Default: yes
|
||||
#StructuredSSNFormatNormal yes
|
||||
|
||||
# With this option enabled the DLP module will search for valid
|
||||
# SSNs formatted as xxxyyzzzz
|
||||
# Default: no
|
||||
#StructuredSSNFormatStripped yes
|
||||
|
||||
|
||||
##
|
||||
## HTML
|
||||
##
|
||||
|
||||
# Perform HTML normalisation and decryption of MS Script Encoder code.
|
||||
# Default: yes
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
#ScanHTML yes
|
||||
|
||||
|
||||
##
|
||||
## Archives
|
||||
##
|
||||
|
||||
# ClamAV can scan within archives and compressed files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without unpacking and additional processing.
|
||||
# Default: yes
|
||||
#ScanArchive yes
|
||||
|
||||
|
||||
##
|
||||
## Limits
|
||||
##
|
||||
|
||||
# The options below protect your system against Denial of Service attacks
|
||||
# using archive bombs.
|
||||
|
||||
# This option sets the maximum amount of time to a scan may take.
|
||||
# In this version, this field only affects the scan time of ZIP archives.
|
||||
# Value of 0 disables the limit.
|
||||
# Note: disabling this limit or setting it too high may result allow scanning
|
||||
# of certain files to lock up the scanning process/threads resulting in a
|
||||
# Denial of Service.
|
||||
# Time is in milliseconds.
|
||||
# Default: 120000
|
||||
MaxScanTime 300000
|
||||
|
||||
# This option sets the maximum amount of data to be scanned for each input
|
||||
# file. Archives and other containers are recursively extracted and scanned
|
||||
# up to this value.
|
||||
# Value of 0 disables the limit
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 100M
|
||||
MaxScanSize 1024M
|
||||
|
||||
# Files larger than this limit won't be scanned. Affects the input file itself
|
||||
# as well as files contained inside it (when the input file is an archive, a
|
||||
# document or some other kind of container).
|
||||
# Value of 0 disables the limit.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Technical design limitations prevent ClamAV from scanning files greater than
|
||||
# 2 GB at this time.
|
||||
# Default: 25M
|
||||
MaxFileSize 1024M
|
||||
|
||||
# Nested archives are scanned recursively, e.g. if a Zip archive contains a RAR
|
||||
# file, all files within it will also be scanned. This options specifies how
|
||||
# deeply the process should be continued.
|
||||
# Note: setting this limit too high may result in severe damage to the system.
|
||||
# Default: 17
|
||||
#MaxRecursion 10
|
||||
|
||||
# Number of files to be scanned within an archive, a document, or any other
|
||||
# container file.
|
||||
# Value of 0 disables the limit.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 10000
|
||||
#MaxFiles 15000
|
||||
|
||||
# Maximum size of a file to check for embedded PE. Files larger than this value
|
||||
# will skip the additional analysis step.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 10M
|
||||
#MaxEmbeddedPE 10M
|
||||
|
||||
# Maximum size of a HTML file to normalize. HTML files larger than this value
|
||||
# will not be normalized or scanned.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 10M
|
||||
#MaxHTMLNormalize 10M
|
||||
|
||||
# Maximum size of a normalized HTML file to scan. HTML files larger than this
|
||||
# value after normalization will not be scanned.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 2M
|
||||
#MaxHTMLNoTags 2M
|
||||
|
||||
# Maximum size of a script file to normalize. Script content larger than this
|
||||
# value will not be normalized or scanned.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 5M
|
||||
#MaxScriptNormalize 5M
|
||||
|
||||
# Maximum size of a ZIP file to reanalyze type recognition. ZIP files larger
|
||||
# than this value will skip the step to potentially reanalyze as PE.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 1M
|
||||
#MaxZipTypeRcg 1M
|
||||
|
||||
# This option sets the maximum number of partitions of a raw disk image to be
|
||||
# scanned.
|
||||
# Raw disk images with more partitions than this value will have up to
|
||||
# the value number partitions scanned. Negative values are not allowed.
|
||||
# Note: setting this limit too high may result in severe damage or impact
|
||||
# performance.
|
||||
# Default: 50
|
||||
#MaxPartitions 128
|
||||
|
||||
# This option sets the maximum number of icons within a PE to be scanned.
|
||||
# PE files with more icons than this value will have up to the value number
|
||||
# icons scanned.
|
||||
# Negative values are not allowed.
|
||||
# WARNING: setting this limit too high may result in severe damage or impact
|
||||
# performance.
|
||||
# Default: 100
|
||||
#MaxIconsPE 200
|
||||
|
||||
# This option sets the maximum recursive calls for HWP3 parsing during
|
||||
# scanning. HWP3 files using more than this limit will be terminated and
|
||||
# alert the user.
|
||||
# Scans will be unable to scan any HWP3 attachments if the recursive limit
|
||||
# is reached.
|
||||
# Negative values are not allowed.
|
||||
# WARNING: setting this limit too high may result in severe damage or impact
|
||||
# performance.
|
||||
# Default: 16
|
||||
#MaxRecHWP3 16
|
||||
|
||||
# This option sets the maximum calls to the PCRE match function during
|
||||
# an instance of regex matching.
|
||||
# Instances using more than this limit will be terminated and alert the user
|
||||
# but the scan will continue.
|
||||
# For more information on match_limit, see the PCRE documentation.
|
||||
# Negative values are not allowed.
|
||||
# WARNING: setting this limit too high may severely impact performance.
|
||||
# Default: 100000
|
||||
#PCREMatchLimit 20000
|
||||
|
||||
# This option sets the maximum recursive calls to the PCRE match function
|
||||
# during an instance of regex matching.
|
||||
# Instances using more than this limit will be terminated and alert the user
|
||||
# but the scan will continue.
|
||||
# For more information on match_limit_recursion, see the PCRE documentation.
|
||||
# Negative values are not allowed and values > PCREMatchLimit are superfluous.
|
||||
# WARNING: setting this limit too high may severely impact performance.
|
||||
# Default: 2000
|
||||
#PCRERecMatchLimit 10000
|
||||
|
||||
# This option sets the maximum filesize for which PCRE subsigs will be
|
||||
# executed. Files exceeding this limit will not have PCRE subsigs executed
|
||||
# unless a subsig is encompassed to a smaller buffer.
|
||||
# Negative values are not allowed.
|
||||
# Setting this value to zero disables the limit.
|
||||
# WARNING: setting this limit too high or disabling it may severely impact
|
||||
# performance.
|
||||
# Default: 25M
|
||||
#PCREMaxFileSize 100M
|
||||
|
||||
# When AlertExceedsMax is set, files exceeding the MaxFileSize, MaxScanSize, or
|
||||
# MaxRecursion limit will be flagged with the virus name starting with
|
||||
# "Heuristics.Limits.Exceeded".
|
||||
# Default: no
|
||||
#AlertExceedsMax yes
|
||||
|
||||
##
|
||||
## On-access Scan Settings
|
||||
##
|
||||
|
||||
# Don't scan files larger than OnAccessMaxFileSize
|
||||
# Value of 0 disables the limit.
|
||||
# Default: 5M
|
||||
#OnAccessMaxFileSize 10M
|
||||
|
||||
# Max number of scanning threads to allocate to the OnAccess thread pool at
|
||||
# startup. These threads are the ones responsible for creating a connection
|
||||
# with the daemon and kicking off scanning after an event has been processed.
|
||||
# To prevent clamonacc from consuming all clamd's resources keep this lower
|
||||
# than clamd's max threads.
|
||||
# Default: 5
|
||||
#OnAccessMaxThreads 10
|
||||
|
||||
# Max amount of time (in milliseconds) that the OnAccess client should spend
|
||||
# for every connect, send, and recieve attempt when communicating with clamd
|
||||
# via curl.
|
||||
# Default: 5000 (5 seconds)
|
||||
# OnAccessCurlTimeout 10000
|
||||
|
||||
# Toggles dynamic directory determination. Allows for recursively watching
|
||||
# include paths.
|
||||
# Default: no
|
||||
#OnAccessDisableDDD yes
|
||||
|
||||
# Set the include paths (all files inside them will be scanned). You can have
|
||||
# multiple OnAccessIncludePath directives but each directory must be added
|
||||
# in a separate line.
|
||||
# Default: disabled
|
||||
#OnAccessIncludePath /home
|
||||
#OnAccessIncludePath /students
|
||||
|
||||
# Set the exclude paths. All subdirectories are also excluded.
|
||||
# Default: disabled
|
||||
#OnAccessExcludePath /home/user
|
||||
|
||||
# Modifies fanotify blocking behaviour when handling permission events.
|
||||
# If off, fanotify will only notify if the file scanned is a virus,
|
||||
# and not perform any blocking.
|
||||
# Default: no
|
||||
#OnAccessPrevention yes
|
||||
|
||||
# When using prevention, if this option is turned on, any errors that occur
|
||||
# during scanning will result in the event attempt being denied. This could
|
||||
# potentially lead to unwanted system behaviour with certain configurations,
|
||||
# so the client defaults this to off and prefers allowing access events in
|
||||
# case of scan or connection error.
|
||||
# Default: no
|
||||
#OnAccessDenyOnError yes
|
||||
|
||||
# Toggles extra scanning and notifications when a file or directory is
|
||||
# created or moved.
|
||||
# Requires the DDD system to kick-off extra scans.
|
||||
# Default: no
|
||||
#OnAccessExtraScanning yes
|
||||
|
||||
# Set the mount point to be scanned. The mount point specified, or the mount
|
||||
# point containing the specified directory will be watched. If any directories
|
||||
# are specified, this option will preempt (disable and ignore all options
|
||||
# related to) the DDD system. This option will result in verdicts only.
|
||||
# Note that prevention is explicitly disallowed to prevent common, fatal
|
||||
# misconfigurations. (e.g. watching "/" with prevention on and no exclusions
|
||||
# made on vital system directories)
|
||||
# It can be used multiple times.
|
||||
# Default: disabled
|
||||
#OnAccessMountPath /
|
||||
#OnAccessMountPath /home/user
|
||||
|
||||
# With this option you can exclude the root UID (0). Processes run under
|
||||
# root with be able to access all files without triggering scans or
|
||||
# permission denied events.
|
||||
# Note that if clamd cannot check the uid of the process that generated an
|
||||
# on-access scan event (e.g., because OnAccessPrevention was not enabled, and
|
||||
# the process already exited), clamd will perform a scan. Thus, setting
|
||||
# OnAccessExcludeRootUID is not *guaranteed* to prevent every access by the
|
||||
# root user from triggering a scan (unless OnAccessPrevention is enabled).
|
||||
# Default: no
|
||||
#OnAccessExcludeRootUID no
|
||||
|
||||
# With this option you can exclude specific UIDs. Processes with these UIDs
|
||||
# will be able to access all files without triggering scans or permission
|
||||
# denied events.
|
||||
# This option can be used multiple times (one per line).
|
||||
# Using a value of 0 on any line will disable this option entirely.
|
||||
# To exclude the root UID (0) please enable the OnAccessExcludeRootUID
|
||||
# option.
|
||||
# Also note that if clamd cannot check the uid of the process that generated an
|
||||
# on-access scan event (e.g., because OnAccessPrevention was not enabled, and
|
||||
# the process already exited), clamd will perform a scan. Thus, setting
|
||||
# OnAccessExcludeUID is not *guaranteed* to prevent every access by the
|
||||
# specified uid from triggering a scan (unless OnAccessPrevention is enabled).
|
||||
# Default: disabled
|
||||
#OnAccessExcludeUID -1
|
||||
|
||||
# This option allows exclusions via user names when using the on-access
|
||||
# scanning client. It can be used multiple times.
|
||||
# It has the same potential race condition limitations of the
|
||||
# OnAccessExcludeUID option.
|
||||
# Default: disabled
|
||||
#OnAccessExcludeUname clamav
|
||||
|
||||
# Number of times the OnAccess client will retry a failed scan due to
|
||||
# connection problems (or other issues).
|
||||
# Default: 0
|
||||
#OnAccessRetryAttempts 3
|
||||
|
||||
##
|
||||
## Bytecode
|
||||
##
|
||||
|
||||
# With this option enabled ClamAV will load bytecode from the database.
|
||||
# It is highly recommended you keep this option on, otherwise you'll miss
|
||||
# detections for many new viruses.
|
||||
# Default: yes
|
||||
#Bytecode yes
|
||||
|
||||
# Set bytecode security level.
|
||||
# Possible values:
|
||||
# None - No security at all, meant for debugging.
|
||||
# DO NOT USE THIS ON PRODUCTION SYSTEMS.
|
||||
# This value is only available if clamav was built
|
||||
# with --enable-debug!
|
||||
# TrustSigned - Trust bytecode loaded from signed .c[lv]d files, insert
|
||||
# runtime safety checks for bytecode loaded from other sources.
|
||||
# Paranoid - Don't trust any bytecode, insert runtime checks for all.
|
||||
# Recommended: TrustSigned, because bytecode in .cvd files already has these
|
||||
# checks.
|
||||
# Note that by default only signed bytecode is loaded, currently you can only
|
||||
# load unsigned bytecode in --enable-debug mode.
|
||||
#
|
||||
# Default: TrustSigned
|
||||
#BytecodeSecurity TrustSigned
|
||||
|
||||
# Allow loading bytecode from outside digitally signed .c[lv]d files.
|
||||
# **Caution**: You should NEVER run bytecode signatures from untrusted sources.
|
||||
# Doing so may result in arbitrary code execution.
|
||||
# Default: no
|
||||
#BytecodeUnsigned yes
|
||||
|
||||
# Set bytecode timeout in milliseconds.
|
||||
#
|
||||
# Default: 5000
|
||||
# BytecodeTimeout 1000
|
|
@ -1,2 +0,0 @@
|
|||
This directory needs to contain all certificates needed by this cockroachdb node. Those can be generated by the steps
|
||||
outlined in the README in the root directory, under "Setting up CockroachDB".
|
|
@ -1,15 +1,10 @@
|
|||
FROM node:16.10.0-alpine
|
||||
FROM node:16.14.0-alpine
|
||||
|
||||
WORKDIR /opt/hsd
|
||||
|
||||
RUN apk update && apk add bash unbound-dev gmp-dev g++ gcc make python2 git
|
||||
# Checkout a specific commit until Handshake releases the next release after
|
||||
# 2.4.0 then we should switch to that tag.
|
||||
#
|
||||
# The commit we are targetting right now contains a fix for handling the chain
|
||||
# migration code for new portals.
|
||||
RUN git clone https://github.com/handshake-org/hsd.git /opt/hsd && \
|
||||
cd /opt/hsd && git checkout 6f0927db32723d6320c8bff255a6ccf70b2ccd32 && cd -
|
||||
cd /opt/hsd && git checkout v3.0.1 && cd -
|
||||
RUN npm install --production
|
||||
|
||||
ENV PATH="${PATH}:/opt/hsd/bin:/opt/hsd/node_modules/.bin"
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
{
|
||||
"$id": "https://schemas.ory.sh/presets/kratos/quickstart/email-password/identity.schema.json",
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Person",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"traits": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"email": {
|
||||
"type": "string",
|
||||
"format": "email",
|
||||
"title": "E-Mail",
|
||||
"minLength": 3,
|
||||
"ory.sh/kratos": {
|
||||
"credentials": {
|
||||
"password": {
|
||||
"identifier": true
|
||||
}
|
||||
},
|
||||
"recovery": {
|
||||
"via": "email"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["email"],
|
||||
"additionalProperties": true
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
version: v0.5.5-alpha.1
|
||||
|
||||
dsn: memory
|
||||
|
||||
serve:
|
||||
public:
|
||||
base_url: http://127.0.0.1/
|
||||
cors:
|
||||
enabled: true
|
||||
admin:
|
||||
base_url: http://127.0.0.1/admin/
|
||||
|
||||
selfservice:
|
||||
default_browser_return_url: http://127.0.0.1/
|
||||
whitelisted_return_urls:
|
||||
- http://127.0.0.1/
|
||||
|
||||
methods:
|
||||
password:
|
||||
enabled: true
|
||||
|
||||
flows:
|
||||
error:
|
||||
ui_url: http://127.0.0.1/error
|
||||
|
||||
settings:
|
||||
ui_url: http://127.0.0.1/settings
|
||||
privileged_session_max_age: 15m
|
||||
|
||||
recovery:
|
||||
enabled: true
|
||||
ui_url: http://127.0.0.1/recovery
|
||||
|
||||
verification:
|
||||
enabled: true
|
||||
ui_url: http://127.0.0.1/verify
|
||||
after:
|
||||
default_browser_return_url: http://127.0.0.1/
|
||||
|
||||
logout:
|
||||
after:
|
||||
default_browser_return_url: http://127.0.0.1/auth/login
|
||||
|
||||
login:
|
||||
ui_url: http://127.0.0.1/auth/login
|
||||
lifespan: 10m
|
||||
|
||||
registration:
|
||||
lifespan: 10m
|
||||
ui_url: http://127.0.0.1/auth/registration
|
||||
after:
|
||||
password:
|
||||
hooks:
|
||||
- hook: session
|
||||
|
||||
log:
|
||||
level: debug
|
||||
format: text
|
||||
leak_sensitive_values: true
|
||||
|
||||
password:
|
||||
max_breaches: 100
|
||||
|
||||
secrets:
|
||||
cookie:
|
||||
- PLEASE-CHANGE-ME-I-AM-VERY-INSECURE
|
||||
|
||||
session:
|
||||
cookie:
|
||||
domain: account.siasky.net
|
||||
lifespan: "720h"
|
||||
|
||||
hashers:
|
||||
argon2:
|
||||
parallelism: 1
|
||||
memory: 131072
|
||||
iterations: 2
|
||||
salt_length: 16
|
||||
key_length: 16
|
||||
|
||||
identity:
|
||||
default_schema_url: file:///etc/config/kratos/identity.schema.json
|
||||
|
||||
courier:
|
||||
smtp:
|
||||
connection_uri: smtps://test:test@mailslurper:1025/?skip_ssl_verify=true
|
|
@ -1,37 +0,0 @@
|
|||
{
|
||||
"$id": "https://schemas.ory.sh/presets/kratos/quickstart/email-password/identity.schema.json",
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Person",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"traits": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"email": {
|
||||
"type": "string",
|
||||
"format": "email",
|
||||
"title": "E-Mail",
|
||||
"minLength": 3,
|
||||
"ory.sh/kratos": {
|
||||
"credentials": {
|
||||
"password": {
|
||||
"identifier": true
|
||||
}
|
||||
},
|
||||
"verification": {
|
||||
"via": "email"
|
||||
},
|
||||
"recovery": {
|
||||
"via": "email"
|
||||
}
|
||||
}
|
||||
},
|
||||
"website": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": ["website", "email"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
local claims = {
|
||||
email_verified: false
|
||||
} + std.extVar('claims');
|
||||
|
||||
{
|
||||
identity: {
|
||||
traits: {
|
||||
// Allowing unverified email addresses enables account
|
||||
// enumeration attacks, especially if the value is used for
|
||||
// e.g. verification or as a password login identifier.
|
||||
//
|
||||
// Therefore we only return the email if it (a) exists and (b) is marked verified
|
||||
// by GitHub.
|
||||
[if "email" in claims && claims.email_verified then "email" else null]: claims.email,
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
This directory needs to contain all certificates needed by this cockroachdb node. Those can be generated by the steps
|
||||
outlined in the README in the root directory, under "Setting up CockroachDB".
|
||||
|
||||
The only difference between the files here and those under
|
||||
`docker/cockroach/certs` is that the files here need to be readable by anyone, while the files under `cockroach` need to
|
||||
have their original access rights
|
||||
(all \*.key files should be 600 instead of 644 there).
|
|
@ -1,116 +0,0 @@
|
|||
- id: "ory:kratos:public"
|
||||
upstream:
|
||||
preserve_host: true
|
||||
url: "http://kratos:4433"
|
||||
strip_path: /.ory/kratos/public
|
||||
match:
|
||||
url: "http://oathkeeper:4455/.ory/kratos/public/<**>"
|
||||
methods:
|
||||
- GET
|
||||
- POST
|
||||
- PUT
|
||||
- DELETE
|
||||
- PATCH
|
||||
authenticators:
|
||||
- handler: noop
|
||||
authorizer:
|
||||
handler: allow
|
||||
mutators:
|
||||
- handler: noop
|
||||
|
||||
- id: "dashboard:anonymous"
|
||||
upstream:
|
||||
preserve_host: true
|
||||
url: "http://dashboard:3000"
|
||||
match:
|
||||
url: "http://oathkeeper:4455/<{_next/**,auth/**,recovery,verify,error,favicon.ico}{/,}>"
|
||||
methods:
|
||||
- GET
|
||||
authenticators:
|
||||
- handler: anonymous
|
||||
authorizer:
|
||||
handler: allow
|
||||
mutators:
|
||||
- handler: noop
|
||||
|
||||
- id: "dashboard:protected"
|
||||
upstream:
|
||||
preserve_host: true
|
||||
url: "http://dashboard:3000"
|
||||
match:
|
||||
url: "http://oathkeeper:4455/<{,api/**,settings,uploads,downloads,payments}>"
|
||||
methods:
|
||||
- GET
|
||||
- POST
|
||||
- PUT
|
||||
- DELETE
|
||||
- PATCH
|
||||
authenticators:
|
||||
- handler: cookie_session
|
||||
authorizer:
|
||||
handler: allow
|
||||
mutators:
|
||||
- handler: id_token
|
||||
- handler: header
|
||||
config:
|
||||
headers:
|
||||
X-User: "{{ print .Subject }}"
|
||||
errors:
|
||||
- handler: redirect
|
||||
config:
|
||||
to: http://127.0.0.1/auth/login
|
||||
|
||||
- id: "accounts:anonymous"
|
||||
upstream:
|
||||
preserve_host: true
|
||||
url: "http://accounts:3000"
|
||||
match:
|
||||
url: "http://oathkeeper<{,:4455}>/<{health,stripe/prices,stripe/webhook}>"
|
||||
methods:
|
||||
- GET
|
||||
- POST
|
||||
authenticators:
|
||||
- handler: anonymous
|
||||
authorizer:
|
||||
handler: allow
|
||||
mutators:
|
||||
- handler: noop
|
||||
|
||||
- id: "accounts:public"
|
||||
upstream:
|
||||
preserve_host: true
|
||||
url: "http://accounts:3000"
|
||||
match:
|
||||
url: "http://oathkeeper<{,:4455}>/<{user/limits}>"
|
||||
methods:
|
||||
- GET
|
||||
authenticators:
|
||||
- handler: cookie_session
|
||||
- handler: noop
|
||||
authorizer:
|
||||
handler: allow
|
||||
mutators:
|
||||
- handler: id_token
|
||||
|
||||
- id: "accounts:protected"
|
||||
upstream:
|
||||
preserve_host: true
|
||||
url: "http://accounts:3000"
|
||||
match:
|
||||
url: "http://oathkeeper<{,:4455}>/<{login,logout,user,user/uploads,user/uploads/*,user/downloads,user/stats}>"
|
||||
methods:
|
||||
- GET
|
||||
- POST
|
||||
- PUT
|
||||
- DELETE
|
||||
- PATCH
|
||||
authenticators:
|
||||
- handler: cookie_session
|
||||
authorizer:
|
||||
handler: allow
|
||||
mutators:
|
||||
- handler: id_token
|
||||
errors:
|
||||
- handler: redirect
|
||||
config:
|
||||
to: http://127.0.0.1/auth/login
|
|
@ -1,94 +0,0 @@
|
|||
log:
|
||||
level: debug
|
||||
format: json
|
||||
|
||||
serve:
|
||||
proxy:
|
||||
cors:
|
||||
enabled: true
|
||||
allowed_origins:
|
||||
- "*"
|
||||
allowed_methods:
|
||||
- POST
|
||||
- GET
|
||||
- PUT
|
||||
- PATCH
|
||||
- DELETE
|
||||
allowed_headers:
|
||||
- Authorization
|
||||
- Content-Type
|
||||
exposed_headers:
|
||||
- Content-Type
|
||||
allow_credentials: true
|
||||
debug: true
|
||||
|
||||
errors:
|
||||
fallback:
|
||||
- json
|
||||
|
||||
handlers:
|
||||
redirect:
|
||||
enabled: true
|
||||
config:
|
||||
to: http://127.0.0.1/auth/login
|
||||
when:
|
||||
- error:
|
||||
- unauthorized
|
||||
- forbidden
|
||||
request:
|
||||
header:
|
||||
accept:
|
||||
- text/html
|
||||
json:
|
||||
enabled: true
|
||||
config:
|
||||
verbose: true
|
||||
|
||||
access_rules:
|
||||
matching_strategy: glob
|
||||
repositories:
|
||||
- file:///etc/config/oathkeeper/access-rules.yml
|
||||
|
||||
authenticators:
|
||||
anonymous:
|
||||
enabled: true
|
||||
config:
|
||||
subject: guest
|
||||
|
||||
cookie_session:
|
||||
enabled: true
|
||||
config:
|
||||
check_session_url: http://kratos:4433/sessions/whoami
|
||||
preserve_path: true
|
||||
extra_from: "@this"
|
||||
subject_from: "identity.id"
|
||||
only:
|
||||
- ory_kratos_session
|
||||
|
||||
noop:
|
||||
enabled: true
|
||||
|
||||
authorizers:
|
||||
allow:
|
||||
enabled: true
|
||||
|
||||
mutators:
|
||||
noop:
|
||||
enabled: true
|
||||
|
||||
header:
|
||||
enabled: true
|
||||
config:
|
||||
headers:
|
||||
X-User: "{{ print .Subject }}"
|
||||
|
||||
id_token:
|
||||
enabled: true
|
||||
config:
|
||||
issuer_url: http://oathkeeper:4455/
|
||||
jwks_url: file:///etc/config/oathkeeper/id_token.jwks.json
|
||||
ttl: 720h
|
||||
claims: |
|
||||
{
|
||||
"session": {{ .Extra | toJson }}
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
FROM golang:1.17.3
|
||||
LABEL maintainer="SkynetLabs <devs@siasky.net>"
|
||||
|
||||
ENV GOOS linux
|
||||
ENV GOARCH amd64
|
||||
|
||||
ARG branch=main
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
RUN git clone --single-branch --branch ${branch} https://github.com/SkynetLabs/malware-scanner.git && \
|
||||
cd malware-scanner && \
|
||||
go mod download && \
|
||||
make release
|
||||
|
||||
ENV SKYNET_DB_HOST="localhost"
|
||||
ENV SKYNET_DB_PORT="27017"
|
||||
ENV SKYNET_DB_USER="username"
|
||||
ENV SKYNET_DB_PASS="password"
|
||||
ENV CLAMAV_IP=127.0.0.1
|
||||
ENV CLAMAV_PORT=3310
|
||||
|
||||
ENTRYPOINT ["malware-scanner"]
|
|
@ -1,14 +1,17 @@
|
|||
FROM openresty/openresty:1.19.9.1-bionic
|
||||
|
||||
RUN luarocks install lua-resty-http && \
|
||||
luarocks install hasher && \
|
||||
openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 \
|
||||
-subj '/CN=local-certificate' \
|
||||
-keyout /etc/ssl/local-certificate.key \
|
||||
-out /etc/ssl/local-certificate.crt
|
||||
|
||||
COPY mo ./
|
||||
COPY libs /etc/nginx/libs
|
||||
COPY conf.d /etc/nginx/conf.d
|
||||
COPY conf.d.templates /etc/nginx/conf.d.templates
|
||||
COPY nginx.conf /usr/local/openresty/nginx/conf/nginx.conf
|
||||
|
||||
CMD [ "bash", "-c", \
|
||||
"./mo < /etc/nginx/conf.d.templates/server.account.conf > /etc/nginx/conf.d/server.account.conf ; \
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
more_set_headers 'Access-Control-Allow-Origin: $http_origin';
|
||||
more_set_headers 'Access-Control-Allow-Credentials: true';
|
||||
more_set_headers 'Access-Control-Allow-Methods: GET, POST, HEAD, OPTIONS, PUT, PATCH, DELETE';
|
||||
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,If-None-Match,Cache-Control,Content-Type,Range,X-HTTP-Method-Override,upload-offset,upload-metadata,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,location';
|
||||
more_set_headers 'Access-Control-Expose-Headers: Content-Length,Content-Range,ETag,Skynet-File-Metadata,Skynet-Skylink,Skynet-Proof,Skynet-Portal-Api,Skynet-Server-Api,upload-offset,upload-metadata,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,location';
|
||||
more_set_headers 'Access-Control-Allow-Headers: DNT,User-Agent,X-Requested-With,If-Modified-Since,If-None-Match,Cache-Control,Content-Type,Range,X-HTTP-Method-Override,upload-offset,upload-metadata,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,upload-concat,location,Skynet-API-Key';
|
||||
more_set_headers 'Access-Control-Expose-Headers: Content-Length,Content-Range,ETag,Skynet-File-Metadata,Skynet-Skylink,Skynet-Proof,Skynet-Portal-Api,Skynet-Server-Api,upload-offset,upload-metadata,upload-length,tus-version,tus-resumable,tus-extension,tus-max-size,upload-concat,location';
|
||||
|
|
|
@ -3,7 +3,16 @@
|
|||
# because otherwise logger with throw error
|
||||
|
||||
# set only on hns routes
|
||||
set $hns_domain '';
|
||||
set $hns_domain "";
|
||||
|
||||
# set only if server has been access through SERVER_DOMAIN
|
||||
set $server_alias '';
|
||||
set $server_alias "";
|
||||
|
||||
# expose skylink variable so we can use it in access log
|
||||
set $skylink "";
|
||||
|
||||
# cached account limits (json string) - applies only if accounts are enabled
|
||||
set $account_limits "";
|
||||
|
||||
# set this internal flag to true if current request should not be limited in any way
|
||||
set $internal_no_limits "false";
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
allow 127.0.0.1/32; # localhost
|
||||
allow 10.10.10.0/24; # docker network
|
||||
deny all;
|
|
@ -1,11 +1,12 @@
|
|||
include /etc/nginx/conf.d/include/proxy-buffer;
|
||||
include /etc/nginx/conf.d/include/proxy-pass-internal;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
# variable definititions - we need to define a variable to be able to access it in lua by ngx.var.something
|
||||
set $skylink ''; # placeholder for the raw 46 bit skylink
|
||||
|
||||
# resolve handshake domain by requesting to /hnsres endpoint and assign correct values to $skylink and $rest
|
||||
access_by_lua_block {
|
||||
rewrite_by_lua_block {
|
||||
local json = require('cjson')
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
|
@ -75,7 +76,7 @@ access_by_lua_block {
|
|||
end
|
||||
}
|
||||
|
||||
# we proxy to another nginx location rather than directly to siad because we don't want to deal with caching here
|
||||
# we proxy to another nginx location rather than directly to siad because we do not want to deal with caching here
|
||||
proxy_pass https://127.0.0.1/$skylink$path$is_args$args;
|
||||
|
||||
# in case siad returns location header, we need to replace the skylink with the domain name
|
||||
|
|
|
@ -3,16 +3,14 @@ include /etc/nginx/conf.d/include/proxy-buffer;
|
|||
include /etc/nginx/conf.d/include/proxy-cache-downloads;
|
||||
include /etc/nginx/conf.d/include/track-download;
|
||||
|
||||
# redirect purge calls to separate location
|
||||
error_page 462 = @purge;
|
||||
if ($request_method = PURGE) {
|
||||
return 462;
|
||||
}
|
||||
|
||||
limit_conn downloads_by_ip 100; # ddos protection: max 100 downloads at a time
|
||||
|
||||
# ensure that skylink that we pass around is base64 encoded (transform base32 encoded ones)
|
||||
# this is important because we want only one format in cache keys and logs
|
||||
set_by_lua_block $skylink { return require("skynet.skylink").parse(ngx.var.skylink) }
|
||||
|
||||
# $skylink_v1 and $skylink_v2 variables default to the same value but in case the requested skylink was:
|
||||
# a) skylink v1 - it wouldn't matter, no additional logic is executed
|
||||
# a) skylink v1 - it would not matter, no additional logic is executed
|
||||
# b) skylink v2 - in a lua block below we will resolve the skylink v2 into skylink v1 and update
|
||||
# $skylink_v1 variable so then the proxy request to skyd can be cached in nginx (proxy_cache_key
|
||||
# in proxy-cache-downloads includes $skylink_v1 as a part of the cache key)
|
||||
|
@ -53,21 +51,32 @@ access_by_lua_block {
|
|||
ngx.var.skynet_proof = res.headers["Skynet-Proof"]
|
||||
end
|
||||
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
-- check if skylink v1 is present on blocklist (compare hashes)
|
||||
if require("skynet.blocklist").is_blocked(ngx.var.skylink_v1) then
|
||||
return require("skynet.blocklist").exit_illegal()
|
||||
end
|
||||
|
||||
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
|
||||
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
|
||||
})
|
||||
-- if skylink is found on nocache list then set internal nocache variable
|
||||
-- to tell nginx that it should not try and cache this file (too large)
|
||||
if ngx.shared.nocache:get(ngx.var.skylink_v1) then
|
||||
ngx.var.nocache = "1"
|
||||
end
|
||||
|
||||
-- fail gracefully in case /user/limits failed
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
ngx.var.limit_rate = 2621440 -- (20 * 1024 * 1024 / 8) conservative fallback to 20 mbps in case accounts failed to return limits
|
||||
elseif res and res.status == ngx.HTTP_OK then
|
||||
local json = require('cjson')
|
||||
local limits = json.decode(res.body)
|
||||
if require("skynet.account").accounts_enabled() then
|
||||
-- check if portal is in authenticated only mode
|
||||
if require("skynet.account").is_access_unauthorized() then
|
||||
return require("skynet.account").exit_access_unauthorized()
|
||||
end
|
||||
|
||||
-- check if portal is in subscription only mode
|
||||
if require("skynet.account").is_access_forbidden() then
|
||||
return require("skynet.account").exit_access_forbidden()
|
||||
end
|
||||
|
||||
-- get account limits of currently authenticated user
|
||||
local limits = require("skynet.account").get_account_limits()
|
||||
|
||||
-- apply download speed limit
|
||||
ngx.var.limit_rate = limits.download
|
||||
end
|
||||
}
|
||||
|
@ -83,6 +92,12 @@ header_filter_by_lua_block {
|
|||
if ngx.var.skynet_proof and ngx.var.skynet_proof ~= "" then
|
||||
ngx.header["Skynet-Proof"] = ngx.var.skynet_proof
|
||||
end
|
||||
|
||||
-- add skylink to nocache list if it exceeds 1GB (1e+9 bytes) threshold
|
||||
-- (content length can be nil for already cached files - we can ignore them)
|
||||
if ngx.header["Content-Length"] and tonumber(ngx.header["Content-Length"]) > 1e+9 then
|
||||
ngx.shared.nocache:set(ngx.var.skylink_v1, ngx.header["Content-Length"])
|
||||
end
|
||||
}
|
||||
|
||||
limit_rate_after 512k;
|
||||
|
@ -91,7 +106,7 @@ limit_rate $limit_rate;
|
|||
proxy_read_timeout 600;
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# in case the requested skylink was v2 and we already resolved it to skylink v1, we're going to pass resolved
|
||||
# in case the requested skylink was v2 and we already resolved it to skylink v1, we are going to pass resolved
|
||||
# skylink v1 to skyd to save that extra skylink v2 lookup in skyd but in turn, in case skyd returns a redirect
|
||||
# we need to rewrite the skylink v1 to skylink v2 in the location header with proxy_redirect
|
||||
proxy_redirect $skylink_v1 $skylink_v2;
|
||||
|
|
|
@ -10,22 +10,21 @@ proxy_read_timeout 600; # siad should timeout with 404 after 5 minutes
|
|||
proxy_pass http://sia:9980/skynet/registry;
|
||||
|
||||
access_by_lua_block {
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
if require("skynet.account").accounts_enabled() then
|
||||
-- check if portal is in authenticated only mode
|
||||
if require("skynet.account").is_access_unauthorized() then
|
||||
return require("skynet.account").exit_access_unauthorized()
|
||||
end
|
||||
|
||||
local httpc = require("resty.http").new()
|
||||
-- check if portal is in subscription only mode
|
||||
if require("skynet.account").is_access_forbidden() then
|
||||
return require("skynet.account").exit_access_forbidden()
|
||||
end
|
||||
|
||||
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
|
||||
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
|
||||
})
|
||||
-- get account limits of currently authenticated user
|
||||
local limits = require("skynet.account").get_account_limits()
|
||||
|
||||
-- fail gracefully in case /user/limits failed
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
elseif res and res.status == ngx.HTTP_OK then
|
||||
local json = require('cjson')
|
||||
local limits = json.decode(res.body)
|
||||
-- apply registry rate limits (forced delay)
|
||||
if limits.registry > 0 then
|
||||
ngx.sleep(limits.registry / 1000)
|
||||
end
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
access_by_lua_block {
|
||||
-- check portal access rules and exit if access is restricted
|
||||
if require("skynet.account").is_access_unauthorized() then
|
||||
return require("skynet.account").exit_access_unauthorized()
|
||||
end
|
||||
|
||||
-- check if portal is in subscription only mode
|
||||
if require("skynet.account").is_access_forbidden() then
|
||||
return require("skynet.account").exit_access_forbidden()
|
||||
end
|
||||
}
|
|
@ -1,7 +1,21 @@
|
|||
set $nocache 0; # internal variable for bypassing the cache, nginx expects 0/1 for boolean
|
||||
proxy_cache skynet; # cache name
|
||||
proxy_cache_key $skylink_v1$path$arg_format$arg_attachment$arg_start$arg_end$http_range; # unique cache key
|
||||
proxy_cache_min_uses 3; # cache after 3 uses
|
||||
proxy_cache_valid 200 206 307 308 48h; # keep 200, 206, 307 and 308 responses valid for up to 2 days
|
||||
proxy_cache_bypass $nocache $cookie_nocache $arg_nocache; # add cache bypass option
|
||||
add_header X-Proxy-Cache $upstream_cache_status; # add response header to indicate cache hits and misses
|
||||
|
||||
# bypass - this will bypass cache hit on request (status BYPASS)
|
||||
# but still stores file in cache if cache conditions are met
|
||||
proxy_cache_bypass $cookie_nocache $arg_nocache;
|
||||
|
||||
# no cache - this will ignore cache on request (status MISS)
|
||||
# and does not store file in cache under no condition
|
||||
set_if_empty $nocache "0";
|
||||
|
||||
# map skyd env variable value to "1" for true and "0" for false (expected by proxy_no_cache)
|
||||
set_by_lua_block $skyd_disk_cache_enabled {
|
||||
return os.getenv("SKYD_DISK_CACHE_ENABLED") == "true" and "1" or "0"
|
||||
}
|
||||
|
||||
# disable cache when nocache is set or skyd cache is enabled
|
||||
proxy_no_cache $nocache $skyd_disk_cache_enabled;
|
||||
|
|
|
@ -1,15 +1,4 @@
|
|||
rewrite_by_lua_block {
|
||||
local b64 = require("ngx.base64")
|
||||
-- open apipassword file for reading (b flag is required for some reason)
|
||||
-- (file /etc/.sia/apipassword has to be mounted from the host system)
|
||||
local apipassword_file = io.open("/data/sia/apipassword", "rb")
|
||||
-- read apipassword file contents and trim newline (important)
|
||||
local apipassword = apipassword_file:read("*all"):gsub("%s+", "")
|
||||
-- make sure to close file after reading the password
|
||||
apipassword_file.close()
|
||||
-- encode the user:password authorization string
|
||||
-- (in our case user is empty so it is just :password)
|
||||
local content = b64.encode_base64url(":" .. apipassword)
|
||||
-- set authorization header with proper base64 encoded string
|
||||
ngx.req.set_header("Authorization", "Basic " .. content)
|
||||
-- set basic authorization header with base64 encoded apipassword
|
||||
ngx.req.set_header("Authorization", require("skynet.utils").authorization_header())
|
||||
}
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
# register the download in accounts service (cookies should contain jwt)
|
||||
log_by_lua_block {
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
|
||||
if require("skynet.account").accounts_enabled() then
|
||||
local function track(premature, skylink, status, body_bytes_sent, jwt)
|
||||
if premature then return end
|
||||
|
||||
|
@ -24,4 +23,29 @@ log_by_lua_block {
|
|||
local ok, err = ngx.timer.at(0, track, ngx.header["Skynet-Skylink"], ngx.status, ngx.var.body_bytes_sent, ngx.var.skynet_jwt)
|
||||
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
|
||||
end
|
||||
end
|
||||
|
||||
-- this block runs only when scanner module is enabled
|
||||
if os.getenv("PORTAL_MODULES"):match("s") then
|
||||
local function scan(premature, skylink)
|
||||
if premature then return end
|
||||
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
-- 10.10.10.101 points to malware-scanner service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.101:4000/scan/" .. skylink, {
|
||||
method = "POST",
|
||||
})
|
||||
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
ngx.log(ngx.ERR, "Failed malware-scanner request /scan/" .. skylink .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
end
|
||||
end
|
||||
|
||||
-- scan all skylinks but make sure to only run if skylink is present (empty if request failed)
|
||||
if ngx.header["Skynet-Skylink"] then
|
||||
local ok, err = ngx.timer.at(0, scan, ngx.header["Skynet-Skylink"])
|
||||
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
|
||||
end
|
||||
end
|
||||
}
|
||||
|
|
|
@ -1,22 +1,24 @@
|
|||
# register the registry access in accounts service (cookies should contain jwt)
|
||||
log_by_lua_block {
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
|
||||
if require("skynet.account").accounts_enabled() then
|
||||
local function track(premature, request_method, jwt)
|
||||
if premature then return end
|
||||
|
||||
local httpc = require("resty.http").new()
|
||||
local method = request_method == ngx.HTTP_GET and "read" or "write"
|
||||
|
||||
-- based on request method we assign a registry action string used
|
||||
-- in track endpoint namely "read" for GET and "write" for POST
|
||||
local registry_action = request_method == "GET" and "read" or "write"
|
||||
|
||||
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/track/registry/" .. method, {
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/track/registry/" .. registry_action, {
|
||||
method = "POST",
|
||||
headers = { ["Cookie"] = "skynet-jwt=" .. jwt },
|
||||
})
|
||||
|
||||
if err or (res and res.status ~= ngx.HTTP_NO_CONTENT) then
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /track/registry/" .. method .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /track/registry/" .. registry_action .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -24,4 +26,5 @@ log_by_lua_block {
|
|||
local ok, err = ngx.timer.at(0, track, ngx.req.get_method(), ngx.var.skynet_jwt)
|
||||
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
|
||||
end
|
||||
end
|
||||
}
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
# register the upload in accounts service (cookies should contain jwt)
|
||||
log_by_lua_block {
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
|
||||
if require("skynet.account").accounts_enabled() then
|
||||
local function track(premature, skylink, jwt)
|
||||
if premature then return end
|
||||
|
||||
|
@ -19,8 +18,34 @@ log_by_lua_block {
|
|||
end
|
||||
end
|
||||
|
||||
-- report all skylinks (header empty if request failed) but only if jwt is preset (user is authenticated)
|
||||
if ngx.header["Skynet-Skylink"] and ngx.var.skynet_jwt ~= "" then
|
||||
local ok, err = ngx.timer.at(0, track, ngx.header["Skynet-Skylink"], ngx.var.skynet_jwt)
|
||||
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
|
||||
end
|
||||
end
|
||||
|
||||
-- this block runs only when scanner module is enabled
|
||||
if os.getenv("PORTAL_MODULES"):match("s") then
|
||||
local function scan(premature, skylink)
|
||||
if premature then return end
|
||||
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
-- 10.10.10.101 points to malware-scanner service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.101:4000/scan/" .. skylink, {
|
||||
method = "POST",
|
||||
})
|
||||
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
ngx.log(ngx.ERR, "Failed malware-scanner request /scan/" .. skylink .. ": ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
end
|
||||
end
|
||||
|
||||
-- scan all skylinks but make sure to only run if skylink is present (empty if request failed)
|
||||
if ngx.header["Skynet-Skylink"] then
|
||||
local ok, err = ngx.timer.at(0, scan, ngx.header["Skynet-Skylink"])
|
||||
if err then ngx.log(ngx.ERR, "Failed to create timer: ", err) end
|
||||
end
|
||||
end
|
||||
}
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
-- Tit Petric, Monotek d.o.o., Tue 03 Jan 2017 06:54:56 PM CET
|
||||
--
|
||||
-- Delete nginx cached assets with a PURGE request against an endpoint
|
||||
-- supports extended regular expression PURGE requests (/upload/.*)
|
||||
--
|
||||
-- https://scene-si.org/2017/01/08/improving-nginx-lua-cache-purge/
|
||||
--
|
||||
|
||||
function file_exists(name)
|
||||
local f = io.open(name, "r")
|
||||
if f~=nil then io.close(f) return true else return false end
|
||||
end
|
||||
|
||||
function explode(d, p)
|
||||
local t, ll
|
||||
t={}
|
||||
ll=0
|
||||
if(#p == 1) then return {p} end
|
||||
while true do
|
||||
l=string.find(p, d, ll, true) -- find the next d in the string
|
||||
if l~=nil then -- if "not not" found then..
|
||||
table.insert(t, string.sub(p, ll, l-1)) -- Save it in our array.
|
||||
ll=l+1 -- save just after where we found it for searching next time.
|
||||
else
|
||||
table.insert(t, string.sub(p, ll)) -- Save what's left in our array.
|
||||
break -- Break at end, as it should be, according to the lua manual.
|
||||
end
|
||||
end
|
||||
return t
|
||||
end
|
||||
|
||||
function purge(filename)
|
||||
if (file_exists(filename)) then
|
||||
os.remove(filename)
|
||||
end
|
||||
end
|
||||
|
||||
function trim(s)
|
||||
return (string.gsub(s, "^%s*(.-)%s*$", "%1"))
|
||||
end
|
||||
|
||||
function exec(cmd)
|
||||
local handle = io.popen(cmd)
|
||||
local result = handle:read("*all")
|
||||
handle:close()
|
||||
return trim(result)
|
||||
end
|
||||
|
||||
function list_files(cache_path, purge_pattern)
|
||||
local result = exec("/usr/bin/find " .. cache_path .. " -type f | /usr/bin/xargs --no-run-if-empty -n1000 /bin/grep -El -m 1 '^KEY: " .. purge_pattern .. "' 2>&1")
|
||||
if result == "" then
|
||||
return {}
|
||||
end
|
||||
return explode("\n", result)
|
||||
end
|
||||
|
||||
if ngx ~= nil then
|
||||
-- list all cached items matching uri
|
||||
local files = list_files(ngx.var.lua_purge_path, ngx.var.uri)
|
||||
|
||||
ngx.header["Content-type"] = "text/plain; charset=utf-8"
|
||||
ngx.header["X-Purged-Count"] = table.getn(files)
|
||||
for k, v in pairs(files) do
|
||||
purge(v)
|
||||
end
|
||||
ngx.say("OK")
|
||||
ngx.exit(ngx.OK)
|
||||
end
|
|
@ -1,3 +1,5 @@
|
|||
lua_shared_dict dnslink 10m;
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
|
|
|
@ -0,0 +1,10 @@
|
|||
server {
|
||||
# local server - do not expose this port externally
|
||||
listen 8000;
|
||||
listen [::]:8000;
|
||||
|
||||
# secure traffic by limiting to only local networks
|
||||
include /etc/nginx/conf.d/include/local-network-only;
|
||||
|
||||
include /etc/nginx/conf.d/server/server.local;
|
||||
}
|
|
@ -5,6 +5,40 @@ include /etc/nginx/conf.d/include/ssl-settings;
|
|||
include /etc/nginx/conf.d/include/init-optional-variables;
|
||||
|
||||
location / {
|
||||
proxy_redirect http://127.0.0.1/ https://$host/;
|
||||
proxy_pass http://oathkeeper:4455;
|
||||
proxy_pass http://dashboard:3000;
|
||||
}
|
||||
|
||||
location /health {
|
||||
proxy_pass http://accounts:3000;
|
||||
}
|
||||
|
||||
location /stripe/webhook {
|
||||
proxy_pass http://accounts:3000;
|
||||
}
|
||||
|
||||
location /api/stripe/billing {
|
||||
proxy_pass http://dashboard:3000;
|
||||
}
|
||||
|
||||
location /api/stripe/checkout {
|
||||
proxy_pass http://dashboard:3000;
|
||||
}
|
||||
|
||||
location /api {
|
||||
rewrite /api/(.*) /$1 break;
|
||||
proxy_pass http://accounts:3000;
|
||||
}
|
||||
|
||||
location /api/register {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
rewrite /api/(.*) /$1 break;
|
||||
proxy_pass http://accounts:3000;
|
||||
}
|
||||
|
||||
location /api/login {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
rewrite /api/(.*) /$1 break;
|
||||
proxy_pass http://accounts:3000;
|
||||
}
|
||||
|
|
|
@ -26,6 +26,17 @@ rewrite ^/skynet/blacklist /skynet/blocklist permanent;
|
|||
location / {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
set $skylink "0404dsjvti046fsua4ktor9grrpe76erq9jot9cvopbhsvsu76r4r30";
|
||||
set $path $uri;
|
||||
set $internal_no_limits "true";
|
||||
|
||||
include /etc/nginx/conf.d/include/location-skylink;
|
||||
|
||||
proxy_intercept_errors on;
|
||||
error_page 400 404 490 500 502 503 504 =200 @fallback;
|
||||
}
|
||||
|
||||
location @fallback {
|
||||
proxy_pass http://website:9000;
|
||||
}
|
||||
|
||||
|
@ -61,6 +72,21 @@ location /skynet/stats {
|
|||
proxy_pass http://sia:9980/skynet/stats;
|
||||
}
|
||||
|
||||
# Define path for server load endpoint
|
||||
location /serverload {
|
||||
# Define root directory in the nginx container to load file from
|
||||
root /usr/local/share;
|
||||
|
||||
# including this because of peer pressure from the other routes
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
# tell nginx to expect json
|
||||
default_type 'application/json';
|
||||
|
||||
# Allow for /serverload to load /serverload.json file
|
||||
try_files $uri $uri.json =404;
|
||||
}
|
||||
|
||||
location /skynet/health {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
|
@ -80,7 +106,20 @@ location /health-check {
|
|||
proxy_pass http://10.10.10.60:3100; # hardcoded ip because health-check waits for nginx
|
||||
}
|
||||
|
||||
location /abuse {
|
||||
return 308 /0404guluqu38oaqapku91ed11kbhkge55smh9lhjukmlrj37lfpm8no/;
|
||||
}
|
||||
|
||||
location /abuse/report {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
# 10.10.10.110 points to blocker service
|
||||
proxy_pass http://10.10.10.110:4000/powblock;
|
||||
}
|
||||
|
||||
location /hns {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
# match the request_uri and extract the hns domain and anything that is passed in the uri after it
|
||||
# example: /hns/something/foo/bar matches:
|
||||
# > hns_domain: something
|
||||
|
@ -110,6 +149,7 @@ location /ipfs/api {
|
|||
|
||||
location /hnsres {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
proxy_pass http://handshake-api:3100;
|
||||
}
|
||||
|
@ -118,19 +158,12 @@ location /skynet/registry {
|
|||
include /etc/nginx/conf.d/include/location-skynet-registry;
|
||||
}
|
||||
|
||||
location /skynet/skyfile {
|
||||
location /skynet/restore {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
include /etc/nginx/conf.d/include/sia-auth;
|
||||
include /etc/nginx/conf.d/include/track-upload;
|
||||
include /etc/nginx/conf.d/include/generate-siapath;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
limit_req zone=uploads_by_ip burst=100 nodelay;
|
||||
limit_req zone=uploads_by_ip_throttled;
|
||||
|
||||
limit_conn upload_conn 10;
|
||||
limit_conn upload_conn_rl 1;
|
||||
|
||||
client_max_body_size 1000M; # make sure to limit the size of upload to a sane value
|
||||
client_max_body_size 5M;
|
||||
|
||||
# increase request timeouts
|
||||
proxy_read_timeout 600;
|
||||
|
@ -140,18 +173,68 @@ location /skynet/skyfile {
|
|||
proxy_set_header Expect $http_expect;
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# access_by_lua_block {
|
||||
# -- this block runs only when accounts are enabled
|
||||
# if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
# proxy this call to siad endpoint (make sure the ip is correct)
|
||||
proxy_pass http://sia:9980;
|
||||
}
|
||||
|
||||
# ngx.var.upload_limit_rate = 5 * 1024 * 1024
|
||||
# local res = ngx.location.capture("/accounts/user", { copy_all_vars = true })
|
||||
# if res.status == ngx.HTTP_OK then
|
||||
# local json = require('cjson')
|
||||
# local user = json.decode(res.body)
|
||||
# ngx.var.upload_limit_rate = ngx.var.upload_limit_rate * (user.tier + 1)
|
||||
# end
|
||||
# }
|
||||
location /skynet/registry/subscription {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
# default to unlimited bandwidth and no delay
|
||||
set $bandwidthlimit "0";
|
||||
set $notificationdelay "0";
|
||||
|
||||
rewrite_by_lua_block {
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("PORTAL_MODULES"):match("a") then
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
-- fetch account limits and set download bandwidth and registry delays accordingly
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
|
||||
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
|
||||
})
|
||||
|
||||
-- fail gracefully in case /user/limits failed
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
elseif res and res.status == ngx.HTTP_OK then
|
||||
local json = require('cjson')
|
||||
local limits = json.decode(res.body)
|
||||
ngx.var.bandwidthlimit = limits.download
|
||||
ngx.var.notificationdelay = limits.registry
|
||||
end
|
||||
end
|
||||
}
|
||||
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
|
||||
proxy_pass http://sia:9980/skynet/registry/subscription?bandwidthlimit=$bandwidthlimit¬ificationdelay=$notificationdelay;
|
||||
}
|
||||
|
||||
location /skynet/skyfile {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
include /etc/nginx/conf.d/include/sia-auth;
|
||||
include /etc/nginx/conf.d/include/track-upload;
|
||||
include /etc/nginx/conf.d/include/generate-siapath;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
limit_req zone=uploads_by_ip burst=10 nodelay;
|
||||
limit_req zone=uploads_by_ip_throttled;
|
||||
|
||||
limit_conn upload_conn 5;
|
||||
limit_conn upload_conn_rl 1;
|
||||
|
||||
client_max_body_size 5000M; # make sure to limit the size of upload to a sane value
|
||||
|
||||
# increase request timeouts
|
||||
proxy_read_timeout 600;
|
||||
proxy_send_timeout 600;
|
||||
|
||||
proxy_request_buffering off; # stream uploaded files through the proxy as it comes in
|
||||
proxy_set_header Expect $http_expect;
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
||||
# proxy this call to siad endpoint (make sure the ip is correct)
|
||||
proxy_pass http://sia:9980/skynet/skyfile/$dir1/$dir2/$dir3$is_args$args;
|
||||
|
@ -162,6 +245,12 @@ location /skynet/tus {
|
|||
include /etc/nginx/conf.d/include/cors-headers; # include cors headers but do not overwrite OPTIONS response
|
||||
include /etc/nginx/conf.d/include/track-upload;
|
||||
|
||||
limit_req zone=uploads_by_ip burst=10 nodelay;
|
||||
limit_req zone=uploads_by_ip_throttled;
|
||||
|
||||
limit_conn upload_conn 5;
|
||||
limit_conn upload_conn_rl 1;
|
||||
|
||||
# TUS chunks size is 40M + leaving 10M of breathing room
|
||||
client_max_body_size 50M;
|
||||
|
||||
|
@ -181,27 +270,22 @@ location /skynet/tus {
|
|||
# proxy /skynet/tus requests to siad endpoint with all arguments
|
||||
proxy_pass http://sia:9980;
|
||||
|
||||
# set max upload size dynamically based on account limits
|
||||
rewrite_by_lua_block {
|
||||
-- set default limit value to 1 GB
|
||||
ngx.req.set_header("SkynetMaxUploadSize", 1073741824)
|
||||
access_by_lua_block {
|
||||
if require("skynet.account").accounts_enabled() then
|
||||
-- check if portal is in authenticated only mode
|
||||
if require("skynet.account").is_access_unauthorized() then
|
||||
return require("skynet.account").exit_access_unauthorized()
|
||||
end
|
||||
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then return end
|
||||
-- check if portal is in subscription only mode
|
||||
if require("skynet.account").is_access_forbidden() then
|
||||
return require("skynet.account").exit_access_forbidden()
|
||||
end
|
||||
|
||||
local httpc = require("resty.http").new()
|
||||
-- get account limits of currently authenticated user
|
||||
local limits = require("skynet.account").get_account_limits()
|
||||
|
||||
-- fetch account limits and set max upload size accordingly
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/user/limits", {
|
||||
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
|
||||
})
|
||||
|
||||
-- fail gracefully in case /user/limits failed
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
elseif res and res.status == ngx.HTTP_OK then
|
||||
local json = require('cjson')
|
||||
local limits = json.decode(res.body)
|
||||
-- apply upload size limits
|
||||
ngx.req.set_header("SkynetMaxUploadSize", limits.maxUploadSize)
|
||||
end
|
||||
}
|
||||
|
@ -226,6 +310,13 @@ location /skynet/pin {
|
|||
include /etc/nginx/conf.d/include/sia-auth;
|
||||
include /etc/nginx/conf.d/include/track-upload;
|
||||
include /etc/nginx/conf.d/include/generate-siapath;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
limit_req zone=uploads_by_ip burst=10 nodelay;
|
||||
limit_req zone=uploads_by_ip_throttled;
|
||||
|
||||
limit_conn upload_conn 5;
|
||||
limit_conn upload_conn_rl 1;
|
||||
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
proxy_pass http://sia:9980$uri?siapath=$dir1/$dir2/$dir3&$args;
|
||||
|
@ -233,13 +324,11 @@ location /skynet/pin {
|
|||
|
||||
location /skynet/metadata {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
header_filter_by_lua_block {
|
||||
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
|
||||
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
|
||||
|
||||
-- do not expose internal header
|
||||
ngx.header["Skynet-Requested-Skylink"] = ""
|
||||
}
|
||||
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
@ -248,13 +337,11 @@ location /skynet/metadata {
|
|||
|
||||
location /skynet/resolve {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
include /etc/nginx/conf.d/include/portal-access-check;
|
||||
|
||||
header_filter_by_lua_block {
|
||||
ngx.header["Skynet-Portal-Api"] = os.getenv("SKYNET_PORTAL_API")
|
||||
ngx.header["Skynet-Server-Api"] = os.getenv("SKYNET_SERVER_API")
|
||||
|
||||
-- do not expose internal header
|
||||
ngx.header["Skynet-Requested-Skylink"] = ""
|
||||
}
|
||||
|
||||
proxy_set_header User-Agent: Sia-Agent;
|
||||
|
@ -277,18 +364,7 @@ location ~ "^/file/(([a-zA-Z0-9-_]{46}|[a-z0-9]{55})(/.*)?)$" {
|
|||
include /etc/nginx/conf.d/include/location-skylink;
|
||||
}
|
||||
|
||||
location @purge {
|
||||
allow 10.0.0.0/8;
|
||||
allow 127.0.0.1/32;
|
||||
allow 172.16.0.0/12;
|
||||
allow 192.168.0.0/16;
|
||||
deny all;
|
||||
|
||||
set $lua_purge_path "/data/nginx/cache/";
|
||||
content_by_lua_file /etc/nginx/conf.d/scripts/purge-multi.lua;
|
||||
}
|
||||
|
||||
location /__internal/do/not/use/authenticated {
|
||||
location /__internal/do/not/use/accounts {
|
||||
include /etc/nginx/conf.d/include/cors;
|
||||
|
||||
charset utf-8;
|
||||
|
@ -297,29 +373,16 @@ location /__internal/do/not/use/authenticated {
|
|||
|
||||
content_by_lua_block {
|
||||
local json = require('cjson')
|
||||
local accounts_enabled = require("skynet.account").accounts_enabled()
|
||||
local is_auth_required = require("skynet.account").is_auth_required()
|
||||
local is_authenticated = accounts_enabled and require("skynet.account").is_authenticated()
|
||||
|
||||
-- this block runs only when accounts are enabled
|
||||
if os.getenv("ACCOUNTS_ENABLED") ~= "true" then
|
||||
ngx.say(json.encode{authenticated = false})
|
||||
return ngx.exit(ngx.HTTP_OK)
|
||||
end
|
||||
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
-- 10.10.10.70 points to accounts service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.70:3000/user", {
|
||||
headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
|
||||
ngx.say(json.encode{
|
||||
enabled = accounts_enabled,
|
||||
auth_required = is_auth_required,
|
||||
authenticated = is_authenticated,
|
||||
})
|
||||
|
||||
-- endpoint /user should return HTTP_OK for authenticated and HTTP_UNAUTHORIZED for not authenticated
|
||||
if res and (res.status == ngx.HTTP_OK or res.status == ngx.HTTP_UNAUTHORIZED) then
|
||||
ngx.say(json.encode{authenticated = res.status == ngx.HTTP_OK})
|
||||
return ngx.exit(ngx.HTTP_OK)
|
||||
else
|
||||
ngx.log(ngx.ERR, "Failed accounts service request /user: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
|
||||
ngx.say(json.encode{authenticated = false})
|
||||
return ngx.exit(ngx.HTTP_OK)
|
||||
end
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -5,21 +5,41 @@ location / {
|
|||
set $path $uri;
|
||||
|
||||
rewrite_by_lua_block {
|
||||
local cache = ngx.shared.dnslink
|
||||
local cache_value = cache:get(ngx.var.host)
|
||||
|
||||
if cache_value == nil then
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
-- 10.10.10.55 points to dnslink-api service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.55:3100/dnslink/" .. ngx.var.host)
|
||||
|
||||
if err or (res and res.status ~= ngx.HTTP_OK) then
|
||||
-- check whether we can fallback to regular skylink request
|
||||
local match_skylink = ngx.re.match(ngx.var.uri, "^/([a-zA-Z0-9-_]{46}|[a-z0-9]{55})(/.*)?")
|
||||
|
||||
if match_skylink then
|
||||
ngx.var.skylink = match_skylink[1]
|
||||
ngx.var.path = match_skylink[2] or "/"
|
||||
else
|
||||
ngx.status = (err and ngx.HTTP_INTERNAL_SERVER_ERROR) or res.status
|
||||
ngx.header["content-type"] = "text/plain"
|
||||
ngx.say(err or res.body)
|
||||
ngx.exit(ngx.status)
|
||||
end
|
||||
else
|
||||
ngx.var.skylink = res.body
|
||||
|
||||
local cache_ttl = 300 -- 5 minutes cache expire time
|
||||
cache:set(ngx.var.host, ngx.var.skylink, cache_ttl)
|
||||
end
|
||||
else
|
||||
ngx.var.skylink = cache_value
|
||||
end
|
||||
|
||||
ngx.var.skylink = require("skynet.skylink").parse(ngx.var.skylink)
|
||||
ngx.var.skylink_v1 = ngx.var.skylink
|
||||
ngx.var.skylink_v2 = ngx.var.skylink
|
||||
end
|
||||
}
|
||||
|
||||
include /etc/nginx/conf.d/include/location-skylink;
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
include /etc/nginx/conf.d/include/init-optional-variables;
|
||||
|
||||
location /skynet/blocklist {
|
||||
client_max_body_size 10m; # increase max body size to account for large lists
|
||||
client_body_buffer_size 10m; # force whole body to memory so we can read it
|
||||
|
||||
content_by_lua_block {
|
||||
local httpc = require("resty.http").new()
|
||||
|
||||
ngx.req.read_body() -- ensure the post body data is read before using get_body_data
|
||||
|
||||
-- proxy blocklist update request
|
||||
-- 10.10.10.10 points to sia service (alias not available when using resty-http)
|
||||
local res, err = httpc:request_uri("http://10.10.10.10:9980/skynet/blocklist", {
|
||||
method = "POST",
|
||||
body = ngx.req.get_body_data(),
|
||||
headers = {
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
["Authorization"] = require("skynet.utils").authorization_header(),
|
||||
["User-Agent"] = "Sia-Agent",
|
||||
}
|
||||
})
|
||||
|
||||
-- print error and exit with 500 or exit with response if status is not 204
|
||||
if err or (res and res.status ~= ngx.HTTP_NO_CONTENT) then
|
||||
ngx.status = (err and ngx.HTTP_INTERNAL_SERVER_ERROR) or res.status
|
||||
ngx.header["content-type"] = "text/plain"
|
||||
ngx.say(err or res.body)
|
||||
return ngx.exit(ngx.status)
|
||||
end
|
||||
|
||||
require("skynet.blocklist").reload()
|
||||
|
||||
ngx.status = ngx.HTTP_NO_CONTENT
|
||||
return ngx.exit(ngx.status)
|
||||
}
|
||||
}
|
|
@ -6,7 +6,12 @@ include /etc/nginx/conf.d/include/init-optional-variables;
|
|||
|
||||
location / {
|
||||
set_by_lua_block $skylink { return string.match(ngx.var.host, "%w+") }
|
||||
set $path $uri;
|
||||
set_by_lua_block $path {
|
||||
-- strip ngx.var.request_uri from query params - this is basically the same as ngx.var.uri but
|
||||
-- do not use ngx.var.uri because it will already be unescaped and we need to use escaped path
|
||||
-- examples: escaped uri "/b%20r56+7" and unescaped uri "/b r56 7"
|
||||
return string.gsub(ngx.var.request_uri, "?.*", "")
|
||||
}
|
||||
|
||||
include /etc/nginx/conf.d/include/location-skylink;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,301 @@
|
|||
-- source: https://github.com/aiq/basexx
|
||||
-- license: MIT
|
||||
-- modified: exposed from_basexx and to_basexx generic functions
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- util functions
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local function divide_string( str, max )
|
||||
local result = {}
|
||||
|
||||
local start = 1
|
||||
for i = 1, #str do
|
||||
if i % max == 0 then
|
||||
table.insert( result, str:sub( start, i ) )
|
||||
start = i + 1
|
||||
elseif i == #str then
|
||||
table.insert( result, str:sub( start, i ) )
|
||||
end
|
||||
end
|
||||
|
||||
return result
|
||||
end
|
||||
|
||||
local function number_to_bit( num, length )
|
||||
local bits = {}
|
||||
|
||||
while num > 0 do
|
||||
local rest = math.floor( math.fmod( num, 2 ) )
|
||||
table.insert( bits, rest )
|
||||
num = ( num - rest ) / 2
|
||||
end
|
||||
|
||||
while #bits < length do
|
||||
table.insert( bits, "0" )
|
||||
end
|
||||
|
||||
return string.reverse( table.concat( bits ) )
|
||||
end
|
||||
|
||||
local function ignore_set( str, set )
|
||||
if set then
|
||||
str = str:gsub( "["..set.."]", "" )
|
||||
end
|
||||
return str
|
||||
end
|
||||
|
||||
local function pure_from_bit( str )
|
||||
return ( str:gsub( '........', function ( cc )
|
||||
return string.char( tonumber( cc, 2 ) )
|
||||
end ) )
|
||||
end
|
||||
|
||||
local function unexpected_char_error( str, pos )
|
||||
local c = string.sub( str, pos, pos )
|
||||
return string.format( "unexpected character at position %d: '%s'", pos, c )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local basexx = {}
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- base2(bitfield) decode and encode function
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local bitMap = { o = "0", i = "1", l = "1" }
|
||||
|
||||
function basexx.from_bit( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
str = string.lower( str )
|
||||
str = str:gsub( '[ilo]', function( c ) return bitMap[ c ] end )
|
||||
local pos = string.find( str, "[^01]" )
|
||||
if pos then return nil, unexpected_char_error( str, pos ) end
|
||||
|
||||
return pure_from_bit( str )
|
||||
end
|
||||
|
||||
function basexx.to_bit( str )
|
||||
return ( str:gsub( '.', function ( c )
|
||||
local byte = string.byte( c )
|
||||
local bits = {}
|
||||
for _ = 1,8 do
|
||||
table.insert( bits, byte % 2 )
|
||||
byte = math.floor( byte / 2 )
|
||||
end
|
||||
return table.concat( bits ):reverse()
|
||||
end ) )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- base16(hex) decode and encode function
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
function basexx.from_hex( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
local pos = string.find( str, "[^%x]" )
|
||||
if pos then return nil, unexpected_char_error( str, pos ) end
|
||||
|
||||
return ( str:gsub( '..', function ( cc )
|
||||
return string.char( tonumber( cc, 16 ) )
|
||||
end ) )
|
||||
end
|
||||
|
||||
function basexx.to_hex( str )
|
||||
return ( str:gsub( '.', function ( c )
|
||||
return string.format('%02X', string.byte( c ) )
|
||||
end ) )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- generic function to decode and encode base32/base64
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
function basexx.from_basexx( str, alphabet, bits )
|
||||
local result = {}
|
||||
for i = 1, #str do
|
||||
local c = string.sub( str, i, i )
|
||||
if c ~= '=' then
|
||||
local index = string.find( alphabet, c, 1, true )
|
||||
if not index then
|
||||
return nil, unexpected_char_error( str, i )
|
||||
end
|
||||
table.insert( result, number_to_bit( index - 1, bits ) )
|
||||
end
|
||||
end
|
||||
|
||||
local value = table.concat( result )
|
||||
local pad = #value % 8
|
||||
return pure_from_bit( string.sub( value, 1, #value - pad ) )
|
||||
end
|
||||
|
||||
function basexx.to_basexx( str, alphabet, bits, pad )
|
||||
local bitString = basexx.to_bit( str )
|
||||
|
||||
local chunks = divide_string( bitString, bits )
|
||||
local result = {}
|
||||
for _,value in ipairs( chunks ) do
|
||||
if ( #value < bits ) then
|
||||
value = value .. string.rep( '0', bits - #value )
|
||||
end
|
||||
local pos = tonumber( value, 2 ) + 1
|
||||
table.insert( result, alphabet:sub( pos, pos ) )
|
||||
end
|
||||
|
||||
table.insert( result, pad )
|
||||
return table.concat( result )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- rfc 3548: http://www.rfc-editor.org/rfc/rfc3548.txt
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local base32Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
|
||||
local base32PadMap = { "", "======", "====", "===", "=" }
|
||||
|
||||
function basexx.from_base32( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
return basexx.from_basexx( string.upper( str ), base32Alphabet, 5 )
|
||||
end
|
||||
|
||||
function basexx.to_base32( str )
|
||||
return basexx.to_basexx( str, base32Alphabet, 5, base32PadMap[ #str % 5 + 1 ] )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- crockford: http://www.crockford.com/wrmg/base32.html
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local crockfordAlphabet = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
|
||||
local crockfordMap = { O = "0", I = "1", L = "1" }
|
||||
|
||||
function basexx.from_crockford( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
str = string.upper( str )
|
||||
str = str:gsub( '[ILOU]', function( c ) return crockfordMap[ c ] end )
|
||||
return basexx.from_basexx( str, crockfordAlphabet, 5 )
|
||||
end
|
||||
|
||||
function basexx.to_crockford( str )
|
||||
return basexx.to_basexx( str, crockfordAlphabet, 5, "" )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- base64 decode and encode function
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local base64Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"..
|
||||
"abcdefghijklmnopqrstuvwxyz"..
|
||||
"0123456789+/"
|
||||
local base64PadMap = { "", "==", "=" }
|
||||
|
||||
function basexx.from_base64( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
return basexx.from_basexx( str, base64Alphabet, 6 )
|
||||
end
|
||||
|
||||
function basexx.to_base64( str )
|
||||
return basexx.to_basexx( str, base64Alphabet, 6, base64PadMap[ #str % 3 + 1 ] )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- URL safe base64 decode and encode function
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local url64Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"..
|
||||
"abcdefghijklmnopqrstuvwxyz"..
|
||||
"0123456789-_"
|
||||
|
||||
function basexx.from_url64( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
return basexx.from_basexx( str, url64Alphabet, 6 )
|
||||
end
|
||||
|
||||
function basexx.to_url64( str )
|
||||
return basexx.to_basexx( str, url64Alphabet, 6, "" )
|
||||
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
--
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
local function length_error( len, d )
|
||||
return string.format( "invalid length: %d - must be a multiple of %d", len, d )
|
||||
end
|
||||
|
||||
local z85Decoder = { 0x00, 0x44, 0x00, 0x54, 0x53, 0x52, 0x48, 0x00,
|
||||
0x4B, 0x4C, 0x46, 0x41, 0x00, 0x3F, 0x3E, 0x45,
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
||||
0x08, 0x09, 0x40, 0x00, 0x49, 0x42, 0x4A, 0x47,
|
||||
0x51, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
|
||||
0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32,
|
||||
0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A,
|
||||
0x3B, 0x3C, 0x3D, 0x4D, 0x00, 0x4E, 0x43, 0x00,
|
||||
0x00, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
|
||||
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
|
||||
0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
|
||||
0x21, 0x22, 0x23, 0x4F, 0x00, 0x50, 0x00, 0x00 }
|
||||
|
||||
function basexx.from_z85( str, ignore )
|
||||
str = ignore_set( str, ignore )
|
||||
if ( #str % 5 ) ~= 0 then
|
||||
return nil, length_error( #str, 5 )
|
||||
end
|
||||
|
||||
local result = {}
|
||||
|
||||
local value = 0
|
||||
for i = 1, #str do
|
||||
local index = string.byte( str, i ) - 31
|
||||
if index < 1 or index >= #z85Decoder then
|
||||
return nil, unexpected_char_error( str, i )
|
||||
end
|
||||
value = ( value * 85 ) + z85Decoder[ index ]
|
||||
if ( i % 5 ) == 0 then
|
||||
local divisor = 256 * 256 * 256
|
||||
while divisor ~= 0 do
|
||||
local b = math.floor( value / divisor ) % 256
|
||||
table.insert( result, string.char( b ) )
|
||||
divisor = math.floor( divisor / 256 )
|
||||
end
|
||||
value = 0
|
||||
end
|
||||
end
|
||||
|
||||
return table.concat( result )
|
||||
end
|
||||
|
||||
local z85Encoder = "0123456789"..
|
||||
"abcdefghijklmnopqrstuvwxyz"..
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"..
|
||||
".-:+=^!/*?&<>()[]{}@%$#"
|
||||
|
||||
-- Encode raw bytes as a Z85 (base85) string.
-- str: binary input whose length must be a multiple of 4.
-- Returns the encoded string, or nil plus an error message.
function basexx.to_z85( str )
   if ( #str % 4 ) ~= 0 then
      return nil, length_error( #str, 4 )
   end

   local out = {}
   local accum = 0
   for pos = 1, #str do
      accum = accum * 256 + str:byte( pos )
      if pos % 4 == 0 then
         -- each 4-byte group becomes 5 alphabet characters, most
         -- significant digit first
         local divisor = 85 * 85 * 85 * 85
         repeat
            local index = math.floor( accum / divisor ) % 85 + 1
            out[ #out + 1 ] = z85Encoder:sub( index, index )
            divisor = math.floor( divisor / 85 )
         until divisor == 0
         accum = 0
      end
   end

   return table.concat( out )
end
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
return basexx
|
|
@ -0,0 +1,107 @@
|
|||
local _M = {}

-- fallback limits for unauthenticated users
-- remember to keep these in sync with the accounts service tiers
local anon_limits = {
   tierName = "anonymous",
   upload = 655360,
   download = 655360,
   maxUploadSize = 1073741824,
   registry = 250,
}

-- unrestricted limits applied to internal traffic
local no_limits = {
   tierName = "internal",
   upload = 0,
   download = 0,
   maxUploadSize = 0,
   registry = 0,
}

-- name of the free account tier
local free_tier = "free"
|
||||
|
||||
-- Terminate the request with 401 when the portal is restricted to
-- authenticated users only. `message` optionally overrides the default
-- response body.
function _M.exit_access_unauthorized(message)
   local body = message or "Portal operator restricted access to authenticated users only"
   ngx.status = ngx.HTTP_UNAUTHORIZED
   ngx.header["content-type"] = "text/plain"
   ngx.say(body)
   return ngx.exit(ngx.status)
end
|
||||
|
||||
-- Terminate the request with 403 when the portal is restricted to users
-- with an active subscription. `message` optionally overrides the
-- default response body.
function _M.exit_access_forbidden(message)
   local body = message or "Portal operator restricted access to users with active subscription only"
   ngx.status = ngx.HTTP_FORBIDDEN
   ngx.header["content-type"] = "text/plain"
   ngx.say(body)
   return ngx.exit(ngx.status)
end
|
||||
|
||||
-- Whether the accounts module is enabled on this portal.
-- PORTAL_MODULES lists enabled modules by letter; "a" marks accounts.
-- NOTE(review): if PORTAL_MODULES is unset, os.getenv returns nil and
-- this raises "attempt to index a nil value" -- presumably the variable
-- is always declared via the nginx `env` directive; confirm before
-- hardening.
function _M.accounts_enabled()
   local modules = os.getenv("PORTAL_MODULES")
   return modules:match("a") ~= nil
end
|
||||
|
||||
-- Resolve account limits for the current request.
-- Order: internal no-limits flag, anonymous fallback when no skynet-jwt
-- cookie is present, the per-request cached value, and finally a call to
-- the accounts service. Returns the decoded limits table.
function _M.get_account_limits()
   local cjson = require('cjson')

   -- internal requests bypass all limits
   if ngx.var.internal_no_limits == "true" then
      return no_limits
   end

   -- without a jwt cookie the request cannot be tied to an account
   if ngx.var.skynet_jwt == "" then
      return anon_limits
   end

   if ngx.var.account_limits == "" then
      local client = require("resty.http").new()

      -- 10.10.10.70 points to accounts service (alias not available when using resty-http)
      local response, request_err = client:request_uri("http://10.10.10.70:3000/user/limits", {
         headers = { ["Cookie"] = "skynet-jwt=" .. ngx.var.skynet_jwt }
      })

      -- fail gracefully in case /user/limits failed: log the failure and
      -- cache anonymous limits for the rest of this request
      if request_err or (response and response.status ~= ngx.HTTP_OK) then
         ngx.log(ngx.ERR, "Failed accounts service request /user/limits: ", request_err or ("[HTTP " .. response.status .. "] " .. response.body))
         ngx.var.account_limits = cjson.encode(anon_limits)
      elseif response and response.status == ngx.HTTP_OK then
         ngx.var.account_limits = response.body
      end
   end

   return cjson.decode(ngx.var.account_limits)
end
|
||||
|
||||
-- True when the request belongs to a known (non-anonymous) account.
function _M.is_authenticated()
   return _M.get_account_limits().tierName ~= anon_limits.tierName
end
|
||||
|
||||
-- True when the request belongs to a paid account: neither anonymous
-- nor on the free tier.
function _M.is_subscription_account()
   local tier = _M.get_account_limits().tierName

   return tier ~= anon_limits.tierName and tier ~= free_tier
end
|
||||
|
||||
-- True when the operator limited portal access to authenticated users.
function _M.is_auth_required()
   local access = os.getenv("ACCOUNTS_LIMIT_ACCESS")
   return access == "authenticated"
end
|
||||
|
||||
-- True when the operator limited portal access to subscribed users.
function _M.is_subscription_required()
   local access = os.getenv("ACCOUNTS_LIMIT_ACCESS")
   return access == "subscription"
end
|
||||
|
||||
-- Requests that may never be blocked by account restrictions:
-- OPTIONS preflight requests do not attach cookies, and portals without
-- the accounts module enabled must not limit anything.
-- Fix: declared `local` -- the original `function is_access_always_allowed()`
-- leaked a global into the Lua VM. Subsequent callers in this chunk see
-- the local. NOTE(review): assumes no other file referenced the global
-- name -- confirm.
local function is_access_always_allowed()
   return ngx.req.get_method() == "OPTIONS" or not _M.accounts_enabled()
end
|
||||
|
||||
-- Whether the request must be rejected with 401: the portal requires
-- authentication and this request is not authenticated.
function _M.is_access_unauthorized()
   if is_access_always_allowed() then
      return false
   end

   return _M.is_auth_required() and not _M.is_authenticated()
end
|
||||
|
||||
-- Whether the request must be rejected with 403: the portal requires an
-- active subscription and this request's account does not have one.
function _M.is_access_forbidden()
   if is_access_always_allowed() then
      return false
   end

   return _M.is_subscription_required() and not _M.is_subscription_account()
end
|
||||
|
||||
return _M
|
|
@ -0,0 +1,66 @@
|
|||
local _M = {}
|
||||
|
||||
-- Refresh the shared-dict blocklist from skyd.
-- A failed fetch aborts the whole request: the blocklist must be warm
-- before the first skylink is served.
function _M.reload()
   local httpc = require("resty.http").new()

   -- fetch blocklist records (all blocked skylink hashes)
   -- 10.10.10.10 points to sia service (alias not available when using resty-http)
   local res, err = httpc:request_uri("http://10.10.10.10:9980/skynet/blocklist", {
      headers = {
         ["User-Agent"] = "Sia-Agent",
      }
   })

   if err or (res and res.status ~= ngx.HTTP_OK) then
      -- hard failure: log it, surface the upstream error and stop here
      ngx.log(ngx.ERR, "Failed skyd service request /skynet/blocklist: ", err or ("[HTTP " .. res.status .. "] " .. res.body))
      ngx.status = (err and ngx.HTTP_INTERNAL_SERVER_ERROR) or res.status
      ngx.header["content-type"] = "text/plain"
      ngx.say(err or res.body)
      return ngx.exit(ngx.status)
   elseif res and res.status == ngx.HTTP_OK then
      local data = require('cjson').decode(res.body)

      -- expire every existing entry so stale hashes can age out
      ngx.shared.blocklist:flush_all()

      -- blocklist is cjson null (not a table) when empty
      if type(data.blocklist) == "table" then
         -- re-set each cache entry one by one (resets expiration)
         for _, hash in ipairs(data.blocklist) do
            ngx.shared.blocklist:set(hash, true)
         end
      end

      -- persist the init flag so is_blocked skips reloading next time
      ngx.shared.blocklist:set("__init", true)

      -- drop entries that expired above and were not re-added
      ngx.shared.blocklist:flush_expired()
   end
end
|
||||
|
||||
-- Check whether a skylink is on the blocklist.
function _M.is_blocked(skylink)
   -- lazily warm the blocklist cache on first use
   if not ngx.shared.blocklist:get("__init") then
      _M.reload()
   end

   -- the blocklist stores blake2b hashes, not raw skylinks
   local hash = require("skynet.skylink").hash(skylink)

   -- get_stale is required here: reload() expires (not deletes) the
   -- previous entries, and they must keep blocking until the refresh
   -- has finished
   return ngx.shared.blocklist:get_stale(hash) == true
end
|
||||
|
||||
-- Abort the request with 451 Unavailable For Legal Reasons
-- (ngx.HTTP_ILLEGAL; the original comment said 416, which did not match
-- the response text).
function _M.exit_illegal()
   local reason = "Unavailable For Legal Reasons"
   ngx.status = ngx.HTTP_ILLEGAL
   ngx.header["content-type"] = "text/plain"
   ngx.say(reason)
   return ngx.exit(ngx.status)
end
|
||||
|
||||
return _M
|
|
@ -0,0 +1,40 @@
|
|||
local _M = {}
|
||||
|
||||
local basexx = require("basexx")
|
||||
local hasher = require("hasher")
|
||||
|
||||
-- Normalize a skylink to its base64 form.
-- 55-character skylinks are base32 encoded and get re-encoded; any other
-- input is returned unchanged.
function _M.parse(skylink)
   if #skylink ~= 55 then
      return skylink
   end

   local decoded = basexx.from_basexx(string.upper(skylink), "0123456789ABCDEFGHIJKLMNOPQRSTUV", 5)

   return basexx.to_url64(decoded)
end
|
||||
|
||||
-- Hash a skylink into the lowercase hex blake2b digest used by the
-- blocklist.
function _M.hash(skylink)
   -- normalize to base64 encoding, then recover the raw bytes
   local raw = basexx.from_url64(_M.parse(skylink))

   -- drop the first two bytes, keeping just the merkle root
   local merkleRoot = string.sub(raw, 3)

   -- blake2b with a digest length of 32 bytes
   local digest = hasher.blake2b(merkleRoot, 32)

   -- hex encode the digest and lowercase it
   return string.lower(basexx.to_hex(digest))
end
|
||||
|
||||
return _M
|
|
@ -0,0 +1,23 @@
|
|||
local skynet_skylink = require("skynet.skylink")
|
||||
|
||||
describe("parse", function()
   -- same skylink in both encodings
   local base32 = "0404dsjvti046fsua4ktor9grrpe76erq9jot9cvopbhsvsu76r4r30"
   local base64 = "AQBG8n_sgEM_nlEp3G0w3vLjmdvSZ46ln8ZXHn-eObZNjA"

   it("should return unchanged base64 skylink", function()
      assert.is.same(base64, skynet_skylink.parse(base64))
   end)

   it("should transform base32 skylink into base64", function()
      assert.is.same(base64, skynet_skylink.parse(base32))
   end)
end)
|
||||
|
||||
describe("hash", function()
   local input = "EADi4QZWt87sSDCSjVTcmyI5tE_YAsuC90BcCi_jEmG5NA"
   local expected = "6cfb9996ad74e5614bbb8e7228e72f1c1bc14dd9ce8a83b3ccabdb6d8d70f330"

   it("should hash skylink", function()
      assert.is.same(expected, skynet_skylink.hash(input))
   end)
end)
|
|
@ -0,0 +1,23 @@
|
|||
local _M = {}
|
||||
|
||||
-- Build the Basic authorization header value for sia API requests.
-- Reads the api password from the SIA_API_PASSWORD env variable, falling
-- back to the password file mounted from the host system.
function _M.authorization_header()
   local apipassword = os.getenv("SIA_API_PASSWORD")
   -- if the api password is not available as an env variable, read it from disk
   if apipassword == nil or apipassword == "" then
      -- open the apipassword file for reading (b flag is required for some reason)
      -- (file /data/sia/apipassword has to be mounted from the host system)
      local apipassword_file = io.open("/data/sia/apipassword", "rb")
      -- read the file contents and trim whitespace/newline (important)
      apipassword = apipassword_file:read("*all"):gsub("%s+", "")
      -- fix: the original called apipassword_file.close() (dot call, no
      -- self), which raises "bad argument #1 to 'close'" instead of
      -- closing the file handle
      apipassword_file:close()
   end
   -- sia authenticates as user:password with an empty user, so the
   -- credential string is just ":password", base64url encoded
   local content = require("ngx.base64").encode_base64url(":" .. apipassword)
   return "Basic " .. content
end
|
||||
|
||||
return _M
|
|
@ -28,7 +28,10 @@ worker_processes auto;
|
|||
# declare env variables to make them available in this config
|
||||
env SKYNET_PORTAL_API;
|
||||
env SKYNET_SERVER_API;
|
||||
env ACCOUNTS_ENABLED;
|
||||
env PORTAL_MODULES;
|
||||
env ACCOUNTS_LIMIT_ACCESS;
|
||||
env SIA_API_PASSWORD;
|
||||
env SKYD_DISK_CACHE_ENABLED;
|
||||
|
||||
events {
|
||||
worker_connections 8192;
|
||||
|
@ -38,6 +41,8 @@ http {
|
|||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
lua_package_path "/etc/nginx/libs/?.lua;;";
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" $upstream_response_time '
|
||||
|
@ -45,7 +50,7 @@ http {
|
|||
'"$upstream_http_content_type" "$upstream_cache_status" '
|
||||
'"$server_alias" "$sent_http_skynet_skylink" '
|
||||
'$upstream_connect_time $upstream_header_time '
|
||||
'$request_time "$hns_domain"';
|
||||
'$request_time "$hns_domain" "$skylink"';
|
||||
|
||||
access_log logs/access.log main;
|
||||
|
||||
|
@ -68,12 +73,24 @@ http {
|
|||
proxy_http_version 1.1;
|
||||
|
||||
# proxy cache definition
|
||||
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=skynet:10m max_size=50g inactive=48h use_temp_path=off;
|
||||
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=skynet:10m max_size=50g min_free=100g inactive=48h use_temp_path=off;
|
||||
|
||||
# create a shared blocklist dictionary with size of 30 megabytes
|
||||
# estimated capacity of 1 megabyte dictionary is 3500 blocklist entries
|
||||
# that gives us capacity of around 100k entries in 30 megabyte dictionary
|
||||
lua_shared_dict blocklist 30m;
|
||||
|
||||
# create a shared dictionary to fill with skylinks that should not
|
||||
# be cached due to the large size or some other reasons
|
||||
lua_shared_dict nocache 10m;
|
||||
|
||||
# this runs before forking out nginx worker processes
|
||||
init_by_lua_block {
|
||||
require "cjson"
|
||||
require "resty.http"
|
||||
require "skynet.blocklist"
|
||||
require "skynet.skylink"
|
||||
require "skynet.utils"
|
||||
}
|
||||
|
||||
# include skynet-portal-api and skynet-server-api header on every request
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
NEXT_PUBLIC_SKYNET_PORTAL_API=https://siasky.net
|
||||
NEXT_PUBLIC_SKYNET_DASHBOARD_URL=https://account.siasky.net
|
||||
NEXT_PUBLIC_KRATOS_BROWSER_URL=https://account.siasky.net/.ory/kratos/public
|
||||
NEXT_PUBLIC_KRATOS_PUBLIC_URL=https://account.siasky.net/.ory/kratos/public
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue