Compare commits
202 Commits
use-skyd-d...master
Author | SHA1 | Date |
---|---|---|
Karol Wypchło | 3da3cf949f | |
renovate[bot] | 15d5d7c0d7 | |
Karol Wypchło | 9559ec160c | |
renovate[bot] | f21f718c13 | |
Karol Wypchło | f1ac4c5653 | |
Karol Wypchło | 17ea999288 | |
renovate[bot] | f91cb72db7 | |
renovate[bot] | f83f3cefe8 | |
Karol Wypchło | e63fcbecb7 | |
Karol Wypchło | 5ff2f5891e | |
renovate[bot] | b7afcd1feb | |
Karol Wypchlo | ba106d69f2 | |
Karol Wypchło | 8ce5c9d7e6 | |
Karol Wypchlo | b2313c602a | |
Karol Wypchlo | 2ea337a3b0 | |
Karol Wypchło | 919d22b314 | |
renovate[bot] | 983602e5aa | |
Karol Wypchło | bb8485b1cc | |
renovate[bot] | 91e8fad3b1 | |
Karol Wypchło | 74b72f4f47 | |
Karol Wypchło | a940f2728f | |
Karol Wypchło | 634c623e48 | |
Karol Wypchło | d926c22aa4 | |
Karol Wypchło | 9958b66faf | |
Karol Wypchło | 8381555ce7 | |
Karol Wypchło | 94860262a5 | |
Karol Wypchło | c4243968e1 | |
Karol Wypchło | c49cb57315 | |
Karol Wypchło | 1334537729 | |
Karol Wypchło | 823efb2238 | |
renovate[bot] | b3c300d7bf | |
renovate[bot] | 89a263bfc6 | |
renovate[bot] | d5cc81f934 | |
renovate[bot] | 1f0d66a33a | |
Karol Wypchło | 686e20b8a3 | |
Karol Wypchło | 70b80bb072 | |
Karol Wypchło | 167a56383f | |
Karol Wypchło | c054ffb0ea | |
Karol Wypchlo | 17e4d782ca | |
Karol Wypchlo | 4b52d3c671 | |
Karol Wypchlo | c85d788939 | |
Karol Wypchło | e4cd4bf991 | |
renovate[bot] | 263b733480 | |
Ivaylo Novakov | 7ddec93e59 | |
Christopher Schinnerl | ac7942640f | |
Christopher Schinnerl | 46f8ef0836 | |
Karol Wypchlo | 63323685cc | |
Ivaylo Novakov | e281e9ca78 | |
Karol Wypchlo | d753be9383 | |
Karol Wypchło | bcb0c0cf24 | |
Karol Wypchło | 47046ab31e | |
renovate[bot] | be8359791e | |
renovate[bot] | 4689844014 | |
Karol Wypchło | 1d1096fd3b | |
Karol Wypchlo | 08c21cafe4 | |
Ivaylo Novakov | 378ce234dc | |
renovate[bot] | 3f66dd30dd | |
Karol Wypchło | 1c3bcbc063 | |
Ivaylo Novakov | 4ccbd351f5 | |
Ivaylo Novakov | 00a1481091 | |
Ivaylo Novakov | e566dad718 | |
renovate[bot] | 2ec8b1bdec | |
renovate[bot] | 72e7de53db | |
Ivaylo Novakov | cb81ff738b | |
Ivaylo Novakov | 9477ae03a5 | |
renovate[bot] | 88bd55fe64 | |
PJ | 2de3d03cd0 | |
PJ | 47175686db | |
Christopher Schinnerl | b510f2e5c3 | |
renovate[bot] | be3db7aa0f | |
Christopher Schinnerl | fd1dfb2185 | |
renovate[bot] | 7e0f429ec4 | |
Christopher Schinnerl | 61cd003013 | |
renovate[bot] | 9d728b1855 | |
Karol Wypchło | 681d0d95bc | |
renovate[bot] | 739b96cd5a | |
Ivaylo Novakov | e629e7bc7f | |
Karol Wypchło | 7d33fbd93f | |
renovate[bot] | 500fb3bfd7 | |
Karol Wypchło | fd53ef99c1 | |
renovate[bot] | 9964e276fc | |
Karol Wypchło | e378e3adf9 | |
Karol Wypchło | 1caaebbefe | |
Karol Wypchło | a6403b94c1 | |
renovate[bot] | 0e841cacda | |
renovate[bot] | 185fc811fb | |
renovate[bot] | eacdd78cc1 | |
Matthew Sevey | 9ca5c48629 | |
Matthew Sevey | 0aeb98ff76 | |
Matthew Sevey | dfb5e32db5 | |
Ivaylo Novakov | 4a7ea93076 | |
Ivaylo Novakov | 89f7b96d5e | |
Ivaylo Novakov | 475bcc310a | |
renovate[bot] | d24edc4285 | |
renovate[bot] | d1e0ee0fe6 | |
renovate[bot] | ffd9af803a | |
Christopher Schinnerl | 655851cb69 | |
renovate[bot] | 61e9d3170d | |
Ivaylo Novakov | 47cd62f793 | |
renovate[bot] | 36d65fadc8 | |
Ivaylo Novakov | 96d876b1d2 | |
renovate[bot] | 8414d9b793 | |
Ivaylo Novakov | c70bc4b953 | |
Karol Wypchlo | 169ca25d67 | |
Karol Wypchło | 36f37e9be8 | |
Ivaylo Novakov | ab4468ebf5 | |
Matthew Sevey | e35a0c8354 | |
Ivaylo Novakov | 350d2e30fa | |
Matthew Sevey | 7deaf1c0b9 | |
Ivaylo Novakov | fd41aa0518 | |
Karol Wypchło | 5e1fd0dcf1 | |
Karol Wypchlo | 063f0b0b7b | |
Matthew Sevey | fea1366075 | |
Karol Wypchlo | 747bc1659a | |
Karol Wypchlo | ad423e9050 | |
Karol Wypchło | 259b25c8c1 | |
renovate[bot] | ed8a2e1f17 | |
Karol Wypchlo | e53a8ba46a | |
Karol Wypchlo | 9e1a8600a5 | |
Karol Wypchlo | 0a26516bdf | |
Karol Wypchło | 08dae2e238 | |
renovate[bot] | 1b1ecb2c11 | |
Christopher Schinnerl | 088bef8937 | |
Karol Wypchlo | 43d1da72fb | |
Karol Wypchło | 67313136df | |
Karol Wypchło | 60d767cf69 | |
renovate[bot] | 7a34fc914d | |
renovate[bot] | d85f24428b | |
Matthew Sevey | d6a6f4f878 | |
Karol Wypchło | c8ff1fb7f4 | |
Karol Wypchło | a63689ed53 | |
renovate[bot] | 63465b930e | |
Karol Wypchlo | a7c18b85c1 | |
Ivaylo Novakov | fb2ba712f4 | |
Ivaylo Novakov | 7e2d69cc9f | |
Ivaylo Novakov | 591ef6ab69 | |
Ivaylo Novakov | 649914dc18 | |
Karol Wypchlo | aa0a91472b | |
renovate[bot] | 45feb45d31 | |
renovate[bot] | 1f13674785 | |
renovate[bot] | ad743b3a3d | |
renovate[bot] | a78f1d2745 | |
Karol Wypchło | a41ea10f64 | |
PJ | 82e869bf34 | |
Ivaylo Novakov | c7d445e44a | |
Ivaylo Novakov | 61f153efe6 | |
Ivaylo Novakov | 03df7c7b33 | |
Ivaylo Novakov | deaf20306a | |
Karol Wypchło | 4ca5bb397e | |
Karol Wypchło | f6fbf4242d | |
Michał Leszczyk | 453f1210ab | |
renovate[bot] | b636894aa2 | |
Michał Leszczyk | fb75018ce0 | |
Michał Leszczyk | 8ca17995e2 | |
Michał Leszczyk | dd74ad8b3b | |
Michał Leszczyk | d13a4ae154 | |
renovate[bot] | 79c8cd997a | |
renovate[bot] | 2c3978babf | |
Karol Wypchło | 2f2c90f83e | |
Karol Wypchlo | f3ec6f5f33 | |
Karol Wypchło | b2814a68d9 | |
Ivaylo Novakov | 4f7729052c | |
renovate[bot] | 60717027dd | |
Karol Wypchlo | 266e9a9307 | |
Karol Wypchlo | dd5e362b2d | |
Karol Wypchlo | 5031229e18 | |
Karol Wypchlo | 241de3f2ae | |
Matthew Sevey | 7b3d8d1430 | |
David Vorick | a395f84cdb | |
Karol Wypchło | 59af9b8ae6 | |
Karol Wypchło | 743b5940d6 | |
Karol Wypchło | a6868085a6 | |
Ivaylo Novakov | 44a0f74bd2 | |
Ivaylo Novakov | 1f2e274e55 | |
Ivaylo Novakov | 2d28208722 | |
renovate[bot] | 2941d33ee5 | |
renovate[bot] | 62e300ca0c | |
renovate[bot] | 04f3579f60 | |
renovate[bot] | 9e8def4c5d | |
Ivaylo Novakov | 6bfc12287d | |
Ivaylo Novakov | 919061a6ec | |
renovate[bot] | 975ede05f5 | |
Karol Wypchlo | db579f6ebd | |
Michał Leszczyk | beb6e2c3af | |
Ivaylo Novakov | 5a47c89e59 | |
renovate[bot] | b3d344ec81 | |
Matthew Sevey | 6343cff81b | |
Ivaylo Novakov | c341e3e4f8 | |
Ivaylo Novakov | e3be698f34 | |
Karol Wypchło | dd192c11b2 | |
renovate[bot] | 8bc3da2bfb | |
renovate[bot] | b57dd97b24 | |
Ivaylo Novakov | d00a472b94 | |
Ivaylo Novakov | e4178ec85a | |
Ivaylo Novakov | e9c7613c08 | |
Ivaylo Novakov | 6327e5a961 | |
renovate[bot] | fa156a208d | |
renovate[bot] | 46d1099561 | |
renovate[bot] | cdf5c3f732 | |
renovate[bot] | f17e78f7c4 | |
Matthew Sevey | e273491360 | |
renovate[bot] | a603879277 |
@@ -0,0 +1 @@
+* @kwypchlo @meeh0w
@@ -1,71 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ main ]
-  schedule:
-    - cron: '32 21 * * 0'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'javascript', 'python' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v2
-
-      # Initializes the CodeQL tools for scanning.
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
-        with:
-          languages: ${{ matrix.language }}
-          # If you wish to specify custom queries, you can do so here or in a config file.
-          # By default, queries listed here will override any specified in a config file.
-          # Prefix the list here with "+" to use these queries and those in the config file.
-          # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
-      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-      # If this step fails, then you should remove it and run the build manually (see below)
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
-
-      # ℹ️ Command-line programs to run using the OS shell.
-      # 📚 https://git.io/JvXDl
-
-      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-      #    and modify them (or add more) to build your code if your project
-      #    uses a compiled language
-
-      #- run: |
-      #   make bootstrap
-      #   make release
-
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
@@ -1,21 +0,0 @@
-name: Dockerfile Lint
-
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-
-jobs:
-  hadolint:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        dockerfile:
-          - docker/sia/Dockerfile
-    steps:
-      - uses: actions/checkout@v3
-      - uses: hadolint/hadolint-action@v2.0.0
-        with:
-          dockerfile: ${{ matrix.dockerfile }}
README.md (33 changes)
@@ -8,23 +8,6 @@ supports is located at https://portal-docs.skynetlabs.com/.
 Some scripts and setup documentation contained in this repository
 (`skynet-webportal`) may be outdated and generally should not be used.
 
-## Web application
-
-Change current directory with `cd packages/website`.
-
-Use `yarn start` to start the development server.
-
-Use `yarn build` to compile the application to `/public` directory.
-
-You can use the below build parameters to customize your web application.
-
-- development example `GATSBY_API_URL=https://siasky.dev yarn start`
-- production example `GATSBY_API_URL=https://siasky.net yarn build`
-
-List of available parameters:
-
-- `GATSBY_API_URL`: override api url (defaults to location origin)
-
 ## License
 
 Skynet uses a custom [License](./LICENSE.md). The Skynet License is a source code license that allows you to use, modify
@@ -33,19 +16,3 @@ and distribute the software, but you must preserve the payment mechanism in the
 For the purposes of complying with our code license, you can use the following Siacoin address:
 
 `fb6c9320bc7e01fbb9cd8d8c3caaa371386928793c736837832e634aaaa484650a3177d6714a`
-
-## Running a Portal
-
-For those interested in running a Webportal, head over to our developer docs [here](https://portal-docs.skynetlabs.com/) to learn more.
-
-## Contributing
-
-### Testing Your Code
-
-Before pushing your code, you should verify that it will pass our online test suite.
-
-**Cypress Tests**
-Verify the Cypress test suite by doing the following:
-
-1. In one terminal screen run `GATSBY_API_URL=https://siasky.net website serve`
-1. In a second terminal screen run `yarn cypress run`
dc (31 changes)
@@ -5,56 +5,59 @@
 # would use docker-compose with the only difference being that you don't need to specify compose files. For more
 # information you can run `./dc` or `./dc help`.
 
-if [ -f .env ]; then
-  OLD_IFS=$IFS
-  IFS=$'\n'
-  for x in $(grep -v '^#.*' .env); do export $x; done
-  IFS=$OLD_IFS
+# get current working directory of this script and prefix all files with it to
+# be able to call this script from anywhere and not only root directory of
+# skynet-webportal project
+cwd="$(dirname -- "$0";)";
+
+# get portal modules configuration from .env file (if defined more than once, the last one is used)
+if [[ -f "${cwd}/.env" ]]; then
+  PORTAL_MODULES=$(grep -e "^PORTAL_MODULES=" ${cwd}/.env | tail -1 | sed "s/PORTAL_MODULES=//")
 fi
 
 # include base docker compose file
-COMPOSE_FILES="-f docker-compose.yml"
+COMPOSE_FILES="-f ${cwd}/docker-compose.yml"
 
 for i in $(seq 1 ${#PORTAL_MODULES}); do
   # accounts module - alias "a"
   if [[ ${PORTAL_MODULES:i-1:1} == "a" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.accounts.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.accounts.yml"
   fi
 
   # blocker module - alias "b"
   if [[ ${PORTAL_MODULES:i-1:1} == "b" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.blocker.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml"
   fi
 
   # jaeger module - alias "j"
   if [[ ${PORTAL_MODULES:i-1:1} == "j" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.jaeger.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.jaeger.yml"
   fi
 
   # malware-scanner module - alias "s"
   if [[ ${PORTAL_MODULES:i-1:1} == "s" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.blocker.yml -f docker-compose.mongodb.yml -f docker-compose.malware-scanner.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.malware-scanner.yml"
   fi
 
   # mongodb module - alias "m"
   if [[ ${PORTAL_MODULES:i-1:1} == "m" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml"
   fi
 
   # abuse-scanner module - alias "u"
   if [[ ${PORTAL_MODULES:i-1:1} == "u" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.blocker.yml -f docker-compose.abuse-scanner.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.abuse-scanner.yml"
   fi
 
   # pinner module - alias "p"
   if [[ ${PORTAL_MODULES:i-1:1} == "p" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.pinner.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.pinner.yml"
  fi
 done
 
 # override file if exists
 if [[ -f docker-compose.override.yml ]]; then
-  COMPOSE_FILES+=" -f docker-compose.override.yml"
+  COMPOSE_FILES+=" -f ${cwd}/docker-compose.override.yml"
 fi
 
 docker-compose $COMPOSE_FILES $@
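As a concrete illustration of the module mechanism above, a hypothetical run (the `.env` value is an example; the letters and compose file names are exactly the ones the script handles, and docker-compose merges repeated `-f` entries, so the duplicated mongodb file is harmless):

# .env (example values)
PORTAL_MODULES=ap

# `./dc up -d` then expands to roughly:
docker-compose -f ${cwd}/docker-compose.yml \
  -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.accounts.yml \
  -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.pinner.yml \
  up -d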
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -10,7 +10,7 @@ services:
   abuse-scanner:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/abuse-scanner.git#main
-    image: skynetlabs/abuse-scanner:0.1.1
+    image: skynetlabs/abuse-scanner:0.4.0
     container_name: abuse-scanner
     restart: unless-stopped
     logging: *default-logging
@@ -36,3 +36,6 @@ services:
     depends_on:
       - mongo
       - blocker
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /tmp:/tmp
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -22,7 +22,7 @@ services:
   accounts:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/skynet-accounts.git#main
-    image: skynetlabs/skynet-accounts:1.2.0
+    image: skynetlabs/skynet-accounts:1.3.0
     container_name: accounts
     restart: unless-stopped
     logging: *default-logging
@@ -59,15 +59,12 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-accounts-dashboard.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-accounts-dashboard:1.1.2
+    image: skynetlabs/webportal-accounts-dashboard:2.1.1
     container_name: dashboard
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
-    environment:
-      - GATSBY_PORTAL_DOMAIN=${PORTAL_DOMAIN}
-      - GATSBY_STRIPE_PUBLISHABLE_KEY=${STRIPE_PUBLISHABLE_KEY}
     volumes:
       - ./docker/data/dashboard/.cache:/usr/app/.cache
       - ./docker/data/dashboard/public:/usr/app/public
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -15,7 +15,7 @@ services:
   blocker:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/blocker.git#main
-    image: skynetlabs/blocker:0.1.1
+    image: skynetlabs/blocker:0.1.2
     container_name: blocker
     restart: unless-stopped
     logging: *default-logging
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -21,7 +21,7 @@ services:
       - JAEGER_REPORTER_LOG_SPANS=false
 
   jaeger-agent:
-    image: jaegertracing/jaeger-agent:1.32.0
+    image: jaegertracing/jaeger-agent:1.38.1
     command:
       [
         "--reporter.grpc.host-port=jaeger-collector:14250",
@@ -43,7 +43,7 @@ services:
       - jaeger-collector
 
   jaeger-collector:
-    image: jaegertracing/jaeger-collector:1.32.0
+    image: jaegertracing/jaeger-collector:1.38.1
     entrypoint: /wait_to_start.sh
     container_name: jaeger-collector
     restart: on-failure
@@ -68,7 +68,7 @@ services:
       - elasticsearch
 
   jaeger-query:
-    image: jaegertracing/jaeger-query:1.32.0
+    image: jaegertracing/jaeger-query:1.38.1
     entrypoint: /wait_to_start.sh
     container_name: jaeger-query
     restart: on-failure
@@ -93,7 +93,7 @@ services:
       - elasticsearch
 
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.4
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.6
     container_name: elasticsearch
     restart: on-failure
     logging: *default-logging
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -17,10 +17,6 @@ services:
       - ./docker/clamav/clamd.conf:/etc/clamav/clamd.conf:ro
     expose:
       - 3310 # NEVER expose this outside of the local network!
-    deploy:
-      resources:
-        limits:
-          cpus: "${CLAMAV_CPU:-0.50}"
     networks:
       shared:
         ipv4_address: 10.10.10.100
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -14,7 +14,7 @@ services:
       - MONGODB_PASSWORD=${SKYNET_DB_PASS}
 
   mongo:
-    image: mongo:4.4.14
+    image: mongo:4.4.17
     command: --keyFile=/data/mgkey --replSet=${SKYNET_DB_REPLICASET:-skynet} --setParameter ShardingTaskExecutorPoolMinSize=10
     container_name: mongo
     restart: unless-stopped
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -10,12 +10,14 @@ services:
   pinner:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/pinner.git#main
-    image: skynetlabs/pinner:0.3.1
+    image: skynetlabs/pinner:0.7.8
     container_name: pinner
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
+    volumes:
+      - ./docker/data/pinner/logs:/logs
     environment:
       - PINNER_LOG_LEVEL=${PINNER_LOG_LEVEL:-info}
     expose:
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -15,18 +15,19 @@ networks:
 
 services:
   sia:
-    build:
-      context: https://github.com/SkynetLabs/docker-skyd.git#main
-      dockerfile: bullseye-slim/Dockerfile
-      args:
-        branch: portal-latest
-    command: --disable-api-security --api-addr :9980 --modules gtcwra
+    # uncomment "build" and comment out "image" to build from sources
+    # build:
+    #   context: https://github.com/SkynetLabs/docker-skyd.git#main
+    #   dockerfile: scratch/Dockerfile
+    #   args:
+    #     branch: master
+    image: skynetlabs/skyd:1.6.9
+    command: --disable-api-security --api-addr :9980 --modules gctwra
     container_name: sia
     restart: unless-stopped
     stop_grace_period: 5m
     logging: *default-logging
     environment:
-      - SIA_MODULES=gctwra
       - SKYD_DISK_CACHE_ENABLED=${SKYD_DISK_CACHE_ENABLED:-true}
       - SKYD_DISK_CACHE_SIZE=${SKYD_DISK_CACHE_SIZE:-53690000000} # 50GB
       - SKYD_DISK_CACHE_MIN_HITS=${SKYD_DISK_CACHE_MIN_HITS:-3}
@@ -42,7 +43,24 @@ services:
       - 9980
 
   certbot:
-    image: certbot/dns-route53:v1.28.0
+    # replace this image with the image supporting your dns provider from
+    # https://hub.docker.com/r/certbot/certbot and adjust CERTBOT_ARGS env variable
+    # note: you will need to authenticate your dns request so consult the plugin docs
+    # configuration https://eff-certbot.readthedocs.io/en/stable/using.html#dns-plugins
+    #
+    # =================================================================================
+    # example docker-compose.yml changes required for Cloudflare dns provider:
+    #
+    # image: certbot/dns-cloudflare
+    # environment:
+    #   - CERTBOT_ARGS=--dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini
+    #
+    # create ./docker/data/certbot/cloudflare.ini file with the following content:
+    # dns_cloudflare_api_token = <api key generated at https://dash.cloudflare.com/profile/api-tokens>
+    #
+    # make sure that the file has 0400 permissions with:
+    # chmod 0400 ./docker/data/certbot/cloudflare.ini
+    image: certbot/dns-route53:v1.31.0
     entrypoint: sh /entrypoint.sh
     container_name: certbot
     restart: unless-stopped
@@ -60,7 +78,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-nginx.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-nginx:0.3.1
+    image: skynetlabs/webportal-nginx:1.0.0
     container_name: nginx
     restart: unless-stopped
     logging: *default-logging
@@ -90,7 +108,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-website.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-website:0.2.0
+    image: skynetlabs/webportal-website:0.2.3
     container_name: website
     restart: unless-stopped
     logging: *default-logging
@@ -106,18 +124,11 @@ services:
       - 9000
 
   handshake:
-    image: skynetlabs/hsd:3.0.1
-    command: --chain-migrate=2 --wallet-migrate=1
+    image: handshakeorg/hsd:4.0.2
+    command: --chain-migrate=3 --no-wallet --no-auth --compact-tree-on-init --network=main --http-host=0.0.0.0
     container_name: handshake
     restart: unless-stopped
     logging: *default-logging
-    environment:
-      - HSD_LOG_CONSOLE=false
-      - HSD_HTTP_HOST=0.0.0.0
-      - HSD_NETWORK=main
-      - HSD_PORT=12037
-    env_file:
-      - .env
     volumes:
       - ./docker/data/handshake/.hsd:/root/.hsd
     networks:
@@ -131,7 +142,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-handshake-api.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-handshake-api:0.1.1
+    image: skynetlabs/webportal-handshake-api:0.1.3
     container_name: handshake-api
     restart: unless-stopped
     logging: *default-logging
@@ -155,7 +166,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-dnslink-api.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-dnslink-api:0.1.1
+    image: skynetlabs/webportal-dnslink-api:0.2.1
     container_name: dnslink-api
     restart: unless-stopped
     logging: *default-logging
@@ -170,7 +181,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-health-check.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-health-check:0.1.3
+    image: skynetlabs/webportal-health-check:1.0.0
     container_name: health-check
     restart: unless-stopped
     logging: *default-logging
@@ -1,16 +0,0 @@
-FROM golang:1.16.7 AS sia-builder
-
-ENV GOOS linux
-ENV GOARCH amd64
-
-ARG branch=portal-latest
-
-RUN git clone https://gitlab.com/SkynetLabs/skyd.git Sia --single-branch --branch ${branch} && \
-    make release --directory Sia
-
-FROM nebulouslabs/sia:1.5.6
-
-COPY --from=sia-builder /go/bin/ /usr/bin/
-
-RUN if [ -f "/usr/bin/skyd" ]; then mv /usr/bin/skyd /usr/bin/siad; fi && \
-    if [ -f "/usr/bin/skyc" ]; then mv /usr/bin/skyc /usr/bin/siac; fi
@@ -50,6 +50,7 @@ aws s3 sync --no-progress /home/user/skynet-webportal/docker/data/nginx/logs s3:
 # generate and sync skylinks dump
 SKYLINKS_PATH=logs/skylinks/$(date +"%Y-%m-%d").log
 mkdir -p /home/user/skynet-webportal/logs/skylinks # ensure path exists
+find /home/user/skynet-webportal/logs/skylinks -type f -mtime +7 -delete # delete skylink dumps older than 7 days
 docker exec sia siac skynet ls --recursive --alert-suppress > /home/user/skynet-webportal/${SKYLINKS_PATH}
 aws s3 cp --no-progress /home/user/skynet-webportal/${SKYLINKS_PATH} s3://${BUCKET_NAME}/${SERVER_PREFIX}/${SKYLINKS_PATH}
 
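For clarity, a sketch of how the dump path in this hunk resolves on a given day (the date is an example; BUCKET_NAME and SERVER_PREFIX come from the environment):

# SKYLINKS_PATH=logs/skylinks/$(date +"%Y-%m-%d").log
# e.g. on 2022-10-05 the dump is written to logs/skylinks/2022-10-05.log
# and uploaded to s3://${BUCKET_NAME}/${SERVER_PREFIX}/logs/skylinks/2022-10-05.log;
# the added find line first prunes any dump older than 7 days.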
@@ -3,8 +3,6 @@
 # This script is for manual skylink blocking. It accepts either a single
 # skylink or a file containing list of skylinks. The script is intented
 # for manual use and it should be run locally on each skynet webportal server.
-# The automatic script that is used to continuously sync an Airtable sheet
-# list with the blocklist on the web portals is /setup-scripts/blocklist-airtable.py
 
 set -e # exit on first error
 
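A hypothetical invocation of the script this hunk edits, assuming it is the `scripts/blocklist-skylink.sh` referenced elsewhere in this repository and that it takes the skylink or file path as its first argument (argument handling is outside this hunk; the skylink is an example value):

# block a single skylink
scripts/blocklist-skylink.sh AQBG8n_sgEM_nlEp3G0w3vLjmdvSZ46ln8ZXHn-eObZNjA

# block every skylink listed in a file, one per line
scripts/blocklist-skylink.sh skylinks-to-block.txt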
@@ -1,164 +0,0 @@
-#!/usr/bin/env python3
-
-import curator
-import elasticsearch
-import os
-import ssl
-import sys
-
-TIMEOUT = 120
-
-
-def main():
-    if len(sys.argv) != 3:
-        print(
-            'USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(
-                sys.argv[0]
-            )
-        )
-        print(
-            "NUM_OF_DAYS ... delete indices that are older than the given number of days."
-        )
-        print(
-            "HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from."
-        )
-        print(
-            "TIMEOUT ... number of seconds to wait for master node response, default: {}".format(
-                TIMEOUT
-            )
-        )
-        print("INDEX_PREFIX ... specifies index prefix.")
-        print("INDEX_DATE_SEPARATOR ... specifies index date separator.")
-        print(
-            "ARCHIVE ... specifies whether to remove archive indices (only works for rollover) (default false)."
-        )
-        print(
-            "ROLLOVER ... specifies whether to remove indices created by rollover (default false)."
-        )
-        print("ES_USERNAME ... The username required by Elasticsearch.")
-        print("ES_PASSWORD ... The password required by Elasticsearch.")
-        print("ES_TLS ... enable TLS (default false).")
-        print("ES_TLS_CA ... Path to TLS CA file.")
-        print("ES_TLS_CERT ... Path to TLS certificate file.")
-        print("ES_TLS_KEY ... Path to TLS key file.")
-        print(
-            "ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server's certificate chain and host name verification."
-        )
-        sys.exit(1)
-
-    client = create_client(
-        os.getenv("ES_USERNAME"),
-        os.getenv("ES_PASSWORD"),
-        str2bool(os.getenv("ES_TLS", "false")),
-        os.getenv("ES_TLS_CA"),
-        os.getenv("ES_TLS_CERT"),
-        os.getenv("ES_TLS_KEY"),
-        str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", "false")),
-    )
-    ilo = curator.IndexList(client)
-    empty_list(ilo, "Elasticsearch has no indices")
-
-    prefix = os.getenv("INDEX_PREFIX", "")
-    if prefix != "":
-        prefix += "-"
-    separator = os.getenv("INDEX_DATE_SEPARATOR", "-")
-
-    if str2bool(os.getenv("ARCHIVE", "false")):
-        filter_archive_indices_rollover(ilo, prefix)
-    else:
-        if str2bool(os.getenv("ROLLOVER", "false")):
-            filter_main_indices_rollover(ilo, prefix)
-        else:
-            filter_main_indices(ilo, prefix, separator)
-
-    empty_list(ilo, "No indices to delete")
-
-    for index in ilo.working_list():
-        print("Removing", index)
-    timeout = int(os.getenv("TIMEOUT", TIMEOUT))
-    delete_indices = curator.DeleteIndices(ilo, master_timeout=timeout)
-    delete_indices.do_action()
-
-
-def filter_main_indices(ilo, prefix, separator):
-    date_regex = "\d{4}" + separator + "\d{2}" + separator + "\d{2}"
-    time_string = "%Y" + separator + "%m" + separator + "%d"
-
-    ilo.filter_by_regex(
-        kind="regex", value=prefix + "jaeger-(span|service|dependencies)-" + date_regex
-    )
-    empty_list(ilo, "No indices to delete")
-    # This excludes archive index as we use source='name'
-    # source `creation_date` would include archive index
-    ilo.filter_by_age(
-        source="name",
-        direction="older",
-        timestring=time_string,
-        unit="days",
-        unit_count=int(sys.argv[1]),
-    )
-
-
-def filter_main_indices_rollover(ilo, prefix):
-    ilo.filter_by_regex(kind="regex", value=prefix + "jaeger-(span|service)-\d{6}")
-    empty_list(ilo, "No indices to delete")
-    # do not remove active write indices
-    ilo.filter_by_alias(aliases=[prefix + "jaeger-span-write"], exclude=True)
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_alias(aliases=[prefix + "jaeger-service-write"], exclude=True)
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_age(
-        source="creation_date",
-        direction="older",
-        unit="days",
-        unit_count=int(sys.argv[1]),
-    )
-
-
-def filter_archive_indices_rollover(ilo, prefix):
-    # Remove only rollover archive indices
-    # Do not remove active write archive index
-    ilo.filter_by_regex(kind="regex", value=prefix + "jaeger-span-archive-\d{6}")
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_alias(aliases=[prefix + "jaeger-span-archive-write"], exclude=True)
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_age(
-        source="creation_date",
-        direction="older",
-        unit="days",
-        unit_count=int(sys.argv[1]),
-    )
-
-
-def empty_list(ilo, error_msg):
-    try:
-        ilo.empty_list_check()
-    except curator.NoIndices:
-        print(error_msg)
-        sys.exit(0)
-
-
-def str2bool(v):
-    return v.lower() in ("true", "1")
-
-
-def create_client(username, password, tls, ca, cert, key, skipHostVerify):
-    context = ssl.create_default_context()
-    if ca is not None:
-        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca)
-    elif skipHostVerify:
-        context.check_hostname = False
-        context.verify_mode = ssl.CERT_NONE
-    if username is not None and password is not None:
-        return elasticsearch.Elasticsearch(
-            sys.argv[2:], http_auth=(username, password), ssl_context=context
-        )
-    elif tls:
-        context.load_cert_chain(certfile=cert, keyfile=key)
-        return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
-    else:
-        return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
-
-
-if __name__ == "__main__":
-    main()
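For reference, the deleted cleaner prints its own USAGE string (see above); a hypothetical run matching it, with example retention and host values (the script file name is illustrative):

# delete Jaeger indices older than 14 days from a local Elasticsearch
INDEX_PREFIX=jaeger TIMEOUT=120 ./es-index-cleaner.py 14 http://elasticsearch:9200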
@@ -1,16 +0,0 @@
-#! /usr/bin/env bash
-
-###############################################################
-# this script is an automation for restarting docker containers
-# on maintenance nodes strictly built for purpose of siasky.net
-###############################################################
-
-set -e # exit on first error
-
-docker build --no-cache --quiet --build-arg branch=master -t sia-master /home/user/sia-dockerfile
-
-for container in `docker container ls --format '{{.Names}}'`; do
-    docker stop $container
-    docker rm $container
-    docker run -d -v /home/user/nodes/$container/sia-data:/sia-data --env-file /home/user/nodes/$container/.env --name $container --log-opt max-size=100m --log-opt max-file=3 sia-master
-done
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -e # exit on first error
-
-# get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
-
-# put the server down for maintenance
-. ${cwd}/portal-down.sh
-
-# stop the docker services
-docker-compose down
-
-# start the docker services
-docker-compose up -d
-
-# enable the server again
-. ${cwd}/portal-up.sh
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-set -e # exit on first error
-
-# get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
-
-# put the server down for maintenance
-. ${cwd}/portal-down.sh
-
-# build all container without cache
-docker-compose build --no-cache --parallel --pull --quiet
-
-# stop the docker services
-docker-compose down -v
-
-# clear unused docker containers so we don't run into out of disk space
-# it should be done after the container have been stopped and before
-# building them again
-docker system prune --force
-
-# start the docker services
-docker-compose up -d
-
-# enable the server again
-. ${cwd}/portal-up.sh
@@ -1,155 +0,0 @@
-# Skynet Portal Setup Scripts
-
-> :warning: This documentation is outdated and should be used for reference
-only. Portal setup documentation is located at
-https://portal-docs.skynetlabs.com/.
-
-This directory contains a setup guide and scripts that will install and
-configure some basic requirements for running a Skynet Portal. The assumption is
-that we are working with a Debian Buster Minimal system or similar.
-
-## Latest Setup Documentation
-
-Latest Skynet Webportal setup documentation and the setup process Skynet Labs
-supports is located at https://docs.siasky.net/webportal-management/overview.
-
-Some of the scripts and setup documentation contained in this repository
-(`skynet-webportal`) can be outdated and generally should not be used.
-
-## Initial Setup
-
-You may want to fork this repository and replace ssh keys in
-`setup-scripts/support/authorized_keys` and optionally edit the `setup-scripts/support/tmux.conf` and `setup-scripts/support/bashrc` configurations to fit your needs.
-
-### Step 0: stack overview
-
-- dockerized services inside `docker-compose.yml`
-  - [sia](https://sia.tech) ([docker hub](https://hub.docker.com/r/nebulouslabs/sia)): storage provider, heart of the portal setup
-  - [caddy](https://caddyserver.com) ([docker hub](https://hub.docker.com/r/caddy/caddy)): reverse proxy (similar to nginx) that handles ssl out of a box and acts as a transparent entry point
-  - [openresty](https://openresty.org) ([docker hub](https://hub.docker.com/r/openresty/openresty)): nginx custom build, acts as a cached proxy to siad and exposes all api endpoints
-  - [health-check](https://github.com/SkynetLabs/skynet-webportal/tree/master/packages/health-check): simple service that runs periodically and collects health data about the server (status and response times) - [read more](https://github.com/SkynetLabs/skynet-webportal/blob/master/packages/health-check/README.md)
-  - [handshake](https://handshake.org) ([github](https://github.com/handshake-org/hsd)): full handshake node
-  - [handshake-api](https://github.com/SkynetLabs/skynet-webportal/tree/master/packages/handshake-api): simple API talking to the handshake node - [read more](https://github.com/SkynetLabs/skynet-webportal/blob/master/packages/handshake-api/README.md)
-  - [website](https://github.com/SkynetLabs/skynet-webportal/tree/master/packages/website): portal frontend application - [read more](https://github.com/SkynetLabs/skynet-webportal/blob/master/packages/website/README.md)
-- discord integration
-  - [funds-checker](funds-checker.py): script that checks wallet balance and sends status messages to discord periodically
-  - [health-checker](health-checker.py): script that monitors health-check service for server health issues and reports them to discord periodically
-  - [log-checker](log-checker.py): script that scans siad logs for critical errors and reports them to discord periodically
-  - [blocklist-skylink](../scripts/blocklist-skylink.sh): script that can be run locally from a machine that has access to all your skynet portal servers that blocklists provided skylink and prunes nginx cache to ensure it's not available any more (that is a bit much but that's the best we can do right now without paid nginx version) - if you want to use it, make sure to adjust the server addresses
-
-### Step 1: setting up server user
-
-1. SSH in a freshly installed Debian machine on a user with sudo access (can be root)
-1. `apt-get update && apt-get install sudo libnss3-tools -y` to make sure `sudo` is available
-1. `adduser user` to create user called `user` (creates `/home/user` directory)
-1. `usermod -aG sudo user` to add this new user to sudo group
-1. `sudo groupadd docker` to create a group for docker (it might already exist)
-1. `sudo usermod -aG docker user` to add your user to that group
-1. Quit the ssh session with `exit` command
-
-You can now ssh into your machine as the user `user`.
-
-### Step 2: setting up environment
-
-1. On your local machine: `ssh-copy-id user@ip-addr` to copy over your ssh key to server
-1. On your local machine: `ssh user@ip-addr` to log in to server as user `user`
-1. You are now logged in as `user`
-
-**Following step will be executed on remote host logged in as a `user`:**
-
-1. `sudo apt-get install git -y` to install git
-1. `git clone https://github.com/SkynetLabs/skynet-webportal`
-1. `cd skynet-webportal`
-1. run setup scripts in the exact order and provide sudo password when asked (if one of them fails, you can retry just this one before proceeding further)
-   1. `/home/user/skynet-webportal/setup-scripts/setup-server.sh`
-   1. `/home/user/skynet-webportal/setup-scripts/setup-docker-services.sh`
-   1. `/home/user/skynet-webportal/setup-scripts/setup-health-check-scripts.sh` (optional)
-
-### Step 3: configuring siad
-
-At this point we have almost everything running, we just need to set up your wallet and allowance:
-
-1. Create a new wallet (remember to save the seed)
-   > `docker exec -it sia siac wallet init`
-1. Unlock the wallet (use the seed as password)
-   > `docker exec -it sia siac wallet unlock`
-1. Generate a new wallet address (save it for later to transfer the funds)
-   > `docker exec -it sia siac wallet address`
-1. Set up allowance
-   > `docker exec -it sia siac renter setallowance`
-   1. 10 KS (keep 25 KS in your wallet)
-   1. default period
-   1. default number of hosts
-   1. 4 week renewal time
-   1. 500 GB expected storage
-   1. 500 GB expected upload
-   1. 5 TB expected download
-   1. default redundancy
-1. Set a maximum storage price
-   > `docker exec -it sia siac renter setallowance --max-storage-price 100SC`
-1. Instruct siad to start making 10 contracts per block with many hosts to potentially view the whole network's files
-   > `docker exec -it sia siac renter setallowance --payment-contract-initial-funding 10SC`
-
-### Step 4: configuring docker services
-
-1. edit `/home/user/skynet-webportal/.env` and configure following environment variables
-
-   - `PORTAL_DOMAIN` (required) is a skynet portal domain (ex. siasky.net)
-   - `SERVER_DOMAIN` (optional) is an optional direct server domain (ex. eu-ger-1.siasky.net) - leave blank unless it is different than PORTAL_DOMAIN
-   - `EMAIL_ADDRESS` is your email address used for communication regarding SSL certification (required if you're using http-01 challenge)
-   - `SIA_WALLET_PASSWORD` is your wallet password (or seed if you did not set a password)
-   - `HSD_API_KEY` this is a random security key for a handshake integration that gets generated automatically
-   - `CLOUDFLARE_AUTH_TOKEN` (optional) if using cloudflare as dns loadbalancer (need to change it in Caddyfile too)
-   - `AWS_ACCESS_KEY_ID` (optional) if using route53 as a dns loadbalancer
-   - `AWS_SECRET_ACCESS_KEY` (optional) if using route53 as a dns loadbalancer
-   - `DISCORD_WEBHOOK_URL` (required if using Discord notifications) discord webhook url (generate from discord app)
-   - `DISCORD_MENTION_USER_ID` (optional) add `/cc @user` mention to important messages from webhook (has to be id not user name)
-   - `DISCORD_MENTION_ROLE_ID` (optional) add `/cc @role` mention to important messages from webhook (has to be id not role name)
-   - `SKYNET_DB_USER` (optional) if using `accounts` this is the MongoDB username
-   - `SKYNET_DB_PASS` (optional) if using `accounts` this is the MongoDB password
-   - `SKYNET_DB_HOST` (optional) if using `accounts` this is the MongoDB address or container name
-   - `SKYNET_DB_PORT` (optional) if using `accounts` this is the MongoDB port
-   - `COOKIE_DOMAIN` (optional) if using `accounts` this is the domain to which your cookies will be issued
-   - `COOKIE_HASH_KEY` (optional) if using `accounts` hashing secret, at least 32 bytes
-   - `COOKIE_ENC_KEY` (optional) if using `accounts` encryption key, at least 32 bytes
-   - `S3_BACKUP_PATH` (optional) is using `accounts` and backing up the databases to S3. This path should be an S3 bucket
-     with path to the location in the bucket where we want to store the daily backups.
-
-1. `docker-compose up -d` to restart the services so they pick up new env variables
-
-## Subdomains
-
-It might prove useful for certain skapps to be accessible through a custom subdomain. So instead of being accessed through `https://portal.com/[skylink]`, it would be accessible through `https://[skylink_base32].portal.com`. We call this "subdomain access" and it is made possible by encoding Skylinks using a base32 encoding. We have to use a base32 encoding scheme because subdomains have to be all lower case and the base64 encoded Skylink is case sensitive and thus might contain uppercase characters.
-
-You can convert Skylinks using this [converter skapp](https://convert-skylink.hns.siasky.net). To see how the encoding and decoding works, please follow the link to the repo in the application itself.
-
-There is also an option to access handshake domain through the subdomain using `https://[domain_name].hns.portal.com`.
-
-To configure this on your portal, you have to make sure to configure the following:
-
-## Useful Commands
-
-- Starting the whole stack
-  > `docker-compose up -d`
-- Stopping the whole stack
-  > `docker-compose down`
-- Accessing siac
-  > `docker exec -it sia siac`
-- Portal maintenance
-  - Pulling portal out for maintenance
-    > `scripts/portal-down.sh`
-  - Putting portal back into place after maintenance
-    > `scripts/portal-up.sh`
-  - Upgrading portal containers (takes care of pulling it and putting it back)
-    > `scripts/portal-upgrade.sh`
-- Restarting caddy gracefully after making changes to Caddyfile (no downtime)
-  > `docker exec caddy caddy reload --config /etc/caddy/Caddyfile`
-- Restarting nginx gracefully after making changes to nginx configs (no downtime)
-  > `docker exec nginx openresty -s reload`
-- Checking siad service logs (since last hour)
-  > `docker logs --since 1h $(docker ps -q --filter "name=^sia$")`
-- Checking caddy logs (for example in case ssl certificate fails)
-  > `docker logs caddy -f`
-- Checking nginx logs (nginx handles all communication to siad instances)
-  > `tail -n 50 docker/data/nginx/logs/access.log` to follow last 50 lines of access log
-  > `tail -n 50 docker/data/nginx/logs/error.log` to follow last 50 lines of error log
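To make the subdomain scheme described in the removed README concrete, the three access shapes side by side (placeholders as in the README; `portal.com` is an example domain):

# path-based access (base64 skylink, case-sensitive)
curl https://portal.com/[skylink]
# subdomain access (the same skylink, base32-encoded, all lowercase)
curl https://[skylink_base32].portal.com
# handshake name via the hns subdomain
curl https://[domain_name].hns.portal.com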
Deleted file (setup-scripts/blocklist-airtable.py — pulls blocked skylinks from Airtable and submits them to skyd):

@@ -1,161 +0,0 @@
#!/usr/bin/env python3

from bot_utils import get_api_password, setup, send_msg
from random import randint
from time import sleep

import traceback
import os
import sys
import asyncio
import requests
import json

from requests.auth import HTTPBasicAuth

setup()


AIRTABLE_API_KEY = os.getenv("AIRTABLE_API_KEY")
AIRTABLE_BASE = os.getenv("AIRTABLE_BASE")
AIRTABLE_TABLE = os.getenv("AIRTABLE_TABLE")
AIRTABLE_FIELD = os.getenv("AIRTABLE_FIELD")

# Check environment variables are defined
for value in [AIRTABLE_API_KEY, AIRTABLE_BASE, AIRTABLE_TABLE, AIRTABLE_FIELD]:
    if not value:
        sys.exit("Configuration error: Missing AirTable environment variable.")


async def run_checks():
    try:
        await block_skylinks_from_airtable()
    except:  # catch all exceptions
        trace = traceback.format_exc()
        await send_msg("```\n{}\n```".format(trace), force_notify=True)


def exec(command):
    return os.popen(command).read().strip()


async def block_skylinks_from_airtable():
    # Get the sia IP before doing anything else. If this step fails we don't
    # need to continue with the execution of the script.
    ipaddress = exec(
        "docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sia"
    )

    if ipaddress == "":
        print("Skyd IP could not be detected. Exiting.")
        return

    print("Pulling blocked skylinks from Airtable via api integration")
    headers = {"Authorization": "Bearer " + AIRTABLE_API_KEY}
    skylinks = []
    offset = None
    retry = 0
    while len(skylinks) == 0 or offset:
        print(
            "Requesting a batch of records from Airtable with "
            + (offset if offset else "empty")
            + " offset"
            + (" (retry " + str(retry) + ")" if retry else "")
        )
        query = "&".join(
            ["fields%5B%5D=" + AIRTABLE_FIELD, ("offset=" + offset) if offset else ""]
        )
        response = requests.get(
            "https://api.airtable.com/v0/"
            + AIRTABLE_BASE
            + "/"
            + AIRTABLE_TABLE
            + "?"
            + query,
            headers=headers,
        )

        # rate limited - sleep for 1-10 secs and retry (up to 100 times, ~10 minutes)
        # https://support.airtable.com/hc/en-us/articles/203313985-Public-REST-API
        # > 5 requests per second, per base
        if response.status_code == 429:
            if retry < 100:
                retry = retry + 1
                sleep(randint(1, 10))
                continue
            else:
                return await send_msg(
                    "Airtable: too many retries, aborting!", force_notify=True
                )
        retry = 0  # reset retry counter

        if response.status_code != 200:
            status_code = str(response.status_code)
            response_text = response.text or "empty response"
            message = (
                "Airtable blocklist integration responded with code "
                + status_code
                + ": "
                + response_text
            )
            return await send_msg(message, force_notify=False)

        data = response.json()

        if len(data["records"]) == 0:
            return print(
                "Airtable returned 0 records - make sure your configuration is correct"
            )

        skylinks = skylinks + [
            entry["fields"].get(AIRTABLE_FIELD, "") for entry in data["records"]
        ]
        skylinks = [
            skylink.strip() for skylink in skylinks if skylink
        ]  # filter empty skylinks, most likely empty rows, trim whitespace

        offset = data.get("offset")

    print(
        "Sending /skynet/blocklist request with "
        + str(len(skylinks))
        + " skylinks to siad"
    )
    response = requests.post(
        "http://" + ipaddress + ":9980/skynet/blocklist",
        data=json.dumps({"add": skylinks}),
        headers={"User-Agent": "Sia-Agent"},
        auth=HTTPBasicAuth("", get_api_password()),
    )

    if response.status_code != 200:
        status_code = str(response.status_code)
        response_text = response.text or "empty response"
        message = (
            "Airtable blocklist request responded with code "
            + status_code
            + ": "
            + response_text
        )
        return await send_msg(message, force_notify=False)

    response_json = json.loads(response.text)
    invalid_skylinks = response_json["invalids"]

    if invalid_skylinks is None:
        return await send_msg("Blocklist successfully updated all skylinks")
    return await send_msg(
        "Blocklist responded ok but failed to update "
        + str(len(invalid_skylinks))
        + " skylinks: "
        + json.dumps(invalid_skylinks)
    )


loop = asyncio.get_event_loop()
loop.run_until_complete(run_checks())

# --- BASH EQUIVALENT
# skylinks=$(curl "https://api.airtable.com/v0/${AIRTABLE_BASE}/${AIRTABLE_TABLE}?fields%5B%5D=${AIRTABLE_FIELD}" -H "Authorization: Bearer ${AIRTABLE_API_KEY}" | python3 -c "import sys, json; print('[\"' + '\",\"'.join([entry['fields']['Link'] for entry in json.load(sys.stdin)['records']]) + '\"]')")
# ipaddress=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sia)
# curl -A "Sia-Agent" --user ":${SIA_API_PASSWORD}" --data "{\"add\" : ${skylinks}}" "${ipaddress}:9980/skynet/blocklist"
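After the script (or its bash equivalent) runs, the result can be spot-checked against skyd directly. This assumes your skyd version exposes the read-only `GET /skynet/blocklist` endpoint:

> `ipaddress=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sia)`

> `curl -s -A "Sia-Agent" "http://${ipaddress}:9980/skynet/blocklist"`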
Changed file (the portal health checker, presumably setup-scripts/health-checker.py, referenced in the crontab below):

@@ -232,8 +232,7 @@ async def check_health():
         message += "{}/{} CRITICAL checks failed over the last {} hours! ".format(
             critical_checks_failed, critical_checks_total, CHECK_HOURS
         )
-        force_notify = True
+        # Disabling as it creates notification fatigue.
+        # force_notify = True
     else:
         message += "All {} critical checks passed. ".format(critical_checks_total)
@@ -241,8 +240,7 @@ async def check_health():
         message += "{}/{} extended checks failed over the last {} hours! ".format(
             extended_checks_failed, extended_checks_total, CHECK_HOURS
         )
-        force_notify = True
+        # Disabling as it creates notification fatigue.
+        # force_notify = True
     else:
         message += "All {} extended checks passed. ".format(extended_checks_total)
New file (a logrotate config for the nginx logs):

@@ -0,0 +1,16 @@
/home/user/skynet-webportal/docker/data/nginx/logs/*.log {
    daily
    rotate 3650
    minsize 500M
    create 644 root root
    notifempty
    dateext
    missingok
    compress
    compressoptions --best
    delaycompress
    sharedscripts
    postrotate
        docker exec nginx nginx -s reopen
    endscript
}
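Logrotate configs like the ones in this change can be dry-run before being installed, to confirm that the rules parse and match the intended files (the path below is just an example install location):

> `sudo logrotate -d /etc/logrotate.d/skynet-webportal-nginx`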
New file (a logrotate config for the pinner logs):

@@ -0,0 +1,11 @@
/home/user/skynet-webportal/docker/data/pinner/*.log {
    daily
    rotate 10
    minsize 100M
    copytruncate
    notifempty
    dateext
    missingok
    compress
    compressoptions --best
}
New file (a logrotate config for the sia logs):

@@ -0,0 +1,12 @@
/home/user/skynet-webportal/docker/data/sia/*.log
/home/user/skynet-webportal/docker/data/sia/*/*.log {
    daily
    rotate 10
    minsize 100M
    copytruncate
    notifempty
    dateext
    missingok
    compress
    compressoptions --best
}
Deleted file (a setup script that installs Docker and docker-compose, generates a dummy .env, and starts the stack):

@@ -1,48 +0,0 @@
#! /usr/bin/env bash

set -e # exit on first error

# Install docker (cleans up old docker installation)
# sudo apt-get remove -y docker docker-engine docker.io containerd runc # fails if it is the first installation
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
docker --version # sanity check

# add user to docker group to avoid having to use sudo for every docker command
sudo usermod -aG docker user

# Install docker-compose
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
docker-compose --version # sanity check

# Create dummy .env file for docker-compose usage with variables
# * PORTAL_DOMAIN - (required) the skynet portal domain (ex. siasky.net)
# * SERVER_DOMAIN - (optional) an optional direct server domain (ex. eu-ger-1.siasky.net) - leave blank unless it is different from PORTAL_DOMAIN
# * EMAIL_ADDRESS - the administrator contact email you need to supply for communication regarding SSL certification
# * HSD_API_KEY - an auto-generated secure key for your handshake service integration
# * CLOUDFLARE_AUTH_TOKEN - (optional) if using cloudflare as dns loadbalancer (needs to be changed in Caddyfile too)
# * AWS_ACCESS_KEY_ID - (optional) if using route53 as a dns loadbalancer
# * AWS_SECRET_ACCESS_KEY - (optional) if using route53 as a dns loadbalancer
# * API_PORT - (optional) the port on which siad is listening, defaults to 9980
# * DISCORD_WEBHOOK_URL - (required if using Discord notifications) discord webhook url (generate from discord app)
# * DISCORD_MENTION_USER_ID - (optional) add `/cc @user` mention to important messages from webhook (has to be id, not user name)
# * DISCORD_MENTION_ROLE_ID - (optional) add `/cc @role` mention to important messages from webhook (has to be id, not role name)
# * SKYNET_DB_USER - (optional) if using `accounts`, the MongoDB username
# * SKYNET_DB_PASS - (optional) if using `accounts`, the MongoDB password
# * SKYNET_DB_HOST - (optional) if using `accounts`, the MongoDB address or container name
# * SKYNET_DB_PORT - (optional) if using `accounts`, the MongoDB port
# * COOKIE_DOMAIN - (optional) if using `accounts`, the domain to which your cookies will be issued
# * COOKIE_HASH_KEY - (optional) if using `accounts`, the hashing secret, at least 32 bytes
# * COOKIE_ENC_KEY - (optional) if using `accounts`, the encryption key, at least 32 bytes
if ! [ -f /home/user/skynet-webportal/.env ]; then
	HSD_API_KEY=$(openssl rand -base64 32) # generate safe random key for handshake
	printf "PORTAL_DOMAIN=siasky.net\nSERVER_DOMAIN=\nEMAIL_ADDRESS=email@example.com\nSIA_WALLET_PASSWORD=\nHSD_API_KEY=${HSD_API_KEY}\nCLOUDFLARE_AUTH_TOKEN=\nAWS_ACCESS_KEY_ID=\nAWS_SECRET_ACCESS_KEY=\nDISCORD_WEBHOOK_URL=\nDISCORD_MENTION_USER_ID=\nDISCORD_MENTION_ROLE_ID=\n" > /home/user/skynet-webportal/.env
fi

# Start docker container with nginx and client
docker-compose -f docker-compose.yml up --build -d
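For reference, the dummy `.env` written by the `printf` above renders like this (blank values are meant to be filled in by the portal operator):

```
PORTAL_DOMAIN=siasky.net
SERVER_DOMAIN=
EMAIL_ADDRESS=email@example.com
SIA_WALLET_PASSWORD=
HSD_API_KEY=<generated via openssl rand -base64 32>
CLOUDFLARE_AUTH_TOKEN=
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
DISCORD_WEBHOOK_URL=
DISCORD_MENTION_USER_ID=
DISCORD_MENTION_ROLE_ID=
```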
Deleted file (a setup script that installs the python dependencies for the monitoring scripts and registers the cron jobs):

@@ -1,11 +0,0 @@
#! /usr/bin/env bash

set -e # exit on first error

sudo apt-get update
sudo apt-get -y install python3-pip

pip3 install discord-webhook python-dotenv requests elasticsearch-curator

# add cron entries to user crontab
crontab -u user /home/user/skynet-webportal/setup-scripts/support/crontab
Deleted file (a base server setup script: shell configs, SSH keys, firewall rules, and system tweaks):

@@ -1,48 +0,0 @@
#! /usr/bin/env bash

set -e # exit on first error

# Copy over basic configuration files
cp /home/user/skynet-webportal/setup-scripts/support/tmux.conf /home/user/.tmux.conf
cp /home/user/skynet-webportal/setup-scripts/support/bashrc /home/user/.bashrc
source /home/user/.bashrc

# Add SSH keys and set SSH configs
sudo cp /home/user/skynet-webportal/setup-scripts/support/ssh_config /etc/ssh/ssh_config
mkdir -p /home/user/.ssh
# cat /home/user/skynet-webportal/setup-scripts/support/authorized_keys >> /home/user/.ssh/authorized_keys

# Install apt packages
sudo apt-get update
sudo apt-get -y install ufw tmux ranger htop nload gcc g++ make git vim unzip curl awscli

# Set up git credentials (so commands like git stash work)
git config --global user.email "devs@nebulous.tech"
git config --global user.name "Sia Dev"

# Set up firewall
sudo ufw --force enable # --force to make it non-interactive
sudo ufw logging low # enable logging for debugging purposes: tail -f /var/log/ufw.log
sudo ufw allow ssh # allow ssh connections to the server
sudo ufw allow 80,443/tcp # allow http and https ports

# Block outgoing traffic to local networks
# https://community.hetzner.com/tutorials/block-outgoing-traffic-to-private-networks
sudo ufw deny out from any to 10.0.0.0/8
sudo ufw deny out from any to 172.16.0.0/12
sudo ufw deny out from any to 192.168.0.0/16
sudo ufw deny out from any to 100.64.0.0/10
sudo ufw deny out from any to 198.18.0.0/15
sudo ufw deny out from any to 169.254.0.0/16

# OPTIONAL: terminfo for alacritty terminal via ssh
# If you don't use the alacritty terminal you can remove this step.
wget -c https://raw.githubusercontent.com/alacritty/alacritty/master/extra/alacritty.info
sudo tic -xe alacritty,alacritty-direct alacritty.info
rm alacritty.info

# Set up file limits - siad uses a lot, so we adjust them so it doesn't choke up
sudo cp /home/user/skynet-webportal/setup-scripts/support/limits.conf /etc/security/limits.conf

# Set UTC timezone so all of the servers report the same time
sudo timedatectl set-timezone UTC
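After a run of this script, the resulting firewall state can be verified with ufw's standard status command:

> `sudo ufw status verbose`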
Changed file (setup-scripts/support/authorized_keys, referenced by the server setup script above):

@@ -6,6 +6,5 @@ ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG67M3zC4eDJEjma0iKKksGclteKbB86ONQtBaWY93M6
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF+XC8f0dumhzDE93i9IIMsMp7/MJPwGH+Uc9JFKOvyw karol@siasky.net
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM43lzbKjFLChe5rKETxDpWpNlqXCGTBPiWlDN2vlLD pj@siasky.net
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN6Kcx8yetova4/ALUQHigo/PBMJO33ZTKOsg2jxSO2a user@deploy.siasky.dev
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDcenWnMQ6q/OEC4ZmQgjLDV2obWlR3fENV0zRGFvJF+ marcins@siasky.net
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB7prtVOTwtcSN9HkXum107RwcW5H8Vggx6Qv7T57ItT daniel@siasky.net
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+4IrfEM9H16jqvPZncHkWWoHO4/BVq7d4pEyzK4e0W michal.leszczyk@skynetlabs.com
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHptEpqs57lhnHkfa+0SQgXQ4A63/YGV2cNTcGMQW+Jt david@skynetlabs.com
Changed file (setup-scripts/support/crontab, installed by the setup script above):

@@ -1,7 +1,6 @@
 0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/funds-checker.py /home/user/skynet-webportal/.env
 0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/log-checker.py /home/user/skynet-webportal/.env sia 8
 0 * * * * /home/user/skynet-webportal/setup-scripts/health-checker.py /home/user/skynet-webportal/.env sia 1
-30 */4 * * * /home/user/skynet-webportal/setup-scripts/blocklist-airtable.py /home/user/skynet-webportal/.env
 44 5 * * * /home/user/skynet-webportal/scripts/backup-aws-s3.sh 1>>/home/user/skynet-webportal/logs/backup-aws-s3.log 2>>/home/user/skynet-webportal/logs/backup-aws-s3.log
 6 13 * * * /home/user/skynet-webportal/scripts/db_backup.sh 1>>/home/user/skynet-webportal/logs/db_backup.log 2>>/home/user/skynet-webportal/logs/db_backup.log
-0 5 * * * /home/user/skynet-webportal/scripts/es_cleaner.py 1 http://localhost:9200
+0 5 * * * /usr/bin/docker run --rm --net=host -e ROLLOVER=true jaegertracing/jaeger-es-index-cleaner:latest 1 http://localhost:9200
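Once the updated crontab is installed (via `crontab -u user /home/user/skynet-webportal/setup-scripts/support/crontab`, as in the setup script above), the active schedule can be verified with:

> `crontab -u user -l`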