Merge branch 'master' into add-shell-scripts-lint

commit 5e5241d45f
@@ -0,0 +1 @@
+* @kwypchlo @meeh0w
@@ -1,71 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ main ]
-  schedule:
-    - cron: '32 21 * * 0'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'javascript', 'python' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v2
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-        # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
-    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v1
-
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 https://git.io/JvXDl
-
-    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-    # and modify them (or add more) to build your code if your project
-    # uses a compiled language
-
-    #- run: |
-    #   make bootstrap
-    #   make release
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
@@ -1,21 +0,0 @@
-name: Dockerfile Lint
-
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-
-jobs:
-  hadolint:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        dockerfile:
-          - docker/sia/Dockerfile
-    steps:
-      - uses: actions/checkout@v3
-      - uses: hadolint/hadolint-action@v2.0.0
-        with:
-          dockerfile: ${{ matrix.dockerfile }}
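With the lint workflow removed from CI, Dockerfiles can still be checked by hand. A minimal sketch using the hadolint image that the action above wraps (the Dockerfile path here is illustrative):

    docker run --rm -i hadolint/hadolint < docker/sia/Dockerfile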
dc
@@ -5,56 +5,59 @@
 # would use docker-compose with the only difference being that you don't need to specify compose files. For more
 # information you can run `./dc` or `./dc help`.
 
-if [ -f .env ]; then
-  OLD_IFS=$IFS
-  IFS=$'\n'
-  for x in $(grep -v '^#.*' .env); do export $x; done
-  IFS=$OLD_IFS
+# get current working directory of this script and prefix all files with it to
+# be able to call this script from anywhere and not only root directory of
+# skynet-webportal project
+cwd="$(dirname -- "$0";)";
+
+# get portal modules configuration from .env file (if defined more than once, the last one is used)
+if [[ -f "${cwd}/.env" ]]; then
+  PORTAL_MODULES=$(grep -e "^PORTAL_MODULES=" ${cwd}/.env | tail -1 | sed "s/PORTAL_MODULES=//")
 fi
 
 # include base docker compose file
-COMPOSE_FILES="-f docker-compose.yml"
+COMPOSE_FILES="-f ${cwd}/docker-compose.yml"
 
 for i in $(seq 1 ${#PORTAL_MODULES}); do
   # accounts module - alias "a"
   if [[ ${PORTAL_MODULES:i-1:1} == "a" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.accounts.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.accounts.yml"
   fi
 
   # blocker module - alias "b"
   if [[ ${PORTAL_MODULES:i-1:1} == "b" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.blocker.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml"
   fi
 
   # jaeger module - alias "j"
   if [[ ${PORTAL_MODULES:i-1:1} == "j" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.jaeger.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.jaeger.yml"
   fi
 
   # malware-scanner module - alias "s"
   if [[ ${PORTAL_MODULES:i-1:1} == "s" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.blocker.yml -f docker-compose.mongodb.yml -f docker-compose.malware-scanner.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.malware-scanner.yml"
   fi
 
   # mongodb module - alias "m"
   if [[ ${PORTAL_MODULES:i-1:1} == "m" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml"
   fi
 
   # abuse-scanner module - alias "u"
   if [[ ${PORTAL_MODULES:i-1:1} == "u" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.blocker.yml -f docker-compose.abuse-scanner.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.abuse-scanner.yml"
   fi
 
   # pinner module - alias "p"
   if [[ ${PORTAL_MODULES:i-1:1} == "p" ]]; then
-    COMPOSE_FILES+=" -f docker-compose.mongodb.yml -f docker-compose.pinner.yml"
+    COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.pinner.yml"
   fi
 done
 
 # override file if exists
 if [[ -f docker-compose.override.yml ]]; then
-  COMPOSE_FILES+=" -f docker-compose.override.yml"
+  COMPOSE_FILES+=" -f ${cwd}/docker-compose.override.yml"
 fi
 
 docker-compose $COMPOSE_FILES $@
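A usage sketch of the reworked wrapper (the PORTAL_MODULES value here is illustrative, not part of this commit). With PORTAL_MODULES=ap in .env, the loop selects the accounts and pinner modules plus their mongodb dependency:

    # .env next to the dc script
    PORTAL_MODULES=ap

    # callable from any directory now, since all paths are prefixed with ${cwd}
    ./dc up -d
    # roughly expands to:
    #   docker-compose -f ${cwd}/docker-compose.yml \
    #     -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.accounts.yml \
    #     -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.pinner.yml up -d
    # (the repeated mongodb file should be harmless; compose merges files in order)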
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -10,7 +10,7 @@ services:
   abuse-scanner:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/abuse-scanner.git#main
-    image: skynetlabs/abuse-scanner:0.1.1
+    image: skynetlabs/abuse-scanner:0.4.0
     container_name: abuse-scanner
     restart: unless-stopped
     logging: *default-logging
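A typical way to roll out an image bump like this one through the dc wrapper (a sketch; the service name matches the hunk above):

    ./dc pull abuse-scanner
    ./dc up -d abuse-scanner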
@@ -36,3 +36,6 @@ services:
     depends_on:
       - mongo
       - blocker
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /tmp:/tmp
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -22,7 +22,7 @@ services:
   accounts:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/skynet-accounts.git#main
-    image: skynetlabs/skynet-accounts:1.2.0
+    image: skynetlabs/skynet-accounts:1.3.0
     container_name: accounts
     restart: unless-stopped
     logging: *default-logging
@@ -59,15 +59,12 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-accounts-dashboard.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-accounts-dashboard:1.1.1
+    image: skynetlabs/webportal-accounts-dashboard:2.1.0
     container_name: dashboard
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
-    environment:
-      - GATSBY_PORTAL_DOMAIN=${PORTAL_DOMAIN}
-      - GATSBY_STRIPE_PUBLISHABLE_KEY=${STRIPE_PUBLISHABLE_KEY}
     volumes:
       - ./docker/data/dashboard/.cache:/usr/app/.cache
       - ./docker/data/dashboard/public:/usr/app/public
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -15,7 +15,7 @@ services:
   blocker:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/blocker.git#main
-    image: skynetlabs/blocker:0.1.1
+    image: skynetlabs/blocker:0.1.2
     container_name: blocker
     restart: unless-stopped
     logging: *default-logging
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -21,7 +21,7 @@ services:
       - JAEGER_REPORTER_LOG_SPANS=false
 
   jaeger-agent:
-    image: jaegertracing/jaeger-agent:1.32.0
+    image: jaegertracing/jaeger-agent:1.37.0
     command:
       [
         "--reporter.grpc.host-port=jaeger-collector:14250",
@@ -43,7 +43,7 @@ services:
       - jaeger-collector
 
   jaeger-collector:
-    image: jaegertracing/jaeger-collector:1.32.0
+    image: jaegertracing/jaeger-collector:1.37.0
     entrypoint: /wait_to_start.sh
     container_name: jaeger-collector
     restart: on-failure
@@ -68,7 +68,7 @@ services:
       - elasticsearch
 
   jaeger-query:
-    image: jaegertracing/jaeger-query:1.32.0
+    image: jaegertracing/jaeger-query:1.37.0
     entrypoint: /wait_to_start.sh
     container_name: jaeger-query
     restart: on-failure
@@ -93,7 +93,7 @@ services:
      - elasticsearch
 
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.6
     container_name: elasticsearch
     restart: on-failure
     logging: *default-logging
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -14,7 +14,7 @@ services:
       - MONGODB_PASSWORD=${SKYNET_DB_PASS}
 
   mongo:
-    image: mongo:4.4.1
+    image: mongo:4.4.16
     command: --keyFile=/data/mgkey --replSet=${SKYNET_DB_REPLICASET:-skynet} --setParameter ShardingTaskExecutorPoolMinSize=10
     container_name: mongo
     restart: unless-stopped
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -10,12 +10,14 @@ services:
   pinner:
     # uncomment "build" and comment out "image" to build from sources
     # build: https://github.com/SkynetLabs/pinner.git#main
-    image: skynetlabs/pinner:0.3.1
+    image: skynetlabs/pinner:0.7.5
     container_name: pinner
     restart: unless-stopped
     logging: *default-logging
     env_file:
       - .env
+    volumes:
+      - ./docker/data/pinner/logs:/logs
     environment:
       - PINNER_LOG_LEVEL=${PINNER_LOG_LEVEL:-info}
     expose:
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 x-logging: &default-logging
   driver: json-file
@@ -15,17 +15,19 @@ networks:
 services:
   sia:
-    build:
-      context: ./docker/sia
-      dockerfile: Dockerfile
-      args:
-        branch: portal-latest
+    # uncomment "build" and comment out "image" to build from sources
+    # build:
+    #   context: https://github.com/SkynetLabs/docker-skyd.git#main
+    #   dockerfile: scratch/Dockerfile
+    #   args:
+    #     branch: master
+    image: skynetlabs/skyd:1.6.8
     command: --disable-api-security --api-addr :9980 --modules gctwra
     container_name: sia
     restart: unless-stopped
     stop_grace_period: 5m
     logging: *default-logging
     environment:
       - SIA_MODULES=gctwra
       - SKYD_DISK_CACHE_ENABLED=${SKYD_DISK_CACHE_ENABLED:-true}
       - SKYD_DISK_CACHE_SIZE=${SKYD_DISK_CACHE_SIZE:-53690000000} # 50GB
       - SKYD_DISK_CACHE_MIN_HITS=${SKYD_DISK_CACHE_MIN_HITS:-3}
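The sia service switches from always building ./docker/sia to a pinned skynetlabs/skyd image. To build from source again, uncomment the build block and comment out the image line, then rebuild through the wrapper (a sketch):

    ./dc build sia
    ./dc up -d sia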
@@ -41,7 +43,24 @@ services:
       - 9980
 
   certbot:
-    image: certbot/dns-route53:v1.25.0
+    # replace this image with the image supporting your dns provider from
+    # https://hub.docker.com/r/certbot/certbot and adjust CERTBOT_ARGS env variable
+    # note: you will need to authenticate your dns request so consult the plugin docs
+    # configuration https://eff-certbot.readthedocs.io/en/stable/using.html#dns-plugins
+    #
+    # =================================================================================
+    # example docker-compose.yml changes required for Cloudflare dns provider:
+    #
+    # image: certbot/dns-cloudflare
+    # environment:
+    #   - CERTBOT_ARGS=--dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini
+    #
+    # create ./docker/data/certbot/cloudflare.ini file with the following content:
+    # dns_cloudflare_api_token = <api key generated at https://dash.cloudflare.com/profile/api-tokens>
+    #
+    # make sure that the file has 0400 permissions with:
+    # chmod 0400 ./docker/data/certbot/cloudflare.ini
+    image: certbot/dns-route53:v1.30.0
     entrypoint: sh /entrypoint.sh
     container_name: certbot
     restart: unless-stopped
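Once the bumped certbot container is up, issuance can be sanity-checked without touching production certificates (a sketch; assumes the certbot binary is on PATH inside the container, as in the official images):

    docker exec certbot certbot renew --dry-run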
@@ -59,7 +78,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-nginx.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-nginx:0.2.0
+    image: skynetlabs/webportal-nginx:0.5.2
     container_name: nginx
     restart: unless-stopped
     logging: *default-logging
@@ -89,7 +108,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-website.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-website:0.1.0
+    image: skynetlabs/webportal-website:0.2.2
     container_name: website
     restart: unless-stopped
     logging: *default-logging
@@ -105,18 +124,11 @@ services:
       - 9000
 
   handshake:
-    image: skynetlabs/hsd:3.0.1
-    command: --chain-migrate=2 --wallet-migrate=1
+    image: handshakeorg/hsd:4.0.2
+    command: --chain-migrate=3 --no-wallet --no-auth --compact-tree-on-init --network=main --http-host=0.0.0.0
     container_name: handshake
     restart: unless-stopped
     logging: *default-logging
-    environment:
-      - HSD_LOG_CONSOLE=false
-      - HSD_HTTP_HOST=0.0.0.0
-      - HSD_NETWORK=main
-      - HSD_PORT=12037
-    env_file:
-      - .env
     volumes:
       - ./docker/data/handshake/.hsd:/root/.hsd
     networks:
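The new hsd flags drop the wallet and bind HTTP to 0.0.0.0, replacing the HSD_* environment variables. A quick liveness probe (a sketch; assumes hsd-cli ships in the handshakeorg/hsd image):

    docker exec handshake hsd-cli info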
@@ -130,7 +142,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-handshake-api.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-handshake-api:0.1.1
+    image: skynetlabs/webportal-handshake-api:0.1.2
     container_name: handshake-api
     restart: unless-stopped
     logging: *default-logging
@@ -154,7 +166,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-dnslink-api.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-dnslink-api:0.1.1
+    image: skynetlabs/webportal-dnslink-api:0.1.2
     container_name: dnslink-api
     restart: unless-stopped
     logging: *default-logging
@@ -169,7 +181,7 @@ services:
     # build:
     #   context: https://github.com/SkynetLabs/webportal-health-check.git#main
     #   dockerfile: Dockerfile
-    image: skynetlabs/webportal-health-check:0.1.3
+    image: skynetlabs/webportal-health-check:0.5.0
     container_name: health-check
     restart: unless-stopped
     logging: *default-logging
@@ -1,16 +0,0 @@
-FROM golang:1.16.7 AS sia-builder
-
-ENV GOOS linux
-ENV GOARCH amd64
-
-ARG branch=portal-latest
-
-RUN git clone https://gitlab.com/SkynetLabs/skyd.git Sia --single-branch --branch ${branch} && \
-    make release --directory Sia
-
-FROM nebulouslabs/sia:1.5.6
-
-COPY --from=sia-builder /go/bin/ /usr/bin/
-
-RUN if [ -f "/usr/bin/skyd" ]; then mv /usr/bin/skyd /usr/bin/siad; fi && \
-    if [ -f "/usr/bin/skyc" ]; then mv /usr/bin/skyc /usr/bin/siac; fi
@@ -0,0 +1,7 @@
+{
+  "extends": ["config:base", ":prHourlyLimitNone"],
+  "enabledManagers": ["docker-compose"],
+  "packageRules": [
+    { "groupName": "jaegertracing", "matchPackagePatterns": ["jaegertracing"] }
+  ]
+}
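This Renovate config restricts updates to docker-compose manifests and groups all jaegertracing images into a single PR. It can be linted locally with the validator bundled with Renovate (a sketch; assumes a Node.js toolchain):

    npx --package renovate -- renovate-config-validator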
@@ -3,8 +3,6 @@
 # This script is for manual skylink blocking. It accepts either a single
 # skylink or a file containing a list of skylinks. The script is intended
 # for manual use and it should be run locally on each skynet webportal server.
-# The automatic script that is used to continuously sync an Airtable sheet
-# list with the blocklist on the web portals is /setup-scripts/blocklist-airtable.py
 
 set -e # exit on first error
 
@@ -1,164 +0,0 @@
-#!/usr/bin/env python3
-
-import curator
-import elasticsearch
-import os
-import ssl
-import sys
-
-TIMEOUT = 120
-
-
-def main():
-    if len(sys.argv) != 3:
-        print(
-            'USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(
-                sys.argv[0]
-            )
-        )
-        print(
-            "NUM_OF_DAYS ... delete indices that are older than the given number of days."
-        )
-        print(
-            "HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from."
-        )
-        print(
-            "TIMEOUT ... number of seconds to wait for master node response, default: {}".format(
-                TIMEOUT
-            )
-        )
-        print("INDEX_PREFIX ... specifies index prefix.")
-        print("INDEX_DATE_SEPARATOR ... specifies index date separator.")
-        print(
-            "ARCHIVE ... specifies whether to remove archive indices (only works for rollover) (default false)."
-        )
-        print(
-            "ROLLOVER ... specifies whether to remove indices created by rollover (default false)."
-        )
-        print("ES_USERNAME ... The username required by Elasticsearch.")
-        print("ES_PASSWORD ... The password required by Elasticsearch.")
-        print("ES_TLS ... enable TLS (default false).")
-        print("ES_TLS_CA ... Path to TLS CA file.")
-        print("ES_TLS_CERT ... Path to TLS certificate file.")
-        print("ES_TLS_KEY ... Path to TLS key file.")
-        print(
-            "ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server's certificate chain and host name verification."
-        )
-        sys.exit(1)
-
-    client = create_client(
-        os.getenv("ES_USERNAME"),
-        os.getenv("ES_PASSWORD"),
-        str2bool(os.getenv("ES_TLS", "false")),
-        os.getenv("ES_TLS_CA"),
-        os.getenv("ES_TLS_CERT"),
-        os.getenv("ES_TLS_KEY"),
-        str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", "false")),
-    )
-    ilo = curator.IndexList(client)
-    empty_list(ilo, "Elasticsearch has no indices")
-
-    prefix = os.getenv("INDEX_PREFIX", "")
-    if prefix != "":
-        prefix += "-"
-    separator = os.getenv("INDEX_DATE_SEPARATOR", "-")
-
-    if str2bool(os.getenv("ARCHIVE", "false")):
-        filter_archive_indices_rollover(ilo, prefix)
-    else:
-        if str2bool(os.getenv("ROLLOVER", "false")):
-            filter_main_indices_rollover(ilo, prefix)
-        else:
-            filter_main_indices(ilo, prefix, separator)
-
-    empty_list(ilo, "No indices to delete")
-
-    for index in ilo.working_list():
-        print("Removing", index)
-    timeout = int(os.getenv("TIMEOUT", TIMEOUT))
-    delete_indices = curator.DeleteIndices(ilo, master_timeout=timeout)
-    delete_indices.do_action()
-
-
-def filter_main_indices(ilo, prefix, separator):
-    date_regex = "\d{4}" + separator + "\d{2}" + separator + "\d{2}"
-    time_string = "%Y" + separator + "%m" + separator + "%d"
-
-    ilo.filter_by_regex(
-        kind="regex", value=prefix + "jaeger-(span|service|dependencies)-" + date_regex
-    )
-    empty_list(ilo, "No indices to delete")
-    # This excludes archive index as we use source='name'
-    # source `creation_date` would include archive index
-    ilo.filter_by_age(
-        source="name",
-        direction="older",
-        timestring=time_string,
-        unit="days",
-        unit_count=int(sys.argv[1]),
-    )
-
-
-def filter_main_indices_rollover(ilo, prefix):
-    ilo.filter_by_regex(kind="regex", value=prefix + "jaeger-(span|service)-\d{6}")
-    empty_list(ilo, "No indices to delete")
-    # do not remove active write indices
-    ilo.filter_by_alias(aliases=[prefix + "jaeger-span-write"], exclude=True)
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_alias(aliases=[prefix + "jaeger-service-write"], exclude=True)
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_age(
-        source="creation_date",
-        direction="older",
-        unit="days",
-        unit_count=int(sys.argv[1]),
-    )
-
-
-def filter_archive_indices_rollover(ilo, prefix):
-    # Remove only rollover archive indices
-    # Do not remove active write archive index
-    ilo.filter_by_regex(kind="regex", value=prefix + "jaeger-span-archive-\d{6}")
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_alias(aliases=[prefix + "jaeger-span-archive-write"], exclude=True)
-    empty_list(ilo, "No indices to delete")
-    ilo.filter_by_age(
-        source="creation_date",
-        direction="older",
-        unit="days",
-        unit_count=int(sys.argv[1]),
-    )
-
-
-def empty_list(ilo, error_msg):
-    try:
-        ilo.empty_list_check()
-    except curator.NoIndices:
-        print(error_msg)
-        sys.exit(0)
-
-
-def str2bool(v):
-    return v.lower() in ("true", "1")
-
-
-def create_client(username, password, tls, ca, cert, key, skipHostVerify):
-    context = ssl.create_default_context()
-    if ca is not None:
-        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca)
-    elif skipHostVerify:
-        context.check_hostname = False
-        context.verify_mode = ssl.CERT_NONE
-    if username is not None and password is not None:
-        return elasticsearch.Elasticsearch(
-            sys.argv[2:], http_auth=(username, password), ssl_context=context
-        )
-    elif tls:
-        context.load_cert_chain(certfile=cert, keyfile=key)
-        return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
-    else:
-        return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
-
-
-if __name__ == "__main__":
-    main()
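The duties of this deleted script move to the upstream cleaner image; the crontab hunk at the end of this diff runs the equivalent, with the same positional NUM_OF_DAYS and hostname arguments and ROLLOVER passed via the environment:

    docker run --rm --net=host -e ROLLOVER=true jaegertracing/jaeger-es-index-cleaner:latest 1 http://localhost:9200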
@@ -1,161 +0,0 @@
-#!/usr/bin/env python3
-
-from bot_utils import get_api_password, setup, send_msg
-from random import randint
-from time import sleep
-
-import traceback
-import os
-import sys
-import asyncio
-import requests
-import json
-
-from requests.auth import HTTPBasicAuth
-
-setup()
-
-
-AIRTABLE_API_KEY = os.getenv("AIRTABLE_API_KEY")
-AIRTABLE_BASE = os.getenv("AIRTABLE_BASE")
-AIRTABLE_TABLE = os.getenv("AIRTABLE_TABLE")
-AIRTABLE_FIELD = os.getenv("AIRTABLE_FIELD")
-
-# Check environment variables are defined
-for value in [AIRTABLE_API_KEY, AIRTABLE_BASE, AIRTABLE_TABLE, AIRTABLE_FIELD]:
-    if not value:
-        sys.exit("Configuration error: Missing AirTable environment variable.")
-
-
-async def run_checks():
-    try:
-        await block_skylinks_from_airtable()
-    except:  # catch all exceptions
-        trace = traceback.format_exc()
-        await send_msg("```\n{}\n```".format(trace), force_notify=True)
-
-
-def exec(command):
-    return os.popen(command).read().strip()
-
-
-async def block_skylinks_from_airtable():
-    # Get sia IP before doing anything else. If this step fails we don't
-    # need to continue with the execution of the script.
-    ipaddress = exec(
-        "docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sia"
-    )
-
-    if ipaddress == "":
-        print("Skyd IP could not be detected. Exiting.")
-        return
-
-    print("Pulling blocked skylinks from Airtable via api integration")
-    headers = {"Authorization": "Bearer " + AIRTABLE_API_KEY}
-    skylinks = []
-    offset = None
-    retry = 0
-    while len(skylinks) == 0 or offset:
-        print(
-            "Requesting a batch of records from Airtable with "
-            + (offset if offset else "empty")
-            + " offset"
-            + (" (retry " + str(retry) + ")" if retry else "")
-        )
-        query = "&".join(
-            ["fields%5B%5D=" + AIRTABLE_FIELD, ("offset=" + offset) if offset else ""]
-        )
-        response = requests.get(
-            "https://api.airtable.com/v0/"
-            + AIRTABLE_BASE
-            + "/"
-            + AIRTABLE_TABLE
-            + "?"
-            + query,
-            headers=headers,
-        )
-
-        # rate limited - sleep for 2-10 secs and retry (up to 100 times, ~10 minutes)
-        # https://support.airtable.com/hc/en-us/articles/203313985-Public-REST-API
-        # > 5 requests per second, per base
-        if response.status_code == 429:
-            if retry < 100:
-                retry = retry + 1
-                sleep(randint(1, 10))
-                continue
-            else:
-                return await send_msg(
-                    "Airtable: too many retries, aborting!", force_notify=True
-                )
-        retry = 0  # reset retry counter
-
-        if response.status_code != 200:
-            status_code = str(response.status_code)
-            response_text = response.text or "empty response"
-            message = (
-                "Airtable blocklist integration responded with code "
-                + status_code
-                + ": "
-                + response_text
-            )
-            return await send_msg(message, force_notify=False)
-
-        data = response.json()
-
-        if len(data["records"]) == 0:
-            return print(
-                "Airtable returned 0 records - make sure your configuration is correct"
-            )
-
-        skylinks = skylinks + [
-            entry["fields"].get(AIRTABLE_FIELD, "") for entry in data["records"]
-        ]
-        skylinks = [
-            skylink.strip() for skylink in skylinks if skylink
-        ]  # filter empty skylinks, most likely empty rows, trim whitespace
-
-        offset = data.get("offset")
-
-    print(
-        "Sending /skynet/blocklist request with "
-        + str(len(skylinks))
-        + " skylinks to siad"
-    )
-    response = requests.post(
-        "http://" + ipaddress + ":9980/skynet/blocklist",
-        data=json.dumps({"add": skylinks}),
-        headers={"User-Agent": "Sia-Agent"},
-        auth=HTTPBasicAuth("", get_api_password()),
-    )
-
-    if response.status_code != 200:
-        status_code = str(response.status_code)
-        response_text = response.text or "empty response"
-        message = (
-            "Airtable blocklist request responded with code "
-            + status_code
-            + ": "
-            + response_text
-        )
-        return await send_msg(message, force_notify=False)
-
-    response_json = json.loads(response.text)
-    invalid_skylinks = response_json["invalids"]
-
-    if invalid_skylinks is None:
-        return await send_msg("Blocklist successfully updated all skylinks")
-    return await send_msg(
-        "Blocklist responded ok but failed to update "
-        + str(len(invalid_skylinks))
-        + " skylinks: "
-        + json.dumps(invalid_skylinks)
-    )
-
-
-loop = asyncio.get_event_loop()
-loop.run_until_complete(run_checks())
-
-# --- BASH EQUIVALENT
-# skylinks=$(curl "https://api.airtable.com/v0/${AIRTABLE_BASE}/${AIRTABLE_TABLE}?fields%5B%5D=${AIRTABLE_FIELD}" -H "Authorization: Bearer ${AIRTABLE_KEY}" | python3 -c "import sys, json; print('[\"' + '\",\"'.join([entry['fields']['Link'] for entry in json.load(sys.stdin)['records']]) + '\"]')")
-# ipaddress=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sia)
-# curl --data "{\"add\" : ${skylinks}}" "${ipaddress}:8000/skynet/blocklist"
@@ -9,3 +9,4 @@ ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN6Kcx8yetova4/ALUQHigo/PBMJO33ZTKOsg2jxSO2a
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDcenWnMQ6q/OEC4ZmQgjLDV2obWlR3fENV0zRGFvJF+ marcins@siasky.net
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB7prtVOTwtcSN9HkXum107RwcW5H8Vggx6Qv7T57ItT daniel@siasky.net
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+4IrfEM9H16jqvPZncHkWWoHO4/BVq7d4pEyzK4e0W michal.leszczyk@skynetlabs.com
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHptEpqs57lhnHkfa+0SQgXQ4A63/YGV2cNTcGMQW+Jt david@skynetlabs.com
@@ -1,7 +1,6 @@
 0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/funds-checker.py /home/user/skynet-webportal/.env
 0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/log-checker.py /home/user/skynet-webportal/.env sia 8
 0 * * * * /home/user/skynet-webportal/setup-scripts/health-checker.py /home/user/skynet-webportal/.env sia 1
-30 */4 * * * /home/user/skynet-webportal/setup-scripts/blocklist-airtable.py /home/user/skynet-webportal/.env
 44 5 * * * /home/user/skynet-webportal/scripts/backup-aws-s3.sh 1>>/home/user/skynet-webportal/logs/backup-aws-s3.log 2>>/home/user/skynet-webportal/logs/backup-aws-s3.log
 6 13 * * * /home/user/skynet-webportal/scripts/db_backup.sh 1>>/home/user/skynet-webportal/logs/db_backup.log 2>>/home/user/skynet-webportal/logs/db_backup.log
-0 5 * * * /home/user/skynet-webportal/scripts/es_cleaner.py 1 http://localhost:9200
+0 5 * * * /usr/bin/docker run --rm --net=host -e ROLLOVER=true jaegertracing/jaeger-es-index-cleaner:latest 1 http://localhost:9200