Remove files and unify kube

kiloreux 2018-01-06 16:53:24 +01:00
parent 77614ae18f
commit c1ae4d4a6c
No known key found for this signature in database
GPG Key ID: 8B81EA3FA91EB758
11 changed files with 122 additions and 249 deletions
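
The sample Apache and nginx configurations are dropped, and the per-resource Kubernetes manifests (namespace, PVC, deployment, service, ingress and HPA) are unified: everything except the namespace now lives in a single .infra/kube/tusd-kube.yaml, so the whole stack is applied in one step (the same command the deploy script switches to below):

    $ kubectl apply --validate=false -f .infra/kube/tusd-kube.yaml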

View File

@@ -1,35 +0,0 @@
# Please make sure that you have the modules mod_ssl, mod_headers,
# mod_proxy and mod_proxy_http enabled. If not, you can use the following
# command:
# $ sudo a2enmod ssl headers proxy proxy_http
<VirtualHost *:443>
    ServerName localhost

    # Enable secure communication using HTTPS
    # Adjust the paths to the certificate files to your environment
    SSLEngine on
    SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key

    # Tell tusd that the HTTPS protocol is used, in order to
    # allow constructing correct upload URLs.
    RequestHeader set X-Forwarded-Proto "https"

    # Pass requests to the tusd server
    ProxyPass /files http://localhost:1080/files
    ProxyPassReverse /files http://localhost:1080/files
</VirtualHost>

# This SSL configuration has been taken from the Mozilla SSL Configuration Generator:
# https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.10.1&openssl=1.0.1e&hsts=no&profile=intermediate
SSLProtocol all -SSLv3
SSLCipherSuite ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS
SSLHonorCipherOrder on
SSLCompression off

# OCSP Stapling, only in httpd 2.3.3 and later
SSLUseStapling on
SSLStaplingResponderTimeout 5
SSLStaplingReturnResponderErrors off
SSLStaplingCache shmcb:/var/run/ocsp(128000)
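
Once this vhost is enabled, stapling can be checked by asking the server for the OCSP response directly; a quick sketch, assuming the proxy is reachable on localhost:443:

    $ openssl s_client -connect localhost:443 -status < /dev/null 2>/dev/null | grep -i -A 2 "OCSP response"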

View File

@@ -1,69 +0,0 @@
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;
    listen 443 http2 ssl;
    listen [::]:443 http2 ipv6only=on ssl;

    ssl_certificate /etc/letsencrypt/live/master.tus.io/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/master.tus.io/privkey.pem;
    ssl_trusted_certificate /etc/letsencrypt/live/master.tus.io/fullchain.pem;

    # Load custom parameters for Diffie Hellman key exchange to avoid the usage
    # of common primes
    ssl_dhparam /etc/nginx/dhparams.pem;

    # Restrict supported ciphers to prevent certain browsers from refusing to
    # connect because we are offering blacklisted ciphers. This configuration has
    # been generated by Mozilla's SSL Configuration Generator on the
    # intermediate profile and can be accessed at:
    # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.10.1&openssl=1.0.1e&hsts=no&profile=intermediate
    # More information about blacklisted ciphers can be found at:
    # http://security.stackexchange.com/questions/126775/understanding-blacklisted-ciphers-for-http2
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
    ssl_prefer_server_ciphers on;

    # Enable OCSP stapling which allows clients to verify that our certificate
    # is not revoked without contacting the Certificate Authority by appending a
    # CA-signed promise, that it's still valid, to the TLS handshake response.
    ssl_stapling on;
    ssl_stapling_verify on;

    # Enable SSL session cache to reduce overhead of TLS handshake. Allow nginx
    # workers to use 5MB of memory for caching but disable session tickets as
    # there is currently no easy way to rotate the ticket key which is not in
    # sync with the ideals of Perfect Forward Secrecy.
    ssl_session_timeout 1d;
    ssl_session_cache shared:SSL:5m;
    ssl_session_tickets off;

    server_name master.tus.io;

    # certbot will place the files required for the HTTP challenge in the
    # webroot under the .well-known/acme-challenge directory. Therefore we must
    # make this path publicly accessible.
    location /.well-known {
        root /mnt/nginx-www/;
    }

    location / {
        # Forward incoming requests to local tusd instance
        proxy_pass http://localhost:8080;

        # Disable request and response buffering
        proxy_request_buffering off;
        proxy_buffering off;
        proxy_http_version 1.1;

        # Add X-Forwarded-* headers
        proxy_set_header X-Forwarded-Host $hostname;
        proxy_set_header X-Forwarded-Proto $scheme;

        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        client_max_body_size 0;
        add_header King marius;
    }
}
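
A quick end-to-end test of this proxy is to create an upload via the tus protocol and check for the Location header in the response; a sketch, assuming tusd runs behind this server with its default /files/ base path:

    $ curl -si https://master.tus.io/files/ -X POST -H "Tus-Resumable: 1.0.0" -H "Upload-Length: 100" | grep -i "^location"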

View File

@@ -1,5 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: tus

View File

@@ -1,42 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: tusd
  namespace: tus
spec:
  replicas: 1
  minReadySeconds: 10
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: tusd
    spec:
      containers:
      - image: docker.io/tusproject/tusd:latest
        imagePullPolicy: Always
        args: ["-port=8080","-behind-proxy","-max-size=1000000000"]
        name: tusd
        resources:
          limits:
            cpu: 0.7
            memory: "2Gi"
          requests:
            cpu: 0.5
            memory: "1Gi"
        ports:
        - containerPort: 8080
        securityContext:
          runAsUser: 1000
          fsGroup: 1000
        volumeMounts:
        - name: tusd-disk
          mountPath: /srv/tusd-data
      volumes:
      - name: tusd-disk
        persistentVolumeClaim:
          claimName: tusd
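
Note that fsGroup is only valid in the pod-level securityContext, not under a container, which is presumably why the deploy script below applies this manifest with --validate=false. After an apply, the rollout can be followed with:

    $ kubectl rollout status deployment/tusd --namespace=tus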

View File

@@ -1,14 +0,0 @@
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: tusd
  namespace: tus
spec:
  scaleTargetRef:
    apiVersion: apps/v1beta1
    kind: Deployment
    name: tusd
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 90
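
Per the HPA algorithm this sizes the deployment as roughly desiredReplicas = ceil(currentReplicas * currentCPUUtilization / 90): for example, two pods averaging 180% of their CPU request would be scaled to ceil(2 * 180 / 90) = 4 replicas, capped at maxReplicas.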

View File

@@ -1,37 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tusd
  namespace: tus
  annotations:
    kubernetes.io/tls-acme: "true"
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
spec:
  tls:
  - hosts:
    - tusd.tus.io
    secretName: tusd-tls
  - hosts:
    - master.tus.io
    secretName: master-tls
  rules:
  - host: tusd.tus.io
    http:
      paths:
      - path: /
        backend:
          serviceName: tusd
          servicePort: 80
  - host: master.tus.io
    http:
      paths:
      - path: /
        backend:
          serviceName: tusd
          servicePort: 80
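
The kubernetes.io/tls-acme annotation delegates certificate provisioning for both hosts to the cluster's ACME controller (such as kube-lego), which stores the issued certificates in the referenced secrets; whether they have been populated can be checked with:

    $ kubectl get secret tusd-tls master-tls --namespace=tus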

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tusd
  namespace: tus
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: standard

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: tusd
  namespace: tus
spec:
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
  selector:
    app: tusd

.infra/kube/tusd-kube.yaml Normal file
View File

@@ -0,0 +1,119 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tusd
  namespace: tus
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: standard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: tusd
  namespace: tus
spec:
  replicas: 1
  minReadySeconds: 10
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: tusd
    spec:
      containers:
      - image: docker.io/tusproject/tusd:latest
        imagePullPolicy: Always
        args: ["-port=8080","-behind-proxy","-max-size=1000000000"]
        name: tusd
        resources:
          limits:
            cpu: 0.7
            memory: "2Gi"
          requests:
            cpu: 0.5
            memory: "1Gi"
        ports:
        - containerPort: 8080
        securityContext:
          runAsUser: 1000
          fsGroup: 1000
        volumeMounts:
        - name: tusd-disk
          mountPath: /srv/tusd-data
      volumes:
      - name: tusd-disk
        persistentVolumeClaim:
          claimName: tusd
---
apiVersion: v1
kind: Service
metadata:
  name: tusd
  namespace: tus
spec:
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
  selector:
    app: tusd
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tusd
  namespace: tus
  annotations:
    kubernetes.io/tls-acme: "true"
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
spec:
  tls:
  - hosts:
    - tusd.tus.io
    secretName: tusd-tls
  - hosts:
    - master.tus.io
    secretName: master-tls
  rules:
  - host: tusd.tus.io
    http:
      paths:
      - path: /
        backend:
          serviceName: tusd
          servicePort: 80
  - host: master.tus.io
    http:
      paths:
      - path: /
        backend:
          serviceName: tusd
          servicePort: 80
---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: tusd
  namespace: tus
spec:
  scaleTargetRef:
    apiVersion: apps/v1beta1
    kind: Deployment
    name: tusd
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 90
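
With everything in one manifest, a single command now confirms that all five resources exist after an apply:

    $ kubectl get pvc,deployment,service,ingress,hpa --namespace=tus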

View File

@@ -1,18 +0,0 @@
#!/usr/bin/env bash
set -e
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${__dir}/build_funcs.sh"
# Compile release archive for master.tus.io server
compile linux amd64
maketar linux amd64
cp ./tusd_linux_amd64.tar.gz "${__dir}/../.infra/files/"
pushd "${__dir}/../.infra"
yarn || npm install
./node_modules/.bin/frey --force-yes deploy
popd

View File

@@ -31,10 +31,8 @@ kubectl config set-context travis --cluster=$CLUSTER_NAME --user=travis --namesp
kubectl config use-context travis
-kubectl apply -f "${__root}/.infra/kube/pvc.yaml"
-kubectl apply --validate=false -f "${__root}/.infra/kube/deployment.yaml"
-kubectl apply -f "${__root}/.infra/kube/service.yaml"
-kubectl apply -f "${__root}/.infra/kube/ingress-tls.yaml"
+kubectl apply --validate=false -f "${__root}/.infra/kube/tusd-kube.yaml"
kubectl set image deployment/tusd --namespace=tus tusd=docker.io/tusproject/tusd:$TRAVIS_COMMIT
@@ -45,6 +43,7 @@ kubectl get deployment --namespace=tus
function cleanup {
  printf "Cleaning up...\n"
  rm -f ${HOME}/ca.crt
+  printf "Cleaning done."
}
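
For cleanup to actually run, the script presumably registers the function with a trap outside this hunk, along the lines of:

    trap cleanup EXIT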