Compare commits
4 Commits
main...feat/new-t

Author | SHA1 | Date
---|---|---
Marius | 80ff08a50c |
Marius | 947141b180 |
Marius | b7da32553d |
Marius | c941e5ef9a |
@@ -1,16 +1,16 @@
 version: 2
 updates:
   - package-ecosystem: github-actions
-    directory: /
+    directory: "/"
     schedule:
-      interval: monthly
+      interval: "daily"

   - package-ecosystem: docker
     directory: /
     schedule:
-      interval: monthly
+      interval: daily

   - package-ecosystem: gomod
     directory: /
     schedule:
-      interval: monthly
+      interval: daily
@@ -1,156 +0,0 @@
-# Taken from https://github.com/hrvey/combine-prs-workflow
-# This action can be triggered manually to combine multiple PRs for
-# dependency upgrades into a single PR. See the above links for
-# more details.
-name: 'Combine PRs'
-
-# Controls when the action will run - in this case triggered manually
-on:
-  workflow_dispatch:
-    inputs:
-      branchPrefix:
-        description: 'Branch prefix to find combinable PRs based on'
-        required: true
-        default: 'dependabot'
-      mustBeGreen:
-        description: 'Only combine PRs that are green (status is success). Set to false if repo does not run checks'
-        type: boolean
-        required: true
-        default: true
-      combineBranchName:
-        description: 'Name of the branch to combine PRs into'
-        required: true
-        default: 'combine-prs-branch'
-      ignoreLabel:
-        description: 'Exclude PRs with this label'
-        required: true
-        default: 'nocombine'
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  # This workflow contains a single job called "combine-prs"
-  combine-prs:
-    # The type of runner that the job will run on
-    runs-on: ubuntu-latest
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      - uses: actions/github-script@v6
-        id: create-combined-pr
-        name: Create Combined PR
-        with:
-          github-token: ${{secrets.GITHUB_TOKEN}}
-          script: |
-            const pulls = await github.paginate('GET /repos/:owner/:repo/pulls', {
-              owner: context.repo.owner,
-              repo: context.repo.repo
-            });
-            let branchesAndPRStrings = [];
-            let baseBranch = null;
-            let baseBranchSHA = null;
-            for (const pull of pulls) {
-              const branch = pull['head']['ref'];
-              console.log('Pull for branch: ' + branch);
-              if (branch.startsWith('${{ github.event.inputs.branchPrefix }}')) {
-                console.log('Branch matched prefix: ' + branch);
-                let statusOK = true;
-                if(${{ github.event.inputs.mustBeGreen }}) {
-                  console.log('Checking green status: ' + branch);
-                  const stateQuery = `query($owner: String!, $repo: String!, $pull_number: Int!) {
-                    repository(owner: $owner, name: $repo) {
-                      pullRequest(number:$pull_number) {
-                        commits(last: 1) {
-                          nodes {
-                            commit {
-                              statusCheckRollup {
-                                state
-                              }
-                            }
-                          }
-                        }
-                      }
-                    }
-                  }`
-                  const vars = {
-                    owner: context.repo.owner,
-                    repo: context.repo.repo,
-                    pull_number: pull['number']
-                  };
-                  const result = await github.graphql(stateQuery, vars);
-                  const [{ commit }] = result.repository.pullRequest.commits.nodes;
-                  const state = commit.statusCheckRollup.state
-                  console.log('Validating status: ' + state);
-                  if(state != 'SUCCESS') {
-                    console.log('Discarding ' + branch + ' with status ' + state);
-                    statusOK = false;
-                  }
-                }
-                console.log('Checking labels: ' + branch);
-                const labels = pull['labels'];
-                for(const label of labels) {
-                  const labelName = label['name'];
-                  console.log('Checking label: ' + labelName);
-                  if(labelName == '${{ github.event.inputs.ignoreLabel }}') {
-                    console.log('Discarding ' + branch + ' with label ' + labelName);
-                    statusOK = false;
-                  }
-                }
-                if (statusOK) {
-                  console.log('Adding branch to array: ' + branch);
-                  const prString = '#' + pull['number'] + ' ' + pull['title'];
-                  branchesAndPRStrings.push({ branch, prString });
-                  baseBranch = pull['base']['ref'];
-                  baseBranchSHA = pull['base']['sha'];
-                }
-              }
-            }
-            if (branchesAndPRStrings.length == 0) {
-              core.setFailed('No PRs/branches matched criteria');
-              return;
-            }
-            try {
-              await github.rest.git.createRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: 'refs/heads/' + '${{ github.event.inputs.combineBranchName }}',
-                sha: baseBranchSHA
-              });
-            } catch (error) {
-              console.log(error);
-              core.setFailed('Failed to create combined branch - maybe a branch by that name already exists?');
-              return;
-            }
-
-            let combinedPRs = [];
-            let mergeFailedPRs = [];
-            for(const { branch, prString } of branchesAndPRStrings) {
-              try {
-                await github.rest.repos.merge({
-                  owner: context.repo.owner,
-                  repo: context.repo.repo,
-                  base: '${{ github.event.inputs.combineBranchName }}',
-                  head: branch,
-                });
-                console.log('Merged branch ' + branch);
-                combinedPRs.push(prString);
-              } catch (error) {
-                console.log('Failed to merge branch ' + branch);
-                mergeFailedPRs.push(prString);
-              }
-            }
-
-            console.log('Creating combined PR');
-            const combinedPRsString = combinedPRs.join('\n');
-            let body = '✅ This PR was created by the Combine PRs action by combining the following PRs:\n' + combinedPRsString;
-            if(mergeFailedPRs.length > 0) {
-              const mergeFailedPRsString = mergeFailedPRs.join('\n');
-              body += '\n\n⚠️ The following PRs were left out due to merge conflicts:\n' + mergeFailedPRsString
-            }
-            await github.rest.pulls.create({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              title: 'Combined PR',
-              head: '${{ github.event.inputs.combineBranchName }}',
-              base: baseBranch,
-              body: body
-            });
@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go-version: [stable, oldstable]
+        go-version: [1.16.x, 1.17.x]
         platform: [ubuntu-latest, macos-latest, windows-latest]
     runs-on: ${{ matrix.platform }}
     env:
@@ -17,11 +17,11 @@ jobs:
     steps:
       -
        name: Checkout code
-       uses: actions/checkout@v3
+       uses: actions/checkout@v2.4.0

      -
       name: Install Go
-      uses: actions/setup-go@v4
+      uses: actions/setup-go@v2.1.4
       with:
         go-version: ${{ matrix.go-version }}

@@ -3,7 +3,7 @@ name: release
 on:
   push:
     branches:
-      - main
+      - master
     tags:
       - "v*"

@@ -13,7 +13,7 @@ jobs:
     steps:
       -
        name: Checkout code
-       uses: actions/checkout@v3
+       uses: actions/checkout@v2.4.0

      - run: |
          echo "GIT_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
@@ -21,7 +21,7 @@ jobs:
      -
       name: Docker meta
       id: docker_meta
-      uses: docker/metadata-action@v4.4.0
+      uses: docker/metadata-action@v3.6.0
       with:
         images: |
           ghcr.io/tus/tusd
@@ -35,13 +35,13 @@ jobs:
      -
       name: Set up Docker Buildx
       id: buildx
-      uses: docker/setup-buildx-action@v2.5.0
+      uses: docker/setup-buildx-action@v1.6.0
       with:
         install: true

      -
       name: Login to GitHub Container Registry
-      uses: docker/login-action@v2.1.0
+      uses: docker/login-action@v1.10.0
       with:
         registry: ghcr.io
         username: ${{ github.repository_owner }}
@@ -49,7 +49,7 @@ jobs:

      -
       name: Login to Docker Container Registry
-      uses: docker/login-action@v2.1.0
+      uses: docker/login-action@v1.10.0
       with:
         username: ${{ secrets.DOCKER_USERNAME }}
         password: ${{ secrets.DOCKER_PASSWORD }}
@@ -57,7 +57,7 @@ jobs:
      -
       name: Build and push
       id: build
-      uses: docker/build-push-action@v4
+      uses: docker/build-push-action@v2
       with:
         push: true
         builder: ${{ steps.buildx.outputs.name }}
@@ -68,7 +68,6 @@ jobs:
         build-args: |
           GIT_VERSION=${{ env.GIT_VERSION }}
           GIT_COMMIT=${{ github.sha }}
-        platforms: linux/amd64,linux/arm64/v8

  build-binary:
    runs-on: ubuntu-latest
@@ -78,13 +77,13 @@ jobs:
    steps:
      -
       name: Checkout code
-      uses: actions/checkout@v3
+      uses: actions/checkout@v2.4.0

      -
-      name: Install Go
-      uses: actions/setup-go@v4
+      name: Install Go 1.17.2
+      uses: actions/setup-go@v2
       with:
-        go-version: 'stable'
+        go-version: '1.17.2'

      -
       name: Build TUSD
@@ -92,7 +91,7 @@ jobs:

      -
       name: GitHub Release
-      uses: softprops/action-gh-release@v0.1.15
+      uses: softprops/action-gh-release@v0.1.13
       with:
         files: tusd_*.*

@@ -101,13 +100,12 @@ jobs:
    steps:
      -
       name: Checkout code
-      uses: actions/checkout@v3
+      uses: actions/checkout@v2.4.0

      -
       name: Deploy to heroku
-      uses: akhileshns/heroku-deploy@v3.12.14
+      uses: akhileshns/heroku-deploy@v3.12.12
       with:
         heroku_api_key: ${{secrets.HEROKU_API_KEY}}
         heroku_app_name: ${{secrets.HEROKU_APP_NAME}}
         heroku_email: ${{secrets.HEROKU_USER_EMAIL}}
-        stack: heroku-22
@@ -5,4 +5,3 @@ node_modules/
 .DS_Store
 ./tusd
 tusd_*_*
-.idea/
Dockerfile (20 changes)
@@ -1,4 +1,4 @@
-FROM --platform=$BUILDPLATFORM golang:1.20.4-alpine AS builder
+FROM golang:1.17.3-alpine AS builder
 WORKDIR /go/src/github.com/tus/tusd

 # Add gcc and libc-dev early so it is cached
@@ -19,33 +19,25 @@ COPY pkg/ ./pkg/
 ARG GIT_VERSION
 ARG GIT_COMMIT

-# Get the operating system and architecture to build for
-ARG TARGETOS
-ARG TARGETARCH
-
 RUN set -xe \
-    && GOOS=$TARGETOS GOARCH=$TARGETARCH go build \
+    && GOOS=linux GOARCH=amd64 go build \
        -ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${GIT_VERSION} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${GIT_COMMIT} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
        -o /go/bin/tusd ./cmd/tusd/main.go

 # start a new stage that copies in the binary built in the previous stage
-FROM alpine:3.18.0
+FROM alpine:3.14.3
 WORKDIR /srv/tusd-data

-COPY ./docker/entrypoint.sh /usr/local/share/docker-entrypoint.sh
-COPY ./docker/load-env.sh /usr/local/share/load-env.sh
-
-RUN apk add --no-cache ca-certificates jq bash \
+RUN apk add --no-cache ca-certificates jq \
    && addgroup -g 1000 tusd \
    && adduser -u 1000 -G tusd -s /bin/sh -D tusd \
    && mkdir -p /srv/tusd-hooks \
-   && chown tusd:tusd /srv/tusd-data \
-   && chmod +x /usr/local/share/docker-entrypoint.sh /usr/local/share/load-env.sh
+   && chown tusd:tusd /srv/tusd-data

 COPY --from=builder /go/bin/tusd /usr/local/bin/tusd

 EXPOSE 1080
 USER tusd

-ENTRYPOINT ["/usr/local/share/docker-entrypoint.sh"]
+ENTRYPOINT ["tusd"]
 CMD [ "--hooks-dir", "/srv/tusd-hooks" ]
@@ -1,6 +1,6 @@
 # tusd

-<img alt="Tus logo" src="https://github.com/tus/tus.io/blob/main/assets/img/tus1.png?raw=true" width="30%" align="right" />
+<img alt="Tus logo" src="https://github.com/tus/tus.io/blob/master/assets/img/tus1.png?raw=true" width="30%" align="right" />

 > **tus** is a protocol based on HTTP for *resumable file uploads*. Resumable
 > means that an upload can be interrupted at any moment and can be resumed without
@@ -133,10 +133,9 @@ func CreateComposer() {
        store := azurestore.New(azService)
        store.ObjectPrefix = Flags.AzObjectPrefix
        store.Container = Flags.AzStorage

        store.UseIn(Composer)

-       locker := memorylocker.New()
-       locker.UseIn(Composer)
    } else {
        dir, err := filepath.Abs(Flags.UploadDir)
        if err != nil {
@@ -21,9 +21,6 @@ var Flags struct {
    UploadDir          string
    Basepath           string
    ShowGreeting       bool
-   DisableDownload    bool
-   DisableTermination bool
-   DisableCors        bool
    Timeout            int64
    S3Bucket           string
    S3ObjectPrefix     string
@@ -59,6 +56,7 @@ var Flags struct {
    TLSCertFile string
    TLSKeyFile  string
    TLSMode     string
+   TusV2       bool

    CPUProfile string
 }
@@ -71,9 +69,6 @@ func ParseFlags() {
    flag.StringVar(&Flags.UploadDir, "upload-dir", "./data", "Directory to store uploads in")
    flag.StringVar(&Flags.Basepath, "base-path", "/files/", "Basepath of the HTTP server")
    flag.BoolVar(&Flags.ShowGreeting, "show-greeting", true, "Show the greeting message")
-   flag.BoolVar(&Flags.DisableDownload, "disable-download", false, "Disable the download endpoint")
-   flag.BoolVar(&Flags.DisableTermination, "disable-termination", false, "Disable the termination endpoint")
-   flag.BoolVar(&Flags.DisableCors, "disable-cors", false, "Disable CORS headers")
    flag.Int64Var(&Flags.Timeout, "timeout", 6*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
    flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")
    flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
@@ -83,7 +78,7 @@ func ParseFlags() {
    flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)")
    flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)")
    flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names")
-   flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)")
+   flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY environment variable to be set)")
    flag.StringVar(&Flags.AzContainerAccessType, "azure-container-access-type", "", "Access type when creating a new container if it does not exist (possible values: blob, container, '')")
    flag.StringVar(&Flags.AzBlobAccessTier, "azure-blob-access-tier", "", "Blob access tier when uploading new files (possible values: archive, cool, hot, '')")
    flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names")
@@ -108,6 +103,8 @@ func ParseFlags() {
    flag.StringVar(&Flags.TLSCertFile, "tls-certificate", "", "Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.")
    flag.StringVar(&Flags.TLSKeyFile, "tls-key", "", "Path to the file containing the key for the TLS certificate.")
    flag.StringVar(&Flags.TLSMode, "tls-mode", "tls12", "Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong.")
+   flag.BoolVar(&Flags.TusV2, "enable-tus-v2", false, "Enable support for the tus v2 protocol, next to support for v1 (experimental and may be removed/changed in the future)")
+
    flag.StringVar(&Flags.CPUProfile, "cpuprofile", "", "write cpu profile to file")
    flag.Parse()

@@ -8,12 +8,6 @@ import (
 var greeting string

 func PrepareGreeting() {
-   // Do not show information about metric endpoint, if it is not exposed
-   metricsInfo := ""
-   if Flags.ExposeMetrics {
-       metricsInfo = fmt.Sprintf("- %s - gather statistics to keep tusd running smoothly\n", Flags.MetricsPath)
-   }
-
    greeting = fmt.Sprintf(
        `Welcome to tusd
 ===============
@@ -26,14 +20,15 @@ While you did an awesome job on getting tusd running, this is just the welcome
 message, so let's talk about the places that really matter:

 - %s - send your tus uploads to this endpoint
-%s- https://github.com/tus/tusd/issues - report your bugs here
+- %s - gather statistics to keep tusd running smoothly
+- https://github.com/tus/tusd/issues - report your bugs here

 So quit lollygagging, send over your files and experience the future!

 Version = %s
 GitCommit = %s
 BuildDate = %s
-`, Flags.Basepath, metricsInfo, VersionName, GitCommit, BuildDate)
+`, Flags.Basepath, Flags.MetricsPath, VersionName, GitCommit, BuildDate)
 }

 func DisplayGreeting(w http.ResponseWriter, r *http.Request) {
@@ -27,9 +27,7 @@ func Serve() {
        MaxSize:                 Flags.MaxSize,
        BasePath:                Flags.Basepath,
        RespectForwardedHeaders: Flags.BehindProxy,
-       DisableDownload:         Flags.DisableDownload,
-       DisableTermination:      Flags.DisableTermination,
-       DisableCors:             Flags.DisableCors,
+       EnableTusV2:             Flags.TusV2,
        StoreComposer:           Composer,
        NotifyCompleteUploads:   true,
        NotifyTerminatedUploads: true,
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-. /usr/local/share/load-env.sh
-
-exec tusd "$@"
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-tusd_env_vars=(
-  AWS_ACCESS_KEY_ID
-  AWS_SECRET_ACCESS_KEY
-  AWS_REGION
-  GCS_SERVICE_ACCOUNT_FILE
-  AZURE_STORAGE_ACCOUNT
-  AZURE_STORAGE_KEY
-)
-
-for env_var in "${tusd_env_vars[@]}"; do
-  file_env_var="${env_var}_FILE"
-
-  if [[ -n "${!file_env_var:-}" ]]; then
-    if [[ -r "${!file_env_var:-}" ]]; then
-      export "${env_var}=$(< "${!file_env_var}")"
-      unset "${file_env_var}"
-    else
-      warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable."
-    fi
-  fi
-done
-
-unset tusd_env_vars
docs/faq.md (15 changes)
@@ -36,7 +36,7 @@ tusd allows any user to retrieve a previously uploaded file by issuing a HTTP GE

 ### How can I keep the original filename for the uploads?

-tusd will generate a unique ID for every upload, e.g. `1881febb4343e9b806cad2e676989c0d`, which is also used as the filename for storing the upload. If you want to keep the original filename, e.g. `my_image.png`, you will have to rename the uploaded file manually after the upload is completed. One can use the [`post-finish` hook](https://github.com/tus/tusd/blob/main/docs/hooks.md#post-finish) to be notified once the upload is completed. The client must also be configured to add the filename to the upload's metadata, which can be [accessed inside the hooks](https://github.com/tus/tusd/blob/main/docs/hooks.md#the-hooks-environment) and used for the renaming operation.
+tusd will generate a unique ID for every upload, e.g. `1881febb4343e9b806cad2e676989c0d`, which is also used as the filename for storing the upload. If you want to keep the original filename, e.g. `my_image.png`, you will have to rename the uploaded file manually after the upload is completed. One can use the [`post-finish` hook](https://github.com/tus/tusd/blob/master/docs/hooks.md#post-finish) to be notified once the upload is completed. The client must also be configured to add the filename to the upload's metadata, which can be [accessed inside the hooks](https://github.com/tus/tusd/blob/master/docs/hooks.md#the-hooks-environment) and used for the renaming operation.

 ### Does tusd support Cross-Origin Resource Sharing (CORS)?

@@ -58,16 +58,3 @@ To make your setup easier, tusd already includes the necessary CORS configuratio
 * `Upload-Concat`: A tus specific header used to indicate if the containing HTTP request is the final request for uploading a file or not. See [here](https://tus.io/protocols/resumable-upload.html#upload-concat) for details.

 If you are looking for a way to communicate additional information from a client to a server, use the `Upload-Metadata` header.
-
-### How to use Docker Secrets for credentials (Swarm mode only)
-
-Example usage with "minio"/S3 (AWS). Create the secrets:
-
-```bash
-printf "minio" | docker secret create minio-username -
-printf "miniosecret" | docker secret create minio-password -
-```
-
-Those commands create two secrets which are used inside the example [docker-compose.yml](../examples/docker-compose.yml) file.
-The provided example assumes, that you also have a service named "minio" inside the same Docker Network.
-We just append a _FILE suffix to the corresponding environment variables. The contents of the mounted file will be added to the environment variable without _FILE suffix.
@@ -1,6 +1,6 @@
 # Hooks

-When integrating tusd into an application, it is important to establish a communication channel between the two components. The tusd binary accomplishes this by providing a system which triggers actions when certain events happen, such as an upload being created or finished. This simple-but-powerful system enables use cases ranging from logging over validation and authorization to processing the uploaded files.
+When integrating tusd into an application, it is important to establish a communication channel between the two components. The tusd binary accomplishes this by providing a system which triggers actions when certain events happen, such as an upload being created or finished. This simple-but-powerful system enables uses ranging from logging over validation and authorization to processing the uploaded files.

 When a specific action happens during an upload (pre-create, post-receive, post-finish, or post-terminate), the hook system enables tusd to fire off a specific event. Tusd provides two ways of doing this:

@@ -41,7 +41,7 @@ A non-zero exit code or HTTP response greater than `400` will return a HTTP 500

 ### post-finish

-This event will be triggered after an upload is fully finished, meaning that all chunks have been transferred and saved in the storage. After this point, no further modifications, except possible deletion, can be made to the upload entity and it may be desirable to use the file for further processing or notify other applications of the completions of this upload.
+This event will be triggered after an upload is fully finished, meaning that all chunks have been transfered and saved in the storage. After this point, no further modifications, except possible deletion, can be made to the upload entity and it may be desirable to use the file for further processing or notify other applications of the completions of this upload.

 ### post-terminate

@@ -49,7 +49,7 @@ This event will be triggered after an upload has been terminated, meaning that t

 ### post-receive

-This event will be triggered for every running upload to indicate its current progress. It will be emitted whenever the server has received more data from the client but at most every second. The offset property will be set to the number of bytes which have been transferred to the server, at the time in total. Please be aware that this number may be higher than the number of bytes which have been stored by the data store!
+This event will be triggered for every running upload to indicate its current progress. It will be emitted whenever the server has received more data from the client but at most every second. The offset property will be set to the number of bytes which have been transfered to the server, at the time in total. Please be aware that this number may be higher than the number of bytes which have been stored by the data store!

 ## Whitelisting Hook Events

@@ -101,7 +101,7 @@ The process of the hook files are provided with information about the event and
     "filename": "transloadit.png"
   },
   // Details about where the data store saved the uploaded file. The different
-  // available keys vary depending on the used data store.
+  // availabl keys vary depending on the used data store.
   "Storage": {
     // For example, the filestore supplies the absolute file path:
     "Type": "filestore",
@@ -176,7 +176,7 @@ Tusd will issue a `POST` request to the specified URL endpoint, specifying the h
     "filename": "transloadit.png"
   },
   // Details about where the data store saved the uploaded file. The different
-  // available keys vary depending on the used data store.
+  // availabl keys vary depending on the used data store.
   "Storage": {
     // For example, the filestore supplies the absolute file path:
     "Type": "filestore",
@@ -253,7 +253,7 @@ Tusd will issue a `gRPC` request to the specified endpoint, specifying the hook
     "filename": "transloadit.png"
   },
   // Details about where the data store saved the uploaded file. The different
-  // available keys vary depending on the used data store.
+  // availabl keys vary depending on the used data store.
   "Storage": {
     // For example, the filestore supplies the absolute file path:
     "Type": "filestore",
@@ -9,14 +9,13 @@ Windows in various formats of the
 ## Compile from source

 The only requirement for building tusd is [Go](http://golang.org/doc/install).
-We only test and support the [two latest major releases](https://go.dev/dl/) of
-Go, although tusd might also run with other versions.
-Once a recent Go version is installed, you can clone the git repository, install
-the remaining dependencies and build the binary:
+Currently only Go 1.12 and 1.13 is tested and supported and in the future only the two latest
+major releases will be supported.
+If you meet this criteria, you can clone the git repository, install the remaining
+dependencies and build the binary:

 ```bash
-git clone https://github.com/tus/tusd.git
+git clone git@github.com:tus/tusd.git
 cd tusd

 go build -o tusd cmd/tusd/main.go
@@ -67,50 +67,6 @@ $ tusd -gcs-bucket=my-test-bucket.com
 [tusd] Using /metrics as the metrics path.
 ```

-Tusd also supports storing uploads on Microsoft Azure Blob Storage. In order to enable this feature, provide the
-corresponding access credentials using environment variables.
-
-```
-$ export AZURE_STORAGE_ACCOUNT=xxxxx
-$ export AZURE_STORAGE_KEY=xxxxx
-$ tusd -azure-storage my-test-container
-[tusd] 2023/02/13 16:13:20.937373 Custom Azure Endpoint not specified in flag variable azure-endpoint.
-Using endpoint https://xxxxx.blob.core.windows.net
-[tusd] Using 0.00MB as maximum size.
-[tusd] Using 0.0.0.0:1080 as address to listen.
-[tusd] Using /files/ as the base path.
-[tusd] Using /metrics as the metrics path.
-```
-
-If you want to upload to Microsoft Azure Blob Storage using a custom endpoint, e.g when using [Azurite](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-azurite) for local development,
-you can specify the endpoint using the `-azure-endpoint` flag.
-
-```
-$ export AZURE_STORAGE_ACCOUNT=devstoreaccount1
-$ export AZURE_STORAGE_KEY=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
-$ tusd -azure-storage my-test-container -azure-endpoint https://my-custom-endpoint.com
-[tusd] 2023/02/13 16:15:18.641937 Using Azure endpoint http://127.0.0.1:10000/devstoreaccount1
-[tusd] Using 0.00MB as maximum size.
-[tusd] Using 0.0.0.0:1080 as address to listen.
-[tusd] Using /files/ as the base path.
-[tusd] Using /metrics as the metrics path.
-```
-
-You can also upload blobs to Microsoft Azure Blob Storage with a different storage tier, than what is set as the default for the storage account.
-This can be done by using the `-azure-blob-access-tier` flag.
-
-```
-$ export AZURE_STORAGE_ACCOUNT=xxxxx
-$ export AZURE_STORAGE_KEY=xxxxx
-$ tusd -azure-storage my-test-container -azure-blob-access-tier cool
-[tusd] 2023/02/13 16:13:20.937373 Custom Azure Endpoint not specified in flag variable azure-endpoint.
-Using endpoint https://xxxxx.blob.core.windows.net
-[tusd] Using 0.00MB as maximum size.
-[tusd] Using 0.0.0.0:1080 as address to listen.
-[tusd] Using /files/ as the base path.
-[tusd] Using /metrics as the metrics path.
-```
-
 TLS support for HTTPS connections can be enabled by supplying a certificate and private key. Note that the certificate file must include the entire chain of certificates up to the CA certificate. The default configuration supports TLSv1.2 and TLSv1.3. It is possible to use only TLSv1.3 with `-tls-mode=tls13`; alternately, it is possible to disable TLSv1.3 and use only 256-bit AES ciphersuites with `-tls-mode=tls12-strong`. The following example generates a self-signed certificate for `localhost` and then uses it to serve files on the loopback address; that this certificate is not appropriate for production use. Note also that the key file must not be encrypted/require a passphrase.

 ```
@@ -136,28 +92,17 @@ options:

 ```
 $ tusd -help
-  -azure-blob-access-tier string
-        Blob access tier when uploading new files (possible values: archive, cool, hot, '')
-  -azure-container-access-type string
-        Access type when creating a new container if it does not exist (possible values: blob, container, '')
-  -azure-endpoint string
-        Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)
-  -azure-object-prefix string
-        Prefix for Azure object names
-  -azure-storage string
-        Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)
+Usage of tusd:
   -base-path string
        Basepath of the HTTP server (default "/files/")
   -behind-proxy
        Respect X-Forwarded-* and similar headers which may be set by proxies
-  -cpuprofile string
-        write cpu profile to file
   -expose-metrics
        Expose metrics about tusd usage (default true)
   -gcs-bucket string
        Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)
   -gcs-object-prefix string
-        Prefix for GCS object names
+        Prefix for GCS object names (can't contain underscore character)
   -hooks-dir string
        Directory to search for available hooks scripts
   -hooks-enabled-events string
|
||||||
Port to bind HTTP server to (default "1080")
|
Port to bind HTTP server to (default "1080")
|
||||||
-s3-bucket string
|
-s3-bucket string
|
||||||
Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)
|
Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)
|
||||||
-s3-disable-content-hashes
|
|
||||||
Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 for minimized CPU usage (experimental and may be removed in the future)
|
|
||||||
-s3-disable-ssl
|
|
||||||
Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)
|
|
||||||
-s3-endpoint string
|
-s3-endpoint string
|
||||||
Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be pass)
|
Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be pass)
|
||||||
-s3-object-prefix string
|
-s3-object-prefix string
|
||||||
|
@@ -202,8 +143,6 @@ $ tusd -help
        Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future) (default 52428800)
   -s3-transfer-acceleration
        Use AWS S3 transfer acceleration endpoint (requires -s3-bucket option and Transfer Acceleration property on S3 bucket to be set)
-  -show-greeting
-        Show the greeting message (default true)
   -timeout int
        Read timeout for connections in milliseconds. A zero value means that reads will not timeout (default 6000)
   -tls-certificate string
@@ -216,11 +155,8 @@ $ tusd -help
        If set, will listen to a UNIX socket at this location instead of a TCP socket
   -upload-dir string
        Directory to store uploads in (default "./data")
-  -disable-cors
-        Disables CORS headers. If set to true, tusd will not send any CORS related header. This is useful if you have a proxy sitting in front of tusd that handles CORS (default false)
   -verbose
        Enable verbose logging output (default true)
   -version
        Print tusd version information

 ```
@@ -1,28 +0,0 @@
-version: "3.9"
-services:
-  tusd:
-    image: tusproject/tusd:v1.9
-    command: -verbose -s3-bucket mybucket -s3-endpoint http://minio:9000
-    volumes:
-      - tusd:/data
-    environment:
-      - AWS_REGION=us-east-1
-      - AWS_ACCESS_KEY_ID_FILE=/run/secrets/minio-username
-      - AWS_SECRET_ACCESS_KEY_FILE=/run/secrets/minio-password
-    secrets:
-      - minio-username
-      - minio-password
-    networks:
-      - tusd
-
-volumes:
-  tusd:
-
-secrets:
-  minio-username:
-    external: true
-  minio-password:
-    external: true
-
-networks:
-  tusd:
go.mod (20 changes)
@@ -2,23 +2,23 @@ module github.com/tus/tusd

 // Specify the Go version needed for the Heroku deployment
 // See https://github.com/heroku/heroku-buildpack-go#go-module-specifics
-// +heroku goVersion go1.19
+// +heroku goVersion go1.16
 go 1.16

 require (
-   cloud.google.com/go/storage v1.30.1
+   cloud.google.com/go/storage v1.18.2
    github.com/Azure/azure-storage-blob-go v0.14.0
-   github.com/aws/aws-sdk-go v1.44.275
+   github.com/aws/aws-sdk-go v1.42.8
    github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
    github.com/golang/mock v1.6.0
-   github.com/golang/protobuf v1.5.3
-   github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
-   github.com/prometheus/client_golang v1.15.1
-   github.com/sethgrid/pester v1.2.0
-   github.com/stretchr/testify v1.8.4
+   github.com/golang/protobuf v1.5.2
+   github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+   github.com/prometheus/client_golang v1.11.0
+   github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
+   github.com/stretchr/testify v1.7.0
    github.com/vimeo/go-util v1.4.1
-   google.golang.org/api v0.125.0
-   google.golang.org/grpc v1.55.0
+   google.golang.org/api v0.60.0
+   google.golang.org/grpc v1.42.0
    gopkg.in/Acconut/lockfile.v1 v1.1.0
    gopkg.in/h2non/gock.v1 v1.1.2
 )
@@ -26,7 +26,6 @@ import (
    "strings"

    "github.com/Azure/azure-storage-blob-go/azblob"
-   "github.com/tus/tusd/pkg/handler"
 )

 const (
@@ -176,13 +175,9 @@ func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err erro

    // If the file does not exist, it will not return an error, but a 404 status and body
    if downloadResponse != nil && downloadResponse.StatusCode() == 404 {
-       return nil, handler.ErrNotFound
+       return nil, fmt.Errorf("File %s does not exist", blockBlob.Blob.ToBlockBlobURL())
    }
    if err != nil {
-       // This might occur when the blob is being uploaded, but a block list has not been committed yet
-       if isAzureError(err, "BlobNotFound") {
-           err = handler.ErrNotFound
-       }
        return nil, err
    }

@@ -205,8 +200,8 @@ func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) {

    getBlock, err := blockBlob.Blob.GetBlockList(ctx, azblob.BlockListAll, azblob.LeaseAccessConditions{})
    if err != nil {
-       if isAzureError(err, "BlobNotFound") {
-           err = handler.ErrNotFound
+       if err.(azblob.StorageError).ServiceCode() == azblob.ServiceCodeBlobNotFound {
+           return 0, nil
        }

        return 0, err
@@ -266,9 +261,6 @@ func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
        return nil, fmt.Errorf("File %s does not exist", infoBlob.Blob.ToBlockBlobURL())
    }
    if err != nil {
-       if isAzureError(err, "BlobNotFound") {
-           err = handler.ErrNotFound
-       }
        return nil, err
    }

@@ -316,10 +308,3 @@ func blockIDBase64ToInt(blockID string) int {
    blockIDBase64ToBinary(blockID)
    return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID)))
 }
-
-func isAzureError(err error, code string) bool {
-   if err, ok := err.(azblob.StorageError); ok && string(err.ServiceCode()) == code {
-       return true
-   }
-   return false
-}
@@ -83,7 +83,7 @@ func (store AzureStore) NewUpload(ctx context.Context, info handler.FileInfo) (h
    return azUpload, nil
 }

-func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
+func (store AzureStore) GetUpload(ctx context.Context, id string) (handle handler.Upload, err error) {
    info := handler.FileInfo{}
    infoFile := store.keyWithPrefix(store.infoPath(id))
    infoBlob, err := store.Service.NewBlob(ctx, infoFile)
@@ -112,7 +112,7 @@ func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Uploa
    }

    offset, err := blockBlob.GetOffset(ctx)
-   if err != nil && err != handler.ErrNotFound {
+   if err != nil {
        return nil, err
    }

@@ -52,7 +52,8 @@ func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (ha
    if info.ID == "" {
        info.ID = uid.Uid()
    }
-   binPath := store.binPath(info.ID)
+   id := info.ID
+   binPath := store.binPath(id)
    info.Storage = map[string]string{
        "Type": "filestore",
        "Path": binPath,
|
@ -73,8 +74,8 @@ func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (ha
|
||||||
|
|
||||||
upload := &fileUpload{
|
upload := &fileUpload{
|
||||||
info: info,
|
info: info,
|
||||||
infoPath: store.infoPath(info.ID),
|
infoPath: store.infoPath(id),
|
||||||
binPath: binPath,
|
binPath: store.binPath(id),
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeInfo creates the file by itself if necessary
|
// writeInfo creates the file by itself if necessary
|
||||||
|
|
|
@@ -129,10 +129,6 @@ const COMPOSE_RETRIES = 3

 // Compose takes a bucket name, a list of initial source names, and a destination string to compose multiple GCS objects together
 func (service *GCSService) compose(ctx context.Context, bucket string, srcs []string, dst string) error {
-   if len(srcs) < 1 {
-       return fmt.Errorf("empty srcs passed to compose for bucket: %s dest: %s", bucket, dst)
-   }
-
    dstParams := GCSObjectParams{
        Bucket: bucket,
        ID:     dst,
@@ -195,30 +195,6 @@ func TestComposeObjects(t *testing.T) {
    }
 }

-func TestComposeNoObjects(t *testing.T) {
-   ctx := context.Background()
-   client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
-   if err != nil {
-       t.Fatal(err)
-       return
-   }
-
-   service := GCSService{
-       Client: client,
-   }
-
-   err = service.ComposeObjects(ctx, GCSComposeParams{
-       Bucket:      "test-bucket",
-       Sources:     []string{},
-       Destination: "test_all",
-   })
-
-   if err == nil {
-       t.Errorf("Error: %v", err)
-       return
-   }
-}
-
 func TestGetObjectAttrs(t *testing.T) {
    defer gock.Off()

@@ -270,10 +270,6 @@ func (upload gcsUpload) FinishUpload(ctx context.Context) error {
        return err
    }

-   if len(names) == 0 {
-       return fmt.Errorf("no GCS objects found with FilterObjects %+v", filterParams)
-   }
-
    composeParams := GCSComposeParams{
        Bucket:      store.Bucket,
        Destination: store.keyWithPrefix(id),
@@ -22,15 +22,9 @@ type Config struct {
    // absolute URL containing a scheme, e.g. "http://tus.io"
    BasePath string
    isAbs    bool
-   // DisableDownload indicates whether the server will refuse downloads of the
-   // uploaded file, by not mounting the GET handler.
-   DisableDownload bool
-   // DisableTermination indicates whether the server will refuse termination
-   // requests of the uploaded file, by not mounting the DELETE handler.
-   DisableTermination bool
-   // Disable cors headers. If set to true, tusd will not send any CORS related header.
-   // This is useful if you have a proxy sitting in front of tusd that handles CORS.
-   DisableCors bool
+   // EnableTusV2 controls whether the new and experimental tus v2 protocol is
+   // accepted, next to the current tus v1 protocol.
+   EnableTusV2 bool
    // NotifyCompleteUploads indicates whether sending notifications about
    // completed uploads using the CompleteUploads channel should be enabled.
    NotifyCompleteUploads bool
@ -62,7 +56,7 @@ type Config struct {
|
||||||
|
|
||||||
func (config *Config) validate() error {
|
func (config *Config) validate() error {
|
||||||
if config.Logger == nil {
|
if config.Logger == nil {
|
||||||
config.Logger = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Lmicroseconds)
|
config.Logger = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Ltime)
|
||||||
}
|
}
|
||||||
|
|
||||||
base := config.BasePath
|
base := config.BasePath
|
||||||
|
|
|
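For reference, opting in to the experimental protocol then mirrors the handler setup used by the tests in this diff. A minimal sketch, assuming the usual filestore/composer wiring from the tusd packages (the upload directory and base path are placeholders, not part of this change):

store := filestore.New("./uploads")
composer := NewStoreComposer()
store.UseIn(composer)

// EnableTusV2 is the only new knob; everything else is unchanged v1 setup.
handler, err := NewHandler(Config{
	StoreComposer: composer,
	BasePath:      "/files/",
	EnableTusV2:   true,
})
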
@@ -22,29 +22,7 @@ func TestCORS(t *testing.T) {
 			Code: http.StatusOK,
 			ResHeader: map[string]string{
 				"Access-Control-Allow-Headers": "Authorization, Origin, X-Requested-With, X-Request-ID, X-HTTP-Method-Override, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat",
-				"Access-Control-Allow-Methods": "POST, HEAD, PATCH, OPTIONS, GET, DELETE",
-				"Access-Control-Max-Age":       "86400",
-				"Access-Control-Allow-Origin":  "tus.io",
-			},
-		}).Run(handler, t)
-	})
-
-	SubTest(t, "Conditional allow methods", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
-		handler, _ := NewHandler(Config{
-			StoreComposer:      composer,
-			DisableTermination: true,
-			DisableDownload:    true,
-		})
-
-		(&httpTest{
-			Method: "OPTIONS",
-			ReqHeader: map[string]string{
-				"Origin": "tus.io",
-			},
-			Code: http.StatusOK,
-			ResHeader: map[string]string{
-				"Access-Control-Allow-Headers": "Authorization, Origin, X-Requested-With, X-Request-ID, X-HTTP-Method-Override, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat",
-				"Access-Control-Allow-Methods": "POST, HEAD, PATCH, OPTIONS",
+				"Access-Control-Allow-Methods": "POST, GET, HEAD, PATCH, DELETE, OPTIONS",
 				"Access-Control-Max-Age":       "86400",
 				"Access-Control-Allow-Origin":  "tus.io",
 			},
@@ -96,20 +74,4 @@ func TestCORS(t *testing.T) {
 			t.Errorf("expected header to contain METHOD but got: %#v", methods)
 		}
 	})
-
-	SubTest(t, "Disable CORS", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
-		handler, _ := NewHandler(Config{
-			StoreComposer: composer,
-			DisableCors:   true,
-		})
-
-		(&httpTest{
-			Method: "OPTIONS",
-			ReqHeader: map[string]string{
-				"Origin": "tus.io",
-			},
-			Code: http.StatusOK,
-			ResHeader: map[string]string{},
-		}).Run(handler, t)
-	})
 }
@@ -40,14 +40,21 @@ func NewHandler(config Config) (*Handler, error) {
 	mux.Post("", http.HandlerFunc(handler.PostFile))
 	mux.Head(":id", http.HandlerFunc(handler.HeadFile))
 	mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))
-	if !config.DisableDownload {
-		mux.Get(":id", http.HandlerFunc(handler.GetFile))
-	}
+	mux.Get(":id", http.HandlerFunc(handler.GetFile))
 
 	// Only attach the DELETE handler if the Terminate() method is provided
-	if config.StoreComposer.UsesTerminater && !config.DisableTermination {
+	if config.StoreComposer.UsesTerminater {
 		mux.Del(":id", http.HandlerFunc(handler.DelFile))
 	}
 
+	if config.EnableTusV2 {
+		mux.Head("", http.HandlerFunc(handler.HeadFile))
+
+		// Only attach the DELETE handler if the Terminate() method is provided
+		if config.StoreComposer.UsesTerminater {
+			mux.Del("", http.HandlerFunc(handler.DelFile))
+		}
+	}
+
 	return routedHandler, nil
 }
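Note that the v2 routes are registered on the empty path: in the protocol sketch this branch implements, an upload is addressed by its Upload-Token header rather than by a URL segment, so HEAD and DELETE are served from the creation URL itself (see extractUploadID further down).
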
@@ -319,44 +319,6 @@ func TestPost(t *testing.T) {
 		}).Run(handler, t)
 	})
 
-	SubTest(t, "RespectForwardedWithQuotes", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
-		// See https://github.com/tus/tusd/issues/809
-		ctrl := gomock.NewController(t)
-		defer ctrl.Finish()
-		upload := NewMockFullUpload(ctrl)
-
-		gomock.InOrder(
-			store.EXPECT().NewUpload(context.Background(), FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return(upload, nil),
-			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
-				ID:       "foo",
-				Size:     300,
-				MetaData: map[string]string{},
-			}, nil),
-		)
-
-		handler, _ := NewHandler(Config{
-			StoreComposer:           composer,
-			BasePath:                "/files/",
-			RespectForwardedHeaders: true,
-		})
-
-		(&httpTest{
-			Method: "POST",
-			ReqHeader: map[string]string{
-				"Tus-Resumable": "1.0.0",
-				"Upload-Length": "300",
-				"Forwarded":     `Forwarded: for=192.168.10.112;host="upload.example.tld:8443";proto=https`,
-			},
-			Code: http.StatusCreated,
-			ResHeader: map[string]string{
-				"Location": "https://upload.example.tld:8443/files/foo",
-			},
-		}).Run(handler, t)
-	})
-
 	SubTest(t, "FilterForwardedProtocol", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		ctrl := gomock.NewController(t)
 		defer ctrl.Finish()
@@ -19,7 +19,7 @@ const UploadLengthDeferred = "1"
 
 var (
 	reExtractFileID  = regexp.MustCompile(`([^/]+)\/?$`)
-	reForwardedHost  = regexp.MustCompile(`host="?([^;"]+)`)
+	reForwardedHost  = regexp.MustCompile(`host=([^;]+)`)
 	reForwardedProto = regexp.MustCompile(`proto=(https?)`)
 	reMimeType       = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
 )
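The new pattern no longer tolerates quoted host values, which RFC 7239 permits and which the deleted RespectForwardedWithQuotes test above exercised. A standalone sketch of the behavioral difference, using the header value from that test:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	value := `for=192.168.10.112;host="upload.example.tld:8443";proto=https`

	oldHost := regexp.MustCompile(`host="?([^;"]+)`) // pattern before this change
	newHost := regexp.MustCompile(`host=([^;]+)`)    // pattern after this change

	fmt.Println(oldHost.FindStringSubmatch(value)[1]) // upload.example.tld:8443
	fmt.Println(newHost.FindStringSubmatch(value)[1]) // "upload.example.tld:8443" (quotes kept)
}
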
@@ -53,24 +53,6 @@ func NewHTTPError(err error, statusCode int) HTTPError {
 	return httpError{err, statusCode}
 }
 
-type contextWithValues struct {
-	context.Context
-	valueHolder context.Context
-}
-
-func (c contextWithValues) Value(key interface{}) interface{} {
-	return c.valueHolder.Value(key)
-}
-
-func newContextWithValues(ctx context.Context) contextWithValues {
-	return contextWithValues{
-		// Use background to not get cancel event
-		Context: context.Background(),
-		// Use request context to get stored values
-		valueHolder: ctx,
-	}
-}
-
 var (
 	ErrUnsupportedVersion = NewHTTPError(errors.New("unsupported version"), http.StatusPreconditionFailed)
 	ErrMaxSizeExceeded    = NewHTTPError(errors.New("maximum size exceeded"), http.StatusRequestEntityTooLarge)
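With contextWithValues gone, the handlers below fall back to plain context.Background(), so values stored in the request context no longer reach the stores or hooks, and request cancellation is likewise not propagated; that appears to be an accepted simplification in this experimental branch.
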
@@ -116,12 +98,6 @@ type HookEvent struct {
 }
 
 func newHookEvent(info FileInfo, r *http.Request) HookEvent {
-	// The Host header field is not present in the header map, see https://pkg.go.dev/net/http#Request:
-	// > For incoming requests, the Host header is promoted to the
-	// > Request.Host field and removed from the Header map.
-	// That's why we add it back manually.
-	r.Header.Set("Host", r.Host)
-
 	return HookEvent{
 		Upload: info,
 		HTTPRequest: HTTPRequest{
@@ -241,21 +217,12 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 
 		header := w.Header()
 
-		if origin := r.Header.Get("Origin"); !handler.config.DisableCors && origin != "" {
+		if origin := r.Header.Get("Origin"); origin != "" {
 			header.Set("Access-Control-Allow-Origin", origin)
 
 			if r.Method == "OPTIONS" {
-				allowedMethods := "POST, HEAD, PATCH, OPTIONS"
-				if !handler.config.DisableDownload {
-					allowedMethods += ", GET"
-				}
-
-				if !handler.config.DisableTermination {
-					allowedMethods += ", DELETE"
-				}
-
 				// Preflight request
-				header.Add("Access-Control-Allow-Methods", allowedMethods)
+				header.Add("Access-Control-Allow-Methods", "POST, GET, HEAD, PATCH, DELETE, OPTIONS")
 				header.Add("Access-Control-Allow-Headers", "Authorization, Origin, X-Requested-With, X-Request-ID, X-HTTP-Method-Override, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat")
 				header.Set("Access-Control-Max-Age", "86400")
 
@@ -295,7 +262,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 		// Test if the version sent by the client is supported
 		// GET and HEAD methods are not checked since a browser may visit this URL and does
 		// not include this header. GET requests are not part of the specification.
-		if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" {
+		if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" && !handler.config.EnableTusV2 {
 			handler.sendError(w, r, ErrUnsupportedVersion)
 			return
 		}
@@ -308,7 +275,12 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 // PostFile creates a new file upload using the datastore after validating the
 // length and parsing the metadata.
 func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
-	ctx := newContextWithValues(r.Context())
+	if isTusV2Request(r) {
+		handler.PostFileV2(w, r)
+		return
+	}
+
+	ctx := context.Background()
 
 	// Check for presence of application/offset+octet-stream. If another content
 	// type is defined, it will be ignored and treated as none was set because
@@ -449,11 +421,123 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 	handler.sendResp(w, r, http.StatusCreated)
 }
 
+// PostFileV2 creates a new file upload using the datastore after validating the
+// length and parsing the metadata.
+func (handler *UnroutedHandler) PostFileV2(w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+
+	// TODO: Check that upload length deferring is supported
+
+	// Parse headers
+	// TODO: Make parsing of Upload-Offset optional
+	// TODO: What is the correct value for Upload-Incomplete?
+	// TODO: Also consider Content-Type and Content-Disposition (using https://play.golang.org/p/AjWbJB8vUk)
+	token := r.Header.Get("Upload-Token")
+	offset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
+	if err != nil || offset < 0 {
+		handler.sendError(w, r, ErrInvalidOffset)
+		return
+	}
+	isIncomplete := r.Header.Get("Upload-Incomplete") == "?1"
+
+	// 1. Get or create upload resource
+	// TODO: Create consistent ID from token? e.g. using SHA256
+	id := token
+	upload, err := handler.composer.Core.GetUpload(ctx, id)
+	if err == ErrNotFound {
+		info := FileInfo{
+			ID:             id,
+			SizeIsDeferred: true,
+			// TODO: Set metadata?
+			// MetaData: meta,
+		}
+
+		if handler.config.PreUploadCreateCallback != nil {
+			if err := handler.config.PreUploadCreateCallback(newHookEvent(info, r)); err != nil {
+				handler.sendError(w, r, err)
+				return
+			}
+		}
+
+		upload, err = handler.composer.Core.NewUpload(ctx, info)
+		if err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
+
+		handler.Metrics.incUploadsCreated()
+		handler.log("UploadCreated", "id", id, "size", "n/a", "url", "n/a")
+
+		if handler.config.NotifyCreatedUploads {
+			handler.CreatedUploads <- newHookEvent(info, r)
+		}
+	} else if err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	// 2. Verify offset
+	if handler.composer.UsesLocker {
+		lock, err := handler.lockUpload(id)
+		if err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
+
+		defer lock.Unlock()
+	}
+
+	info, err := upload.GetInfo(ctx)
+	if err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	if offset != info.Offset {
+		handler.sendError(w, r, ErrMismatchOffset)
+		return
+	}
+
+	// 3. Write chunk
+	if err := handler.writeChunk(ctx, upload, info, w, r); err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	// 4. Finish upload, if necessary
+	if !isIncomplete {
+		info, err = upload.GetInfo(ctx)
+		if err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
+
+		uploadLength := info.Offset
+
+		lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
+		if err := lengthDeclarableUpload.DeclareLength(ctx, uploadLength); err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
+
+		info.Size = uploadLength
+		info.SizeIsDeferred = false
+
+		if err := handler.finishUploadIfComplete(ctx, upload, info, r); err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
+	}
+
+	handler.sendResp(w, r, http.StatusCreated)
+}
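Taken together, a v2 upload is just a sequence of POSTs to the creation endpoint, all carrying the same Upload-Token, with "?1"/"?0" in Upload-Incomplete marking whether more data follows. A rough client sketch against this handler (the address, token, and payload are made up; Content-Type is set defensively, since the handler above only has a TODO for it):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func send(offset int, incomplete string, body []byte) error {
	req, err := http.NewRequest("POST", "http://localhost:1080/files/", bytes.NewReader(body))
	if err != nil {
		return err
	}
	// The token identifies the upload across requests; no URL id is involved.
	req.Header.Set("Upload-Token", "my-upload-token")
	req.Header.Set("Upload-Offset", fmt.Sprint(offset))
	// "?1" marks the upload as incomplete, "?0" marks the final chunk.
	req.Header.Set("Upload-Incomplete", incomplete)
	req.Header.Set("Content-Type", "application/offset+octet-stream")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
	return nil
}

func main() {
	// The first chunk creates the upload, the second finishes it.
	send(0, "?1", []byte("hello "))
	send(6, "?0", []byte("world"))
}

The diff then continues into HeadFile:
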
 // HeadFile returns the length and offset for the HEAD request
 func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
-	ctx := newContextWithValues(r.Context())
+	ctx := context.Background()
 
-	id, err := extractIDFromPath(r.URL.Path)
+	id, err := handler.extractUploadID(r)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -497,6 +581,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 		w.Header().Set("Upload-Concat", v)
 	}
 
+	if !isTusV2Request(r) {
 	if len(info.MetaData) != 0 {
 		w.Header().Set("Upload-Metadata", SerializeMetadataHeader(info.MetaData))
 	}
@@ -507,6 +592,13 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 		w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
 		w.Header().Set("Content-Length", strconv.FormatInt(info.Size, 10))
 	}
+	} else {
+		if info.SizeIsDeferred {
+			w.Header().Set("Upload-Incomplete", "?1")
+		} else {
+			w.Header().Set("Upload-Incomplete", "?0")
+		}
+	}
 
 	w.Header().Set("Cache-Control", "no-store")
 	w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10))
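The "?1"/"?0" values follow the structured-field boolean syntax of RFC 8941, which the early tus v2 drafts use for Upload-Incomplete; HeadFile emits here exactly what PostFileV2 parses above.
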
@@ -516,7 +608,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 // PatchFile adds a chunk to an upload. This operation is only allowed
 // if enough space in the upload is left.
 func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
-	ctx := newContextWithValues(r.Context())
+	ctx := context.Background()
 
 	// Check for presence of application/offset+octet-stream
 	if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
@@ -719,23 +811,22 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
 func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
 	// If the upload is completed, ...
 	if !info.SizeIsDeferred && info.Offset == info.Size {
-		// ... allow the data storage to finish and cleanup the upload
+		// ... allow custom mechanism to finish and cleanup the upload
 		if err := upload.FinishUpload(ctx); err != nil {
 			return err
 		}
 
-		// ... allow the hook callback to run before sending the response
-		if handler.config.PreFinishResponseCallback != nil {
-			if err := handler.config.PreFinishResponseCallback(newHookEvent(info, r)); err != nil {
-				return err
-			}
+		// ... send the info out to the channel
+		if handler.config.NotifyCompleteUploads {
+			handler.CompleteUploads <- newHookEvent(info, r)
 		}
 
 		handler.Metrics.incUploadsFinished()
 
-		// ... send the info out to the channel
-		if handler.config.NotifyCompleteUploads {
-			handler.CompleteUploads <- newHookEvent(info, r)
+		if handler.config.PreFinishResponseCallback != nil {
+			if err := handler.config.PreFinishResponseCallback(newHookEvent(info, r)); err != nil {
+				return err
+			}
 		}
 	}
@@ -745,7 +836,7 @@ func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, uplo
 // GetFile handles requests to download a file using a GET request. This is not
 // part of the specification.
 func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
-	ctx := newContextWithValues(r.Context())
+	ctx := context.Background()
 
 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {
@@ -805,10 +896,10 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
 
 // mimeInlineBrowserWhitelist is a map containing MIME types which should be
 // allowed to be rendered by browser inline, instead of being forced to be
 // downloaded. For example, HTML or SVG files are not allowed, since they may
 // contain malicious JavaScript. In a similar fashion PDF is not on this list
 // as its parsers commonly contain vulnerabilities which can be exploited.
 // The values of this map do not convey any meaning and are therefore just
 // empty structs.
 var mimeInlineBrowserWhitelist = map[string]struct{}{
 	"text/plain": struct{}{},
@@ -866,7 +957,7 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st
 
 // DelFile terminates an upload permanently.
 func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
-	ctx := newContextWithValues(r.Context())
+	ctx := context.Background()
 
 	// Abort the request handling if the required interface is not implemented
 	if !handler.composer.UsesTerminater {
@@ -874,7 +965,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	id, err := extractIDFromPath(r.URL.Path)
+	id, err := handler.extractUploadID(r)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -1047,6 +1138,14 @@ func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader *bod
 	return stop
 }
 
+func (handler *UnroutedHandler) extractUploadID(r *http.Request) (string, error) {
+	if isTusV2Request(r) {
+		return r.Header.Get("Upload-Token"), nil
+	}
+
+	return extractIDFromPath(r.URL.Path)
+}
+
 // getHostAndProtocol extracts the host and used protocol (either HTTP or HTTPS)
 // from the given request. If `allowForwarded` is set, the X-Forwarded-Host,
 // X-Forwarded-Proto and Forwarded headers will also be checked to
@@ -1281,3 +1380,9 @@ func getRequestId(r *http.Request) string {
 
 	return reqId
 }
+
+// isTusV2Request returns whether an HTTP request includes a sign that it is
+// related to tus v2 (instead of tus v1)
+func isTusV2Request(r *http.Request) bool {
+	return r.Header.Get("Upload-Token") != ""
+}
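Worth noting: protocol dispatch rests entirely on the presence of Upload-Token, so a v1 client that sent that header for unrelated reasons would be treated as speaking v2. Also, the Tus-Resumable check relaxed in Middleware above is skipped for all requests once EnableTusV2 is set, not only for v2 ones.
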
@@ -92,9 +92,11 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 )
 
-// This regular expression matches every character which is not
-// considered valid into a header value according to RFC2616.
-var nonPrintableRegexp = regexp.MustCompile(`[^\x09\x20-\x7E]`)
+// This regular expression matches every character which is not defined in the
+// ASCII tables which range from 00 to 7F, inclusive.
+// It also matches the \r and \n characters which are not allowed in values
+// for HTTP headers.
+var nonASCIIRegexp = regexp.MustCompile(`([^\x00-\x7F]|[\r\n])`)
 
 // See the handler.DataStore interface for documentation about the different
 // methods.
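A standalone sketch of what the new pattern strips from metadata values (the sample strings are made up; "?" is the replacement used in NewUpload below). Unlike the old nonPrintableRegexp, it keeps ASCII control characters other than \r and \n but replaces every non-ASCII rune:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	nonASCIIRegexp := regexp.MustCompile(`([^\x00-\x7F]|[\r\n])`)

	fmt.Println(nonASCIIRegexp.ReplaceAllString("menü.txt", "?")) // men?.txt
	fmt.Println(nonASCIIRegexp.ReplaceAllString("a\r\nb", "?"))   // a??b
}
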
@@ -228,7 +230,7 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
 	for key, value := range info.MetaData {
 		// Copying the value is required in order to prevent it from being
 		// overwritten by the next iteration.
-		v := nonPrintableRegexp.ReplaceAllString(value, "?")
+		v := nonASCIIRegexp.ReplaceAllString(value, "?")
 		metadata[key] = &v
 	}
 
@@ -800,7 +802,7 @@ func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads
 				// Part numbers must be in the range of 1 to 10000, inclusive. Since
 				// slice indexes start at 0, we add 1 to ensure that i >= 1.
 				PartNumber: aws.Int64(int64(i + 1)),
-				CopySource: aws.String(store.Bucket + "/" + *store.keyWithPrefix(partialId)),
+				CopySource: aws.String(store.Bucket + "/" + partialId),
 			})
 			if err != nil {
 				errs = append(errs, err)
@@ -7,7 +7,7 @@
 #
 
 cat <<-EOH
-	# This file is generated via https://github.com/tus/tusd/blob/main/generate-docker-library.sh
+	# This file is generated via https://github.com/tus/tusd/blob/master/generate-docker-library.sh
 	Maintainers: tus.io (@tus), Thomas A. Hirsch (@thirsch)
 	GitRepo: https://github.com/tus/tusd.git
 	EOH