Compare commits
23 Commits
feature/di...main
Author | SHA1 | Date |
---|---|---|
Derrick Hammer | 7b20ce6a9a | |
dependabot[bot] | 2a401bc274 | |
dependabot[bot] | 596742fd98 | |
github-actions[bot] | fb0284b993 | |
Marius | e475df9895 | |
dependabot[bot] | 318aab451b | |
dependabot[bot] | 76fc247408 | |
dependabot[bot] | 912ff3df05 | |
dependabot[bot] | cfa859f866 | |
dependabot[bot] | d2bf62776b | |
Jonas Thelemann | db2b4918e2 | |
Marius | ee9f40158e | |
Marius | dcec3df83f | |
dependabot[bot] | a16d9a2ac6 | |
dependabot[bot] | 59afa4c213 | |
dependabot[bot] | 86518148b2 | |
dependabot[bot] | ae120982ae | |
dependabot[bot] | ec843312e5 | |
dependabot[bot] | 007e05618b | |
dependabot[bot] | c52a482720 | |
dependabot[bot] | d7b990e3c2 | |
dependabot[bot] | 3053ade119 | |
Christian Kaps | 7225439860 | |
@@ -0,0 +1,156 @@
+# Taken from https://github.com/hrvey/combine-prs-workflow
+# This action can be triggered manually to combine multiple PRs for
+# dependency upgrades into a single PR. See the above links for
+# more details.
+name: 'Combine PRs'
+
+# Controls when the action will run - in this case triggered manually
+on:
+  workflow_dispatch:
+    inputs:
+      branchPrefix:
+        description: 'Branch prefix to find combinable PRs based on'
+        required: true
+        default: 'dependabot'
+      mustBeGreen:
+        description: 'Only combine PRs that are green (status is success). Set to false if repo does not run checks'
+        type: boolean
+        required: true
+        default: true
+      combineBranchName:
+        description: 'Name of the branch to combine PRs into'
+        required: true
+        default: 'combine-prs-branch'
+      ignoreLabel:
+        description: 'Exclude PRs with this label'
+        required: true
+        default: 'nocombine'
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # This workflow contains a single job called "combine-prs"
+  combine-prs:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      - uses: actions/github-script@v6
+        id: create-combined-pr
+        name: Create Combined PR
+        with:
+          github-token: ${{secrets.GITHUB_TOKEN}}
+          script: |
+            const pulls = await github.paginate('GET /repos/:owner/:repo/pulls', {
+              owner: context.repo.owner,
+              repo: context.repo.repo
+            });
+            let branchesAndPRStrings = [];
+            let baseBranch = null;
+            let baseBranchSHA = null;
+            for (const pull of pulls) {
+              const branch = pull['head']['ref'];
+              console.log('Pull for branch: ' + branch);
+              if (branch.startsWith('${{ github.event.inputs.branchPrefix }}')) {
+                console.log('Branch matched prefix: ' + branch);
+                let statusOK = true;
+                if(${{ github.event.inputs.mustBeGreen }}) {
+                  console.log('Checking green status: ' + branch);
+                  const stateQuery = `query($owner: String!, $repo: String!, $pull_number: Int!) {
+                    repository(owner: $owner, name: $repo) {
+                      pullRequest(number:$pull_number) {
+                        commits(last: 1) {
+                          nodes {
+                            commit {
+                              statusCheckRollup {
+                                state
+                              }
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }`
+                  const vars = {
+                    owner: context.repo.owner,
+                    repo: context.repo.repo,
+                    pull_number: pull['number']
+                  };
+                  const result = await github.graphql(stateQuery, vars);
+                  const [{ commit }] = result.repository.pullRequest.commits.nodes;
+                  const state = commit.statusCheckRollup.state
+                  console.log('Validating status: ' + state);
+                  if(state != 'SUCCESS') {
+                    console.log('Discarding ' + branch + ' with status ' + state);
+                    statusOK = false;
+                  }
+                }
+                console.log('Checking labels: ' + branch);
+                const labels = pull['labels'];
+                for(const label of labels) {
+                  const labelName = label['name'];
+                  console.log('Checking label: ' + labelName);
+                  if(labelName == '${{ github.event.inputs.ignoreLabel }}') {
+                    console.log('Discarding ' + branch + ' with label ' + labelName);
+                    statusOK = false;
+                  }
+                }
+                if (statusOK) {
+                  console.log('Adding branch to array: ' + branch);
+                  const prString = '#' + pull['number'] + ' ' + pull['title'];
+                  branchesAndPRStrings.push({ branch, prString });
+                  baseBranch = pull['base']['ref'];
+                  baseBranchSHA = pull['base']['sha'];
+                }
+              }
+            }
+            if (branchesAndPRStrings.length == 0) {
+              core.setFailed('No PRs/branches matched criteria');
+              return;
+            }
+            try {
+              await github.rest.git.createRef({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                ref: 'refs/heads/' + '${{ github.event.inputs.combineBranchName }}',
+                sha: baseBranchSHA
+              });
+            } catch (error) {
+              console.log(error);
+              core.setFailed('Failed to create combined branch - maybe a branch by that name already exists?');
+              return;
+            }
+
+            let combinedPRs = [];
+            let mergeFailedPRs = [];
+            for(const { branch, prString } of branchesAndPRStrings) {
+              try {
+                await github.rest.repos.merge({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  base: '${{ github.event.inputs.combineBranchName }}',
+                  head: branch,
+                });
+                console.log('Merged branch ' + branch);
+                combinedPRs.push(prString);
+              } catch (error) {
+                console.log('Failed to merge branch ' + branch);
+                mergeFailedPRs.push(prString);
+              }
+            }
+
+            console.log('Creating combined PR');
+            const combinedPRsString = combinedPRs.join('\n');
+            let body = '✅ This PR was created by the Combine PRs action by combining the following PRs:\n' + combinedPRsString;
+            if(mergeFailedPRs.length > 0) {
+              const mergeFailedPRsString = mergeFailedPRs.join('\n');
+              body += '\n\n⚠️ The following PRs were left out due to merge conflicts:\n' + mergeFailedPRsString
+            }
+            await github.rest.pulls.create({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              title: 'Combined PR',
+              head: '${{ github.event.inputs.combineBranchName }}',
+              base: baseBranch,
+              body: body
+            });
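The workflow above has no automatic trigger; it only runs when dispatched by hand. As a usage sketch (not part of this change set), the dispatch can also be scripted against GitHub's documented workflow-dispatch REST endpoint. The `OWNER`/`REPO` placeholders, the assumed file name `combine-prs.yml`, and the `GITHUB_TOKEN` environment variable below are illustrative only.

```go
// Hypothetical dispatch sketch for the manually triggered "Combine PRs" workflow.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// workflow_dispatch inputs are passed as strings, even for boolean inputs.
	payload := map[string]interface{}{
		"ref": "main", // branch containing the workflow file (assumption)
		"inputs": map[string]string{
			"branchPrefix":      "dependabot",
			"mustBeGreen":       "true",
			"combineBranchName": "combine-prs-branch",
			"ignoreLabel":       "nocombine",
		},
	}
	body, _ := json.Marshal(payload)

	// Assumed workflow file name; replace OWNER/REPO with the real repository.
	url := "https://api.github.com/repos/OWNER/REPO/actions/workflows/combine-prs.yml/dispatches"
	req, _ := http.NewRequest("POST", url, bytes.NewReader(body))
	req.Header.Set("Accept", "application/vnd.github+json")
	req.Header.Set("Authorization", "Bearer "+os.Getenv("GITHUB_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 204 No Content on success
}
```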
@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        go-version: [1.18.x, 1.19.x]
+        go-version: [stable, oldstable]
         platform: [ubuntu-latest, macos-latest, windows-latest]
     runs-on: ${{ matrix.platform }}
     env:
@@ -21,7 +21,7 @@ jobs:

       -
         name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: ${{ matrix.go-version }}

@@ -21,7 +21,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@v4.3.0
+        uses: docker/metadata-action@v4.4.0
         with:
           images: |
             ghcr.io/tus/tusd
@@ -35,7 +35,7 @@ jobs:
       -
         name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v2.4.1
+        uses: docker/setup-buildx-action@v2.5.0
         with:
           install: true

@@ -81,10 +81,10 @@ jobs:
         uses: actions/checkout@v3

       -
-        name: Install Go 1.19
-        uses: actions/setup-go@v3
+        name: Install Go
+        uses: actions/setup-go@v4
         with:
-          go-version: '1.19.5'
+          go-version: 'stable'

       -
         name: Build TUSD
@@ -105,7 +105,7 @@ jobs:

       -
         name: Deploy to heroku
-        uses: akhileshns/heroku-deploy@v3.12.13
+        uses: akhileshns/heroku-deploy@v3.12.14
         with:
           heroku_api_key: ${{secrets.HEROKU_API_KEY}}
           heroku_app_name: ${{secrets.HEROKU_APP_NAME}}
Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.20.1-alpine AS builder
+FROM --platform=$BUILDPLATFORM golang:1.20.4-alpine AS builder
 WORKDIR /go/src/github.com/tus/tusd

 # Add gcc and libc-dev early so it is cached
@@ -19,13 +19,17 @@ COPY pkg/ ./pkg/
 ARG GIT_VERSION
 ARG GIT_COMMIT

+# Get the operating system and architecture to build for
+ARG TARGETOS
+ARG TARGETARCH
+
 RUN set -xe \
-    && GOOS=linux GOARCH=amd64 go build \
+    && GOOS=$TARGETOS GOARCH=$TARGETARCH go build \
        -ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${GIT_VERSION} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${GIT_COMMIT} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
        -o /go/bin/tusd ./cmd/tusd/main.go

 # start a new stage that copies in the binary built in the previous stage
-FROM alpine:3.17.2
+FROM alpine:3.18.0
 WORKDIR /srv/tusd-data

 COPY ./docker/entrypoint.sh /usr/local/share/docker-entrypoint.sh
@@ -23,6 +23,7 @@ var Flags struct {
 	ShowGreeting bool
 	DisableDownload bool
 	DisableTermination bool
+	DisableCors bool
 	Timeout int64
 	S3Bucket string
 	S3ObjectPrefix string
@@ -72,6 +73,7 @@ func ParseFlags() {
 	flag.BoolVar(&Flags.ShowGreeting, "show-greeting", true, "Show the greeting message")
 	flag.BoolVar(&Flags.DisableDownload, "disable-download", false, "Disable the download endpoint")
 	flag.BoolVar(&Flags.DisableTermination, "disable-termination", false, "Disable the termination endpoint")
+	flag.BoolVar(&Flags.DisableCors, "disable-cors", false, "Disable CORS headers")
 	flag.Int64Var(&Flags.Timeout, "timeout", 6*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
 	flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")
 	flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
@@ -106,7 +108,6 @@ func ParseFlags() {
 	flag.StringVar(&Flags.TLSCertFile, "tls-certificate", "", "Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.")
 	flag.StringVar(&Flags.TLSKeyFile, "tls-key", "", "Path to the file containing the key for the TLS certificate.")
 	flag.StringVar(&Flags.TLSMode, "tls-mode", "tls12", "Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong.")
-
 	flag.StringVar(&Flags.CPUProfile, "cpuprofile", "", "write cpu profile to file")
 	flag.Parse()

@@ -29,6 +29,7 @@ func Serve() {
 		RespectForwardedHeaders: Flags.BehindProxy,
 		DisableDownload: Flags.DisableDownload,
 		DisableTermination: Flags.DisableTermination,
+		DisableCors: Flags.DisableCors,
 		StoreComposer: Composer,
 		NotifyCompleteUploads: true,
 		NotifyTerminatedUploads: true,
@@ -216,6 +216,8 @@ $ tusd -help
 	If set, will listen to a UNIX socket at this location instead of a TCP socket
 -upload-dir string
 	Directory to store uploads in (default "./data")
+-disable-cors
+	Disables CORS headers. If set to true, tusd will not send any CORS related header. This is useful if you have a proxy sitting in front of tusd that handles CORS (default false)
 -verbose
 	Enable verbose logging output (default true)
 -version
go.mod
@@ -6,20 +6,19 @@ module github.com/tus/tusd
 go 1.16

 require (
-	cloud.google.com/go/storage v1.29.0
+	cloud.google.com/go/storage v1.30.1
 	github.com/Azure/azure-storage-blob-go v0.14.0
-	github.com/aws/aws-sdk-go v1.44.211
+	github.com/aws/aws-sdk-go v1.44.275
 	github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
 	github.com/golang/mock v1.6.0
-	github.com/golang/protobuf v1.5.2
-	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
-	github.com/prometheus/client_golang v1.14.0
+	github.com/golang/protobuf v1.5.3
+	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
+	github.com/prometheus/client_golang v1.15.1
 	github.com/sethgrid/pester v1.2.0
-	github.com/stretchr/testify v1.8.2
+	github.com/stretchr/testify v1.8.4
 	github.com/vimeo/go-util v1.4.1
-	golang.org/x/sys v0.6.0 // indirect
-	google.golang.org/api v0.111.0
-	google.golang.org/grpc v1.53.0
+	google.golang.org/api v0.125.0
+	google.golang.org/grpc v1.55.0
 	gopkg.in/Acconut/lockfile.v1 v1.1.0
 	gopkg.in/h2non/gock.v1 v1.1.2
 )
@@ -28,6 +28,9 @@ type Config struct {
 	// DisableTermination indicates whether the server will refuse termination
 	// requests of the uploaded file, by not mounting the DELETE handler.
 	DisableTermination bool
+	// Disable cors headers. If set to true, tusd will not send any CORS related header.
+	// This is useful if you have a proxy sitting in front of tusd that handles CORS.
+	DisableCors bool
 	// NotifyCompleteUploads indicates whether sending notifications about
 	// completed uploads using the CompleteUploads channel should be enabled.
 	NotifyCompleteUploads bool
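For applications that embed tusd as a library rather than running the CLI, the new `DisableCors` flag is set on this `Config`. A minimal sketch, assuming the tusd 1.x package layout (`pkg/handler`, `pkg/filestore`) documented in the project README; the base path, port, and upload directory are illustrative:

```go
// Embed the tusd handler with CORS headers disabled, so a fronting proxy
// can own all CORS behaviour.
package main

import (
	"net/http"

	"github.com/tus/tusd/pkg/filestore"
	"github.com/tus/tusd/pkg/handler"
)

func main() {
	store := filestore.New("./data")
	composer := handler.NewStoreComposer()
	store.UseIn(composer)

	h, err := handler.NewHandler(handler.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
		DisableCors:   true, // option introduced by this change set
	})
	if err != nil {
		panic(err)
	}

	http.Handle("/files/", http.StripPrefix("/files/", h))
	http.ListenAndServe(":8080", nil)
}
```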
@@ -96,4 +96,20 @@ func TestCORS(t *testing.T) {
 			t.Errorf("expected header to contain METHOD but got: %#v", methods)
 		}
 	})
+
+	SubTest(t, "Disable CORS", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		handler, _ := NewHandler(Config{
+			StoreComposer: composer,
+			DisableCors: true,
+		})
+
+		(&httpTest{
+			Method: "OPTIONS",
+			ReqHeader: map[string]string{
+				"Origin": "tus.io",
+			},
+			Code: http.StatusOK,
+			ResHeader: map[string]string{},
+		}).Run(handler, t)
+	})
 }
@@ -53,6 +53,24 @@ func NewHTTPError(err error, statusCode int) HTTPError {
 	return httpError{err, statusCode}
 }

+type contextWithValues struct {
+	context.Context
+	valueHolder context.Context
+}
+
+func (c contextWithValues) Value(key interface{}) interface{} {
+	return c.valueHolder.Value(key)
+}
+
+func newContextWithValues(ctx context.Context) contextWithValues {
+	return contextWithValues{
+		// Use background to not get cancel event
+		Context: context.Background(),
+		// Use request context to get stored values
+		valueHolder: ctx,
+	}
+}
+
 var (
 	ErrUnsupportedVersion = NewHTTPError(errors.New("unsupported version"), http.StatusPreconditionFailed)
 	ErrMaxSizeExceeded = NewHTTPError(errors.New("maximum size exceeded"), http.StatusRequestEntityTooLarge)
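The `contextWithValues` type added above gives request handling a context that still resolves request-scoped values but no longer inherits the client's cancellation, so an upload is not torn down the moment the connection goes away. The standalone sketch below (plain standard library, not tusd code) illustrates the behaviour this embedding relies on:

```go
// A context that reads values from a request context but never inherits
// its cancellation or deadline.
package main

import (
	"context"
	"fmt"
)

type valuesOnlyContext struct {
	context.Context                 // context.Background(): no deadline, no cancellation
	valueHolder     context.Context // the original (cancellable) context
}

// Value delegates lookups to the original context, so stored values survive.
func (c valuesOnlyContext) Value(key interface{}) interface{} {
	return c.valueHolder.Value(key)
}

func main() {
	type ctxKey string
	parent, cancel := context.WithCancel(context.Background())
	parent = context.WithValue(parent, ctxKey("upload-id"), "abc123")

	detached := valuesOnlyContext{Context: context.Background(), valueHolder: parent}

	cancel() // cancel the request-scoped context

	fmt.Println(parent.Err())                        // context canceled
	fmt.Println(detached.Err())                      // <nil> - cancellation is not inherited
	fmt.Println(detached.Value(ctxKey("upload-id"))) // abc123 - values still resolve
}
```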
@@ -98,6 +116,12 @@ type HookEvent struct {
 }

 func newHookEvent(info FileInfo, r *http.Request) HookEvent {
+	// The Host header field is not present in the header map, see https://pkg.go.dev/net/http#Request:
+	// > For incoming requests, the Host header is promoted to the
+	// > Request.Host field and removed from the Header map.
+	// That's why we add it back manually.
+	r.Header.Set("Host", r.Host)
+
 	return HookEvent{
 		Upload: info,
 		HTTPRequest: HTTPRequest{
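The `r.Header.Set("Host", r.Host)` line above works around a net/http behaviour that is easy to miss: the Host header never appears in `Request.Header`. A small standard-library sketch (the URL is illustrative only) demonstrating why the copy-back is needed:

```go
// net/http promotes Host into Request.Host and removes it from Header.
package main

import (
	"fmt"
	"net/http/httptest"
)

func main() {
	r := httptest.NewRequest("POST", "https://tus.example.com/files/", nil)

	fmt.Printf("Header[\"Host\"] = %q\n", r.Header.Get("Host")) // "" - not in the map
	fmt.Printf("r.Host         = %q\n", r.Host)                 // "tus.example.com"

	// Copy it back, as newHookEvent now does, so hook consumers that only
	// inspect the header map still see the original Host.
	r.Header.Set("Host", r.Host)
	fmt.Printf("Header[\"Host\"] = %q\n", r.Header.Get("Host")) // "tus.example.com"
}
```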
@@ -217,7 +241,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {

 		header := w.Header()

-		if origin := r.Header.Get("Origin"); origin != "" {
+		if origin := r.Header.Get("Origin"); !handler.config.DisableCors && origin != "" {
 			header.Set("Access-Control-Allow-Origin", origin)

 			if r.Method == "OPTIONS" {
@@ -284,7 +308,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 // PostFile creates a new file upload using the datastore after validating the
 // length and parsing the metadata.
 func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	// Check for presence of application/offset+octet-stream. If another content
 	// type is defined, it will be ignored and treated as none was set because
@@ -427,7 +451,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)

 // HeadFile returns the length and offset for the HEAD request
 func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {
@@ -492,7 +516,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 // PatchFile adds a chunk to an upload. This operation is only allowed
 // if enough space in the upload is left.
 func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	// Check for presence of application/offset+octet-stream
 	if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
@@ -721,7 +745,7 @@ func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, uplo
 // GetFile handles requests to download a file using a GET request. This is not
 // part of the specification.
 func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {
@@ -842,7 +866,7 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st

 // DelFile terminates an upload permanently.
 func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	// Abort the request handling if the required interface is not implemented
 	if !handler.composer.UsesTerminater {