Merge branch 'main' of github.com:tus/tusd into v2

Marius Kleidl 2023-06-09 12:33:33 +02:00
commit 633a088870
No known key found for this signature in database
GPG Key ID: 2F8AC5B12D755265
13 changed files with 689 additions and 74 deletions


@@ -0,0 +1,156 @@
# Taken from https://github.com/hrvey/combine-prs-workflow
# This action can be triggered manually to combine multiple PRs for
# dependency upgrades into a single PR. See the above links for
# more details.
name: 'Combine PRs'

# Controls when the action will run - in this case triggered manually
on:
  workflow_dispatch:
    inputs:
      branchPrefix:
        description: 'Branch prefix to find combinable PRs based on'
        required: true
        default: 'dependabot'
      mustBeGreen:
        description: 'Only combine PRs that are green (status is success). Set to false if repo does not run checks'
        type: boolean
        required: true
        default: true
      combineBranchName:
        description: 'Name of the branch to combine PRs into'
        required: true
        default: 'combine-prs-branch'
      ignoreLabel:
        description: 'Exclude PRs with this label'
        required: true
        default: 'nocombine'

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "combine-prs"
  combine-prs:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      - uses: actions/github-script@v6
        id: create-combined-pr
        name: Create Combined PR
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const pulls = await github.paginate('GET /repos/:owner/:repo/pulls', {
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            let branchesAndPRStrings = [];
            let baseBranch = null;
            let baseBranchSHA = null;
            for (const pull of pulls) {
              const branch = pull['head']['ref'];
              console.log('Pull for branch: ' + branch);
              if (branch.startsWith('${{ github.event.inputs.branchPrefix }}')) {
                console.log('Branch matched prefix: ' + branch);
                let statusOK = true;
                if(${{ github.event.inputs.mustBeGreen }}) {
                  console.log('Checking green status: ' + branch);
                  const stateQuery = `query($owner: String!, $repo: String!, $pull_number: Int!) {
                    repository(owner: $owner, name: $repo) {
                      pullRequest(number:$pull_number) {
                        commits(last: 1) {
                          nodes {
                            commit {
                              statusCheckRollup {
                                state
                              }
                            }
                          }
                        }
                      }
                    }
                  }`
                  const vars = {
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    pull_number: pull['number']
                  };
                  const result = await github.graphql(stateQuery, vars);
                  const [{ commit }] = result.repository.pullRequest.commits.nodes;
                  const state = commit.statusCheckRollup.state
                  console.log('Validating status: ' + state);
                  if(state != 'SUCCESS') {
                    console.log('Discarding ' + branch + ' with status ' + state);
                    statusOK = false;
                  }
                }
                console.log('Checking labels: ' + branch);
                const labels = pull['labels'];
                for(const label of labels) {
                  const labelName = label['name'];
                  console.log('Checking label: ' + labelName);
                  if(labelName == '${{ github.event.inputs.ignoreLabel }}') {
                    console.log('Discarding ' + branch + ' with label ' + labelName);
                    statusOK = false;
                  }
                }
                if (statusOK) {
                  console.log('Adding branch to array: ' + branch);
                  const prString = '#' + pull['number'] + ' ' + pull['title'];
                  branchesAndPRStrings.push({ branch, prString });
                  baseBranch = pull['base']['ref'];
                  baseBranchSHA = pull['base']['sha'];
                }
              }
            }
            if (branchesAndPRStrings.length == 0) {
              core.setFailed('No PRs/branches matched criteria');
              return;
            }
            try {
              await github.rest.git.createRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
                ref: 'refs/heads/' + '${{ github.event.inputs.combineBranchName }}',
                sha: baseBranchSHA
              });
            } catch (error) {
              console.log(error);
              core.setFailed('Failed to create combined branch - maybe a branch by that name already exists?');
              return;
            }
            let combinedPRs = [];
            let mergeFailedPRs = [];
            for(const { branch, prString } of branchesAndPRStrings) {
              try {
                await github.rest.repos.merge({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  base: '${{ github.event.inputs.combineBranchName }}',
                  head: branch,
                });
                console.log('Merged branch ' + branch);
                combinedPRs.push(prString);
              } catch (error) {
                console.log('Failed to merge branch ' + branch);
                mergeFailedPRs.push(prString);
              }
            }
            console.log('Creating combined PR');
            const combinedPRsString = combinedPRs.join('\n');
            let body = '✅ This PR was created by the Combine PRs action by combining the following PRs:\n' + combinedPRsString;
            if(mergeFailedPRs.length > 0) {
              const mergeFailedPRsString = mergeFailedPRs.join('\n');
              body += '\n\n⚠ The following PRs were left out due to merge conflicts:\n' + mergeFailedPRsString
            }
            await github.rest.pulls.create({
              owner: context.repo.owner,
              repo: context.repo.repo,
              title: 'Combined PR',
              head: '${{ github.event.inputs.combineBranchName }}',
              base: baseBranch,
              body: body
            });
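Because the workflow above only defines a `workflow_dispatch` trigger, it has to be started manually. As a rough sketch, assuming the file is committed as `.github/workflows/combine-prs.yml`, it can be started from the Actions tab or with the `gh` CLI:

```bash
# Trigger the manual workflow and override its default inputs.
# The input values below are only examples.
gh workflow run combine-prs.yml \
  -f branchPrefix=dependabot \
  -f mustBeGreen=true \
  -f combineBranchName=combine-prs-branch \
  -f ignoreLabel=nocombine
```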


@@ -9,7 +9,7 @@ jobs:
strategy:
fail-fast: false
matrix:
go-version: [1.18.x, 1.19.x]
go-version: [stable, oldstable]
platform: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.platform }}
env:
@@ -21,7 +21,7 @@ jobs:
-
name: Install Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}


@@ -21,7 +21,7 @@ jobs:
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@v4.3.0
uses: docker/metadata-action@v4.4.0
with:
images: |
ghcr.io/tus/tusd
@@ -35,7 +35,7 @@ jobs:
-
name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2.4.1
uses: docker/setup-buildx-action@v2.5.0
with:
install: true
@@ -81,10 +81,10 @@ jobs:
uses: actions/checkout@v3
-
name: Install Go 1.19
uses: actions/setup-go@v3
name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.19.5'
go-version: 'stable'
-
name: Build TUSD
@@ -105,7 +105,7 @@ jobs:
-
name: Deploy to heroku
uses: akhileshns/heroku-deploy@v3.12.13
uses: akhileshns/heroku-deploy@v3.12.14
with:
heroku_api_key: ${{secrets.HEROKU_API_KEY}}
heroku_app_name: ${{secrets.HEROKU_APP_NAME}}


@@ -1,4 +1,4 @@
FROM golang:1.20.1-alpine AS builder
FROM --platform=$BUILDPLATFORM golang:1.20.4-alpine AS builder
WORKDIR /go/src/github.com/tus/tusd
# Add gcc and libc-dev early so it is cached
@@ -19,13 +19,17 @@ COPY pkg/ ./pkg/
ARG GIT_VERSION
ARG GIT_COMMIT
# Get the operating system and architecture to build for
ARG TARGETOS
ARG TARGETARCH
RUN set -xe \
&& GOOS=linux GOARCH=amd64 go build \
&& GOOS=$TARGETOS GOARCH=$TARGETARCH go build \
-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${GIT_VERSION} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${GIT_COMMIT} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
-o /go/bin/tusd ./cmd/tusd/main.go
# start a new stage that copies in the binary built in the previous stage
FROM alpine:3.17.2
FROM alpine:3.18.0
WORKDIR /srv/tusd-data
COPY ./docker/entrypoint.sh /usr/local/share/docker-entrypoint.sh
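The `--platform=$BUILDPLATFORM` base image together with the `TARGETOS` and `TARGETARCH` build arguments lets Docker Buildx cross-compile the Go binary for each requested target. A rough sketch of a multi-platform build (image tag, platform list, and version arguments are only examples):

```bash
# Build (and push) images for two architectures in one Buildx invocation.
# TARGETOS and TARGETARCH are filled in automatically for every platform.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg GIT_VERSION=$(git describe --tags --always) \
  --build-arg GIT_COMMIT=$(git rev-parse HEAD) \
  -t example/tusd:latest \
  --push .
```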


@@ -18,6 +18,7 @@ var Flags struct {
ShowGreeting bool
DisableDownload bool
DisableTermination bool
DisableCors bool
Timeout int64
S3Bucket string
S3ObjectPrefix string
@@ -72,6 +73,7 @@ func ParseFlags() {
flag.BoolVar(&Flags.ShowGreeting, "show-greeting", true, "Show the greeting message")
flag.BoolVar(&Flags.DisableDownload, "disable-download", false, "Disable the download endpoint")
flag.BoolVar(&Flags.DisableTermination, "disable-termination", false, "Disable the termination endpoint")
flag.BoolVar(&Flags.DisableCors, "disable-cors", false, "Disable CORS headers")
flag.Int64Var(&Flags.Timeout, "timeout", 6*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")
flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
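With the flag wired up, CORS handling can be switched off when starting the server. A short usage sketch (upload directory and port are only examples):

```bash
# Run tusd without emitting CORS headers, e.g. when a reverse proxy in
# front of tusd already adds the appropriate Access-Control-* headers.
tusd -upload-dir ./data -port 1080 -disable-cors
```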


@@ -29,6 +29,7 @@ func Serve() {
RespectForwardedHeaders: Flags.BehindProxy,
DisableDownload: Flags.DisableDownload,
DisableTermination: Flags.DisableTermination,
DisableCors: Flags.DisableCors,
StoreComposer: Composer,
NotifyCompleteUploads: true,
NotifyTerminatedUploads: true,


@@ -11,11 +11,11 @@ When a specific action happens during an upload (pre-create, post-receive, post-
## Non-Blocking Hooks
If not otherwise noted, all hooks are invoked in a *non-blocking* way, meaning that tusd will not wait until the hook process has finished and exited. Therefore, the hook process is not able to influence how tusd may continue handling the current request, regardless of which exit code it may set. Furthermore, the hook process' stdout and stderr will be piped to tusd's stdout and stderr correspondingly, allowing one to use these channels for additional logging.
If not otherwise noted, all hooks are invoked in a _non-blocking_ way, meaning that tusd will not wait until the hook process has finished and exited. Therefore, the hook process is not able to influence how tusd may continue handling the current request, regardless of which exit code it may set. Furthermore, the hook process' stdout and stderr will be piped to tusd's stdout and stderr correspondingly, allowing one to use these channels for additional logging.
## Blocking Hooks
On the other hand, there are a few *blocking* hooks, such as those triggered by the `pre-create` and `pre-finish` events. Because their exit code dictates whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep response times low, one should avoid time-consuming operations inside the processes for blocking hooks.
On the other hand, there are a few _blocking_ hooks, such as those triggered by the `pre-create` and `pre-finish` events. Because their exit code dictates whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep response times low, one should avoid time-consuming operations inside the processes for blocking hooks.
### Blocking File Hooks
@@ -58,7 +58,9 @@ This event will be triggered for every running upload to indicate its current pr
The `--hooks-enabled-events` option for the tusd binary works as a whitelist for hook events and takes a comma-separated list of hook events (for instance: `pre-create,post-create`). This can be useful to limit the number of hook executions and save resources if you are only interested in some events. If the `--hooks-enabled-events` option is omitted, all default hook events are enabled (pre-create, post-create, post-receive, post-terminate, post-finish).
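For example, to run hooks only when uploads are created and finished, tusd could be started roughly as follows (the hook directory path is just an example):

```bash
# Only the pre-create and post-finish hooks will be invoked; all other
# events are ignored, which saves hook executions.
tusd --hooks-dir ./path/to/hooks/ --hooks-enabled-events pre-create,post-finish
```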
## File Hooks
### The Hook Directory
By default, the file hook system is disabled. To enable it, pass the `--hooks-dir` option to the tusd binary. The flag's value will be a path, the **hook directory**, relative to the current working directory, pointing to the folder containing the executable **hook files**:
```bash
@@ -69,13 +71,14 @@ $ tusd --hooks-dir ./path/to/hooks/
...
```
If an event occurs, the tusd binary will look for a file named exactly like the event, which will then be executed, as long as such a file exists. In the example above, the binary `./path/to/hooks/pre-create` will be invoked before an upload is created, which can be used to e.g. validate certain metadata. Please note that in UNIX environments the hook file *must not* have an extension, such as `.sh` or `.py`, or else tusd will not recognize it and will ignore it. On Windows, however, the hook file *must* have an extension, such as `.bat` or `.exe`.
If an event occurs, the tusd binary will look for a file named exactly like the event, which will then be executed, as long as such a file exists. In the example above, the binary `./path/to/hooks/pre-create` will be invoked before an upload is created, which can be used to e.g. validate certain metadata. Please note that in UNIX environments the hook file _must not_ have an extension, such as `.sh` or `.py`, or else tusd will not recognize it and will ignore it. On Windows, however, the hook file _must_ have an extension, such as `.bat` or `.exe`.
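As an illustration, a minimal `pre-create` hook might look like the sketch below. It relies on the `TUS_SIZE` environment variable described in the next section, and the 1 GB limit is only an example:

```bash
#!/bin/sh
# ./path/to/hooks/pre-create - reject uploads that are larger than 1 GB.
# For blocking hooks, a non-zero exit code makes tusd refuse the request.
MAX_SIZE=1073741824

if [ "$TUS_SIZE" -gt "$MAX_SIZE" ]; then
  echo "upload of $TUS_SIZE bytes exceeds the allowed $MAX_SIZE bytes" >&2
  exit 1
fi

exit 0
```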
### The Hook's Environment
The hook file's process is provided with information about the event and the upload using two methods:
* The `TUS_ID` and `TUS_SIZE` environment variables will contain the ID and the size in bytes of the upload which triggered the event. Please be aware that in the `pre-create` hook the upload ID will be an empty string, as the entity has not been created yet and therefore this piece of information is not available.
* On `stdin` a JSON-encoded object can be read which contains more details about the corresponding event in the following format:
- The `TUS_ID` and `TUS_SIZE` environment variables will contain the ID and the size in bytes of the upload which triggered the event. Please be aware that in the `pre-create` hook the upload ID will be an empty string, as the entity has not been created yet and therefore this piece of information is not available.
- On `stdin` a JSON-encoded object can be read which contains more details about the corresponding event in the following format:
```js
{


@@ -216,6 +216,8 @@ $ tusd -help
If set, will listen to a UNIX socket at this location instead of a TCP socket
-upload-dir string
Directory to store uploads in (default "./data")
-disable-cors
Disables CORS headers. If set to true, tusd will not send any CORS-related header. This is useful if you have a proxy sitting in front of tusd that handles CORS (default false)
-verbose
Enable verbose logging output (default true)
-version

go.mod (16 changes)

@@ -6,24 +6,24 @@ module github.com/tus/tusd/v2
go 1.16
require (
cloud.google.com/go/storage v1.29.0
cloud.google.com/go/storage v1.30.1
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/aws/aws-sdk-go v1.44.211
github.com/aws/aws-sdk-go v1.44.273
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
github.com/felixge/fgprof v0.9.2
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/golang/protobuf v1.5.3
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/hashicorp/go-hclog v0.14.1
github.com/hashicorp/go-plugin v1.4.3
github.com/minio/minio-go/v7 v7.0.31 // indirect
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_golang v1.15.1
github.com/sethgrid/pester v1.2.0
github.com/stretchr/testify v1.8.2
github.com/stretchr/testify v1.8.4
github.com/vimeo/go-util v1.4.1
google.golang.org/api v0.111.0
google.golang.org/grpc v1.53.0
google.golang.org/api v0.125.0
google.golang.org/grpc v1.55.0
gopkg.in/Acconut/lockfile.v1 v1.1.0
gopkg.in/h2non/gock.v1 v1.1.2
)

go.sum (524 changes)

File diff suppressed because it is too large


@@ -29,6 +29,9 @@ type Config struct {
// DisableTermination indicates whether the server will refuse termination
// requests of the uploaded file, by not mounting the DELETE handler.
DisableTermination bool
// DisableCors disables CORS headers. If set to true, tusd will not send any CORS-related header.
// This is useful if you have a proxy sitting in front of tusd that handles CORS.
DisableCors bool
// NotifyCompleteUploads indicates whether sending notifications about
// completed uploads using the CompleteUploads channel should be enabled.
NotifyCompleteUploads bool


@@ -96,4 +96,20 @@ func TestCORS(t *testing.T) {
t.Errorf("expected header to contain METHOD but got: %#v", methods)
}
})
SubTest(t, "Disable CORS", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
handler, _ := NewHandler(Config{
StoreComposer: composer,
DisableCors: true,
})
(&httpTest{
Method: "OPTIONS",
ReqHeader: map[string]string{
"Origin": "tus.io",
},
Code: http.StatusOK,
ResHeader: map[string]string{},
}).Run(handler, t)
})
}
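The behaviour covered by this new test can also be verified by hand against a running tusd instance that was started with `-disable-cors`; a quick sketch (host, port, and path are only examples):

```bash
# Expect a 200 OK response that carries no Access-Control-* headers.
curl -i -X OPTIONS -H "Origin: https://tus.io" http://localhost:1080/files/
```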


@@ -158,7 +158,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
header := w.Header()
if origin := r.Header.Get("Origin"); origin != "" {
if origin := r.Header.Get("Origin"); !handler.config.DisableCors && origin != "" {
header.Set("Access-Control-Allow-Origin", origin)
if r.Method == "OPTIONS" {