Compare commits

...

46 Commits

Author SHA1 Message Date
Marius 4b707fac26 docker: Add tools for reproducing load issues 2023-01-15 21:35:08 +01:00
Marius 621de70da4 cli: Switch to memorylocker for filestore 2022-12-07 10:39:04 +01:00
Marius edf8238af9 ci: Disable CGO when building binaries 2022-11-28 22:33:52 +01:00
Marius aff352c413 fixup! Merge branch 'master' of github.com:tus/tusd into v2 2022-10-10 20:54:44 +02:00
Marius 70ba0f5540 Merge branch 'v2' of github.com:tus/tusd into v2 2022-10-10 20:49:03 +02:00
Marius f0faa8e556 Merge branch 'master' of github.com:tus/tusd into v2 2022-10-10 20:48:47 +02:00
jonaskaltenbachz 707f41be2b
cli: Pass headers to gRPC hooks (#813)
Co-authored-by: Kaltenbach, Jonas <jonas.kaltenbach@zoi.de>
2022-09-30 14:51:13 +02:00
Marius 920deb3df7 cli: Add flag to specify progress interval 2022-08-15 23:26:58 +02:00
Marius 870c434485 s3store: Add metrics for demand on upload semaphore 2022-08-11 23:31:33 +02:00
Marius 5d8c2beed3 Merge branch 'master' of github.com:tus/tusd into v2 2022-08-11 22:38:51 +02:00
Marius eec6a14d4a cli: Do not show metrics in greeting, if disabled
Closes https://github.com/tus/tusd/issues/760
2022-08-04 11:39:55 +02:00
Marius be6f50f14f s3store: Only disable signatures for UploadPart, if -s3-disable-content-hashes is used 2022-07-25 13:48:08 +02:00
Marius cab456900a Merge branch 'master' of github.com:tus/tusd into v2 2022-07-25 13:02:05 +02:00
Marius f680b9f1ff s3store: Fix tests by adding ContentLength property 2022-07-11 15:01:24 +02:00
Marius efe8c9ce05 s3store: Add option for switching to Minio SDK 2022-07-11 13:43:09 +02:00
Marius 1038298a79 core: Change GetReader to return io.ReadCloser (#739)
Squashed commit of the following:

commit e8b5b3751a86d86cae10e0bcf89caa481e5c3de6
Author: Marius <marius@transloadit.com>
Date:   Sun Jun 19 12:15:22 2022 +0200

    Fix generated mocks

commit 736e2e7bb6
Merge: 9d7096f 1e69d9b
Author: Stefan Scheidewig <stefan.scheidewig@staffbase.com>
Date:   Sat Jun 18 07:53:29 2022 +0200

    Merge branch 'v2' into readcloser_in_getreader

commit 9d7096fcb3
Author: Stefan Scheidewig <stefan.scheidewig@staffbase.com>
Date:   Tue May 24 14:16:01 2022 +0200

    Return ReadCloser in getReader
2022-06-19 12:18:02 +02:00
Marius 1e69d9ba68 Merge branch 'master' of github.com:tus/tusd into v2 2022-06-17 13:21:59 +02:00
Marius 9ef0b54c7c ci: Update builds to Go 1.18 2022-04-12 09:14:28 +02:00
Marius c0f2026e96 s3store: Implement temporary support for buffering in memory 2022-04-10 21:13:07 +02:00
Marius aace4704d7 cli: Add fgprof and authentication to pprof endpoint 2022-04-10 19:29:50 +02:00
Marius 9508fd3910 Merge branch 'master' of github.com:tus/tusd into v2 2022-04-10 18:24:30 +02:00
Marius afc9f10704 Merge branch 'master' of github.com:tus/tusd into v2 2022-03-21 10:20:04 +01:00
Marius f8e3337948 deps: Fix error when deploying to Heroku
See https://github.com/tus/tusd/runs/5613967985?check_suite_focus=true
2022-03-19 23:50:51 +01:00
Marius 7eae867ec1 azurestore: Work around error being not comparable 2022-03-19 23:41:54 +01:00
Marius e77cc64063 Merge branch 'v2' of github.com:tus/tusd into v2 2022-03-19 23:25:03 +01:00
Marius 211feb9ab9 Merge branch 'master' of github.com:tus/tusd into v2 2022-03-19 23:23:03 +01:00
Marius e52139f977
v2: Implement cancelable lock mechanism (#667)
* handler: Implement prototype of new locking back-end

* memorylocker2: Switch to channel for release notification

* handler: Update locker interface

* handler: Add method to close body with error

* memorylocker: Replace with new implementation

* filelocker: Adjust methods to match interface

* handler: Introduce new httpContext

* handler: Implement upload interruption

* handler: Adjust tests to new interfaces

* handler, memorylocker: Cancel context to avoid leaks
2022-03-19 23:21:17 +01:00
Marius 04e786e81a Merge branch 'master' of github.com:tus/tusd into v2 2022-03-02 00:37:38 +01:00
Marius 12c10bf62f
v2: Rework hooks system (#516)
* ci: Remove plugin hook handler

* Rework error type from interface to struct

* Avoid writing to http.ResponseWriter directly

* Allow hooks to modify response

* Add example for HTTP hooks using Python

* Implement new plugin system using Hashicorp/go-plugin

* Enable returning partial HTTPResponses

* Remove some (unnecessary) error handling

* Forward stdout and stderr from plugin to tusd

* docs: Update examples

* cli: Update filehooks to new system

* cli: Renovate gRPC hooks

* docs: Correct casing of gRPC

* misc: Documentation, better examples, and code structure
2022-03-02 00:36:49 +01:00
Marius a05c090d05 Merge branch 'master' into v2 2022-02-23 16:03:41 +01:00
Marius aca18332d1 core: Add TODO for metrics work 2021-11-15 13:23:59 +01:00
Marius b2273d4153 core: Add error constants
/cc @kvz
2021-10-25 11:54:02 +02:00
Marius c1eddef26a core: Use Go's builtin status code property 2021-10-25 10:38:10 +02:00
Marius 92d704f43f Merge branch 'master' of github.com:tus/tusd into v2 2021-10-25 10:28:51 +02:00
Marius bc51cb05c0 Merge branch 'master' of github.com:tus/tusd into v2 2021-10-19 17:30:55 +02:00
Marius 387b04a2e2 Merge branch 'master' of github.com:tus/tusd into v2 2021-10-15 22:05:01 +02:00
Marius 675e767ee6 Merge branch 'master' into v2 2021-07-01 18:49:09 +02:00
Marius 934265dd38 Merge branch 'master' of github.com:tus/tusd into v2 2021-07-01 18:15:47 +02:00
Marius ccdfe8e604 cli: Add flag to customize the S3 part buffer size 2021-06-21 13:17:36 +02:00
Marius 36f12b1d18 s3store: Fix failing tests due to missing argument 2021-06-21 13:15:21 +02:00
Marius 946539c3b9 cli: Add option to expose Go's pprof 2021-05-28 13:26:13 +02:00
Marius f4314dd360 s3store: Do not register metrics to default registry 2021-05-24 23:45:54 +02:00
Marius 0f24a80ea5 s3store: Expose metrics about request durations 2021-05-24 12:00:20 +02:00
Marius ce54ff8b1f fixup! s3store: Parallelize part uploads and information retrieval (#478) 2021-05-23 12:54:53 +02:00
Marius 578731ab0b cli: Add metric for number of hook invocations 2021-05-20 21:38:19 +02:00
Marius 8fd18364e7
s3store: Parallelize part uploads and information retrieval (#478)
* Add first draft of parallel upload queue

* s3store: Use queue for parallel uploads

* Revert "Add first draft of parallel upload queue"

This reverts commit 86a329cef2.

* Revert "s3store: Use queue for parallel uploads"

This reverts commit 29b59a2c90.

* s3store: Cache results from listing parts and checking incomplete object

* s3store: Remove debugging output

* s3store: Make requests for fetching info concurrently

* s3store: Make parallel uploads work and tests pass

* s3store: Add semaphore package

* s3store: Add comments to semaphore package

* s3store: Encapsulate more logic into s3PartProducer

* s3store: Refactor WriteChunk

* s3store: Remove TODO

* s3store: Acquire lock before uploading

* cli: Add flag for setting concurrency limit

* s3store: One more comment
2021-05-18 10:29:18 +02:00
86 changed files with 4186 additions and 2160 deletions


@@ -9,7 +9,7 @@ jobs:
strategy:
fail-fast: false
matrix:
go-version: [1.16.x, 1.17.x]
go-version: [1.16.x, 1.17.x, 1.18.x]
platform: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.platform }}
env:


@@ -80,10 +80,10 @@ jobs:
uses: actions/checkout@v3
-
name: Install Go 1.17.2
name: Install Go 1.18
uses: actions/setup-go@v3
with:
go-version: '1.17.2'
go-version: '1.18'
-
name: Build TUSD

.gitignore (vendored, 2 changed lines)

@@ -5,4 +5,6 @@ node_modules/
.DS_Store
./tusd
tusd_*_*
__pycache__/
examples/hooks/plugin/hook_handler
.idea/


@@ -14,6 +14,7 @@ RUN set -xe \
COPY cmd/ ./cmd/
COPY internal/ ./internal/
COPY pkg/ ./pkg/
COPY examples/ ./examples/
# Get the version name and git commit as a build argument
ARG GIT_VERSION
@@ -24,6 +25,10 @@ RUN set -xe \
-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${GIT_VERSION} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${GIT_COMMIT} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
-o /go/bin/tusd ./cmd/tusd/main.go
RUN set -xe \
&& GOOS=linux GOARCH=amd64 go build \
-o /go/bin/hooks_handler ./examples/hooks/plugin/hook_handler.go
# start a new stage that copies in the binary built in the previous stage
FROM alpine:3.16.2
WORKDIR /srv/tusd-data
@@ -39,6 +44,7 @@ RUN apk add --no-cache ca-certificates jq bash \
&& chmod +x /usr/local/share/docker-entrypoint.sh /usr/local/share/load-env.sh
COPY --from=builder /go/bin/tusd /usr/local/bin/tusd
COPY --from=builder /go/bin/hooks_handler /usr/local/bin/hooks_handler
EXPOSE 1080
USER tusd


@@ -7,7 +7,6 @@ import (
"strings"
"github.com/tus/tusd/pkg/azurestore"
"github.com/tus/tusd/pkg/filelocker"
"github.com/tus/tusd/pkg/filestore"
"github.com/tus/tusd/pkg/gcsstore"
"github.com/tus/tusd/pkg/handler"
@@ -17,6 +16,11 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/prometheus/client_golang/prometheus"
)
var Composer *handler.StoreComposer
@@ -26,47 +30,76 @@ func CreateComposer() {
// If not, we default to storing them locally on disk.
Composer = handler.NewStoreComposer()
if Flags.S3Bucket != "" {
s3Config := aws.NewConfig()
var s3Api s3store.S3API
if Flags.S3TransferAcceleration {
s3Config = s3Config.WithS3UseAccelerate(true)
}
if Flags.S3DisableContentHashes {
// Prevent the S3 service client from automatically
// adding the Content-MD5 header to S3 Object Put and Upload API calls.
s3Config = s3Config.WithS3DisableContentMD5Validation(true)
}
if Flags.S3DisableSSL {
// Disable HTTPS and only use HTTP (helpful for debugging requests).
s3Config = s3Config.WithDisableSSL(true)
}
if Flags.S3Endpoint == "" {
if !Flags.S3UseMinioSDK {
s3Config := aws.NewConfig()
if Flags.S3TransferAcceleration {
stdout.Printf("Using 's3://%s' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.\n", Flags.S3Bucket)
} else {
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
s3Config = s3Config.WithS3UseAccelerate(true)
}
} else {
stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)
if Flags.S3DisableContentHashes {
// Prevent the S3 service client from automatically
// adding the Content-MD5 header to S3 Object Put and Upload API calls.
//
// Note: For now, we do not set S3DisableContentMD5Validation because when terminating an upload,
// a signature is required. If not present, S3 will complain:
// InvalidRequest: Missing required header for this request: Content-MD5 OR x-amz-checksum-*
// So for now, this flag will only cause hashes to be disabled for the UploadPart operation (see s3store.go).
//s3Config = s3Config.WithS3DisableContentMD5Validation(true)
}
s3Config = s3Config.WithEndpoint(Flags.S3Endpoint).WithS3ForcePathStyle(true)
if Flags.S3DisableSSL {
// Disable HTTPS and only use HTTP (helpful for debugging requests).
s3Config = s3Config.WithDisableSSL(true)
}
if Flags.S3Endpoint == "" {
if Flags.S3TransferAcceleration {
stdout.Printf("Using 's3://%s' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.\n", Flags.S3Bucket)
} else {
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
}
} else {
stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)
s3Config = s3Config.WithEndpoint(Flags.S3Endpoint).WithS3ForcePathStyle(true)
}
// Derive credentials from default credential chain (env, shared, ec2 instance role)
// as per https://github.com/aws/aws-sdk-go#configuring-credentials
s3Api = s3.New(session.Must(session.NewSession()), s3Config)
} else {
core, err := minio.NewCore(Flags.S3Endpoint, &minio.Options{
Creds: credentials.NewEnvAWS(),
Secure: !Flags.S3DisableSSL,
Region: os.Getenv("AWS_REGION"),
})
if err != nil {
stderr.Fatalf("Unable to create Minio SDK: %s\n", err)
}
// TODO: Flags.S3TransferAcceleration
// TODO: Flags.S3DisableContentHashes
s3Api = s3store.NewMinioS3API(core)
}
// Derive credentials from default credential chain (env, shared, ec2 instance role)
// as per https://github.com/aws/aws-sdk-go#configuring-credentials
store := s3store.New(Flags.S3Bucket, s3.New(session.Must(session.NewSession()), s3Config))
store := s3store.New(Flags.S3Bucket, s3Api)
store.ObjectPrefix = Flags.S3ObjectPrefix
store.PreferredPartSize = Flags.S3PartSize
store.MaxBufferedParts = Flags.S3MaxBufferedParts
store.DisableContentHashes = Flags.S3DisableContentHashes
store.SetConcurrentPartUploads(Flags.S3ConcurrentPartUploads)
store.UseIn(Composer)
locker := memorylocker.New()
locker.UseIn(Composer)
// Attach the metrics from S3 store to the global Prometheus registry
// TODO: Do not use the global registry here.
store.RegisterMetrics(prometheus.DefaultRegisterer)
} else if Flags.GCSBucket != "" {
if Flags.GCSObjectPrefix != "" && strings.Contains(Flags.GCSObjectPrefix, "_") {
stderr.Fatalf("gcs-object-prefix value (%s) can't contain underscore. "+
@@ -151,7 +184,9 @@ func CreateComposer() {
store := filestore.New(dir)
store.UseIn(Composer)
locker := filelocker.New(dir)
// TODO: Do not use filelocker here, because it does not implement the lock
// release mechanism yet.
locker := memorylocker.New()
locker.UseIn(Composer)
}


@@ -2,13 +2,8 @@ package cli
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"runtime/pprof"
"strings"
"time"
"github.com/tus/tusd/cmd/tusd/cli/hooks"
)
@@ -28,8 +23,11 @@ var Flags struct {
S3ObjectPrefix string
S3Endpoint string
S3PartSize int64
S3MaxBufferedParts int64
S3DisableContentHashes bool
S3DisableSSL bool
S3ConcurrentPartUploads int
S3UseMinioSDK bool
GCSBucket string
GCSObjectPrefix string
AzStorage string
@@ -38,6 +36,7 @@ var Flags struct {
AzObjectPrefix string
AzEndpoint string
EnabledHooksString string
PluginHookPath string
FileHooksDir string
HttpHooksEndpoint string
HttpHooksForwardHeaders string
@@ -46,20 +45,21 @@ var Flags struct {
GrpcHooksEndpoint string
GrpcHooksRetry int
GrpcHooksBackoff int
HooksStopUploadCode int
PluginHookPath string
EnabledHooks []hooks.HookType
ProgressHooksInterval int64
ShowVersion bool
ExposeMetrics bool
MetricsPath string
ExposePprof bool
PprofPath string
PprofBlockProfileRate int
PprofMutexProfileRate int
BehindProxy bool
VerboseOutput bool
S3TransferAcceleration bool
TLSCertFile string
TLSKeyFile string
TLSMode string
CPUProfile string
}
func ParseFlags() {
@@ -77,8 +77,11 @@ func ParseFlags() {
flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
flag.StringVar(&Flags.S3Endpoint, "s3-endpoint", "", "Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be set)")
flag.Int64Var(&Flags.S3PartSize, "s3-part-size", 50*1024*1024, "Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future)")
flag.Int64Var(&Flags.S3MaxBufferedParts, "s3-max-buffered-parts", 20, "Maximum number of parts that can be buffered locally while waiting to be uploaded to S3 (experimental and may be removed in the future)")
flag.BoolVar(&Flags.S3DisableContentHashes, "s3-disable-content-hashes", false, "Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 for minimized CPU usage (experimental and may be removed in the future)")
flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)")
flag.IntVar(&Flags.S3ConcurrentPartUploads, "s3-concurrent-part-uploads", 10, "Number of concurrent part uploads to S3 (experimental and may be removed in the future)")
flag.BoolVar(&Flags.S3UseMinioSDK, "s3-use-minio-sdk", false, "Use the Minio SDK internally (experimental)")
flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)")
flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names")
flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)")
@@ -87,6 +90,8 @@ func ParseFlags() {
flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names")
flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom endpoint to use for Azure BlockBlob Storage (requires azure-storage to be set)")
flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events")
flag.Int64Var(&Flags.ProgressHooksInterval, "progress-hooks-interval", 1000, "Interval in milliseconds at which the post-receive progress hooks are emitted for each active upload")
flag.StringVar(&Flags.PluginHookPath, "hooks-plugin", "", "Path to a Go plugin for loading hook functions")
flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent")
flag.StringVar(&Flags.HttpHooksForwardHeaders, "hooks-http-forward-headers", "", "List of HTTP request headers to be forwarded from the client request to the hook endpoint")
@@ -95,19 +100,19 @@ func ParseFlags() {
flag.StringVar(&Flags.GrpcHooksEndpoint, "hooks-grpc", "", "A gRPC endpoint to which hook events will be sent")
flag.IntVar(&Flags.GrpcHooksRetry, "hooks-grpc-retry", 3, "Number of times to retry on a server error or network timeout")
flag.IntVar(&Flags.GrpcHooksBackoff, "hooks-grpc-backoff", 1, "Number of seconds to wait before retrying each retry")
flag.IntVar(&Flags.HooksStopUploadCode, "hooks-stop-code", 0, "Return code from post-receive hook which causes tusd to stop and delete the current upload. A zero value means that no uploads will be stopped")
flag.StringVar(&Flags.PluginHookPath, "hooks-plugin", "", "Path to a Go plugin for loading hook functions (only supported on Linux and macOS; highly EXPERIMENTAL and may BREAK in the future)")
flag.BoolVar(&Flags.ShowVersion, "version", false, "Print tusd version information")
flag.BoolVar(&Flags.ExposeMetrics, "expose-metrics", true, "Expose metrics about tusd usage")
flag.StringVar(&Flags.MetricsPath, "metrics-path", "/metrics", "Path under which the metrics endpoint will be accessible")
flag.BoolVar(&Flags.ExposePprof, "expose-pprof", false, "Expose the pprof interface over HTTP for profiling tusd")
flag.StringVar(&Flags.PprofPath, "pprof-path", "/debug/pprof/", "Path under which the pprof endpoint will be accessible")
flag.IntVar(&Flags.PprofBlockProfileRate, "pprof-block-profile-rate", 0, "Fraction of goroutine blocking events that are reported in the blocking profile")
flag.IntVar(&Flags.PprofMutexProfileRate, "pprof-mutex-profile-rate", 0, "Fraction of mutex contention events that are reported in the mutex profile")
flag.BoolVar(&Flags.BehindProxy, "behind-proxy", false, "Respect X-Forwarded-* and similar headers which may be set by proxies")
flag.BoolVar(&Flags.VerboseOutput, "verbose", true, "Enable verbose logging output")
flag.BoolVar(&Flags.S3TransferAcceleration, "s3-transfer-acceleration", false, "Use AWS S3 transfer acceleration endpoint (requires -s3-bucket option and Transfer Acceleration property on S3 bucket to be set)")
flag.StringVar(&Flags.TLSCertFile, "tls-certificate", "", "Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.")
flag.StringVar(&Flags.TLSKeyFile, "tls-key", "", "Path to the file containing the key for the TLS certificate.")
flag.StringVar(&Flags.TLSMode, "tls-mode", "tls12", "Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong.")
flag.StringVar(&Flags.CPUProfile, "cpuprofile", "", "write cpu profile to file")
flag.Parse()
SetEnabledHooks()
@@ -115,20 +120,6 @@ func ParseFlags() {
if Flags.FileHooksDir != "" {
Flags.FileHooksDir, _ = filepath.Abs(Flags.FileHooksDir)
}
if Flags.CPUProfile != "" {
f, err := os.Create(Flags.CPUProfile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
go func() {
<-time.After(20 * time.Second)
pprof.StopCPUProfile()
fmt.Println("Stopped CPU profile")
}()
}
}
func SetEnabledHooks() {


@@ -1,7 +1,6 @@
package cli
import (
"fmt"
"strconv"
"strings"
@@ -20,27 +19,12 @@ func hookTypeInSlice(a hooks.HookType, list []hooks.HookType) bool {
return false
}
func hookCallback(typ hooks.HookType, info handler.HookEvent) error {
if output, err := invokeHookSync(typ, info, true); err != nil {
if hookErr, ok := err.(hooks.HookError); ok {
return hooks.NewHookError(
fmt.Errorf("%s hook failed: %s", typ, err),
hookErr.StatusCode(),
hookErr.Body(),
)
}
return fmt.Errorf("%s hook failed: %s\n%s", typ, err, string(output))
}
return nil
func preCreateCallback(event handler.HookEvent) (handler.HTTPResponse, error) {
return invokeHookSync(hooks.HookPreCreate, event)
}
func preCreateCallback(info handler.HookEvent) error {
return hookCallback(hooks.HookPreCreate, info)
}
func preFinishCallback(info handler.HookEvent) error {
return hookCallback(hooks.HookPreFinish, info)
func preFinishCallback(event handler.HookEvent) (handler.HTTPResponse, error) {
return invokeHookSync(hooks.HookPreFinish, event)
}
func SetupHookMetrics() {
@@ -50,6 +34,12 @@ func SetupHookMetrics() {
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0)
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0)
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostFinish)).Add(0)
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostTerminate)).Add(0)
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostReceive)).Add(0)
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0)
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0)
}
func SetupPreHooks(config *handler.Config) error {
@@ -107,33 +97,35 @@ func SetupPostHooks(handler *handler.Handler) {
go func() {
for {
select {
case info := <-handler.CompleteUploads:
invokeHookAsync(hooks.HookPostFinish, info)
case info := <-handler.TerminatedUploads:
invokeHookAsync(hooks.HookPostTerminate, info)
case info := <-handler.UploadProgress:
invokeHookAsync(hooks.HookPostReceive, info)
case info := <-handler.CreatedUploads:
invokeHookAsync(hooks.HookPostCreate, info)
case event := <-handler.CompleteUploads:
invokeHookAsync(hooks.HookPostFinish, event)
case event := <-handler.TerminatedUploads:
invokeHookAsync(hooks.HookPostTerminate, event)
case event := <-handler.UploadProgress:
invokeHookAsync(hooks.HookPostReceive, event)
case event := <-handler.CreatedUploads:
invokeHookAsync(hooks.HookPostCreate, event)
}
}
}()
}
func invokeHookAsync(typ hooks.HookType, info handler.HookEvent) {
func invokeHookAsync(typ hooks.HookType, event handler.HookEvent) {
go func() {
// Error handling is taken care of by the function.
_, _ = invokeHookSync(typ, info, false)
_, _ = invokeHookSync(typ, event)
}()
}
func invokeHookSync(typ hooks.HookType, info handler.HookEvent, captureOutput bool) ([]byte, error) {
func invokeHookSync(typ hooks.HookType, event handler.HookEvent) (httpRes handler.HTTPResponse, err error) {
if !hookTypeInSlice(typ, Flags.EnabledHooks) {
return nil, nil
return httpRes, nil
}
id := info.Upload.ID
size := info.Upload.Size
MetricsHookInvocationsTotal.WithLabelValues(string(typ)).Add(1)
id := event.Upload.ID
size := event.Upload.Size
switch typ {
case hooks.HookPostFinish:
@@ -143,28 +135,43 @@ func invokeHookSync(typ hooks.HookType, info handler.HookEvent, captureOutput bo
}
if hookHandler == nil {
return nil, nil
return httpRes, nil
}
name := string(typ)
if Flags.VerboseOutput {
logEv(stdout, "HookInvocationStart", "type", name, "id", id)
logEv(stdout, "HookInvocationStart", "type", string(typ), "id", id)
}
output, returnCode, err := hookHandler.InvokeHook(typ, info, captureOutput)
hookRes, err := hookHandler.InvokeHook(hooks.HookRequest{
Type: typ,
Event: event,
})
if err != nil {
logEv(stderr, "HookInvocationError", "type", string(typ), "id", id, "error", err.Error())
MetricsHookErrorsTotal.WithLabelValues(string(typ)).Add(1)
return httpRes, err
} else if Flags.VerboseOutput {
logEv(stdout, "HookInvocationFinish", "type", string(typ), "id", id)
}
if typ == hooks.HookPostReceive && Flags.HooksStopUploadCode != 0 && Flags.HooksStopUploadCode == returnCode {
logEv(stdout, "HookStopUpload", "id", id)
httpRes = hookRes.HTTPResponse
info.Upload.StopUpload()
// If the hook response includes the instruction to reject the upload, reuse the error code
// and message from ErrUploadRejectedByServer, but also include custom HTTP response values
if typ == hooks.HookPreCreate && hookRes.RejectUpload {
err := handler.ErrUploadRejectedByServer
err.HTTPResponse = err.HTTPResponse.MergeWith(httpRes)
return httpRes, err
}
return output, err
if typ == hooks.HookPostReceive && hookRes.StopUpload {
logEv(stdout, "HookStopUpload", "id", id)
// TODO: Control response for PATCH request
event.Upload.StopUpload()
}
return httpRes, err
}


@@ -3,11 +3,10 @@ package hooks
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"strconv"
"github.com/tus/tusd/pkg/handler"
)
type FileHook struct {
@@ -18,43 +17,50 @@ func (_ FileHook) Setup() error {
return nil
}
func (h FileHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
hookPath := h.Directory + string(os.PathSeparator) + string(typ)
func (h FileHook) InvokeHook(req HookRequest) (res HookResponse, err error) {
hookPath := h.Directory + string(os.PathSeparator) + string(req.Type)
cmd := exec.Command(hookPath)
env := os.Environ()
env = append(env, "TUS_ID="+info.Upload.ID)
env = append(env, "TUS_SIZE="+strconv.FormatInt(info.Upload.Size, 10))
env = append(env, "TUS_OFFSET="+strconv.FormatInt(info.Upload.Offset, 10))
env = append(env, "TUS_ID="+req.Event.Upload.ID)
env = append(env, "TUS_SIZE="+strconv.FormatInt(req.Event.Upload.Size, 10))
env = append(env, "TUS_OFFSET="+strconv.FormatInt(req.Event.Upload.Offset, 10))
jsonInfo, err := json.Marshal(info)
jsonReq, err := json.Marshal(req)
if err != nil {
return nil, 0, err
return res, err
}
reader := bytes.NewReader(jsonInfo)
reader := bytes.NewReader(jsonReq)
cmd.Stdin = reader
cmd.Env = env
cmd.Dir = h.Directory
cmd.Stderr = os.Stderr
// If `captureOutput` is true, this function will return the output (both,
// stderr and stdout), else it will use this process' stdout
var output []byte
if !captureOutput {
cmd.Stdout = os.Stdout
err = cmd.Run()
} else {
output, err = cmd.Output()
}
output, err := cmd.Output()
// Ignore the error, only, if the hook's file could not be found. This usually
// Ignore the error if the hook's file could not be found. This usually
// means that the user is only using a subset of the available hooks.
if os.IsNotExist(err) {
err = nil
return res, nil
}
returnCode := cmd.ProcessState.ExitCode()
// Report error if the exit code was non-zero
if err, ok := err.(*exec.ExitError); ok {
return res, fmt.Errorf("unexpected return code %d from hook endpoint: %s", err.ProcessState.ExitCode(), string(output))
}
return output, returnCode, err
if err != nil {
return res, err
}
// Do not parse the output as JSON if we received no output, to reduce
// possible errors.
if len(output) > 0 {
if err = json.Unmarshal(output, &res); err != nil {
return res, fmt.Errorf("failed to parse hook response: %w, response was: %s", err, string(output))
}
}
return res, nil
}
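
For illustration, here is a minimal sketch of a hook executable that speaks the exchange implemented above: tusd passes the HookRequest as JSON on the hook's stdin (plus the TUS_ID, TUS_SIZE and TUS_OFFSET environment variables) and parses the executable's stdout as a JSON HookResponse. The mirrored struct shapes and JSON field names are assumptions based on Go's default marshaling of the exported fields shown in this diff; the filename rule is purely illustrative.

```go
// Hypothetical "pre-create" hook executable, compiled and placed into the
// directory given by -hooks-dir.
package main

import (
	"encoding/json"
	"os"
)

// Local mirrors of the HookRequest/HookResponse shapes above, reduced to the
// fields this example needs (field names assumed from default JSON marshaling).
type hookRequest struct {
	Type  string
	Event struct {
		Upload struct {
			ID       string
			Size     int64
			MetaData map[string]string
		}
	}
}

type hookResponse struct {
	HTTPResponse struct {
		StatusCode int               `json:",omitempty"`
		Headers    map[string]string `json:",omitempty"`
		Body       string            `json:",omitempty"`
	}
	RejectUpload bool
	StopUpload   bool
}

func main() {
	// tusd writes the HookRequest as JSON to the hook's stdin.
	var req hookRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1) // A non-zero exit code makes tusd treat the hook as failed.
	}

	var res hookResponse
	// Illustrative rule: reject uploads without filename metadata.
	if req.Type == "pre-create" && req.Event.Upload.MetaData["filename"] == "" {
		res.RejectUpload = true
		res.HTTPResponse.StatusCode = 400
		res.HTTPResponse.Body = "filename metadata is required"
	}

	// Non-empty stdout is parsed by FileHook.InvokeHook as a JSON HookResponse.
	json.NewEncoder(os.Stdout).Encode(res)
}
```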


@@ -2,20 +2,19 @@ package hooks
import (
"context"
"net/http"
"time"
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
"github.com/tus/tusd/pkg/handler"
pb "github.com/tus/tusd/pkg/proto/v1"
pb "github.com/tus/tusd/pkg/proto/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
type GrpcHook struct {
Endpoint string
MaxRetries int
Backoff int
Client pb.HookServiceClient
Client pb.HookHandlerClient
}
func (g *GrpcHook) Setup() error {
@@ -31,44 +30,69 @@ func (g *GrpcHook) Setup() error {
if err != nil {
return err
}
g.Client = pb.NewHookServiceClient(conn)
g.Client = pb.NewHookHandlerClient(conn)
return nil
}
func (g *GrpcHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
func (g *GrpcHook) InvokeHook(hookReq HookRequest) (hookRes HookResponse, err error) {
ctx := context.Background()
req := &pb.SendRequest{Hook: marshal(typ, info)}
resp, err := g.Client.Send(ctx, req)
req := marshal(hookReq)
res, err := g.Client.InvokeHook(ctx, req)
if err != nil {
if e, ok := status.FromError(err); ok {
return nil, int(e.Code()), err
}
return nil, 2, err
return hookRes, err
}
if captureOutput {
return resp.Response.GetValue(), 0, err
}
return nil, 0, err
hookRes = unmarshal(res)
return hookRes, nil
}
func marshal(typ HookType, info handler.HookEvent) *pb.Hook {
return &pb.Hook{
Upload: &pb.Upload{
Id: info.Upload.ID,
Size: info.Upload.Size,
SizeIsDeferred: info.Upload.SizeIsDeferred,
Offset: info.Upload.Offset,
MetaData: info.Upload.MetaData,
IsPartial: info.Upload.IsPartial,
IsFinal: info.Upload.IsFinal,
PartialUploads: info.Upload.PartialUploads,
Storage: info.Upload.Storage,
func marshal(hookReq HookRequest) *pb.HookRequest {
event := hookReq.Event
return &pb.HookRequest{
Type: string(hookReq.Type),
Event: &pb.Event{
Upload: &pb.FileInfo{
Id: event.Upload.ID,
Size: event.Upload.Size,
SizeIsDeferred: event.Upload.SizeIsDeferred,
Offset: event.Upload.Offset,
MetaData: event.Upload.MetaData,
IsPartial: event.Upload.IsPartial,
IsFinal: event.Upload.IsFinal,
PartialUploads: event.Upload.PartialUploads,
Storage: event.Upload.Storage,
},
HttpRequest: &pb.HTTPRequest{
Method: event.HTTPRequest.Method,
Uri: event.HTTPRequest.URI,
RemoteAddr: event.HTTPRequest.RemoteAddr,
Header: getHeaders(event.HTTPRequest.Header),
},
},
HttpRequest: &pb.HTTPRequest{
Method: info.HTTPRequest.Method,
Uri: info.HTTPRequest.URI,
RemoteAddr: info.HTTPRequest.RemoteAddr,
},
Name: string(typ),
}
}
func getHeaders(httpHeader http.Header) (hookHeader map[string]string) {
hookHeader = make(map[string]string)
for key, val := range httpHeader {
if key != "" && val != nil && len(val) > 0 {
hookHeader[key] = val[0]
}
}
return hookHeader
}
func unmarshal(res *pb.HookResponse) (hookRes HookResponse) {
hookRes.RejectUpload = res.RejectUpload
hookRes.StopUpload = res.StopUpload
httpRes := res.HttpResponse
if httpRes != nil {
hookRes.HTTPResponse.StatusCode = int(httpRes.StatusCode)
hookRes.HTTPResponse.Headers = httpRes.Headers
hookRes.HTTPResponse.Body = httpRes.Body
}
return hookRes
}


@@ -1,12 +1,58 @@
package hooks
// TODO: Move hooks into a package in /pkg
import (
"github.com/tus/tusd/pkg/handler"
)
// HookHandler is the main interface to be implemented by all hook backends.
type HookHandler interface {
// Setup is invoked once the hook backend is initialized.
Setup() error
InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error)
// InvokeHook is invoked for every hook that is executed. req contains the
// corresponding information about the hook type, the involved upload, and
// the causing HTTP request.
// The return value res allows the hook to stop or reject an upload, as well
// as modify the HTTP response. See the documentation for HookResponse for
// more details.
// If err is not nil, the value of res will be ignored. err should only be
// non-nil if the hook failed to complete successfully.
InvokeHook(req HookRequest) (res HookResponse, err error)
}
// HookRequest contains the information about the hook type, the involved upload,
// and the causing HTTP request.
type HookRequest struct {
// Type is the name of the hook.
Type HookType
// Event contains the involved upload and the causing HTTP request.
Event handler.HookEvent
}
// HookResponse is the response after a hook is executed.
type HookResponse struct {
// HTTPResponse's fields can be filled to modify the HTTP response.
// This is only possible for pre-create, pre-finish and post-receive hooks.
// For other hooks this value is ignored.
// If multiple hooks modify the HTTP response, a later hook may overwrite the
// modified values from a previous hook (e.g. if multiple post-receive hooks
// are executed).
// Example usages: Send an error to the client if RejectUpload/StopUpload are
// set in the pre-create/post-receive hook. Send more information to the client
// in the pre-finish hook.
HTTPResponse handler.HTTPResponse
// RejectUpload will cause the upload to be rejected and not be created during
// the POST request. This value is only respected for pre-create hooks. For other hooks,
// it is ignored. Use the HTTPResponse field to send details about the rejection
// to the client.
RejectUpload bool
// StopUpload will cause the upload to be stopped during a PATCH request.
// This value is only respected for post-receive hooks. For other hooks,
// it is ignored. Use the HTTPResponse field to send details about the stop
// to the client.
StopUpload bool
}
type HookType string
@@ -21,29 +67,3 @@ const (
)
var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish}
type hookDataStore struct {
handler.DataStore
}
type HookError struct {
error
statusCode int
body []byte
}
func NewHookError(err error, statusCode int, body []byte) HookError {
return HookError{err, statusCode, body}
}
func (herr HookError) StatusCode() int {
return herr.statusCode
}
func (herr HookError) Body() []byte {
return herr.body
}
func (herr HookError) Error() string {
return herr.error.Error()
}


@@ -8,8 +8,6 @@ import (
"net/http"
"time"
"github.com/tus/tusd/pkg/handler"
"github.com/sethgrid/pester"
)
@@ -18,35 +16,11 @@ type HttpHook struct {
MaxRetries int
Backoff int
ForwardHeaders []string
client *pester.Client
}
func (_ HttpHook) Setup() error {
return nil
}
func (h HttpHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
jsonInfo, err := json.Marshal(info)
if err != nil {
return nil, 0, err
}
req, err := http.NewRequest("POST", h.Endpoint, bytes.NewBuffer(jsonInfo))
if err != nil {
return nil, 0, err
}
for _, k := range h.ForwardHeaders {
// Lookup the Canonicalised version of the specified header
if vals, ok := info.HTTPRequest.Header[http.CanonicalHeaderKey(k)]; ok {
// but set the case specified by the user
req.Header[k] = vals
}
}
req.Header.Set("Hook-Name", string(typ))
req.Header.Set("Content-Type", "application/json")
// TODO: Can we initialize this in Setup()?
func (h *HttpHook) Setup() error {
// Use linear backoff strategy with the user defined values.
client := pester.New()
client.KeepLog = true
@@ -55,24 +29,51 @@ func (h HttpHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput
return time.Duration(h.Backoff) * time.Second
}
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
defer resp.Body.Close()
h.client = client
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, 0, err
}
if resp.StatusCode >= http.StatusBadRequest {
return body, resp.StatusCode, NewHookError(fmt.Errorf("endpoint returned: %s", resp.Status), resp.StatusCode, body)
}
if captureOutput {
return body, resp.StatusCode, err
}
return nil, resp.StatusCode, err
return nil
}
func (h HttpHook) InvokeHook(hookReq HookRequest) (hookRes HookResponse, err error) {
jsonInfo, err := json.Marshal(hookReq)
if err != nil {
return hookRes, err
}
httpReq, err := http.NewRequest("POST", h.Endpoint, bytes.NewBuffer(jsonInfo))
if err != nil {
return hookRes, err
}
for _, k := range h.ForwardHeaders {
// Lookup the Canonicalised version of the specified header
if vals, ok := hookReq.Event.HTTPRequest.Header[http.CanonicalHeaderKey(k)]; ok {
// but set the case specified by the user
httpReq.Header[k] = vals
}
}
httpReq.Header.Set("Content-Type", "application/json")
httpRes, err := h.client.Do(httpReq)
if err != nil {
return hookRes, err
}
defer httpRes.Body.Close()
httpBody, err := ioutil.ReadAll(httpRes.Body)
if err != nil {
return hookRes, err
}
// Report an error if the response has a non-2XX status code
if httpRes.StatusCode < http.StatusOK || httpRes.StatusCode >= http.StatusMultipleChoices {
return hookRes, fmt.Errorf("unexpected response code from hook endpoint (%d): %s", httpRes.StatusCode, string(httpBody))
}
if err = json.Unmarshal(httpBody, &hookRes); err != nil {
return hookRes, fmt.Errorf("failed to parse hook response: %w", err)
}
return hookRes, nil
}
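
Correspondingly, a sketch of an HTTP hook endpoint matching this exchange: tusd POSTs the HookRequest as JSON and, for any 2XX status, unmarshals the response body into a HookResponse. The struct shapes, the port, and the size limit are illustrative assumptions, not part of this changeset.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// Minimal mirrors of the request/response shapes used by HttpHook above.
type hookRequest struct {
	Type  string
	Event struct {
		Upload struct {
			ID       string
			Size     int64
			MetaData map[string]string
		}
	}
}

type hookResponse struct {
	HTTPResponse struct {
		StatusCode int               `json:",omitempty"`
		Headers    map[string]string `json:",omitempty"`
		Body       string            `json:",omitempty"`
	}
	RejectUpload bool
	StopUpload   bool
}

func main() {
	http.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) {
		var req hookRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		var res hookResponse
		// Illustrative rule: reject uploads above 1 GiB at creation time.
		if req.Type == "pre-create" && req.Event.Upload.Size > 1<<30 {
			res.RejectUpload = true
			res.HTTPResponse.StatusCode = http.StatusRequestEntityTooLarge
			res.HTTPResponse.Body = "upload exceeds the 1 GiB limit"
		}

		// Anything other than a 2XX status makes tusd treat the hook as failed,
		// so errors are reported via RejectUpload/StopUpload instead.
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(res)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```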


@@ -1,69 +1,122 @@
package hooks
import (
"fmt"
"plugin"
"log"
"net/rpc"
"os"
"os/exec"
"github.com/tus/tusd/pkg/handler"
"github.com/hashicorp/go-plugin"
)
type PluginHookHandler interface {
PreCreate(info handler.HookEvent) error
PostCreate(info handler.HookEvent) error
PostReceive(info handler.HookEvent) error
PostFinish(info handler.HookEvent) error
PostTerminate(info handler.HookEvent) error
PreFinish(info handler.HookEvent) error
}
// TODO: When the tusd process stops, the plugin does not get properly killed
// and lives on as a zombie process.
type PluginHook struct {
Path string
handler PluginHookHandler
handlerImpl HookHandler
}
func (h *PluginHook) Setup() error {
p, err := plugin.Open(h.Path)
// We're a host! Start by launching the plugin process.
client := plugin.NewClient(&plugin.ClientConfig{
HandshakeConfig: handshakeConfig,
Plugins: pluginMap,
Cmd: exec.Command(h.Path),
SyncStdout: os.Stdout,
SyncStderr: os.Stderr,
//Logger: logger,
})
//defer client.Kill()
// Connect via RPC
rpcClient, err := client.Client()
if err != nil {
return err
log.Fatal(err)
}
symbol, err := p.Lookup("TusdHookHandler")
// Request the plugin
raw, err := rpcClient.Dispense("hookHandler")
if err != nil {
return err
log.Fatal(err)
}
handler, ok := symbol.(*PluginHookHandler)
if !ok {
return fmt.Errorf("hooks: could not cast TusdHookHandler from %s into PluginHookHandler interface", h.Path)
}
// We should have a HookHandler now! This feels like a normal interface
// implementation but is in fact over an RPC connection.
h.handlerImpl = raw.(HookHandler)
h.handler = *handler
return nil
return h.handlerImpl.Setup()
}
func (h PluginHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
var err error
switch typ {
case HookPostFinish:
err = h.handler.PostFinish(info)
case HookPostTerminate:
err = h.handler.PostTerminate(info)
case HookPostReceive:
err = h.handler.PostReceive(info)
case HookPostCreate:
err = h.handler.PostCreate(info)
case HookPreCreate:
err = h.handler.PreCreate(info)
case HookPreFinish:
err = h.handler.PreFinish(info)
default:
err = fmt.Errorf("hooks: unknown hook named %s", typ)
}
if err != nil {
return nil, 1, err
}
return nil, 0, nil
func (h *PluginHook) InvokeHook(req HookRequest) (HookResponse, error) {
return h.handlerImpl.InvokeHook(req)
}
// handshakeConfigs are used to just do a basic handshake between
// a plugin and host. If the handshake fails, a user friendly error is shown.
// This prevents users from executing bad plugins or executing a plugin
// directory. It is a UX feature, not a security feature.
var handshakeConfig = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "TUSD_PLUGIN",
MagicCookieValue: "yes",
}
// pluginMap is the map of plugins we can dispense.
var pluginMap = map[string]plugin.Plugin{
"hookHandler": &HookHandlerPlugin{},
}
// Here is an implementation that talks over RPC
type HookHandlerRPC struct{ client *rpc.Client }
func (g *HookHandlerRPC) Setup() error {
var res interface{}
err := g.client.Call("Plugin.Setup", new(interface{}), &res)
return err
}
func (g *HookHandlerRPC) InvokeHook(req HookRequest) (res HookResponse, err error) {
err = g.client.Call("Plugin.InvokeHook", req, &res)
return res, err
}
// Here is the RPC server that HookHandlerRPC talks to, conforming to
// the requirements of net/rpc
type HookHandlerRPCServer struct {
// This is the real implementation
Impl HookHandler
}
func (s *HookHandlerRPCServer) Setup(args interface{}, resp *interface{}) error {
return s.Impl.Setup()
}
func (s *HookHandlerRPCServer) InvokeHook(args HookRequest, resp *HookResponse) (err error) {
*resp, err = s.Impl.InvokeHook(args)
return err
}
// This is the implementation of plugin.Plugin so we can serve/consume this
//
// This has two methods: Server must return an RPC server for this plugin
// type. We construct a HookHandlerRPCServer for this.
//
// Client must return an implementation of our interface that communicates
// over an RPC client. We return HookHandlerRPC for this.
//
// Ignore MuxBroker. That is used to create more multiplexed streams on our
// plugin connection and is a more advanced use case.
type HookHandlerPlugin struct {
// Impl Injection
Impl HookHandler
}
func (p *HookHandlerPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
return &HookHandlerRPCServer{Impl: p.Impl}, nil
}
func (HookHandlerPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
return &HookHandlerRPC{client: c}, nil
}
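
For completeness, a sketch of what the corresponding plugin binary could look like (compare the examples/hooks/plugin/hook_handler binary built in the Dockerfile above): it serves a HookHandler implementation over go-plugin's net/rpc transport, using the same handshake values and "hookHandler" plugin map as the host side. The hooks import path and the stop rule are assumptions for illustration.

```go
package main

import (
	"github.com/hashicorp/go-plugin"

	// Assumed import path of the hooks package shown in this diff.
	"github.com/tus/tusd/cmd/tusd/cli/hooks"
)

// MyHookHandler implements hooks.HookHandler and runs inside the plugin process.
type MyHookHandler struct{}

func (MyHookHandler) Setup() error { return nil }

func (MyHookHandler) InvokeHook(req hooks.HookRequest) (res hooks.HookResponse, err error) {
	// Illustrative rule: stop an upload once more than 1 GiB has been received.
	if req.Type == hooks.HookPostReceive && req.Event.Upload.Offset > 1<<30 {
		res.StopUpload = true
	}
	return res, nil
}

func main() {
	// Serve blocks and answers RPC requests from the tusd host process.
	plugin.Serve(&plugin.ServeConfig{
		// Must match the handshakeConfig defined on the host side above.
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "TUSD_PLUGIN",
			MagicCookieValue: "yes",
		},
		Plugins: map[string]plugin.Plugin{
			"hookHandler": &hooks.HookHandlerPlugin{Impl: MyHookHandler{}},
		},
	})
}
```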


@@ -1,77 +0,0 @@
// If this file gets changed, you must recompile the generate package in pkg/proto.
// To do this, install the Go protobuf toolchain as mentioned in
// https://github.com/golang/protobuf#installation.
// Then use following command to recompile it with gRPC support:
// protoc --go_out=plugins=grpc:../../../../../pkg/proto/ v1/hook.proto
// In addition, it may be necessary to update the protobuf or gRPC dependencies as well.
syntax = "proto3";
package v1;
import "google/protobuf/any.proto";
// Uploaded data
message Upload {
// Unique integer identifier of the uploaded file
string id = 1;
// Total file size in bytes specified in the NewUpload call
int64 Size = 2;
// Indicates whether the total file size is deferred until later
bool SizeIsDeferred = 3;
// Offset in bytes (zero-based)
int64 Offset = 4;
map<string, string> metaData = 5;
// Indicates that this is a partial upload which will later be used to form
// a final upload by concatenation. Partial uploads should not be processed
// when they are finished since they are only incomplete chunks of files.
bool isPartial = 6;
// Indicates that this is a final upload
bool isFinal = 7;
// If the upload is a final one (see IsFinal) this will be a non-empty
// ordered slice containing the ids of the uploads of which the final upload
// will consist after concatenation.
repeated string partialUploads = 8;
// Storage contains information about where the data storage saves the upload,
// for example a file path. The available values vary depending on what data
// store is used. This map may also be nil.
map <string, string> storage = 9;
}
message HTTPRequest {
// Method is the HTTP method, e.g. POST or PATCH
string method = 1;
// URI is the full HTTP request URI, e.g. /files/fooo
string uri = 2;
// RemoteAddr contains the network address that sent the request
string remoteAddr = 3;
}
// Hook's data
message Hook {
// Upload contains information about the upload that caused this hook
// to be fired.
Upload upload = 1;
// HTTPRequest contains details about the HTTP request that reached
// tusd.
HTTPRequest httpRequest = 2;
// The hook name
string name = 3;
}
// Request data to send hook
message SendRequest {
// The hook data
Hook hook = 1;
}
// Response that contains data for sended hook
message SendResponse {
// The response of the hook.
google.protobuf.Any response = 1;
}
// The hook service definition.
service HookService {
// Sends a hook
rpc Send (SendRequest) returns (SendResponse) {}
}


@@ -0,0 +1,116 @@
// If this file gets changed, you must recompile the generated package in pkg/proto.
// To do this, install the Go protobuf toolchain as mentioned in
// https://github.com/golang/protobuf#installation.
// Then use the following command to recompile it with gRPC support:
// protoc --go_out=plugins=grpc:../../../../../pkg/proto/ v2/hook.proto
// In addition, it may be necessary to update the protobuf or gRPC dependencies as well.
syntax = "proto3";
package v2;
// HookRequest contains the information about the hook type, the involved upload,
// and the causing HTTP request.
message HookRequest {
// Type is the name of the hook.
string type = 1;
// Event contains the involved upload and the causing HTTP request.
Event event = 2;
}
// Event represents an event from tusd which can be handled by the application.
message Event {
// Upload contains information about the upload that caused this hook
// to be fired.
FileInfo upload = 1;
// HTTPRequest contains details about the HTTP request that reached
// tusd.
HTTPRequest httpRequest = 2;
}
// FileInfo contains information about a single upload resource.
message FileInfo {
// ID is the unique identifier of the upload resource.
string id = 1;
// Total file size in bytes specified in the NewUpload call
int64 size = 2;
// Indicates whether the total file size is deferred until later
bool sizeIsDeferred = 3;
// Offset in bytes (zero-based)
int64 offset = 4;
map<string, string> metaData = 5;
// Indicates that this is a partial upload which will later be used to form
// a final upload by concatenation. Partial uploads should not be processed
// when they are finished since they are only incomplete chunks of files.
bool isPartial = 6;
// Indicates that this is a final upload
bool isFinal = 7;
// If the upload is a final one (see IsFinal) this will be a non-empty
// ordered slice containing the ids of the uploads of which the final upload
// will consist after concatenation.
repeated string partialUploads = 8;
// Storage contains information about where the data storage saves the upload,
// for example a file path. The available values vary depending on what data
// store is used. This map may also be nil.
map <string, string> storage = 9;
}
// HTTPRequest contains basic details of an incoming HTTP request.
message HTTPRequest {
// Method is the HTTP method, e.g. POST or PATCH.
string method = 1;
// URI is the full HTTP request URI, e.g. /files/fooo.
string uri = 2;
// RemoteAddr contains the network address that sent the request.
string remoteAddr = 3;
// Header contains all HTTP headers as present in the HTTP request.
map <string, string> header = 4;
}
// HookResponse is the response after a hook is executed.
message HookResponse {
// HTTPResponse's fields can be filled to modify the HTTP response.
// This is only possible for pre-create, pre-finish and post-receive hooks.
// For other hooks this value is ignored.
// If multiple hooks modify the HTTP response, a later hook may overwrite the
// modified values from a previous hook (e.g. if multiple post-receive hooks
// are executed).
// Example usages: Send an error to the client if RejectUpload/StopUpload are
// set in the pre-create/post-receive hook. Send more information to the client
// in the pre-finish hook.
HTTPResponse httpResponse = 1;
// RejectUpload will cause the upload to be rejected and not be created during
// the POST request. This value is only respected for pre-create hooks. For other hooks,
// it is ignored. Use the HTTPResponse field to send details about the rejection
// to the client.
bool rejectUpload = 2;
// StopUpload will cause the upload to be stopped during a PATCH request.
// This value is only respected for post-receive hooks. For other hooks,
// it is ignored. Use the HTTPResponse field to send details about the stop
// to the client.
bool stopUpload = 3;
}
// HTTPResponse contains basic details of an outgoing HTTP response.
message HTTPResponse {
// StatusCode is status code, e.g. 200 or 400.
int64 statusCode = 1;
// Headers contains additional HTTP headers for the response.
map <string, string> headers = 2;
// Body is the response body.
string body = 3;
}
// The hook service definition.
service HookHandler {
// InvokeHook is invoked for every hook that is executed. HookRequest contains the
// corresponding information about the hook type, the involved upload, and
// the causing HTTP request.
// The return value HookResponse allows the hook to stop or reject an upload, as
// well as modify the HTTP response. See the documentation for HookResponse for more details.
rpc InvokeHook (HookRequest) returns (HookResponse) {}
}
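
A sketch of a standalone gRPC hook server implementing this service, assuming the Go stubs generated from v2/hook.proto by the protoc command above are importable as pb (the same package the GrpcHook client uses). The listen address and the rejection rule are illustrative only.

```go
package main

import (
	"context"
	"log"
	"net"

	pb "github.com/tus/tusd/pkg/proto/v2"
	"google.golang.org/grpc"
)

// hookServer implements the HookHandler service from v2/hook.proto. With stubs
// generated via plugins=grpc as documented above, no Unimplemented*Server
// embedding is required.
type hookServer struct{}

func (hookServer) InvokeHook(ctx context.Context, req *pb.HookRequest) (*pb.HookResponse, error) {
	res := &pb.HookResponse{}

	// Illustrative rule: reject uploads above 1 GiB at creation time.
	if req.Type == "pre-create" && req.Event != nil && req.Event.Upload != nil && req.Event.Upload.Size > 1<<30 {
		res.RejectUpload = true
		res.HttpResponse = &pb.HTTPResponse{
			StatusCode: 413,
			Body:       "upload exceeds the 1 GiB limit",
		}
	}

	return res, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8081")
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer()
	pb.RegisterHookHandlerServer(srv, hookServer{})
	log.Fatal(srv.Serve(lis))
}
```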


@@ -23,11 +23,20 @@ var MetricsHookErrorsTotal = prometheus.NewCounterVec(
[]string{"hooktype"},
)
func SetupMetrics(handler *handler.Handler) {
var MetricsHookInvocationsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "tusd_hook_invocations_total",
Help: "Total number of invocations per hook type.",
},
[]string{"hooktype"},
)
func SetupMetrics(mux *http.ServeMux, handler *handler.Handler) {
prometheus.MustRegister(MetricsOpenConnections)
prometheus.MustRegister(MetricsHookErrorsTotal)
prometheus.MustRegister(MetricsHookInvocationsTotal)
prometheus.MustRegister(prometheuscollector.New(handler.Metrics))
stdout.Printf("Using %s as the metrics path.\n", Flags.MetricsPath)
http.Handle(Flags.MetricsPath, promhttp.Handler())
mux.Handle(Flags.MetricsPath, promhttp.Handler())
}

cmd/tusd/cli/pprof.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package cli
import (
"net/http"
"net/http/pprof"
"os"
"runtime"
"strings"
"github.com/bmizerany/pat"
"github.com/felixge/fgprof"
"github.com/goji/httpauth"
)
func SetupPprof(globalMux *http.ServeMux) {
runtime.SetBlockProfileRate(Flags.PprofBlockProfileRate)
runtime.SetMutexProfileFraction(Flags.PprofMutexProfileRate)
mux := pat.New()
mux.Get("", http.HandlerFunc(pprof.Index))
mux.Get("cmdline", http.HandlerFunc(pprof.Cmdline))
mux.Get("profile", http.HandlerFunc(pprof.Profile))
mux.Get("symbol", http.HandlerFunc(pprof.Symbol))
mux.Get("trace", http.HandlerFunc(pprof.Trace))
mux.Get("fgprof", fgprof.Handler())
var handler http.Handler = mux
auth := os.Getenv("TUSD_PPROF_AUTH")
if auth != "" {
parts := strings.SplitN(auth, ":", 2)
if len(parts) != 2 {
stderr.Fatalf("TUSD_PPROF_AUTH must be two values separated by a colon")
}
handler = httpauth.SimpleBasicAuth(parts[0], parts[1])(mux)
}
globalMux.Handle(Flags.PprofPath, http.StripPrefix(Flags.PprofPath, handler))
}


@@ -34,6 +34,7 @@ func Serve() {
NotifyTerminatedUploads: true,
NotifyUploadProgress: true,
NotifyCreatedUploads: true,
UploadProgressInterval: time.Duration(Flags.ProgressHooksInterval) * time.Millisecond,
}
if err := SetupPreHooks(&config); err != nil {
@@ -60,21 +61,17 @@ func Serve() {
SetupPostHooks(handler)
if Flags.ExposeMetrics {
SetupMetrics(handler)
SetupHookMetrics()
}
stdout.Printf("Supported tus extensions: %s\n", handler.SupportedExtensions())
mux := http.NewServeMux()
if basepath == "/" {
// If the basepath is set to the root path, only install the tusd handler
// and do not show a greeting.
http.Handle("/", http.StripPrefix("/", handler))
mux.Handle("/", http.StripPrefix("/", handler))
} else {
// If a custom basepath is defined, we show a greeting at the root path...
if Flags.ShowGreeting {
http.HandleFunc("/", DisplayGreeting)
mux.HandleFunc("/", DisplayGreeting)
}
// ... and register a route with and without the trailing slash, so we can
@@ -82,8 +79,17 @@ func Serve() {
basepathWithoutSlash := strings.TrimSuffix(basepath, "/")
basepathWithSlash := basepathWithoutSlash + "/"
http.Handle(basepathWithSlash, http.StripPrefix(basepathWithSlash, handler))
http.Handle(basepathWithoutSlash, http.StripPrefix(basepathWithoutSlash, handler))
mux.Handle(basepathWithSlash, http.StripPrefix(basepathWithSlash, handler))
mux.Handle(basepathWithoutSlash, http.StripPrefix(basepathWithoutSlash, handler))
}
if Flags.ExposeMetrics {
SetupMetrics(mux, handler)
SetupHookMetrics()
}
if Flags.ExposePprof {
SetupPprof(mux)
}
var listener net.Listener
@@ -110,14 +116,17 @@ func Serve() {
// If we're not using TLS just start the server and, if http.Serve() returns, just return.
if protocol == "http" {
if err = http.Serve(listener, nil); err != nil {
if err = http.Serve(listener, mux); err != nil {
stderr.Fatalf("Unable to serve: %s", err)
}
return
}
// TODO: Move TLS handling into own file.
// Fall-through for TLS mode.
server := &http.Server{}
server := &http.Server{
Handler: mux,
}
switch Flags.TLSMode {
case TLS13:
server.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS13}

docker/load-tests/1_run-test.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
# Set up traps so that our background job monitoring the containers
# exits when the script completes.
trap "exit" INT TERM ERR
trap "kill 0" EXIT
# 1) Ensure that the containers are up-to-date
docker compose build
# 2) Start the container monitoring
docker stats --format "{{ json . }}" > resource-usage-log.txt &
# 3) Run the actual tests
docker compose up --abort-on-container-exit


@@ -0,0 +1,123 @@
#!/usr/bin/env python3

import json
import re
import matplotlib.pyplot as plt

snapshots = []

with open("./resource-usage-log.txt") as file:
    current_snapshot = None
    for line in file:
        # The lines might contain terminal reset characters before the actual
        # JSON. If so, all resource entries for the current timestamp have been
        # written out, so we add the latest snapshot to our list and continue
        # reading the next entries.
        first_bracket = line.find("{")
        if first_bracket == -1:
            continue

        if first_bracket != 0:
            if current_snapshot is not None:
                snapshots.append(current_snapshot)

            current_snapshot = []
            line = line[first_bracket:]

        current_snapshot.append(json.loads(line))

def parse_percentage(string):
    return float(string.strip('%'))

units = {"B": 1, "kB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12,
         "KiB": 2**10, "MiB": 2**20, "GiB": 2**30, "TiB": 2**40}

def parse_byte_size(size):
    number, unit = re.findall(r'([0-9\.]+)([A-Za-z]+)', size)[0]
    return int(float(number)*units[unit])

def parse_two_bytes(string):
    str1, str2 = string.split("/")
    return parse_byte_size(str1), parse_byte_size(str2)

s3_cpu = []
s3_mem = []
tusd_cpu = []
tusd_mem = []
tusd_net = []
uploader_cpu = []
uploader_mem = []
uploader_net = []
timestamp = []

for (i, snapshot) in enumerate(snapshots):
    a_s3_cpu = None
    a_s3_mem = None
    a_tusd_cpu = None
    a_tusd_mem = None
    a_tusd_net = None
    a_uploader_cpu = None
    a_uploader_mem = None
    a_uploader_net = None

    for entry in snapshot:
        if entry["Name"] == "load-tests-tusd-1":
            a_tusd_cpu = parse_percentage(entry["CPUPerc"])
            a_tusd_mem = parse_two_bytes(entry["MemUsage"])[0]
            a_tusd_net = parse_two_bytes(entry["NetIO"])[0]
        elif entry["Name"] == "load-tests-s3-1":
            a_s3_cpu = parse_percentage(entry["CPUPerc"])
            a_s3_mem = parse_two_bytes(entry["MemUsage"])[0]
        elif entry["Name"] == "load-tests-uploader-1":
            a_uploader_cpu = parse_percentage(entry["CPUPerc"])
            a_uploader_mem = parse_two_bytes(entry["MemUsage"])[0]
            a_uploader_net = parse_two_bytes(entry["NetIO"])[1]

    s3_cpu.append(a_s3_cpu)
    s3_mem.append(a_s3_mem)
    tusd_cpu.append(a_tusd_cpu)
    tusd_mem.append(a_tusd_mem)
    tusd_net.append(a_tusd_net)
    uploader_cpu.append(a_uploader_cpu)
    uploader_mem.append(a_uploader_mem)
    uploader_net.append(a_uploader_net)

    # The docker stats command is hard coded to output stats every 500ms:
    # https://github.com/docker/cli/blob/81c68913e4c2cb058b5a9fd5972e2989d9915b2c/cli/command/container/stats.go#L223
    timestamp.append(0.5 * i)

fig, axs = plt.subplots(3, 3, sharex=True, sharey='row')

axs[0, 0].plot(timestamp, tusd_cpu)
axs[0, 0].set_title('tusd CPU percentage')
axs[0, 0].set(ylabel='CPU perc', xlabel='time')

axs[0, 1].plot(timestamp, s3_cpu)
axs[0, 1].set_title('s3 CPU percentage')
axs[0, 1].set(ylabel='CPU perc', xlabel='time')

axs[0, 2].plot(timestamp, uploader_cpu)
axs[0, 2].set_title('uploader CPU percentage')
axs[0, 2].set(ylabel='CPU perc', xlabel='time')

axs[1, 0].plot(timestamp, tusd_mem)
axs[1, 0].set_title('tusd memory usage')
axs[1, 0].set(ylabel='mem usage', xlabel='time')

axs[1, 1].plot(timestamp, s3_mem)
axs[1, 1].set_title('s3 memory usage')
axs[1, 1].set(ylabel='mem usage', xlabel='time')

axs[1, 2].plot(timestamp, uploader_mem)
axs[1, 2].set_title('uploader memory usage')
axs[1, 2].set(ylabel='mem usage', xlabel='time')

axs[2, 0].plot(timestamp, tusd_net)
axs[2, 0].set_title('tusd network input')
axs[2, 0].set(ylabel='total volume', xlabel='time')

axs[2, 1].axis('off')

axs[2, 2].plot(timestamp, uploader_net)
axs[2, 2].set_title('uploader network output')
axs[2, 2].set(ylabel='total volume', xlabel='time')

# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
    ax.label_outer()

plt.show()


@@ -0,0 +1,3 @@
## Load issues with tusd & S3
This


@@ -0,0 +1,68 @@
version: "3.9"

# TODO:
# - Add service for monitoring tusd
# - Add hooks
# - Use similar configuration as api2

services:
  s3:
    image: minio/minio
    ports:
      - "9000:9000"
      - "9001:9001"
    # Note: Data directory is not persistent on purpose
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
    # deploy:
    #   resources:
    #     limits:
    #       cpus: "2"

  createbucket:
    image: minio/mc
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc config host add s3 http://s3:9000 minioadmin minioadmin;
      /usr/bin/mc mb --ignore-existing s3/tusdtest.transloadit.com;
      sleep infinity;
      "
    depends_on:
      - s3

  tusd:
    build: ../../
    ports:
      - "1080:1080"
    # entrypoint: file /srv/tusdhook/hook_handler
    entrypoint: tusd -s3-bucket "tusdtest.transloadit.com" -s3-endpoint "http://s3:9000" -hooks-plugin=/usr/local/bin/hooks_handler -hooks-enabled-events=pre-create,post-create,post-receive,post-finish -progress-hooks-interval=3000 -max-size=128849018880 -timeout=60000 -s3-disable-content-hashes=true -s3-disable-ssl=true -s3-concurrent-part-uploads=48 -s3-max-buffered-parts=1
    environment:
      AWS_REGION: us-east-1
      AWS_ACCESS_KEY_ID: minioadmin
      AWS_SECRET_ACCESS_KEY: minioadmin
    depends_on:
      - s3
      - createbucket
    volumes:
      - ../../examples/hooks/plugin:/srv/tusdhook
    deploy:
      resources:
        limits:
          cpus: "2"

  uploader:
    build: ./uploader
    # 10 MiB: 10485760
    # 100 MiB: 104857600
    # 1000 MiB: 1048576000
    command: 10485760 50 /dev/shm
    tmpfs:
      - /dev/shm
    depends_on:
      - tusd
    # deploy:
    #   resources:
    #     limits:
    #       cpus: "1"
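
The byte counts in the comments above map to the configuration as follows; this small Python check (an illustration, not a file from the repo) confirms the uploader's 10 MiB default and shows that the `-max-size` value passed to tusd corresponds to 120 GiB.

```python
# Sanity check of the size constants used in the compose file above.
MiB = 2**20
GiB = 2**30

assert 10 * MiB == 10485760        # uploader: 10 MiB test file
assert 100 * MiB == 104857600      # uploader: 100 MiB variant
assert 1000 * MiB == 1048576000    # uploader: 1000 MiB variant
assert 120 * GiB == 128849018880   # tusd: -max-size flag
print("all size comments check out")
```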

View File

@ -0,0 +1,69 @@
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"0.05%","MemUsage":"8.633MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"220B / 0B","PIDs":"9"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"","MemPerc":"0.00%","MemUsage":"0B / 0B","Name":"--","NetIO":"0B / 0B","PIDs":"0"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"0.05%","MemUsage":"8.633MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"220B / 0B","PIDs":"9"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"","MemPerc":"0.00%","MemUsage":"0B / 0B","Name":"--","NetIO":"0B / 0B","PIDs":"0"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"045ce1160eb0","ID":"","MemPerc":"0.00%","MemUsage":"0B / 0B","Name":"--","NetIO":"0B / 0B","PIDs":"0"}
{"BlockIO":"115kB / 4.1kB","CPUPerc":"34.90%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"0.57%","MemUsage":"90.34MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"10.2kB / 3.43kB","PIDs":"13"}
{"BlockIO":"213kB / 4.1kB","CPUPerc":"0.01%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"2.41kB / 1.76kB","PIDs":"2"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"045ce1160eb0","ID":"045ce1160eb0","MemPerc":"0.08%","MemUsage":"12.74MiB / 15.47GiB","Name":"load-tests-tusd-1","NetIO":"90B / 0B","PIDs":"19"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"e4e5ae23b118","ID":"","MemPerc":"0.00%","MemUsage":"0B / 0B","Name":"--","NetIO":"0B / 0B","PIDs":"0"}
{"BlockIO":"115kB / 4.1kB","CPUPerc":"34.90%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"0.57%","MemUsage":"90.34MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"10.2kB / 3.43kB","PIDs":"13"}
{"BlockIO":"213kB / 4.1kB","CPUPerc":"0.01%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"2.41kB / 1.76kB","PIDs":"2"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"045ce1160eb0","ID":"045ce1160eb0","MemPerc":"0.08%","MemUsage":"12.74MiB / 15.47GiB","Name":"load-tests-tusd-1","NetIO":"90B / 0B","PIDs":"19"}
{"BlockIO":"0B / 0B","CPUPerc":"0.00%","Container":"e4e5ae23b118","ID":"","MemPerc":"0.00%","MemUsage":"0B / 0B","Name":"--","NetIO":"0B / 0B","PIDs":"0"}
{"BlockIO":"225kB / 209kB","CPUPerc":"97.82%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"1.34%","MemUsage":"212.4MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"212kB / 128kB","PIDs":"15"}
{"BlockIO":"213kB / 4.1kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"3.76kB / 1.76kB","PIDs":"2"}
{"BlockIO":"0B / 0B","CPUPerc":"17.92%","Container":"045ce1160eb0","ID":"045ce1160eb0","MemPerc":"0.16%","MemUsage":"25.36MiB / 15.47GiB","Name":"load-tests-tusd-1","NetIO":"180kB / 237kB","PIDs":"19"}
{"BlockIO":"0B / 0B","CPUPerc":"0.06%","Container":"e4e5ae23b118","ID":"e4e5ae23b118","MemPerc":"3.75%","MemUsage":"593.7MiB / 15.47GiB","Name":"load-tests-uploader-1","NetIO":"36kB / 54.2kB","PIDs":"51"}
{"BlockIO":"225kB / 209kB","CPUPerc":"97.82%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"1.34%","MemUsage":"212.4MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"212kB / 128kB","PIDs":"15"}
{"BlockIO":"213kB / 4.1kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"3.76kB / 1.76kB","PIDs":"2"}
{"BlockIO":"0B / 0B","CPUPerc":"17.92%","Container":"045ce1160eb0","ID":"045ce1160eb0","MemPerc":"0.16%","MemUsage":"25.36MiB / 15.47GiB","Name":"load-tests-tusd-1","NetIO":"180kB / 237kB","PIDs":"19"}
{"BlockIO":"0B / 0B","CPUPerc":"0.06%","Container":"e4e5ae23b118","ID":"e4e5ae23b118","MemPerc":"3.75%","MemUsage":"593.7MiB / 15.47GiB","Name":"load-tests-uploader-1","NetIO":"36kB / 54.2kB","PIDs":"51"}
{"BlockIO":"373kB / 189MB","CPUPerc":"637.26%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.59%","MemUsage":"568.3MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"254MB / 303kB","PIDs":"28"}
{"BlockIO":"213kB / 4.1kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.26kB / 1.76kB","PIDs":"2"}
{"BlockIO":"0B / 0B","CPUPerc":"55.52%","Container":"045ce1160eb0","ID":"045ce1160eb0","MemPerc":"0.53%","MemUsage":"84.67MiB / 15.47GiB","Name":"load-tests-tusd-1","NetIO":"349MB / 258MB","PIDs":"27"}
{"BlockIO":"0B / 0B","CPUPerc":"17.23%","Container":"e4e5ae23b118","ID":"e4e5ae23b118","MemPerc":"3.59%","MemUsage":"569.1MiB / 15.47GiB","Name":"load-tests-uploader-1","NetIO":"334kB / 349MB","PIDs":"48"}
{"BlockIO":"373kB / 189MB","CPUPerc":"637.26%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.59%","MemUsage":"568.3MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"254MB / 303kB","PIDs":"28"}
{"BlockIO":"213kB / 4.1kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.26kB / 1.76kB","PIDs":"2"}
{"BlockIO":"729kB / 526MB","CPUPerc":"172.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.47%","MemUsage":"550.3MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.46kB / 1.76kB","PIDs":"2"}
{"BlockIO":"729kB / 526MB","CPUPerc":"172.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.47%","MemUsage":"550.3MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.46kB / 1.76kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.27%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.47%","MemUsage":"550.3MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.85kB / 1.76kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.27%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.47%","MemUsage":"550.3MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.85kB / 1.76kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.55%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.96kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.55%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.96kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.96kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.96kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.02%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.96kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.02%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"4.96kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.10%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.10%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.02%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.02%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.00%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"213kB / 16.4kB","CPUPerc":"0.00%","Container":"6f791422d2d9","ID":"6f791422d2d9","MemPerc":"0.01%","MemUsage":"1.168MiB / 15.47GiB","Name":"load-tests-createbucket-1","NetIO":"5.14kB / 1.8kB","PIDs":"2"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.03%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}
{"BlockIO":"733kB / 526MB","CPUPerc":"0.03%","Container":"ae420f5bad2a","ID":"ae420f5bad2a","MemPerc":"3.23%","MemUsage":"511.1MiB / 15.47GiB","Name":"load-tests-s3-1","NetIO":"528MB / 485kB","PIDs":"51"}

View File

@ -0,0 +1,9 @@
FROM alpine:3.16.2
RUN apk add --no-cache curl bash openssl
COPY ./upload.sh /usr/local/share/upload.sh
RUN chmod +x /usr/local/share/upload.sh
ENTRYPOINT [ "/usr/local/share/upload.sh" ]
CMD []

View File

@ -0,0 +1,41 @@
#!/bin/bash
set -e
set -o pipefail

echo $@

if [ -z "$2" ]; then
  echo "USAGE: upload.sh SIZE NUMBER [TEMP DIR]"
  exit 1
fi

size="$1"
number="$2"
directory="${3:-/tmp}"
file="${directory}/${size}.bin"

openssl rand -out "$file" "$size"

# Get upload size in bytes
upload_size=$(stat -c "%s" "$file")
echo "Generated file with size: ${upload_size} bytes."

# Create uploads
for i in $(seq 1 $number); do
  # Note: I wanted to use the new feature for extracting header values
  # (https://daniel.haxx.se/blog/2022/03/24/easier-header-picking-with-curl/)
  # but this is not yet available on the current curl version in Alpine Linux.
  upload_urls[${i}]="$(curl -X POST -H 'Tus-Resumable: 1.0.0' -H "Upload-Length: ${upload_size}" --fail --silent -i http://tusd:1080/files/ | grep -i ^Location: | cut -d: -f2- | sed 's/^ *\(.*\).*/\1/' | tr -d '\r')"
done

# Perform the uploads in parallel
for i in $(seq 1 $number); do
  curl -X PATCH -H 'Tus-Resumable: 1.0.0' -H 'Upload-Offset: 0' -H 'Content-Type: application/offset+octet-stream' --data-binary "@${file}" "${upload_urls[${i}]}" &
  pids[${i}]=$!
done

# Wait for all uploads to complete
for pid in ${pids[*]}; do
  wait $pid
done
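
For reference, the same create-then-upload flow can be expressed in a few lines of Python. This is a sketch under the same assumptions as the script above (a tusd instance reachable at `http://tusd:1080/files/`, and the `requests` package installed); it is not part of the repository.

```python
# Minimal sketch of the two-step tus flow used by upload.sh above.
import os
import requests

TUSD_ENDPOINT = "http://tusd:1080/files/"
data = os.urandom(10 * 2**20)  # 10 MiB of random data, like `openssl rand`

# Step 1: create the upload (POST) and remember the upload URL from Location.
resp = requests.post(TUSD_ENDPOINT, headers={
    "Tus-Resumable": "1.0.0",
    "Upload-Length": str(len(data)),
})
resp.raise_for_status()
upload_url = resp.headers["Location"]

# Step 2: transfer the bytes (PATCH) starting at offset 0.
resp = requests.patch(upload_url, data=data, headers={
    "Tus-Resumable": "1.0.0",
    "Upload-Offset": "0",
    "Content-Type": "application/offset+octet-stream",
})
resp.raise_for_status()
print("uploaded", resp.headers.get("Upload-Offset"), "bytes")
```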

View File

@ -1,5 +1,7 @@
# Hooks
TODO: Update with new details
When integrating tusd into an application, it is important to establish a communication channel between the two components. The tusd binary accomplishes this by providing a system which triggers actions when certain events happen, such as an upload being created or finished. This simple-but-powerful system enables use cases ranging from logging, through validation and authorization, to processing the uploaded files.
When a specific action happens during an upload (pre-create, post-receive, post-finish, or post-terminate), the hook system enables tusd to fire off a specific event. Tusd provides two ways of doing this:
@ -211,9 +213,9 @@ $ # Retrying 5 times with a 2 second backoff
$ tusd --hooks-http http://localhost:8081/write --hooks-http-retry 5 --hooks-http-backoff 2
```
## GRPC Hooks
## gRPC Hooks
GRPC Hooks are the third type of hooks supported by tusd. Like the other hooks, they are disabled by default. To enable them, pass the `--hooks-grpc` option to the tusd binary. The flag's value is a gRPC endpoint to which the tusd binary will send the hook requests:
gRPC Hooks are the third type of hooks supported by tusd. Like the other hooks, they are disabled by default. To enable them, pass the `--hooks-grpc` option to the tusd binary. The flag's value is a gRPC endpoint to which the tusd binary will send the hook requests:
```bash
$ tusd --hooks-grpc localhost:8080

docs/minio.txt Normal file
View File

@ -0,0 +1,3 @@
MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ./minio server data
AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY go run cmd/tusd/main.go -s3-bucket tusdtest.transloadit.com -s3-endpoint http://127.0.0.1:9000 -expose-pprof

examples/README.md Normal file
View File

@ -0,0 +1,11 @@
# Examples
This directory contains the following examples:
- `apache2.conf` is the recommended minimum configuration for an Apache2 proxy in front of tusd.
- `nginx.conf` is the recommended minimum configuration for an Nginx proxy in front of tusd.
- `server/` is an example of how to use the tusd package embedded in your own Go application.
- `hooks/file/` are Bash scripts for file hook implementations.
- `hooks/http/` is a Python HTTP server as the HTTP hook implementation.
- `hooks/grpc/` is a Python gRPC server as the gRPC hook implementation.
- `hooks/plugin/` is a Go plugin usable with the plugin hooks.

examples/hooks/file/post-create Executable file
View File

@ -0,0 +1,14 @@
#!/bin/sh
# This example demonstrates how to read the hook event details
# from environment variables and stdin, and output debug messages.
id="$TUS_ID"
size="$TUS_SIZE"
# We use >&2 to write debugging output to stderr. tusd
# will forward these to its stderr. Any output from the
# hook on stdout will be captured by tusd and interpreted
# as a response.
echo "Upload created with ID ${id} and size ${size}" >&2
cat /dev/stdin | jq . >&2

examples/hooks/file/post-finish Executable file
View File

@ -0,0 +1,11 @@
#!/bin/sh
# This example demonstrates how to read the hook event details
# from environment variables and stdin, and output debug messages.
# We use >&2 to write debugging output to stderr. tusd
# will forward these to its stderr. Any output from the
# hook on stdout will be captured by tusd and interpreted
# as a response.
echo "Upload $TUS_ID ($TUS_SIZE bytes) finished" >&2
cat /dev/stdin | jq . >&2

View File

@ -0,0 +1,15 @@
#!/bin/sh
# This example demonstrates how to read the hook event details
# from environment variables and output debug messages.
id="$TUS_ID"
offset="$TUS_OFFSET"
size="$TUS_SIZE"
progress=$((100 * $offset/$size))
# We use >&2 to write debugging output to stderr. tusd
# will forward these to its stderr. Any output from the
# hook on stdout will be captured by tusd and interpreted
# as a response.
echo "Upload ${id} is at ${progress}% (${offset}/${size})" >&2

View File

@ -0,0 +1,11 @@
#!/bin/sh
# This example demonstrates how to read the hook event details
# from environment variables and stdin, and output debug messages.
# We use >&2 to write debugging output to stderr. tusd
# will forward these to its stderr. Any output from the
# hook on stdout will be captured by tusd and interpreted
# as a response.
echo "Upload $TUS_ID terminated" >&2
cat /dev/stdin | jq . >&2

examples/hooks/file/pre-create Executable file
View File

@ -0,0 +1,37 @@
#!/bin/sh

# This example demonstrates how to read the hook event details
# from stdin, output debug messages, and reject a new upload based
# on custom constraints. Here, an upload will be rejected if the
# filename metadata is missing. Remove the following `exit 0` line
# to activate the constraint:
exit 0

hasFilename="$(cat /dev/stdin | jq '.Event.Upload.MetaData | has("filename")')"

# We use >&2 to write debugging output to stderr. tusd
# will forward these to its stderr. Any output from the
# hook on stdout will be captured by tusd and interpreted
# as a response.
echo "Filename exists: $hasFilename" >&2

if [ "$hasFilename" = "false" ]; then
  # If the condition is not met, output a JSON object on stdout
  # that instructs tusd to reject the upload and respond with a custom
  # HTTP error response.
  cat <<END
  {
    "RejectUpload": true,
    "HTTPResponse": {
      "StatusCode": 400,
      "Body": "no filename provided"
    }
  }
END

  # It is important that the hook exits with code 0. Otherwise, tusd
  # assumes the hook has failed and will print an error message about
  # the hook failure.
  exit 0
fi
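
The same pre-create check can be written in any language that can read stdin and write stdout. Below is a hypothetical Python version of the hook, following the stdin/stdout contract described in the comments above; it is an illustration, not a file from the repo.

```python
#!/usr/bin/env python3
# Sketch of the same pre-create check as a Python file hook: read the event
# from stdin, and reject the upload via a JSON response on stdout if no
# filename metadata is present.
import json
import sys

event = json.load(sys.stdin)
meta = event.get("Event", {}).get("Upload", {}).get("MetaData", {})

if "filename" not in meta:
    json.dump({
        "RejectUpload": True,
        "HTTPResponse": {
            "StatusCode": 400,
            "Body": "no filename provided",
        },
    }, sys.stdout)

# Exit with 0 so tusd does not treat the hook itself as failed.
sys.exit(0)
```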

examples/hooks/file/pre-finish Executable file
View File

@ -0,0 +1,10 @@
#!/bin/sh
# This example demonstrates how to read the hook event details
# from stdin, and output debug messages.
# We use >&2 to write debugging output to stderr. tusd
# will forward these to its stderr. Any output from the
# hook on stdout will be captured by tusd and interpreted
# as a response.
cat /dev/stdin | jq . >&2

View File

@ -0,0 +1,2 @@
hook_pb2.py:
	python -m grpc_tools.protoc --proto_path=../../../cmd/tusd/cli/hooks/proto/v2/ hook.proto --python_out=. --grpc_python_out=.

View File

@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hook.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nhook.proto\x12\x02v2\"5\n\x0bHookRequest\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x18\n\x05\x65vent\x18\x02 \x01(\x0b\x32\t.v2.Event\"K\n\x05\x45vent\x12\x1c\n\x06upload\x18\x01 \x01(\x0b\x32\x0c.v2.FileInfo\x12$\n\x0bhttpRequest\x18\x02 \x01(\x0b\x32\x0f.v2.HTTPRequest\"\xc3\x02\n\x08\x46ileInfo\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x16\n\x0esizeIsDeferred\x18\x03 \x01(\x08\x12\x0e\n\x06offset\x18\x04 \x01(\x03\x12,\n\x08metaData\x18\x05 \x03(\x0b\x32\x1a.v2.FileInfo.MetaDataEntry\x12\x11\n\tisPartial\x18\x06 \x01(\x08\x12\x0f\n\x07isFinal\x18\x07 \x01(\x08\x12\x16\n\x0epartialUploads\x18\x08 \x03(\t\x12*\n\x07storage\x18\t \x03(\x0b\x32\x19.v2.FileInfo.StorageEntry\x1a/\n\rMetaDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cStorageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x9a\x01\n\x0bHTTPRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12\x12\n\nremoteAddr\x18\x03 \x01(\t\x12+\n\x06header\x18\x04 \x03(\x0b\x32\x1b.v2.HTTPRequest.HeaderEntry\x1a-\n\x0bHeaderEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"`\n\x0cHookResponse\x12&\n\x0chttpResponse\x18\x01 \x01(\x0b\x32\x10.v2.HTTPResponse\x12\x14\n\x0crejectUpload\x18\x02 \x01(\x08\x12\x12\n\nstopUpload\x18\x03 \x01(\x08\"\x90\x01\n\x0cHTTPResponse\x12\x12\n\nstatusCode\x18\x01 \x01(\x03\x12.\n\x07headers\x18\x02 \x03(\x0b\x32\x1d.v2.HTTPResponse.HeadersEntry\x12\x0c\n\x04\x62ody\x18\x03 \x01(\t\x1a.\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x32@\n\x0bHookHandler\x12\x31\n\nInvokeHook\x12\x0f.v2.HookRequest\x1a\x10.v2.HookResponse\"\x00\x62\x06proto3')
_HOOKREQUEST = DESCRIPTOR.message_types_by_name['HookRequest']
_EVENT = DESCRIPTOR.message_types_by_name['Event']
_FILEINFO = DESCRIPTOR.message_types_by_name['FileInfo']
_FILEINFO_METADATAENTRY = _FILEINFO.nested_types_by_name['MetaDataEntry']
_FILEINFO_STORAGEENTRY = _FILEINFO.nested_types_by_name['StorageEntry']
_HTTPREQUEST = DESCRIPTOR.message_types_by_name['HTTPRequest']
_HTTPREQUEST_HEADERENTRY = _HTTPREQUEST.nested_types_by_name['HeaderEntry']
_HOOKRESPONSE = DESCRIPTOR.message_types_by_name['HookResponse']
_HTTPRESPONSE = DESCRIPTOR.message_types_by_name['HTTPResponse']
_HTTPRESPONSE_HEADERSENTRY = _HTTPRESPONSE.nested_types_by_name['HeadersEntry']
HookRequest = _reflection.GeneratedProtocolMessageType('HookRequest', (_message.Message,), {
'DESCRIPTOR' : _HOOKREQUEST,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.HookRequest)
})
_sym_db.RegisterMessage(HookRequest)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
'DESCRIPTOR' : _EVENT,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.Event)
})
_sym_db.RegisterMessage(Event)
FileInfo = _reflection.GeneratedProtocolMessageType('FileInfo', (_message.Message,), {
'MetaDataEntry' : _reflection.GeneratedProtocolMessageType('MetaDataEntry', (_message.Message,), {
'DESCRIPTOR' : _FILEINFO_METADATAENTRY,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.FileInfo.MetaDataEntry)
})
,
'StorageEntry' : _reflection.GeneratedProtocolMessageType('StorageEntry', (_message.Message,), {
'DESCRIPTOR' : _FILEINFO_STORAGEENTRY,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.FileInfo.StorageEntry)
})
,
'DESCRIPTOR' : _FILEINFO,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.FileInfo)
})
_sym_db.RegisterMessage(FileInfo)
_sym_db.RegisterMessage(FileInfo.MetaDataEntry)
_sym_db.RegisterMessage(FileInfo.StorageEntry)
HTTPRequest = _reflection.GeneratedProtocolMessageType('HTTPRequest', (_message.Message,), {
'HeaderEntry' : _reflection.GeneratedProtocolMessageType('HeaderEntry', (_message.Message,), {
'DESCRIPTOR' : _HTTPREQUEST_HEADERENTRY,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.HTTPRequest.HeaderEntry)
})
,
'DESCRIPTOR' : _HTTPREQUEST,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.HTTPRequest)
})
_sym_db.RegisterMessage(HTTPRequest)
_sym_db.RegisterMessage(HTTPRequest.HeaderEntry)
HookResponse = _reflection.GeneratedProtocolMessageType('HookResponse', (_message.Message,), {
'DESCRIPTOR' : _HOOKRESPONSE,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.HookResponse)
})
_sym_db.RegisterMessage(HookResponse)
HTTPResponse = _reflection.GeneratedProtocolMessageType('HTTPResponse', (_message.Message,), {
'HeadersEntry' : _reflection.GeneratedProtocolMessageType('HeadersEntry', (_message.Message,), {
'DESCRIPTOR' : _HTTPRESPONSE_HEADERSENTRY,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.HTTPResponse.HeadersEntry)
})
,
'DESCRIPTOR' : _HTTPRESPONSE,
'__module__' : 'hook_pb2'
# @@protoc_insertion_point(class_scope:v2.HTTPResponse)
})
_sym_db.RegisterMessage(HTTPResponse)
_sym_db.RegisterMessage(HTTPResponse.HeadersEntry)
_HOOKHANDLER = DESCRIPTOR.services_by_name['HookHandler']
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _FILEINFO_METADATAENTRY._options = None
  _FILEINFO_METADATAENTRY._serialized_options = b'8\001'
  _FILEINFO_STORAGEENTRY._options = None
  _FILEINFO_STORAGEENTRY._serialized_options = b'8\001'
  _HTTPREQUEST_HEADERENTRY._options = None
  _HTTPREQUEST_HEADERENTRY._serialized_options = b'8\001'
  _HTTPRESPONSE_HEADERSENTRY._options = None
  _HTTPRESPONSE_HEADERSENTRY._serialized_options = b'8\001'
  _HOOKREQUEST._serialized_start=18
  _HOOKREQUEST._serialized_end=71
  _EVENT._serialized_start=73
  _EVENT._serialized_end=148
  _FILEINFO._serialized_start=151
  _FILEINFO._serialized_end=474
  _FILEINFO_METADATAENTRY._serialized_start=379
  _FILEINFO_METADATAENTRY._serialized_end=426
  _FILEINFO_STORAGEENTRY._serialized_start=428
  _FILEINFO_STORAGEENTRY._serialized_end=474
  _HTTPREQUEST._serialized_start=477
  _HTTPREQUEST._serialized_end=631
  _HTTPREQUEST_HEADERENTRY._serialized_start=586
  _HTTPREQUEST_HEADERENTRY._serialized_end=631
  _HOOKRESPONSE._serialized_start=633
  _HOOKRESPONSE._serialized_end=729
  _HTTPRESPONSE._serialized_start=732
  _HTTPRESPONSE._serialized_end=876
  _HTTPRESPONSE_HEADERSENTRY._serialized_start=830
  _HTTPRESPONSE_HEADERSENTRY._serialized_end=876
  _HOOKHANDLER._serialized_start=878
  _HOOKHANDLER._serialized_end=942
# @@protoc_insertion_point(module_scope)

View File

@ -0,0 +1,70 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

import hook_pb2 as hook__pb2


class HookHandlerStub(object):
    """The hook service definition.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.InvokeHook = channel.unary_unary(
                '/v2.HookHandler/InvokeHook',
                request_serializer=hook__pb2.HookRequest.SerializeToString,
                response_deserializer=hook__pb2.HookResponse.FromString,
                )


class HookHandlerServicer(object):
    """The hook service definition.
    """

    def InvokeHook(self, request, context):
        """Sends a hook
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_HookHandlerServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'InvokeHook': grpc.unary_unary_rpc_method_handler(
                    servicer.InvokeHook,
                    request_deserializer=hook__pb2.HookRequest.FromString,
                    response_serializer=hook__pb2.HookResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'v2.HookHandler', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class HookHandler(object):
    """The hook service definition.
    """

    @staticmethod
    def InvokeHook(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/v2.HookHandler/InvokeHook',
            hook__pb2.HookRequest.SerializeToString,
            hook__pb2.HookResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

View File

@ -0,0 +1,57 @@
import grpc
from concurrent import futures
import time

import hook_pb2_grpc as pb2_grpc
import hook_pb2 as pb2

class HookHandler(pb2_grpc.HookHandlerServicer):

    def __init__(self, *args, **kwargs):
        pass

    def InvokeHook(self, hook_request, context):
        # Print data from hook request for debugging
        print('Received hook request:')
        print(hook_request)

        # Prepare hook response structure
        hook_response = pb2.HookResponse()

        # Example: Use the pre-create hook to check if a filename has been supplied
        # using metadata. If not, the upload is rejected with a custom HTTP response.
        if hook_request.type == 'pre-create':
            filename = hook_request.event.upload.metaData['filename']
            if filename == "":
                hook_response.rejectUpload = True
                hook_response.httpResponse.statusCode = 400
                hook_response.httpResponse.body = 'no filename provided'
                hook_response.httpResponse.headers['X-Some-Header'] = 'yes'

        # Example: Use the post-finish hook to print information about a completed upload,
        # including its storage location.
        if hook_request.type == 'post-finish':
            id = hook_request.event.upload.id
            size = hook_request.event.upload.size
            storage = hook_request.event.upload.storage

            print(f'Upload {id} ({size} bytes) is finished. Find the file at:')
            print(storage)

        # Print data of hook response for debugging
        print('Responding with hook response:')
        print(hook_response)
        print('------')
        print('')

        # Return the hook response to send back to tusd
        return hook_response

def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    pb2_grpc.add_HookHandlerServicer_to_server(HookHandler(), server)
    server.add_insecure_port('[::]:8000')
    server.start()
    server.wait_for_termination()

if __name__ == '__main__':
    serve()
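
To try the server above locally, a minimal client sketch is shown below. It is not part of the repo: it assumes the generated `hook_pb2`/`hook_pb2_grpc` modules are importable and that the server is listening on `localhost:8000` as configured in `serve()`.

```python
import grpc

import hook_pb2 as pb2
import hook_pb2_grpc as pb2_grpc

# Connect to the hook server started by serve() above.
channel = grpc.insecure_channel('localhost:8000')
stub = pb2_grpc.HookHandlerStub(channel)

# Send a pre-create event without any metadata. The server should
# answer with rejectUpload = true and a 400 HTTP response.
request = pb2.HookRequest(type='pre-create')
response = stub.InvokeHook(request)
print(response)
```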

View File

@ -0,0 +1,65 @@
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
import json

class HTTPHookHandler(BaseHTTPRequestHandler):

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'Hello! This server only responds to POST requests')

    def do_POST(self):
        # Read entire body as JSON object
        content_length = int(self.headers['Content-Length'])
        request_body = self.rfile.read(content_length)
        hook_request = json.loads(request_body)

        # Print data from hook request for debugging
        print('Received hook request:')
        print(hook_request)

        # Prepare hook response structure
        hook_response = {
            'HTTPResponse': {
                'Headers': {}
            }
        }

        # Example: Use the pre-create hook to check if a filename has been supplied
        # using metadata. If not, the upload is rejected with a custom HTTP response.
        if hook_request['Type'] == 'pre-create':
            metaData = hook_request['Event']['Upload']['MetaData']
            if 'filename' not in metaData:
                hook_response['RejectUpload'] = True
                hook_response['HTTPResponse']['StatusCode'] = 400
                hook_response['HTTPResponse']['Body'] = 'no filename provided'
                hook_response['HTTPResponse']['Headers']['X-Some-Header'] = 'yes'

        # Example: Use the post-finish hook to print information about a completed upload,
        # including its storage location.
        if hook_request['Type'] == 'post-finish':
            id = hook_request['Event']['Upload']['ID']
            size = hook_request['Event']['Upload']['Size']
            storage = hook_request['Event']['Upload']['Storage']

            print(f'Upload {id} ({size} bytes) is finished. Find the file at:')
            print(storage)

        # Print data of hook response for debugging
        print('Responding with hook response:')
        print(hook_response)
        print('------')
        print('')

        # Send the data from the hook response as JSON output
        response_body = json.dumps(hook_response)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(response_body.encode())


httpd = HTTPServer(('localhost', 8000), HTTPHookHandler)
httpd.serve_forever()
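
A quick way to exercise the handler above without running tusd is to POST a hand-crafted hook request to it. The snippet below is an illustration only; it assumes the server is running on `localhost:8000` and that the `requests` package is available.

```python
import requests

# A minimal pre-create event without filename metadata; the handler
# above should reject it with RejectUpload set to True.
fake_request = {
    "Type": "pre-create",
    "Event": {"Upload": {"MetaData": {}}},
}

resp = requests.post("http://localhost:8000/", json=fake_request)
print(resp.json())
```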

View File

@ -0,0 +1,2 @@
hook_handler: hook_handler.go
	go build -o hook_handler ./hook_handler.go

View File

@ -0,0 +1,87 @@
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-plugin"

	"github.com/tus/tusd/cmd/tusd/cli/hooks"
)

// Here is the implementation of our hook handler.
type MyHookHandler struct {
	logger hclog.Logger
}

// Setup is called once the plugin has been loaded by tusd.
func (g *MyHookHandler) Setup() error {
	// Use the log package or the g.logger field to write debug messages.
	// Do not write to stdout directly, as this is used for communication between
	// tusd and the plugin.
	log.Println("MyHookHandler.Setup is invoked")
	return nil
}

// InvokeHook is called for every hook that tusd fires.
func (g *MyHookHandler) InvokeHook(req hooks.HookRequest) (res hooks.HookResponse, err error) {
	log.Println("MyHookHandler.InvokeHook is invoked")

	// // Prepare hook response structure
	// res.HTTPResponse.Headers = make(map[string]string)

	// // Example: Use the pre-create hook to check if a filename has been supplied
	// // using metadata. If not, the upload is rejected with a custom HTTP response.
	// if req.Type == hooks.HookPreCreate {
	// 	if _, ok := req.Event.Upload.MetaData["filename"]; !ok {
	// 		res.RejectUpload = true
	// 		res.HTTPResponse.StatusCode = 400
	// 		res.HTTPResponse.Body = "no filename provided"
	// 		res.HTTPResponse.Headers["X-Some-Header"] = "yes"
	// 	}
	// }

	// // Example: Use the post-finish hook to print information about a completed upload,
	// // including its storage location.
	// if req.Type == hooks.HookPreFinish {
	// 	id := req.Event.Upload.ID
	// 	size := req.Event.Upload.Size
	// 	storage := req.Event.Upload.Storage

	// 	log.Printf("Upload %s (%d bytes) is finished. Find the file at:\n", id, size)
	// 	log.Println(storage)
	// }

	// Return the hook response to tusd.
	return res, nil
}

// handshakeConfigs are used to just do a basic handshake between
// a plugin and tusd. If the handshake fails, a user friendly error is shown.
// This prevents users from executing bad plugins or executing a plugin
// directory. It is a UX feature, not a security feature.
var handshakeConfig = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "TUSD_PLUGIN",
	MagicCookieValue: "yes",
}

func main() {
	// 1. Initialize our handler.
	myHandler := &MyHookHandler{}

	// 2. Construct the plugin map. The key must be "hookHandler".
	var pluginMap = map[string]plugin.Plugin{
		"hookHandler": &hooks.HookHandlerPlugin{Impl: myHandler},
	}

	// 3. Expose the plugin to tusd.
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshakeConfig,
		Plugins:         pluginMap,
	})

	fmt.Println("DOONE")
}

View File

@ -1,8 +0,0 @@
#!/bin/sh
id="$TUS_ID"
offset="$TUS_OFFSET"
size="$TUS_SIZE"
echo "Upload created with ID ${id} and size ${size}"
cat /dev/stdin | jq .

View File

@ -1,4 +0,0 @@
#!/bin/sh
echo "Upload $TUS_ID ($TUS_SIZE bytes) finished"
cat /dev/stdin | jq .

View File

@ -1,8 +0,0 @@
#!/bin/sh
id="$TUS_ID"
offset="$TUS_OFFSET"
size="$TUS_SIZE"
progress=$((100 * $offset/$size))
echo "Upload ${id} is at ${progress}% (${offset}/${size})"

View File

@ -1,4 +0,0 @@
#!/bin/sh
echo "Upload $TUS_ID terminated"
cat /dev/stdin | jq .

View File

@ -1,7 +0,0 @@
#!/bin/sh
filename=$(cat /dev/stdin | jq .Upload.MetaData.filename)
if [ -z "$filename" ]; then
echo "Error: no filename provided"
exit 1
fi

go.mod
View File

@ -2,7 +2,7 @@ module github.com/tus/tusd
// Specify the Go version needed for the Heroku deployment
// See https://github.com/heroku/heroku-buildpack-go#go-module-specifics
// +heroku goVersion go1.16
// +heroku goVersion go1.18
go 1.16
require (
@ -10,9 +10,14 @@ require (
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/aws/aws-sdk-go v1.44.114
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
github.com/felixge/fgprof v0.9.2
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/hashicorp/go-hclog v0.14.1
github.com/hashicorp/go-plugin v1.4.3
github.com/minio/minio-go/v7 v7.0.31 // indirect
github.com/prometheus/client_golang v1.12.2
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
github.com/stretchr/testify v1.8.0

go.sum
View File

@ -170,6 +170,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@ -180,6 +182,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc=
github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRealT5sg=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -195,6 +201,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d h1:lBXNCxVENCipq4D1Is42JVOP4eQjlB8TQ6H69Yx5J9Q=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -267,7 +275,10 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
@ -285,15 +296,25 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK
github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw=
github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM=
github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@ -302,13 +323,20 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@ -318,19 +346,36 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/minio-go/v7 v7.0.31 h1:zsJ3qPDeU3bC5UMVi9HJ4ED0lyEzrNd3iQguglZS5FE=
github.com/minio/minio-go/v7 v7.0.31/go.mod h1:/sjRKkKIA75CKh1iu8E3qBy7ktBmCCDGII0zbXGwbUk=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -362,11 +407,17 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0 h1:X9XMOYjxEfAYSy3xK1DzO5dMkkWhs9E9UCcS1IERx2k=
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0/go.mod h1:Ad7IjTpvzZO8Fl0vh9AzQ+j/jYZfyp2diGwI8m5q+ns=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -407,6 +458,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -442,6 +495,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -531,6 +585,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -540,6 +595,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -583,6 +640,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -595,6 +653,7 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -621,6 +680,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@ -732,6 +792,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -809,6 +870,9 @@ google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 h1:qRu95HZ148xXw+XeZ3dvqe85PxH4X8+jIo0iRPKcEnM=
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
@ -892,6 +956,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@ -0,0 +1,20 @@
// Package semaphore implements a basic semaphore for coordinating and limiting
// non-exclusive, concurrent access.
package semaphore

type Semaphore chan struct{}

// New creates a semaphore with the given concurrency limit.
func New(concurrency int) Semaphore {
	return make(chan struct{}, concurrency)
}

// Acquire will block until the semaphore can be acquired.
func (s Semaphore) Acquire() {
	s <- struct{}{}
}

// Release frees the acquired slot in the semaphore.
func (s Semaphore) Release() {
	<-s
}
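
For orientation, a brief usage sketch of this new package. This is hypothetical caller code, not part of the changeset; the import path is assumed from the repository layout:

package main

import (
	"fmt"
	"sync"

	"github.com/tus/tusd/pkg/semaphore" // assumed import path for the new package
)

func main() {
	// At most two goroutines may enter the guarded section at once.
	sem := semaphore.New(2)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			fmt.Println("processing upload part", n)
		}(i)
	}
	wg.Wait()
}

Because the channel is buffered with the concurrency limit, Acquire blocks exactly when that limit is reached and unblocks as soon as another holder calls Release.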


@ -15,7 +15,6 @@
package azurestore
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
@ -59,8 +58,8 @@ type AzBlob interface {
Delete(ctx context.Context) error
// Upload the blob
Upload(ctx context.Context, body io.ReadSeeker) error
// Download the contents of the blob
Download(ctx context.Context) ([]byte, error)
// Download returns an io.ReadCloser for streaming the contents of the blob
Download(ctx context.Context) (io.ReadCloser, error)
// Get the offset of the blob and its indexes
GetOffset(ctx context.Context) (int64, error)
// Commit the uploaded blocks to the BlockBlob
@ -171,7 +170,7 @@ func (blockBlob *BlockBlob) Upload(ctx context.Context, body io.ReadSeeker) erro
}
// Download the blockBlob from Azure Blob Storage
func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err error) {
func (blockBlob *BlockBlob) Download(ctx context.Context) (io.ReadCloser, error) {
downloadResponse, err := blockBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
// If the file does not exist, it will not return an error, but a 404 status and body
@ -186,15 +185,7 @@ func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err erro
return nil, err
}
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
downloadedData := bytes.Buffer{}
_, err = downloadedData.ReadFrom(bodyStream)
if err != nil {
return nil, err
}
return downloadedData.Bytes(), nil
return downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}), nil
}
func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) {
@ -258,7 +249,7 @@ func (infoBlob *InfoBlob) Upload(ctx context.Context, body io.ReadSeeker) error
}
// Download the infoBlob from Azure Blob Storage
func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
func (infoBlob *InfoBlob) Download(ctx context.Context) (io.ReadCloser, error) {
downloadResponse, err := infoBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
// If the file does not exist, it will not return an error, but a 404 status and body
@ -272,15 +263,7 @@ func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
return nil, err
}
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
downloadedData := bytes.Buffer{}
_, err = downloadedData.ReadFrom(bodyStream)
if err != nil {
return nil, err
}
return downloadedData.Bytes(), nil
return downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}), nil
}
// infoBlob does not utilise offset, so just return 0, nil
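
Since Download now hands back the response body as a stream instead of a buffered byte slice, responsibility for closing moves to the caller. A minimal consumer sketch under that assumption (hypothetical caller code, not part of this diff):

package example

import (
	"context"
	"io"

	"github.com/tus/tusd/pkg/azurestore"
)

// copyBlob streams a blob to dst without buffering the whole object in memory.
func copyBlob(ctx context.Context, blob azurestore.AzBlob, dst io.Writer) error {
	body, err := blob.Download(ctx)
	if err != nil {
		return err
	}
	// The caller now owns the stream and must close it.
	defer body.Close()

	_, err = io.Copy(dst, body)
	return err
}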


@ -96,8 +96,9 @@ func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Uploa
if err != nil {
return nil, err
}
defer data.Close()
if err := json.Unmarshal(data, &info); err != nil {
if err := json.NewDecoder(data).Decode(&info); err != nil {
return nil, err
}
@ -112,8 +113,12 @@ func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Uploa
}
offset, err := blockBlob.GetOffset(ctx)
if err != nil && err != handler.ErrNotFound {
return nil, err
if err != nil {
// Unpack the error and see if it is a handler.ErrNotFound by comparing the
// error code. If it matches, we ignore the error; otherwise, we return it.
if handlerErr, ok := err.(handler.Error); !ok || handlerErr.ErrorCode != handler.ErrNotFound.ErrorCode {
return nil, err
}
}
info.Offset = offset
@ -169,7 +174,7 @@ func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
return info, err
}
if err := json.Unmarshal(data, &info); err != nil {
if err := json.NewDecoder(data).Decode(&info); err != nil {
return info, err
}
@ -178,12 +183,8 @@ func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
}
// Get the uploaded file from the Azure storage
func (upload *AzUpload) GetReader(ctx context.Context) (io.Reader, error) {
b, err := upload.BlockBlob.Download(ctx)
if err != nil {
return nil, err
}
return bytes.NewReader(b), nil
func (upload *AzUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
return upload.BlockBlob.Download(ctx)
}
// Finish the file upload and commit the block list


@ -6,36 +6,37 @@ package azurestore_test
import (
context "context"
gomock "github.com/golang/mock/gomock"
azurestore "github.com/tus/tusd/pkg/azurestore"
io "io"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
azurestore "github.com/tus/tusd/pkg/azurestore"
)
// MockAzService is a mock of AzService interface
// MockAzService is a mock of AzService interface.
type MockAzService struct {
ctrl *gomock.Controller
recorder *MockAzServiceMockRecorder
}
// MockAzServiceMockRecorder is the mock recorder for MockAzService
// MockAzServiceMockRecorder is the mock recorder for MockAzService.
type MockAzServiceMockRecorder struct {
mock *MockAzService
}
// NewMockAzService creates a new mock instance
// NewMockAzService creates a new mock instance.
func NewMockAzService(ctrl *gomock.Controller) *MockAzService {
mock := &MockAzService{ctrl: ctrl}
mock.recorder = &MockAzServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockAzService) EXPECT() *MockAzServiceMockRecorder {
return m.recorder
}
// NewBlob mocks base method
// NewBlob mocks base method.
func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.AzBlob, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewBlob", arg0, arg1)
@ -44,36 +45,36 @@ func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.A
return ret0, ret1
}
// NewBlob indicates an expected call of NewBlob
// NewBlob indicates an expected call of NewBlob.
func (mr *MockAzServiceMockRecorder) NewBlob(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlob", reflect.TypeOf((*MockAzService)(nil).NewBlob), arg0, arg1)
}
// MockAzBlob is a mock of AzBlob interface
// MockAzBlob is a mock of AzBlob interface.
type MockAzBlob struct {
ctrl *gomock.Controller
recorder *MockAzBlobMockRecorder
}
// MockAzBlobMockRecorder is the mock recorder for MockAzBlob
// MockAzBlobMockRecorder is the mock recorder for MockAzBlob.
type MockAzBlobMockRecorder struct {
mock *MockAzBlob
}
// NewMockAzBlob creates a new mock instance
// NewMockAzBlob creates a new mock instance.
func NewMockAzBlob(ctrl *gomock.Controller) *MockAzBlob {
mock := &MockAzBlob{ctrl: ctrl}
mock.recorder = &MockAzBlobMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockAzBlob) EXPECT() *MockAzBlobMockRecorder {
return m.recorder
}
// Commit mocks base method
// Commit mocks base method.
func (m *MockAzBlob) Commit(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit", arg0)
@ -81,13 +82,13 @@ func (m *MockAzBlob) Commit(arg0 context.Context) error {
return ret0
}
// Commit indicates an expected call of Commit
// Commit indicates an expected call of Commit.
func (mr *MockAzBlobMockRecorder) Commit(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockAzBlob)(nil).Commit), arg0)
}
// Delete mocks base method
// Delete mocks base method.
func (m *MockAzBlob) Delete(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
@ -95,28 +96,28 @@ func (m *MockAzBlob) Delete(arg0 context.Context) error {
return ret0
}
// Delete indicates an expected call of Delete
// Delete indicates an expected call of Delete.
func (mr *MockAzBlobMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAzBlob)(nil).Delete), arg0)
}
// Download mocks base method
func (m *MockAzBlob) Download(arg0 context.Context) ([]byte, error) {
// Download mocks base method.
func (m *MockAzBlob) Download(arg0 context.Context) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Download", arg0)
ret0, _ := ret[0].([]byte)
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Download indicates an expected call of Download
// Download indicates an expected call of Download.
func (mr *MockAzBlobMockRecorder) Download(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockAzBlob)(nil).Download), arg0)
}
// GetOffset mocks base method
// GetOffset mocks base method.
func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetOffset", arg0)
@ -125,13 +126,13 @@ func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
return ret0, ret1
}
// GetOffset indicates an expected call of GetOffset
// GetOffset indicates an expected call of GetOffset.
func (mr *MockAzBlobMockRecorder) GetOffset(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOffset", reflect.TypeOf((*MockAzBlob)(nil).GetOffset), arg0)
}
// Upload mocks base method
// Upload mocks base method.
func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upload", arg0, arg1)
@ -139,7 +140,7 @@ func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
return ret0
}
// Upload indicates an expected call of Upload
// Upload indicates an expected call of Upload.
func (mr *MockAzBlobMockRecorder) Upload(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockAzBlob)(nil).Upload), arg0, arg1)


@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"errors"
"io"
"testing"
"github.com/Azure/azure-storage-blob-go/azblob"
@ -153,7 +154,7 @@ func TestGetUpload(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
)
@ -189,7 +190,7 @@ func TestGetUploadTooLargeBlob(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
@ -246,10 +247,10 @@ func TestGetReader(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
blockBlob.EXPECT().Download(ctx).Return([]byte(mockReaderData), nil).Times(1),
blockBlob.EXPECT().Download(ctx).Return(newReadCloser([]byte(mockReaderData)), nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
@ -286,7 +287,7 @@ func TestWriteChunk(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
blockBlob.EXPECT().Upload(ctx, bytes.NewReader([]byte(mockReaderData))).Return(nil).Times(1),
@ -325,7 +326,7 @@ func TestFinishUpload(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
blockBlob.EXPECT().Commit(ctx).Return(nil).Times(1),
@ -362,7 +363,7 @@ func TestTerminate(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
infoBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
@ -405,7 +406,7 @@ func TestDeclareLength(t *testing.T) {
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
@ -424,3 +425,7 @@ func TestDeclareLength(t *testing.T) {
cancel()
}
func newReadCloser(b []byte) io.ReadCloser {
return io.NopCloser(bytes.NewReader(b))
}


@ -9,6 +9,7 @@
package filelocker
import (
"context"
"os"
"path/filepath"
@ -58,7 +59,8 @@ type fileUploadLock struct {
file lockfile.Lockfile
}
func (lock fileUploadLock) Lock() error {
// TODO: Implement functionality for ctx and requestRelease.
func (lock fileUploadLock) Lock(ctx context.Context, requestRelease func()) error {
err := lock.file.TryLock()
if err == lockfile.ErrBusy {
return handler.ErrFileLocked


@ -1,6 +1,7 @@
package filelocker
import (
"context"
"io/ioutil"
"testing"
@ -21,12 +22,12 @@ func TestFileLocker(t *testing.T) {
lock1, err := locker.NewLock("one")
a.NoError(err)
a.NoError(lock1.Lock())
a.Equal(handler.ErrFileLocked, lock1.Lock())
a.NoError(lock1.Lock(context.TODO(), nil))
a.Equal(handler.ErrFileLocked, lock1.Lock(context.TODO(), nil))
lock2, err := locker.NewLock("one")
a.NoError(err)
a.Equal(handler.ErrFileLocked, lock2.Lock())
a.Equal(handler.ErrFileLocked, lock2.Lock(context.TODO(), nil))
a.NoError(lock1.Unlock())
}


@ -168,7 +168,7 @@ func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.R
return n, err
}
func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) {
func (upload *fileUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
return os.Open(upload.binPath)
}


@ -325,7 +325,7 @@ func (upload gcsUpload) Terminate(ctx context.Context) error {
return nil
}
func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
func (upload gcsUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
id := upload.id
store := upload.store
@ -334,12 +334,7 @@ func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
ID: store.keyWithPrefix(id),
}
r, err := store.Service.ReadObject(ctx, params)
if err != nil {
return nil, err
}
return r, nil
return store.Service.ReadObject(ctx, params)
}
func (store GCSStore) keyWithPrefix(key string) string {


@ -6,36 +6,37 @@ package gcsstore_test
import (
context "context"
gomock "github.com/golang/mock/gomock"
gcsstore "github.com/tus/tusd/pkg/gcsstore"
io "io"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
gcsstore "github.com/tus/tusd/pkg/gcsstore"
)
// MockGCSReader is a mock of GCSReader interface
// MockGCSReader is a mock of GCSReader interface.
type MockGCSReader struct {
ctrl *gomock.Controller
recorder *MockGCSReaderMockRecorder
}
// MockGCSReaderMockRecorder is the mock recorder for MockGCSReader
// MockGCSReaderMockRecorder is the mock recorder for MockGCSReader.
type MockGCSReaderMockRecorder struct {
mock *MockGCSReader
}
// NewMockGCSReader creates a new mock instance
// NewMockGCSReader creates a new mock instance.
func NewMockGCSReader(ctrl *gomock.Controller) *MockGCSReader {
mock := &MockGCSReader{ctrl: ctrl}
mock.recorder = &MockGCSReaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGCSReader) EXPECT() *MockGCSReaderMockRecorder {
return m.recorder
}
// Close mocks base method
// Close mocks base method.
func (m *MockGCSReader) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
@ -43,13 +44,13 @@ func (m *MockGCSReader) Close() error {
return ret0
}
// Close indicates an expected call of Close
// Close indicates an expected call of Close.
func (mr *MockGCSReaderMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockGCSReader)(nil).Close))
}
// ContentType mocks base method
// ContentType mocks base method.
func (m *MockGCSReader) ContentType() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContentType")
@ -57,13 +58,13 @@ func (m *MockGCSReader) ContentType() string {
return ret0
}
// ContentType indicates an expected call of ContentType
// ContentType indicates an expected call of ContentType.
func (mr *MockGCSReaderMockRecorder) ContentType() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContentType", reflect.TypeOf((*MockGCSReader)(nil).ContentType))
}
// Read mocks base method
// Read mocks base method.
func (m *MockGCSReader) Read(arg0 []byte) (int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Read", arg0)
@ -72,13 +73,13 @@ func (m *MockGCSReader) Read(arg0 []byte) (int, error) {
return ret0, ret1
}
// Read indicates an expected call of Read
// Read indicates an expected call of Read.
func (mr *MockGCSReaderMockRecorder) Read(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockGCSReader)(nil).Read), arg0)
}
// Remain mocks base method
// Remain mocks base method.
func (m *MockGCSReader) Remain() int64 {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Remain")
@ -86,13 +87,13 @@ func (m *MockGCSReader) Remain() int64 {
return ret0
}
// Remain indicates an expected call of Remain
// Remain indicates an expected call of Remain.
func (mr *MockGCSReaderMockRecorder) Remain() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remain", reflect.TypeOf((*MockGCSReader)(nil).Remain))
}
// Size mocks base method
// Size mocks base method.
func (m *MockGCSReader) Size() int64 {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Size")
@ -100,36 +101,36 @@ func (m *MockGCSReader) Size() int64 {
return ret0
}
// Size indicates an expected call of Size
// Size indicates an expected call of Size.
func (mr *MockGCSReaderMockRecorder) Size() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockGCSReader)(nil).Size))
}
// MockGCSAPI is a mock of GCSAPI interface
// MockGCSAPI is a mock of GCSAPI interface.
type MockGCSAPI struct {
ctrl *gomock.Controller
recorder *MockGCSAPIMockRecorder
}
// MockGCSAPIMockRecorder is the mock recorder for MockGCSAPI
// MockGCSAPIMockRecorder is the mock recorder for MockGCSAPI.
type MockGCSAPIMockRecorder struct {
mock *MockGCSAPI
}
// NewMockGCSAPI creates a new mock instance
// NewMockGCSAPI creates a new mock instance.
func NewMockGCSAPI(ctrl *gomock.Controller) *MockGCSAPI {
mock := &MockGCSAPI{ctrl: ctrl}
mock.recorder = &MockGCSAPIMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGCSAPI) EXPECT() *MockGCSAPIMockRecorder {
return m.recorder
}
// ComposeObjects mocks base method
// ComposeObjects mocks base method.
func (m *MockGCSAPI) ComposeObjects(arg0 context.Context, arg1 gcsstore.GCSComposeParams) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ComposeObjects", arg0, arg1)
@ -137,13 +138,13 @@ func (m *MockGCSAPI) ComposeObjects(arg0 context.Context, arg1 gcsstore.GCSCompo
return ret0
}
// ComposeObjects indicates an expected call of ComposeObjects
// ComposeObjects indicates an expected call of ComposeObjects.
func (mr *MockGCSAPIMockRecorder) ComposeObjects(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComposeObjects", reflect.TypeOf((*MockGCSAPI)(nil).ComposeObjects), arg0, arg1)
}
// DeleteObject mocks base method
// DeleteObject mocks base method.
func (m *MockGCSAPI) DeleteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteObject", arg0, arg1)
@ -151,13 +152,13 @@ func (m *MockGCSAPI) DeleteObject(arg0 context.Context, arg1 gcsstore.GCSObjectP
return ret0
}
// DeleteObject indicates an expected call of DeleteObject
// DeleteObject indicates an expected call of DeleteObject.
func (mr *MockGCSAPIMockRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObject), arg0, arg1)
}
// DeleteObjectsWithFilter mocks base method
// DeleteObjectsWithFilter mocks base method.
func (m *MockGCSAPI) DeleteObjectsWithFilter(arg0 context.Context, arg1 gcsstore.GCSFilterParams) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteObjectsWithFilter", arg0, arg1)
@ -165,13 +166,13 @@ func (m *MockGCSAPI) DeleteObjectsWithFilter(arg0 context.Context, arg1 gcsstore
return ret0
}
// DeleteObjectsWithFilter indicates an expected call of DeleteObjectsWithFilter
// DeleteObjectsWithFilter indicates an expected call of DeleteObjectsWithFilter.
func (mr *MockGCSAPIMockRecorder) DeleteObjectsWithFilter(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithFilter", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObjectsWithFilter), arg0, arg1)
}
// FilterObjects mocks base method
// FilterObjects mocks base method.
func (m *MockGCSAPI) FilterObjects(arg0 context.Context, arg1 gcsstore.GCSFilterParams) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FilterObjects", arg0, arg1)
@ -180,13 +181,13 @@ func (m *MockGCSAPI) FilterObjects(arg0 context.Context, arg1 gcsstore.GCSFilter
return ret0, ret1
}
// FilterObjects indicates an expected call of FilterObjects
// FilterObjects indicates an expected call of FilterObjects.
func (mr *MockGCSAPIMockRecorder) FilterObjects(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterObjects", reflect.TypeOf((*MockGCSAPI)(nil).FilterObjects), arg0, arg1)
}
// GetObjectSize mocks base method
// GetObjectSize mocks base method.
func (m *MockGCSAPI) GetObjectSize(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetObjectSize", arg0, arg1)
@ -195,13 +196,13 @@ func (m *MockGCSAPI) GetObjectSize(arg0 context.Context, arg1 gcsstore.GCSObject
return ret0, ret1
}
// GetObjectSize indicates an expected call of GetObjectSize
// GetObjectSize indicates an expected call of GetObjectSize.
func (mr *MockGCSAPIMockRecorder) GetObjectSize(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectSize", reflect.TypeOf((*MockGCSAPI)(nil).GetObjectSize), arg0, arg1)
}
// ReadObject mocks base method
// ReadObject mocks base method.
func (m *MockGCSAPI) ReadObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (gcsstore.GCSReader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadObject", arg0, arg1)
@ -210,13 +211,13 @@ func (m *MockGCSAPI) ReadObject(arg0 context.Context, arg1 gcsstore.GCSObjectPar
return ret0, ret1
}
// ReadObject indicates an expected call of ReadObject
// ReadObject indicates an expected call of ReadObject.
func (mr *MockGCSAPIMockRecorder) ReadObject(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadObject", reflect.TypeOf((*MockGCSAPI)(nil).ReadObject), arg0, arg1)
}
// SetObjectMetadata mocks base method
// SetObjectMetadata mocks base method.
func (m *MockGCSAPI) SetObjectMetadata(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 map[string]string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetObjectMetadata", arg0, arg1, arg2)
@ -224,13 +225,13 @@ func (m *MockGCSAPI) SetObjectMetadata(arg0 context.Context, arg1 gcsstore.GCSOb
return ret0
}
// SetObjectMetadata indicates an expected call of SetObjectMetadata
// SetObjectMetadata indicates an expected call of SetObjectMetadata.
func (mr *MockGCSAPIMockRecorder) SetObjectMetadata(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetObjectMetadata", reflect.TypeOf((*MockGCSAPI)(nil).SetObjectMetadata), arg0, arg1, arg2)
}
// WriteObject mocks base method
// WriteObject mocks base method.
func (m *MockGCSAPI) WriteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 io.Reader) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteObject", arg0, arg1, arg2)
@ -239,7 +240,7 @@ func (m *MockGCSAPI) WriteObject(arg0 context.Context, arg1 gcsstore.GCSObjectPa
return ret0, ret1
}
// WriteObject indicates an expected call of WriteObject
// WriteObject indicates an expected call of WriteObject.
func (mr *MockGCSAPIMockRecorder) WriteObject(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteObject", reflect.TypeOf((*MockGCSAPI)(nil).WriteObject), arg0, arg1, arg2)


@ -14,13 +14,15 @@ import (
// In addition, the bodyReader keeps track of how many bytes were read.
type bodyReader struct {
reader io.Reader
closer io.Closer
err error
bytesCounter int64
}
func newBodyReader(r io.Reader) *bodyReader {
func newBodyReader(r io.ReadCloser, maxSize int64) *bodyReader {
return &bodyReader{
reader: r,
reader: io.LimitReader(r, maxSize),
closer: r,
}
}
@ -51,3 +53,8 @@ func (r bodyReader) hasError() error {
func (r *bodyReader) bytesRead() int64 {
return atomic.LoadInt64(&r.bytesCounter)
}
func (r *bodyReader) closeWithError(err error) {
r.closer.Close()
r.err = err
}
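
The size cap comes entirely from io.LimitReader, which yields at most maxSize bytes before reporting io.EOF. A standalone illustration of that standard-library behavior:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.LimitReader stops after N bytes and then reports io.EOF, which is the
	// mechanism newBodyReader uses to cap a request body at maxSize.
	r := io.LimitReader(strings.NewReader("hello world"), 5)
	b, _ := io.ReadAll(r)
	fmt.Printf("%q\n", b) // prints "hello"
}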


@ -1,7 +1,6 @@
package handler_test
import (
"context"
"net/http"
"strings"
"testing"
@ -38,14 +37,14 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(context.Background(), FileInfo{
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
IsPartial: true,
IsFinal: false,
PartialUploads: nil,
MetaData: make(map[string]string),
}).Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
IsPartial: true,
@ -77,8 +76,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
IsPartial: true,
}, nil),
@ -114,26 +113,26 @@ func TestConcat(t *testing.T) {
uploadC := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "a").Return(uploadA, nil),
uploadA.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "a").Return(uploadA, nil),
uploadA.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
IsPartial: true,
Size: 5,
Offset: 5,
}, nil),
store.EXPECT().GetUpload(context.Background(), "b").Return(uploadB, nil),
uploadB.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "b").Return(uploadB, nil),
uploadB.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
IsPartial: true,
Size: 5,
Offset: 5,
}, nil),
store.EXPECT().NewUpload(context.Background(), FileInfo{
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 10,
IsPartial: false,
IsFinal: true,
PartialUploads: []string{"a", "b"},
MetaData: make(map[string]string),
}).Return(uploadC, nil),
uploadC.EXPECT().GetInfo(context.Background()).Return(FileInfo{
uploadC.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 10,
IsPartial: false,
@ -142,7 +141,7 @@ func TestConcat(t *testing.T) {
MetaData: make(map[string]string),
}, nil),
store.EXPECT().AsConcatableUpload(uploadC).Return(uploadC),
uploadC.EXPECT().ConcatUploads(context.Background(), []Upload{uploadA, uploadB}).Return(nil),
uploadC.EXPECT().ConcatUploads(gomock.Any(), []Upload{uploadA, uploadB}).Return(nil),
)
handler, _ := NewHandler(Config{
@ -188,8 +187,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
IsFinal: true,
PartialUploads: []string{"a", "b"},
@ -226,8 +225,8 @@ func TestConcat(t *testing.T) {
// This upload is still unfinished (mismatching offset and size) and
// will therefore cause the POST request to fail.
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "c").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "c").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "c",
IsPartial: true,
Size: 5,
@ -256,8 +255,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "huge").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "huge").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "huge",
Size: 1000,
Offset: 1000,
@ -286,8 +285,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 10,
Offset: 0,


@ -5,6 +5,7 @@ import (
"log"
"net/url"
"os"
"time"
)
// Config provides a way to configure the Handler depending on your needs.
@ -40,6 +41,10 @@ type Config struct {
// NotifyCreatedUploads indicates whether sending notifications about
// the upload having been created using the CreatedUploads channel should be enabled.
NotifyCreatedUploads bool
// UploadProgressInterval specifies the interval at which the upload progress
// notifications are sent to the UploadProgress channel, if enabled.
// Defaults to 1s.
UploadProgressInterval time.Duration
// Logger is the logger to use internally, mostly for printing requests.
Logger *log.Logger
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
@ -47,14 +52,18 @@ type Config struct {
// response to POST requests.
RespectForwardedHeaders bool
// PreUploadCreateCallback will be invoked before a new upload is created, if the
// property is supplied. If the callback returns nil, the upload will be created.
// Otherwise the HTTP request will be aborted. This can be used to implement
// validation of upload metadata etc.
PreUploadCreateCallback func(hook HookEvent) error
// property is supplied. If the callback returns no error, the upload will be created
// and optional values from the returned HTTPResponse will be included in the HTTP response.
// If the error is non-nil, the upload will not be created, the returned HTTPResponse will
// be ignored, and the error value itself can carry values for the HTTP response instead.
// This can be used to implement validation of upload metadata etc.
PreUploadCreateCallback func(hook HookEvent) (HTTPResponse, error)
// PreFinishResponseCallback will be invoked after an upload is completed but before
// a response is returned to the client. Error responses from the callback will be passed
// back to the client. This can be used to implement post-processing validation.
PreFinishResponseCallback func(hook HookEvent) error
// a response is returned to the client. This can be used to implement post-processing validation.
// If the callback returns no error, optional values from the returned HTTPResponse will be included in the HTTP response.
// If the error is non-nil, the error will be forwarded to the client, the returned
// HTTPResponse will be ignored, and the error value itself can carry values for the HTTP response.
PreFinishResponseCallback func(hook HookEvent) (HTTPResponse, error)
}
func (config *Config) validate() error {
@ -88,5 +97,9 @@ func (config *Config) validate() error {
return errors.New("tusd: StoreComposer in Config needs to contain a non-nil core")
}
if config.UploadProgressInterval <= 0 {
config.UploadProgressInterval = 1 * time.Second
}
return nil
}
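
Taken together, a sketch of configuring a handler against the new callback signature. This is hypothetical wiring: the metadata check and error code are chosen purely for illustration, and the HookEvent.Upload field is assumed from the wider package rather than shown in this diff:

package example

import (
	"time"

	"github.com/tus/tusd/pkg/handler"
)

func newConfig(composer *handler.StoreComposer) handler.Config {
	return handler.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
		// Falls back to 1s if left at zero, see validate().
		UploadProgressInterval: 500 * time.Millisecond,
		PreUploadCreateCallback: func(hook handler.HookEvent) (handler.HTTPResponse, error) {
			if _, ok := hook.Upload.MetaData["filename"]; !ok {
				// NewError (added in this changeset) packs the HTTP response details into the error.
				return handler.HTTPResponse{}, handler.NewError("ERR_NO_FILENAME", "filename metadata is required", 400)
			}
			return handler.HTTPResponse{}, nil
		},
	}
}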

pkg/handler/context.go Normal file

@ -0,0 +1,28 @@
package handler

import (
	"context"
	"net/http"
)

// httpContext is a wrapper around context.Context that also carries the
// corresponding HTTP request and response writer, as well as an
// optional body reader.
// TODO: Consider including HTTPResponse as well
type httpContext struct {
	context.Context

	res  http.ResponseWriter
	req  *http.Request
	body *bodyReader
}

func newContext(w http.ResponseWriter, r *http.Request) *httpContext {
	return &httpContext{
		// TODO: Try to reuse the request's context in the future
		Context: context.Background(),
		res:     w,
		req:     r,
		body:    nil, // body can be filled later for PATCH requests
	}
}


@ -7,7 +7,9 @@ import (
type MetaData map[string]string
// FileInfo contains information about a single upload resource.
type FileInfo struct {
// ID is the unique identifier of the upload resource.
ID string
// Total file size in bytes specified in the NewUpload call
Size int64
@ -41,6 +43,7 @@ type FileInfo struct {
// more data. Furthermore, a response is sent to notify the client of the
// interruption, and the upload is terminated (if supported by the data store),
// so the upload cannot be resumed anymore.
// TODO: Allow passing in an HTTP response
func (f FileInfo) StopUpload() {
if f.stopUpload != nil {
f.stopUpload()
@ -60,14 +63,12 @@ type Upload interface {
// requests. It may return an os.ErrNotExist which will be interpreted as a
// 404 Not Found.
GetInfo(ctx context.Context) (FileInfo, error)
// GetReader returns a reader which allows iterating of the content of an
// GetReader returns an io.ReadCloser which allows reading the content of an
// upload specified by its ID. It should attempt to provide a reader even if
// the upload has not been finished yet, but it is not required to.
// If the returned reader also implements the io.Closer interface, the
// Close() method will be invoked once everything has been read.
// If the given upload could not be found, the error tusd.ErrNotFound should
// be returned.
GetReader(ctx context.Context) (io.Reader, error)
GetReader(ctx context.Context) (io.ReadCloser, error)
// FinisherDataStore is the interface which can be implemented by DataStores
// which need to do additional operations once an entire upload has been
// completed. These tasks may include but are not limited to freeing unused
@ -146,11 +147,15 @@ type Locker interface {
type Lock interface {
// Lock attempts to obtain an exclusive lock for the upload specified
// by its id.
// If this operation fails because the resource is already locked, the
// tusd.ErrFileLocked must be returned. If no error is returned, the attempt
// is consider to be successful and the upload to be locked until UnlockUpload
// is invoked for the same upload.
Lock() error
// If the lock can be acquired, it will return without error. The requestUnlock
// callback is invoked when another caller attempts to create a lock. In this
// case, the holder of the lock should attempt to release the lock as soon
// as possible.
// If the lock is already held, the holder's requestUnlock function will be
// invoked to request the lock to be released. If the context is cancelled before
// the lock can be acquired, ErrLockTimeout will be returned without acquiring
// the lock.
Lock(ctx context.Context, requestUnlock func()) error
// Unlock releases an existing lock for the given upload.
Unlock() error
}
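
An illustrative single-process implementation of this contract follows. It is a sketch only, not the memorylocker from this changeset, and the requestUnlock bookkeeping is left unsynchronized here, which a real implementation would need to fix:

package example

import (
	"context"

	"github.com/tus/tusd/pkg/handler"
)

// chanLock models the new contract with a buffered channel: a full channel
// means the lock is held.
type chanLock struct {
	slot          chan struct{}
	requestUnlock func()
}

func newChanLock() *chanLock {
	return &chanLock{slot: make(chan struct{}, 1)}
}

func (l *chanLock) Lock(ctx context.Context, requestUnlock func()) error {
	select {
	case l.slot <- struct{}{}: // acquired immediately
		l.requestUnlock = requestUnlock
		return nil
	default:
	}
	// Already held: ask the current holder to release, then wait for the slot
	// to free up or for the context to be cancelled.
	if held := l.requestUnlock; held != nil {
		held()
	}
	select {
	case l.slot <- struct{}{}:
		l.requestUnlock = requestUnlock
		return nil
	case <-ctx.Done():
		return handler.ErrLockTimeout
	}
}

func (l *chanLock) Unlock() error {
	<-l.slot
	return nil
}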

pkg/handler/error.go Normal file

@ -0,0 +1,34 @@
package handler

// Error represents an error with the intent to be sent in the HTTP
// response to the client. Therefore, it also contains an HTTPResponse,
// next to an error code and error message.
// TODO: Error is not comparable anymore because HTTPResponse
// contains a map. See if we should change this.
type Error struct {
	ErrorCode    string
	Message      string
	HTTPResponse HTTPResponse
}

func (e Error) Error() string {
	return e.ErrorCode + ": " + e.Message
}

// NewError constructs a new Error object with the given error code and message.
// The corresponding HTTP response will have the provided status code
// and a body consisting of the error details.
// See the net/http package for standardized status codes.
func NewError(errCode string, message string, statusCode int) Error {
	return Error{
		ErrorCode: errCode,
		Message:   message,
		HTTPResponse: HTTPResponse{
			StatusCode: statusCode,
			Body:       errCode + ": " + message + "\n",
			Headers: HTTPHeaders{
				"Content-Type": "text/plain; charset=utf-8",
			},
		},
	}
}
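
A usage sketch (hypothetical): because the value implements error while carrying its HTTPResponse, stores and callbacks can return domain errors that map directly onto a client-facing response.

package example

import "github.com/tus/tusd/pkg/handler"

// ErrQuotaExceeded is a hypothetical domain error; the 403 response body and
// headers are derived by NewError.
var ErrQuotaExceeded = handler.NewError("ERR_QUOTA_EXCEEDED", "storage quota exceeded", 403)

func checkQuota(used, limit int64) error {
	if used > limit {
		return ErrQuotaExceeded
	}
	return nil
}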


@ -1,7 +1,6 @@
package handler_test
import (
"context"
"net/http"
"strings"
"testing"
@ -34,9 +33,9 @@ func TestGet(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("yes").Return(lock, nil),
lock.EXPECT().Lock().Return(nil),
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
Offset: 5,
Size: 20,
MetaData: map[string]string{
@ -44,7 +43,7 @@ func TestGet(t *testing.T) {
"filetype": "image/jpeg",
},
}, nil),
upload.EXPECT().GetReader(context.Background()).Return(reader, nil),
upload.EXPECT().GetReader(gomock.Any()).Return(reader, nil),
lock.EXPECT().Unlock().Return(nil),
)
@ -79,8 +78,8 @@ func TestGet(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
Offset: 0,
}, nil),
)
@ -107,8 +106,8 @@ func TestGet(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
Offset: 0,
MetaData: map[string]string{
"filetype": "non-a-valid-mime-type",
@ -139,8 +138,8 @@ func TestGet(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
Offset: 0,
MetaData: map[string]string{
"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.v1",


@ -6,51 +6,79 @@ package handler_test
import (
context "context"
gomock "github.com/golang/mock/gomock"
handler "github.com/tus/tusd/pkg/handler"
io "io"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
handler "github.com/tus/tusd/pkg/handler"
)
// MockFullDataStore is a mock of FullDataStore interface
// MockFullDataStore is a mock of FullDataStore interface.
type MockFullDataStore struct {
ctrl *gomock.Controller
recorder *MockFullDataStoreMockRecorder
}
// MockFullDataStoreMockRecorder is the mock recorder for MockFullDataStore
// MockFullDataStoreMockRecorder is the mock recorder for MockFullDataStore.
type MockFullDataStoreMockRecorder struct {
mock *MockFullDataStore
}
// NewMockFullDataStore creates a new mock instance
// NewMockFullDataStore creates a new mock instance.
func NewMockFullDataStore(ctrl *gomock.Controller) *MockFullDataStore {
mock := &MockFullDataStore{ctrl: ctrl}
mock.recorder = &MockFullDataStoreMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFullDataStore) EXPECT() *MockFullDataStoreMockRecorder {
return m.recorder
}
// NewUpload mocks base method
func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
// AsConcatableUpload mocks base method.
func (m *MockFullDataStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewUpload", ctx, info)
ret0, _ := ret[0].(handler.Upload)
ret1, _ := ret[1].(error)
return ret0, ret1
ret := m.ctrl.Call(m, "AsConcatableUpload", upload)
ret0, _ := ret[0].(handler.ConcatableUpload)
return ret0
}
// NewUpload indicates an expected call of NewUpload
func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
// AsConcatableUpload indicates an expected call of AsConcatableUpload.
func (mr *MockFullDataStoreMockRecorder) AsConcatableUpload(upload interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsConcatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsConcatableUpload), upload)
}
// GetUpload mocks base method
// AsLengthDeclarableUpload mocks base method.
func (m *MockFullDataStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AsLengthDeclarableUpload", upload)
ret0, _ := ret[0].(handler.LengthDeclarableUpload)
return ret0
}
// AsLengthDeclarableUpload indicates an expected call of AsLengthDeclarableUpload.
func (mr *MockFullDataStoreMockRecorder) AsLengthDeclarableUpload(upload interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsLengthDeclarableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsLengthDeclarableUpload), upload)
}
// AsTerminatableUpload mocks base method.
func (m *MockFullDataStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AsTerminatableUpload", upload)
ret0, _ := ret[0].(handler.TerminatableUpload)
return ret0
}
// AsTerminatableUpload indicates an expected call of AsTerminatableUpload.
func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsTerminatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsTerminatableUpload), upload)
}
// GetUpload mocks base method.
func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetUpload", ctx, id)
@ -59,93 +87,93 @@ func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.U
return ret0, ret1
}
// GetUpload indicates an expected call of GetUpload
// GetUpload indicates an expected call of GetUpload.
func (mr *MockFullDataStoreMockRecorder) GetUpload(ctx, id interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), ctx, id)
}
// AsTerminatableUpload mocks base method
func (m *MockFullDataStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
// NewUpload mocks base method.
func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AsTerminatableUpload", upload)
ret0, _ := ret[0].(handler.TerminatableUpload)
return ret0
ret := m.ctrl.Call(m, "NewUpload", ctx, info)
ret0, _ := ret[0].(handler.Upload)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AsTerminatableUpload indicates an expected call of AsTerminatableUpload
func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}) *gomock.Call {
// NewUpload indicates an expected call of NewUpload.
func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsTerminatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsTerminatableUpload), upload)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
}
// AsConcatableUpload mocks base method
func (m *MockFullDataStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AsConcatableUpload", upload)
ret0, _ := ret[0].(handler.ConcatableUpload)
return ret0
}
// AsConcatableUpload indicates an expected call of AsConcatableUpload
func (mr *MockFullDataStoreMockRecorder) AsConcatableUpload(upload interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsConcatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsConcatableUpload), upload)
}
// AsLengthDeclarableUpload mocks base method
func (m *MockFullDataStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AsLengthDeclarableUpload", upload)
ret0, _ := ret[0].(handler.LengthDeclarableUpload)
return ret0
}
// AsLengthDeclarableUpload indicates an expected call of AsLengthDeclarableUpload
func (mr *MockFullDataStoreMockRecorder) AsLengthDeclarableUpload(upload interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsLengthDeclarableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsLengthDeclarableUpload), upload)
}
// MockFullUpload is a mock of FullUpload interface
// MockFullUpload is a mock of FullUpload interface.
type MockFullUpload struct {
ctrl *gomock.Controller
recorder *MockFullUploadMockRecorder
}
// MockFullUploadMockRecorder is the mock recorder for MockFullUpload
// MockFullUploadMockRecorder is the mock recorder for MockFullUpload.
type MockFullUploadMockRecorder struct {
mock *MockFullUpload
}
// NewMockFullUpload creates a new mock instance
// NewMockFullUpload creates a new mock instance.
func NewMockFullUpload(ctrl *gomock.Controller) *MockFullUpload {
mock := &MockFullUpload{ctrl: ctrl}
mock.recorder = &MockFullUploadMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFullUpload) EXPECT() *MockFullUploadMockRecorder {
return m.recorder
}
// WriteChunk mocks base method
func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
// ConcatUploads mocks base method.
func (m *MockFullUpload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
ret := m.ctrl.Call(m, "ConcatUploads", ctx, partialUploads)
ret0, _ := ret[0].(error)
return ret0
}
// WriteChunk indicates an expected call of WriteChunk
func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
// ConcatUploads indicates an expected call of ConcatUploads.
func (mr *MockFullUploadMockRecorder) ConcatUploads(ctx, partialUploads interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullUpload)(nil).ConcatUploads), ctx, partialUploads)
}
// GetInfo mocks base method
// DeclareLength mocks base method.
func (m *MockFullUpload) DeclareLength(ctx context.Context, length int64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeclareLength", ctx, length)
ret0, _ := ret[0].(error)
return ret0
}
// DeclareLength indicates an expected call of DeclareLength.
func (mr *MockFullUploadMockRecorder) DeclareLength(ctx, length interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), ctx, length)
}
// FinishUpload mocks base method.
func (m *MockFullUpload) FinishUpload(ctx context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FinishUpload", ctx)
ret0, _ := ret[0].(error)
return ret0
}
// FinishUpload indicates an expected call of FinishUpload.
func (mr *MockFullUploadMockRecorder) FinishUpload(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload), ctx)
}
// GetInfo mocks base method.
func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetInfo", ctx)
@ -154,42 +182,28 @@ func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error)
return ret0, ret1
}
// GetInfo indicates an expected call of GetInfo
// GetInfo indicates an expected call of GetInfo.
func (mr *MockFullUploadMockRecorder) GetInfo(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo), ctx)
}
// GetReader mocks base method.
func (m *MockFullUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetReader", ctx)
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetReader indicates an expected call of GetReader.
func (mr *MockFullUploadMockRecorder) GetReader(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader), ctx)
}
// Terminate mocks base method.
func (m *MockFullUpload) Terminate(ctx context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Terminate", ctx)
@ -197,64 +211,51 @@ func (m *MockFullUpload) Terminate(ctx context.Context) error {
return ret0
}
// Terminate indicates an expected call of Terminate.
func (mr *MockFullUploadMockRecorder) Terminate(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate), ctx)
}
// WriteChunk mocks base method.
func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WriteChunk indicates an expected call of WriteChunk.
func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
}
// MockFullLocker is a mock of FullLocker interface.
type MockFullLocker struct {
ctrl *gomock.Controller
recorder *MockFullLockerMockRecorder
}
// MockFullLockerMockRecorder is the mock recorder for MockFullLocker.
type MockFullLockerMockRecorder struct {
mock *MockFullLocker
}
// NewMockFullLocker creates a new mock instance.
func NewMockFullLocker(ctrl *gomock.Controller) *MockFullLocker {
mock := &MockFullLocker{ctrl: ctrl}
mock.recorder = &MockFullLockerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFullLocker) EXPECT() *MockFullLockerMockRecorder {
return m.recorder
}
// NewLock mocks base method.
func (m *MockFullLocker) NewLock(id string) (handler.Lock, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewLock", id)
@ -263,50 +264,50 @@ func (m *MockFullLocker) NewLock(id string) (handler.Lock, error) {
return ret0, ret1
}
// NewLock indicates an expected call of NewLock.
func (mr *MockFullLockerMockRecorder) NewLock(id interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLock", reflect.TypeOf((*MockFullLocker)(nil).NewLock), id)
}
// MockFullLock is a mock of FullLock interface.
type MockFullLock struct {
ctrl *gomock.Controller
recorder *MockFullLockMockRecorder
}
// MockFullLockMockRecorder is the mock recorder for MockFullLock.
type MockFullLockMockRecorder struct {
mock *MockFullLock
}
// NewMockFullLock creates a new mock instance.
func NewMockFullLock(ctrl *gomock.Controller) *MockFullLock {
mock := &MockFullLock{ctrl: ctrl}
mock.recorder = &MockFullLockMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFullLock) EXPECT() *MockFullLockMockRecorder {
return m.recorder
}
// Lock mocks base method.
func (m *MockFullLock) Lock(ctx context.Context, requestUnlock func()) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Lock")
ret := m.ctrl.Call(m, "Lock", ctx, requestUnlock)
ret0, _ := ret[0].(error)
return ret0
}
// Lock indicates an expected call of Lock.
func (mr *MockFullLockMockRecorder) Lock(ctx, requestUnlock interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockFullLock)(nil).Lock))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockFullLock)(nil).Lock), ctx, requestUnlock)
}
// Unlock mocks base method.
func (m *MockFullLock) Unlock() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Unlock")
@ -314,7 +315,7 @@ func (m *MockFullLock) Unlock() error {
return ret0
}
// Unlock indicates an expected call of Unlock.
func (mr *MockFullLockMockRecorder) Unlock() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockFullLock)(nil).Unlock))

View File

@ -1,7 +1,6 @@
package handler_test
import (
"context"
"net/http"
"testing"
@ -19,9 +18,9 @@ func TestHead(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("yes").Return(lock, nil),
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
Offset: 11,
Size: 44,
MetaData: map[string]string{
@ -64,7 +63,7 @@ func TestHead(t *testing.T) {
})
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, ErrNotFound)
store.EXPECT().GetUpload(gomock.Any(), "no").Return(nil, ErrNotFound)
handler, _ := NewHandler(Config{
StoreComposer: composer,
@ -76,10 +75,8 @@ func TestHead(t *testing.T) {
ReqHeader: map[string]string{
"Tus-Resumable": "1.0.0",
},
Code: http.StatusNotFound,
ResHeader: map[string]string{},
}).Run(handler, t)
if res.Body.String() != "" {
@ -93,8 +90,8 @@ func TestHead(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
SizeIsDeferred: true,
Size: 0,
}, nil),
@ -123,8 +120,8 @@ func TestHead(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
SizeIsDeferred: false,
Size: 10,
}, nil),

pkg/handler/hooks.go Normal file
View File

@ -0,0 +1,25 @@
package handler
import "net/http"
// HookEvent represents an event from tusd which can be handled by the application.
type HookEvent struct {
// Upload contains information about the upload that caused this hook
// to be fired.
Upload FileInfo
// HTTPRequest contains details about the HTTP request that reached
// tusd.
HTTPRequest HTTPRequest
}
func newHookEvent(info FileInfo, r *http.Request) HookEvent {
return HookEvent{
Upload: info,
HTTPRequest: HTTPRequest{
Method: r.Method,
URI: r.RequestURI,
RemoteAddr: r.RemoteAddr,
Header: r.Header,
},
}
}
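Judging by the handler code later in this diff, this event is what Config.PreUploadCreateCallback receives, and the callback now returns an HTTPResponse that is merged into the reply. A hedged sketch of such a callback follows; the authorization check and header are invented for illustration, and the import path is assumed from this repository layout:

package example

import (
	"net/http"

	"github.com/tus/tusd/pkg/handler"
)

// preCreate rejects uploads carrying no credentials and otherwise adds a
// custom header to the eventual 201 response. The signature follows the
// call sites in this diff: func(HookEvent) (HTTPResponse, error).
func preCreate(event handler.HookEvent) (handler.HTTPResponse, error) {
	if event.HTTPRequest.Header.Get("Authorization") == "" {
		// NewError and the code/message/status triple follow the error
		// definitions later in this diff; the code here is hypothetical.
		return handler.HTTPResponse{}, handler.NewError("ERR_UNAUTHORIZED", "missing credentials", http.StatusUnauthorized)
	}
	return handler.HTTPResponse{
		Headers: handler.HTTPHeaders{"X-Upload-Accepted": "true"},
	}, nil
}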

pkg/handler/http.go Normal file
View File

@ -0,0 +1,80 @@
package handler
import (
"net/http"
"strconv"
)
// HTTPRequest contains basic details of an incoming HTTP request.
type HTTPRequest struct {
// Method is the HTTP method, e.g. POST or PATCH.
Method string
// URI is the full HTTP request URI, e.g. /files/fooo.
URI string
// RemoteAddr contains the network address that sent the request.
RemoteAddr string
// Header contains all HTTP headers as present in the HTTP request.
Header http.Header
}
// HTTPHeaders is a set of HTTP headers to be included in a response.
type HTTPHeaders map[string]string
// HTTPResponse contains basic details of an outgoing HTTP response.
type HTTPResponse struct {
// StatusCode is the status code, e.g. 200 or 400.
StatusCode int
// Body is the response body.
Body string
// Headers contains additional HTTP headers for the response.
// TODO: Uniform naming with HTTPRequest.Header
Headers HTTPHeaders
}
// writeTo writes the HTTP response into w, as specified by the fields in resp.
func (resp HTTPResponse) writeTo(w http.ResponseWriter) {
headers := w.Header()
for key, value := range resp.Headers {
headers.Set(key, value)
}
if len(resp.Body) > 0 {
headers.Set("Content-Length", strconv.Itoa(len(resp.Body)))
}
w.WriteHeader(resp.StatusCode)
if len(resp.Body) > 0 {
w.Write([]byte(resp.Body))
}
}
// MergeWith returns a copy of resp1, where non-default values from resp2 overwrite
// values from resp1.
func (resp1 HTTPResponse) MergeWith(resp2 HTTPResponse) HTTPResponse {
// Clone the response 1 and use it as a basis
newResp := resp1
// Take the status code and body from response 2 to
// overwrite values from response 1.
if resp2.StatusCode != 0 {
newResp.StatusCode = resp2.StatusCode
}
if len(resp2.Body) > 0 {
newResp.Body = resp2.Body
}
// For the headers, we must make a new map to avoid writing
// into the header map from response 1.
newResp.Headers = make(HTTPHeaders, len(resp1.Headers)+len(resp2.Headers))
for key, value := range resp1.Headers {
newResp.Headers[key] = value
}
for key, value := range resp2.Headers {
newResp.Headers[key] = value
}
return newResp
}
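To make the merge semantics concrete, here is a small, hypothetical example (module path assumed): zero-valued fields of the second response never overwrite the first, and headers from both are combined into a fresh map.

package main

import (
	"fmt"

	"github.com/tus/tusd/pkg/handler"
)

func main() {
	base := handler.HTTPResponse{
		StatusCode: 201,
		Headers:    handler.HTTPHeaders{"Location": "/files/foo"},
	}
	override := handler.HTTPResponse{
		Body:    "upload accepted",
		Headers: handler.HTTPHeaders{"X-Custom": "yes"},
	}
	merged := base.MergeWith(override)
	fmt.Println(merged.StatusCode) // 201: override's zero StatusCode is ignored
	fmt.Println(merged.Body)       // "upload accepted": a non-empty body wins
	fmt.Println(merged.Headers)    // map[Location:/files/foo X-Custom:yes]
}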

View File

@ -1,7 +1,6 @@
package handler
import (
"errors"
"sync"
"sync/atomic"
)
@ -30,8 +29,9 @@ func (m Metrics) incRequestsTotal(method string) {
}
}
// TODO: Rework to only store error code
// incErrorsTotal increases the counter for this error atomically by one.
func (m Metrics) incErrorsTotal(err Error) {
ptr := m.ErrorsTotal.retrievePointerFor(err)
atomic.AddUint64(ptr, 1)
}
@ -78,23 +78,16 @@ func newMetrics() Metrics {
// ErrorsTotalMap stores the counters for the different HTTP errors.
type ErrorsTotalMap struct {
lock sync.RWMutex
counter map[ErrorsTotalMapEntry]*uint64
}
type ErrorsTotalMapEntry struct {
ErrorCode string
StatusCode int
}
func newErrorsTotalMap() *ErrorsTotalMap {
m := make(map[ErrorsTotalMapEntry]*uint64, 20)
return &ErrorsTotalMap{
counter: m,
}
@ -102,8 +95,12 @@ func newErrorsTotalMap() *ErrorsTotalMap {
// retrievePointerFor returns (after creating it if necessary) the pointer to
// the counter for the error.
func (e *ErrorsTotalMap) retrievePointerFor(err Error) *uint64 {
serr := ErrorsTotalMapEntry{
ErrorCode: err.ErrorCode,
StatusCode: err.HTTPResponse.StatusCode,
}
e.lock.RLock()
ptr, ok := e.counter[serr]
e.lock.RUnlock()
@ -124,12 +121,11 @@ func (e *ErrorsTotalMap) retrievePointerFor(err HTTPError) *uint64 {
}
// Load retrieves the map of the counter pointers atomically
func (e *ErrorsTotalMap) Load() map[ErrorsTotalMapEntry]*uint64 {
m := make(map[ErrorsTotalMapEntry]*uint64, len(e.counter))
e.lock.RLock()
for err, ptr := range e.counter {
m[err] = ptr
}
e.lock.RUnlock()
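Since the counters are now keyed by ErrorsTotalMapEntry, consumers of Load get the error code and status code directly instead of reconstructing an HTTPError. A hedged usage sketch; the dump function and its wiring are assumptions, only the types come from this diff:

import (
	"log"
	"sync/atomic"

	"github.com/tus/tusd/pkg/handler"
)

// dumpErrorCounters logs one line per observed error class. It assumes h is
// an *handler.UnroutedHandler, whose Metrics field is exported.
func dumpErrorCounters(h *handler.UnroutedHandler) {
	for entry, ptr := range h.Metrics.ErrorsTotal.Load() {
		count := atomic.LoadUint64(ptr) // counters are updated atomically
		log.Printf("%s (HTTP %d): %d occurrences", entry.ErrorCode, entry.StatusCode, count)
	}
}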

View File

@ -1,7 +1,6 @@
package handler_test
import (
"context"
"errors"
"io"
"io/ioutil"
@ -23,14 +22,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 10,
}, nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().FinishUpload(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -75,14 +74,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 10,
}, nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().FinishUpload(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -112,8 +111,8 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 20,
Size: 20,
@ -141,7 +140,7 @@ func TestPatch(t *testing.T) {
})
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, ErrNotFound)
store.EXPECT().GetUpload(gomock.Any(), "no").Return(nil, ErrNotFound)
handler, _ := NewHandler(Config{
StoreComposer: composer,
@ -165,8 +164,8 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
}, nil),
@ -194,8 +193,8 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 10,
@ -268,14 +267,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 20,
}, nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
upload.EXPECT().FinishUpload(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -310,17 +309,17 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 0,
SizeIsDeferred: true,
}, nil),
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
upload.EXPECT().DeclareLength(gomock.Any(), int64(20)),
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
upload.EXPECT().FinishUpload(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -353,16 +352,16 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 20,
Size: 0,
SizeIsDeferred: true,
}, nil),
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
upload.EXPECT().DeclareLength(gomock.Any(), int64(20)),
upload.EXPECT().FinishUpload(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -392,26 +391,26 @@ func TestPatch(t *testing.T) {
upload2 := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload1, nil),
upload1.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload1, nil),
upload1.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 0,
SizeIsDeferred: true,
}, nil),
store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
upload1.EXPECT().DeclareLength(gomock.Any(), int64(20)),
upload1.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload2, nil),
upload2.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload2, nil),
upload2.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 10,
Size: 20,
SizeIsDeferred: false,
}, nil),
upload2.EXPECT().WriteChunk(gomock.Any(), int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
upload2.EXPECT().FinishUpload(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -460,14 +459,14 @@ func TestPatch(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("yes").Return(lock, nil),
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 0,
Size: 20,
}, nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
lock.EXPECT().Unlock().Return(nil),
)
@ -500,13 +499,13 @@ func TestPatch(t *testing.T) {
// We simulate that the upload already has an offset of 10 bytes. Therefore, the progress notifications
// must be the sum of the existing offset and the newly read bytes.
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 10,
Size: 100,
}, nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(10), NewReaderMatcher("first second third")).Return(int64(18), nil),
)
handler, _ := NewHandler(Config{
@ -574,15 +573,15 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 0,
Size: 100,
}, nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
upload.EXPECT().Terminate(gomock.Any()),
)
handler, _ := NewHandler(Config{
@ -629,7 +628,7 @@ func TestPatch(t *testing.T) {
ResHeader: map[string]string{
"Upload-Offset": "",
},
ResBody: "upload has been stopped by server\n",
ResBody: "ERR_UPLOAD_STOPPED: upload has been stopped by server\n",
}).Run(handler, t)
_, more := <-c
@ -644,14 +643,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "yes",
Offset: 0,
Size: 100,
}, nil),
// The reader for WriteChunk must not return an error.
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
)
handler, _ := NewHandler(Config{
@ -680,7 +679,7 @@ func TestPatch(t *testing.T) {
ResHeader: map[string]string{
"Upload-Offset": "",
},
ResBody: "an error while reading the body\n",
ResBody: "ERR_INTERNAL_SERVER_ERROR: an error while reading the body\n",
}).Run(handler, t)
})
}

View File

@ -2,7 +2,6 @@ package handler_test
import (
"bytes"
"context"
"net/http"
"strings"
"testing"
@ -20,7 +19,7 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{
"foo": "hello",
@ -28,7 +27,7 @@ func TestPost(t *testing.T) {
"empty": "",
},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{
@ -76,16 +75,16 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 0,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 0,
MetaData: map[string]string{},
}, nil),
upload.EXPECT().FinishUpload(gomock.Any()).Return(nil),
)
handler, _ := NewHandler(Config{
@ -211,11 +210,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -248,11 +247,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -286,11 +285,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -326,11 +325,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -363,11 +362,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -405,14 +404,14 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{
"foo": "hello",
"bar": "world",
},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{
@ -421,8 +420,8 @@ func TestPost(t *testing.T) {
},
}, nil),
locker.EXPECT().NewLock("foo").Return(lock, nil),
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
lock.EXPECT().Unlock().Return(nil),
)
@ -458,11 +457,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -492,11 +491,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},

View File

@ -1,7 +1,6 @@
package handler_test
import (
"context"
"net/http"
"testing"
@ -39,14 +38,14 @@ func TestTerminate(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("foo").Return(lock, nil),
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
ID: "foo",
Size: 10,
}, nil),
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
upload.EXPECT().Terminate(gomock.Any()).Return(nil),
lock.EXPECT().Unlock().Return(nil),
)

View File

@ -3,7 +3,6 @@ package handler
import (
"context"
"encoding/base64"
"errors"
"io"
"log"
"math"
@ -24,91 +23,33 @@ var (
reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
)
var (
ErrUnsupportedVersion = NewError("ERR_UNSUPPORTED_VERSION", "missing, invalid or unsupported Tus-Resumable header", http.StatusPreconditionFailed)
ErrMaxSizeExceeded = NewError("ERR_MAX_SIZE_EXCEEDED", "maximum size exceeded", http.StatusRequestEntityTooLarge)
ErrInvalidContentType = NewError("ERR_INVALID_CONTENT_TYPE", "missing or invalid Content-Type header", http.StatusBadRequest)
ErrInvalidUploadLength = NewError("ERR_INVALID_UPLOAD_LENGTH", "missing or invalid Upload-Length header", http.StatusBadRequest)
ErrInvalidOffset = NewError("ERR_INVALID_OFFSET", "missing or invalid Upload-Offset header", http.StatusBadRequest)
ErrNotFound = NewError("ERR_UPLOAD_NOT_FOUND", "upload not found", http.StatusNotFound)
ErrFileLocked = NewError("ERR_UPLOAD_LOCKED", "file currently locked", http.StatusLocked)
ErrLockTimeout = NewError("ERR_LOCK_TIMEOUT", "failed to acquire lock before timeout", http.StatusInternalServerError)
ErrMismatchOffset = NewError("ERR_MISMATCHED_OFFSET", "mismatched offset", http.StatusConflict)
ErrSizeExceeded = NewError("ERR_UPLOAD_SIZE_EXCEEDED", "upload's size exceeded", http.StatusRequestEntityTooLarge)
ErrNotImplemented = NewError("ERR_NOT_IMPLEMENTED", "feature not implemented", http.StatusNotImplemented)
ErrUploadNotFinished = NewError("ERR_UPLOAD_NOT_FINISHED", "one of the partial uploads is not finished", http.StatusBadRequest)
ErrInvalidConcat = NewError("ERR_INVALID_CONCAT", "invalid Upload-Concat header", http.StatusBadRequest)
ErrModifyFinal = NewError("ERR_MODIFY_FINAL", "modifying a final upload is not allowed", http.StatusForbidden)
ErrUploadLengthAndUploadDeferLength = NewError("ERR_AMBIGUOUS_UPLOAD_LENGTH", "provided both Upload-Length and Upload-Defer-Length", http.StatusBadRequest)
ErrInvalidUploadDeferLength = NewError("ERR_INVALID_UPLOAD_LENGTH_DEFER", "invalid Upload-Defer-Length header", http.StatusBadRequest)
ErrUploadStoppedByServer = NewError("ERR_UPLOAD_STOPPED", "upload has been stopped by server", http.StatusBadRequest)
ErrUploadRejectedByServer = NewError("ERR_UPLOAD_REJECTED", "upload creation has been rejected by server", http.StatusBadRequest)
ErrUploadInterrupted = NewError("ERR_UPLAOD_INTERRUPTED", "upload has been interrupted by another request for this upload resource", http.StatusBadRequest)
// TODO: These two responses are 500 for backwards compatibility. We should discuss
// whether it is better to move them to 4XX status codes.
ErrReadTimeout = NewError("ERR_READ_TIMEOUT", "timeout while reading request body", http.StatusInternalServerError)
ErrConnectionReset = NewError("ERR_CONNECTION_RESET", "TCP connection reset by peer", http.StatusInternalServerError)
)
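Each predefined error now pairs a stable, machine-readable code with its HTTP response; as the updated PATCH tests above show, sendError renders the body as "CODE: message". An application-specific error can be built with the same constructor; the quota error below is hypothetical:

// Returning this from a hook would yield a 403 response whose body reads
// "ERR_QUOTA_EXCEEDED: storage quota exceeded" followed by a newline,
// matching the format asserted in the tests above.
var ErrQuotaExceeded = NewError("ERR_QUOTA_EXCEEDED", "storage quota exceeded", http.StatusForbidden)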
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
// is provided which is, however, not part of the specification.
@ -264,7 +205,10 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// will be ignored or interpreted as a rejection.
// For example, the Presto engine, which is used in older versions of
// Opera, Opera Mobile and Opera Mini, handles CORS this way.
c := newContext(w, r)
handler.sendResp(c, HTTPResponse{
StatusCode: http.StatusOK,
})
return
}
@ -272,7 +216,8 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// GET and HEAD methods are not checked since a browser may visit this URL and does
// not include this header. GET requests are not part of the specification.
if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" {
c := newContext(w, r)
handler.sendError(c, ErrUnsupportedVersion)
return
}
@ -284,7 +229,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// PostFile creates a new file upload using the datastore after validating the
// length and parsing the metadata.
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
c := newContext(w, r)
// Check for presence of application/offset+octet-stream. If another content
// type is defined, it will be ignored and treated as none was set because
@ -301,7 +246,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
// Parse Upload-Concat header
isPartial, isFinal, partialUploadIDs, err := parseConcat(concatHeader)
if err != nil {
handler.sendError(c, err)
return
}
@ -314,13 +259,13 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
if isFinal {
// A final upload must not contain a chunk within the creation request
if containsChunk {
handler.sendError(c, ErrModifyFinal)
return
}
partialUploads, size, err = handler.sizeOfUploads(c, partialUploadIDs)
if err != nil {
handler.sendError(c, err)
return
}
} else {
@ -328,14 +273,14 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
uploadDeferLengthHeader := r.Header.Get("Upload-Defer-Length")
size, sizeIsDeferred, err = handler.validateNewUploadLengthHeaders(uploadLengthHeader, uploadDeferLengthHeader)
if err != nil {
handler.sendError(c, err)
return
}
}
// Test whether the size is still allowed
if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
handler.sendError(c, ErrMaxSizeExceeded)
return
}
@ -351,22 +296,29 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
PartialUploads: partialUploadIDs,
}
resp := HTTPResponse{
StatusCode: http.StatusCreated,
Headers: HTTPHeaders{},
}
if handler.config.PreUploadCreateCallback != nil {
resp2, err := handler.config.PreUploadCreateCallback(newHookEvent(info, r))
if err != nil {
handler.sendError(c, err)
return
}
resp = resp.MergeWith(resp2)
}
upload, err := handler.composer.Core.NewUpload(c, info)
if err != nil {
handler.sendError(w, r, err)
handler.sendError(c, err)
return
}
info, err = upload.GetInfo(c)
if err != nil {
handler.sendError(c, err)
return
}
@ -375,7 +327,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
// Add the Location header directly after creating the new resource to even
// include it in cases of failure when an error is returned
url := handler.absFileURL(r, id)
w.Header().Set("Location", url)
resp.Headers["Location"] = url
handler.Metrics.incUploadsCreated()
handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)
@ -386,8 +338,8 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
if isFinal {
concatableUpload := handler.composer.Concater.AsConcatableUpload(upload)
if err := concatableUpload.ConcatUploads(c, partialUploads); err != nil {
handler.sendError(c, err)
return
}
info.Offset = size
@ -399,67 +351,74 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
if containsChunk {
if handler.composer.UsesLocker {
lock, err := handler.lockUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
defer lock.Unlock()
}
resp, err = handler.writeChunk(c, resp, upload, info)
if err != nil {
handler.sendError(c, err)
return
}
} else if !sizeIsDeferred && size == 0 {
// Directly finish the upload if the upload is empty (i.e. has a size of 0).
// This statement is in an else-if block to avoid causing duplicate calls
// to finishUploadIfComplete if an upload is empty and contains a chunk.
resp, err = handler.finishUploadIfComplete(c, resp, upload, info)
if err != nil {
handler.sendError(c, err)
return
}
}
handler.sendResp(c, resp)
}
// HeadFile returns the length and offset for the HEAD request
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
c := newContext(w, r)
id, err := extractIDFromPath(r.URL.Path)
if err != nil {
handler.sendError(c, err)
return
}
if handler.composer.UsesLocker {
lock, err := handler.lockUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
info, err := upload.GetInfo(c)
if err != nil {
handler.sendError(c, err)
return
}
resp := HTTPResponse{
StatusCode: http.StatusOK,
Headers: make(HTTPHeaders),
}
// Add Upload-Concat header if possible
if info.IsPartial {
w.Header().Set("Upload-Concat", "partial")
resp.Headers["Upload-Concat"] = "partial"
}
if info.IsFinal {
@ -470,107 +429,112 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
// Remove trailing space
v = v[:len(v)-1]
w.Header().Set("Upload-Concat", v)
resp.Headers["Upload-Concat"] = v
}
if len(info.MetaData) != 0 {
w.Header().Set("Upload-Metadata", SerializeMetadataHeader(info.MetaData))
resp.Headers["Upload-Metadata"] = SerializeMetadataHeader(info.MetaData)
}
if info.SizeIsDeferred {
w.Header().Set("Upload-Defer-Length", UploadLengthDeferred)
resp.Headers["Upload-Defer-Length"] = UploadLengthDeferred
} else {
w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
w.Header().Set("Content-Length", strconv.FormatInt(info.Size, 10))
resp.Headers["Upload-Length"] = strconv.FormatInt(info.Size, 10)
resp.Headers["Content-Length"] = strconv.FormatInt(info.Size, 10)
}
w.Header().Set("Cache-Control", "no-store")
w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10))
handler.sendResp(w, r, http.StatusOK)
resp.Headers["Cache-Control"] = "no-store"
resp.Headers["Upload-Offset"] = strconv.FormatInt(info.Offset, 10)
handler.sendResp(c, resp)
}
// PatchFile adds a chunk to an upload. This operation is only allowed
// if enough space in the upload is left.
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
c := newContext(w, r)
// Check for presence of application/offset+octet-stream
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
handler.sendError(c, ErrInvalidContentType)
return
}
// Check for presence of a valid Upload-Offset Header
offset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
if err != nil || offset < 0 {
handler.sendError(c, ErrInvalidOffset)
return
}
id, err := extractIDFromPath(r.URL.Path)
if err != nil {
handler.sendError(c, err)
return
}
if handler.composer.UsesLocker {
lock, err := handler.lockUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
info, err := upload.GetInfo(c)
if err != nil {
handler.sendError(c, err)
return
}
// Modifying a final upload is not allowed
if info.IsFinal {
handler.sendError(c, ErrModifyFinal)
return
}
if offset != info.Offset {
handler.sendError(c, ErrMismatchOffset)
return
}
resp := HTTPResponse{
StatusCode: http.StatusNoContent,
Headers: make(HTTPHeaders, 1), // Initialize map, so writeChunk can set the Upload-Offset header.
}
// Do not proxy the call to the data store if the upload is already completed
if !info.SizeIsDeferred && info.Offset == info.Size {
w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
handler.sendResp(w, r, http.StatusNoContent)
resp.Headers["Upload-Offset"] = strconv.FormatInt(offset, 10)
handler.sendResp(c, resp)
return
}
if r.Header.Get("Upload-Length") != "" {
if !handler.composer.UsesLengthDeferrer {
handler.sendError(c, ErrNotImplemented)
return
}
if !info.SizeIsDeferred {
handler.sendError(c, ErrInvalidUploadLength)
return
}
uploadLength, err := strconv.ParseInt(r.Header.Get("Upload-Length"), 10, 64)
if err != nil || uploadLength < 0 || uploadLength < info.Offset || (handler.config.MaxSize > 0 && uploadLength > handler.config.MaxSize) {
handler.sendError(c, ErrInvalidUploadLength)
return
}
lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
if err := lengthDeclarableUpload.DeclareLength(c, uploadLength); err != nil {
handler.sendError(c, err)
return
}
@ -578,26 +542,28 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
info.SizeIsDeferred = false
}
resp, err = handler.writeChunk(c, resp, upload, info)
if err != nil {
handler.sendError(c, err)
return
}
handler.sendResp(c, resp)
}
// writeChunk reads the body from the request and appends it to the upload
// with the corresponding id. Afterwards, it will set the necessary response
// headers but will not send the response.
func (handler *UnroutedHandler) writeChunk(c *httpContext, resp HTTPResponse, upload Upload, info FileInfo) (HTTPResponse, error) {
// Get Content-Length if possible
r := c.req
length := r.ContentLength
offset := info.Offset
id := info.ID
// Test if this upload fits into the file's size
if !info.SizeIsDeferred && offset+length > info.Size {
return resp, ErrSizeExceeded
}
maxSize := info.Size - offset
@ -625,7 +591,7 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
// available in the case of a malicious request.
if r.Body != nil {
// Limit the data read from the request's body to the allowed maximum
c.body = newBodyReader(r.Body, maxSize)
// We use a context object to allow the hook system to cancel an upload
uploadCtx, stopUpload := context.WithCancel(context.Background())
@ -640,18 +606,19 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
go func() {
// Interrupt the Read() call from the request body
<-uploadCtx.Done()
// TODO: Consider using CloseWithError function from BodyReader
terminateUpload = true
r.Body.Close()
}()
if handler.config.NotifyUploadProgress {
stopProgressEvents := handler.sendProgressMessages(newHookEvent(info, r), c.body)
defer close(stopProgressEvents)
}
bytesWritten, err = upload.WriteChunk(c, offset, c.body)
if terminateUpload && handler.composer.UsesTerminater {
if terminateErr := handler.terminateUpload(c, upload, info); terminateErr != nil {
// We only log this error and do not show it to the user since this
// termination error is not relevant to the uploading client
handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
@ -660,7 +627,7 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
// If we encountered an error while reading the body from the HTTP request, log it, but only include
// it in the response, if the store did not also return an error.
if bodyErr := c.body.hasError(); bodyErr != nil {
handler.log("BodyReadError", "id", id, "error", bodyErr.Error())
if err == nil {
err = bodyErr
@ -677,34 +644,38 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))
if err != nil {
return resp, err
}
// Send new offset to client
newOffset := offset + bytesWritten
w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10))
resp.Headers["Upload-Offset"] = strconv.FormatInt(newOffset, 10)
handler.Metrics.incBytesReceived(uint64(bytesWritten))
info.Offset = newOffset
return handler.finishUploadIfComplete(c, resp, upload, info)
}
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
// matches upload size) and if so, it will call the data store's FinishUpload
// function and send the necessary message on the CompleteUpload channel.
func (handler *UnroutedHandler) finishUploadIfComplete(c *httpContext, resp HTTPResponse, upload Upload, info FileInfo) (HTTPResponse, error) {
r := c.req
// If the upload is completed, ...
if !info.SizeIsDeferred && info.Offset == info.Size {
// ... allow the data storage to finish and cleanup the upload
if err := upload.FinishUpload(c); err != nil {
return resp, err
}
// ... allow the hook callback to run before sending the response
if handler.config.PreFinishResponseCallback != nil {
resp2, err := handler.config.PreFinishResponseCallback(newHookEvent(info, r))
if err != nil {
return resp, err
}
resp = resp.MergeWith(resp2)
}
handler.Metrics.incUploadsFinished()
@ -715,68 +686,70 @@ func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, uplo
}
}
return resp, nil
}
// GetFile handles requests to download a file using a GET request. This is not
// part of the specification.
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
c := newContext(w, r)
id, err := extractIDFromPath(r.URL.Path)
if err != nil {
handler.sendError(c, err)
return
}
if handler.composer.UsesLocker {
lock, err := handler.lockUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
info, err := upload.GetInfo(c)
if err != nil {
handler.sendError(c, err)
return
}
contentType, contentDisposition := filterContentType(info)
resp := HTTPResponse{
StatusCode: http.StatusOK,
Headers: HTTPHeaders{
"Content-Length": strconv.FormatInt(info.Offset, 10),
"Content-Type": contentType,
"Content-Disposition": contentDisposition,
},
Body: "", // Body is intentionally left empty, and we copy it manually in later.
}
// If no data has been uploaded yet, respond with an empty "204 No Content" status.
if info.Offset == 0 {
resp.StatusCode = http.StatusNoContent
handler.sendResp(c, resp)
return
}
src, err := upload.GetReader(c)
if err != nil {
handler.sendError(c, err)
return
}
handler.sendResp(c, resp)
io.Copy(w, src)
src.Close()
}
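Because GetReader now returns an io.ReadCloser, GetFile can call src.Close() directly instead of probing for an optional io.Closer. For a custom data store the change is usually just the return type; a hypothetical file-backed upload:

package example

import (
	"context"
	"io"
	"os"
)

// diskUpload is an illustrative upload type; only GetReader's signature is
// prescribed by the new interface in this diff.
type diskUpload struct {
	path string
}

func (u *diskUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
	// *os.File implements io.ReadCloser, so it can be returned directly,
	// and the handler's src.Close() above will release it.
	return os.Open(u.path)
}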
// mimeInlineBrowserWhitelist is a map containing MIME types which should be
@ -842,52 +815,54 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st
// DelFile terminates an upload permanently.
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
c := newContext(w, r)
// Abort the request handling if the required interface is not implemented
if !handler.composer.UsesTerminater {
handler.sendError(c, ErrNotImplemented)
return
}
id, err := extractIDFromPath(r.URL.Path)
if err != nil {
handler.sendError(c, err)
return
}
if handler.composer.UsesLocker {
lock, err := handler.lockUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(c, id)
if err != nil {
handler.sendError(c, err)
return
}
var info FileInfo
if handler.config.NotifyTerminatedUploads {
info, err = upload.GetInfo(c)
if err != nil {
handler.sendError(w, r, err)
handler.sendError(c, err)
return
}
}
err = handler.terminateUpload(ctx, upload, info, r)
err = handler.terminateUpload(c, upload, info)
if err != nil {
handler.sendError(w, r, err)
handler.sendError(c, err)
return
}
handler.sendResp(w, r, http.StatusNoContent)
handler.sendResp(c, HTTPResponse{
StatusCode: http.StatusNoContent,
})
}
// terminateUpload passes a given upload to the DataStore's Terminater,
@@ -895,16 +870,16 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
// and updates the statistics.
// Note that the info argument is only needed if the terminated uploads
// notifications are enabled.
func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
func (handler *UnroutedHandler) terminateUpload(c *httpContext, upload Upload, info FileInfo) error {
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
err := terminatableUpload.Terminate(ctx)
err := terminatableUpload.Terminate(c)
if err != nil {
return err
}
if handler.config.NotifyTerminatedUploads {
handler.TerminatedUploads <- newHookEvent(info, r)
handler.TerminatedUploads <- newHookEvent(info, c.req)
}
handler.Metrics.incUploadsTerminated()
@@ -914,20 +889,20 @@ func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Uplo
// Send the error in the response body. The status code will be looked up in
// ErrStatusCodes. If none is found, 500 Internal Server Error is used.
func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request, err error) {
func (handler *UnroutedHandler) sendError(c *httpContext, err error) {
// Errors for read timeouts contain too much information which is not
// necessary for us and makes grouping for the metrics harder. The error
// message looks like: read tcp 127.0.0.1:1080->127.0.0.1:53673: i/o timeout
// Therefore, we use a common error message for all of them.
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
err = errReadTimeout
err = ErrReadTimeout
}
// Errors for connection resets also contain TCP details we don't need, e.g.:
// read tcp 127.0.0.1:1080->127.0.0.1:10023: read: connection reset by peer
// Therefore, we also trim those down.
if strings.HasSuffix(err.Error(), "read: connection reset by peer") {
err = errConnectionReset
err = ErrConnectionReset
}
// TODO: Decide if we should handle this in here, in body_reader or not at all.
@@ -949,31 +924,29 @@ func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request
// err = nil
//}
statusErr, ok := err.(HTTPError)
r := c.req
detailedErr, ok := err.(Error)
if !ok {
statusErr = NewHTTPError(err, http.StatusInternalServerError)
handler.log("InternalServerError", "message", err.Error(), "method", r.Method, "path", r.URL.Path, "requestId", getRequestId(r))
detailedErr = NewError("ERR_INTERNAL_SERVER_ERROR", err.Error(), http.StatusInternalServerError)
}
reason := append(statusErr.Body(), '\n')
// If we are sending the response for a HEAD request, ensure that we are not including
// any response body.
if r.Method == "HEAD" {
reason = nil
detailedErr.HTTPResponse.Body = ""
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("Content-Length", strconv.Itoa(len(reason)))
w.WriteHeader(statusErr.StatusCode())
w.Write(reason)
handler.log("ResponseOutgoing", "status", strconv.Itoa(statusErr.StatusCode()), "method", r.Method, "path", r.URL.Path, "error", err.Error(), "requestId", getRequestId(r))
handler.Metrics.incErrorsTotal(statusErr)
handler.sendResp(c, detailedErr.HTTPResponse)
handler.Metrics.incErrorsTotal(detailedErr)
}
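Since sendError now flows through the structured Error type, embedding applications can mint their own errors with stable codes using the NewError constructor seen above. A hedged sketch; the code and message are invented for illustration:

// Returned from a callback, this yields a 413 whose response body and
// tusd_errors_total label both carry the stable code.
var ErrQuotaExceeded = handler.NewError("ERR_QUOTA_EXCEEDED", "account quota exceeded", http.StatusRequestEntityTooLarge)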
// sendResp writes the header to w with the specified status code.
func (handler *UnroutedHandler) sendResp(w http.ResponseWriter, r *http.Request, status int) {
w.WriteHeader(status)
func (handler *UnroutedHandler) sendResp(c *httpContext, resp HTTPResponse) {
resp.writeTo(c.res)
handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path, "requestId", getRequestId(r))
handler.log("ResponseOutgoing", "status", strconv.Itoa(resp.StatusCode), "method", c.req.Method, "path", c.req.URL.Path, "requestId", getRequestId(c.req), "body", resp.Body)
}
// Make an absolute URLs to the given upload id. If the base path is absolute
@@ -1010,7 +983,7 @@ func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader *bod
previousOffset = hook.Upload.Offset
}
return
case <-time.After(1 * time.Second):
case <-time.After(handler.config.UploadProgressInterval):
hook.Upload.Offset = originalOffset + reader.bytesRead()
if hook.Upload.Offset != previousOffset {
handler.UploadProgress <- hook
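With the interval read from the configuration, deployments can trade progress-hook frequency against overhead. A usage sketch; UploadProgressInterval comes from this hunk, and NotifyUploadProgress is assumed unchanged from v1:

config := handler.Config{
	StoreComposer:          composer,
	NotifyUploadProgress:   true,
	UploadProgressInterval: 500 * time.Millisecond,
}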
@@ -1117,13 +1090,23 @@ func (handler *UnroutedHandler) validateNewUploadLengthHeaders(uploadLengthHeade
// lockUpload creates a new lock for the given upload ID and attempts to lock it.
// The created lock is returned if it was acquired successfully.
func (handler *UnroutedHandler) lockUpload(id string) (Lock, error) {
func (handler *UnroutedHandler) lockUpload(c *httpContext, id string) (Lock, error) {
lock, err := handler.composer.Locker.NewLock(id)
if err != nil {
return nil, err
}
if err := lock.Lock(); err != nil {
// TODO: Make lock timeout configurable
ctx, cancelContext := context.WithTimeout(context.Background(), 3*time.Second)
defer cancelContext()
releaseLock := func() {
if c.body != nil {
handler.log("UploadInterrupted", "id", id, "requestId", getRequestId(c.req))
c.body.closeWithError(ErrUploadInterrupted)
}
}
if err := lock.Lock(ctx, releaseLock); err != nil {
return nil, err
}
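A Locker implementation must now honor two contracts: the acquisition context (returning handler.ErrLockTimeout once it expires) and the requestRelease callback, which the locker invokes on the current holder when another request wants the same upload. A skeleton under those assumptions, not a complete implementation:

type myLock struct{ /* backend-specific state */ }

// Lock blocks until the lock is acquired or ctx is done. requestRelease is
// stored with the held lock so the locker can ask this holder to wind down
// when a competing request arrives.
func (l myLock) Lock(ctx context.Context, requestRelease func()) error {
	// acquire here; on <-ctx.Done() return handler.ErrLockTimeout
	return nil
}

// Unlock releases the lock and wakes any waiter.
func (l myLock) Unlock() error {
	return nil
}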

View File

@@ -13,7 +13,7 @@ import (
"github.com/tus/tusd/pkg/handler"
)
//go:generate mockgen -package handler_test -source utils_test.go -aux_files handler=datastore.go -destination=handler_mock_test.go
//go:generate mockgen -package handler_test -source utils_test.go -destination=handler_mock_test.go
// FullDataStore is an interface combining most interfaces for data stores.
// This is used by mockgen(1) to generate a mocked data store used for testing

View File

@@ -1,5 +1,6 @@
// Package memorylocker provides an in-memory locking mechanism.
//
// TODO: Update comment
// When multiple processes are attempting to access an upload, whether it be
// by reading or writing, a synchronization mechanism is required to prevent
// data corruption, especially to ensure correct offset values and the proper
@@ -11,6 +12,7 @@
package memorylocker
import (
"context"
"sync"
"github.com/tus/tusd/pkg/handler"
@@ -20,14 +22,19 @@ import (
// cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits.
type MemoryLocker struct {
locks map[string]struct{}
mutex sync.Mutex
locks map[string]lockEntry
mutex sync.RWMutex
}
type lockEntry struct {
lockReleased chan struct{}
requestRelease func()
}
// New creates a new in-memory locker.
func New() *MemoryLocker {
return &MemoryLocker{
locks: make(map[string]struct{}),
locks: make(map[string]lockEntry),
}
}
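Wiring the locker into a handler stays the same as before; a sketch assuming the UseIn helpers are unchanged from v1:

composer := handler.NewStoreComposer()
store.UseIn(composer) // any data store
locker := memorylocker.New()
locker.UseIn(composer)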
@@ -46,16 +53,40 @@ type memoryLock struct {
}
// Lock tries to obtain the exclusive lock.
func (lock memoryLock) Lock() error {
lock.locker.mutex.Lock()
defer lock.locker.mutex.Unlock()
func (lock memoryLock) Lock(ctx context.Context, requestRelease func()) error {
lock.locker.mutex.RLock()
entry, ok := lock.locker.locks[lock.id]
lock.locker.mutex.RUnlock()
// Ensure file is not locked
if _, ok := lock.locker.locks[lock.id]; ok {
return handler.ErrFileLocked
requestRelease:
if ok {
// TODO: Make this channel?
// TODO: Should we ensure this is only called once?
entry.requestRelease()
select {
case <-ctx.Done():
return handler.ErrLockTimeout
case <-entry.lockReleased:
}
}
lock.locker.locks[lock.id] = struct{}{}
lock.locker.mutex.Lock()
// Check that the lock has not already been created in the meantime
entry, ok = lock.locker.locks[lock.id]
if ok {
// Lock has been created in the meantime, so we must wait again until it is free
lock.locker.mutex.Unlock()
goto requestRelease
}
// No lock exists, so we can create it
entry = lockEntry{
lockReleased: make(chan struct{}),
requestRelease: requestRelease,
}
lock.locker.locks[lock.id] = entry
lock.locker.mutex.Unlock()
return nil
}
@@ -64,10 +95,14 @@ func (lock memoryLock) Lock() error {
func (lock memoryLock) Unlock() error {
lock.locker.mutex.Lock()
// Deleting a non-existing key does not end in unexpected errors or panic
// since this operation results in a no-op
lockReleased := lock.locker.locks[lock.id].lockReleased
// Delete the lock entry entirely
delete(lock.locker.locks, lock.id)
lock.locker.mutex.Unlock()
close(lockReleased)
return nil
}

View File

@@ -1,16 +1,17 @@
package memorylocker
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tus/tusd/pkg/handler"
)
var _ handler.Locker = &MemoryLocker{}
func TestMemoryLocker(t *testing.T) {
func TestMemoryLocker_LockAndUnlock(t *testing.T) {
a := assert.New(t)
locker := New()
@@ -18,13 +19,62 @@ func TestMemoryLocker(t *testing.T) {
lock1, err := locker.NewLock("one")
a.NoError(err)
a.NoError(lock1.Lock())
a.Equal(handler.ErrFileLocked, lock1.Lock())
a.NoError(lock1.Lock(context.Background(), func() {
panic("must not be called")
}))
a.NoError(lock1.Unlock())
}
func TestMemoryLocker_Timeout(t *testing.T) {
a := assert.New(t)
locker := New()
releaseRequestCalled := false
lock1, err := locker.NewLock("one")
a.NoError(err)
a.NoError(lock1.Lock(context.Background(), func() {
releaseRequestCalled = true
// We note that the function has been called, but do not
// release the lock
}))
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
defer cancel()
lock2, err := locker.NewLock("one")
a.NoError(err)
a.Equal(handler.ErrFileLocked, lock2.Lock())
err = lock2.Lock(ctx, func() {
panic("must not be called")
})
a.NoError(lock1.Unlock())
a.NoError(lock1.Unlock())
a.Equal(handler.ErrLockTimeout, err)
a.True(releaseRequestCalled)
}
func TestMemoryLocker_RequestUnlock(t *testing.T) {
a := assert.New(t)
locker := New()
releaseRequestCalled := false
lock1, err := locker.NewLock("one")
a.NoError(err)
a.NoError(lock1.Lock(context.Background(), func() {
releaseRequestCalled = true
<-time.After(10 * time.Millisecond)
a.NoError(lock1.Unlock())
}))
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
lock2, err := locker.NewLock("one")
a.NoError(err)
a.NoError(lock2.Lock(ctx, func() {
panic("must not be called")
}))
a.NoError(lock2.Unlock())
a.True(releaseRequestCalled)
}

View File

@@ -25,7 +25,7 @@ var (
errorsTotalDesc = prometheus.NewDesc(
"tusd_errors_total",
"Total number of errors per status.",
[]string{"status", "message"}, nil)
[]string{"status", "code"}, nil)
bytesReceivedDesc = prometheus.NewDesc(
"tusd_bytes_received",
"Number of bytes received for uploads.",
@@ -79,8 +79,8 @@ func (c Collector) Collect(metrics chan<- prometheus.Metric) {
errorsTotalDesc,
prometheus.CounterValue,
float64(atomic.LoadUint64(valuePtr)),
strconv.Itoa(httpError.StatusCode()),
httpError.Error(),
strconv.Itoa(httpError.StatusCode),
httpError.ErrorCode,
)
}
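Labeling by the stable error code instead of the free-form message keeps the series low-cardinality. Illustrative scrape output; the label values are examples, not taken from this diff:

// tusd_errors_total{status="500", code="ERR_INTERNAL_SERVER_ERROR"} 2
// tusd_errors_total{status="423", code="ERR_UPLOAD_LOCKED"} 7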

View File

@@ -1,475 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: v1/hook.proto
package v1
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Uploaded data
type Upload struct {
// Unique integer identifier of the uploaded file
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Total file size in bytes specified in the NewUpload call
Size int64 `protobuf:"varint,2,opt,name=Size,proto3" json:"Size,omitempty"`
// Indicates whether the total file size is deferred until later
SizeIsDeferred bool `protobuf:"varint,3,opt,name=SizeIsDeferred,proto3" json:"SizeIsDeferred,omitempty"`
// Offset in bytes (zero-based)
Offset int64 `protobuf:"varint,4,opt,name=Offset,proto3" json:"Offset,omitempty"`
MetaData map[string]string `protobuf:"bytes,5,rep,name=metaData,proto3" json:"metaData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Indicates that this is a partial upload which will later be used to form
// a final upload by concatenation. Partial uploads should not be processed
// when they are finished since they are only incomplete chunks of files.
IsPartial bool `protobuf:"varint,6,opt,name=isPartial,proto3" json:"isPartial,omitempty"`
// Indicates that this is a final upload
IsFinal bool `protobuf:"varint,7,opt,name=isFinal,proto3" json:"isFinal,omitempty"`
// If the upload is a final one (see IsFinal) this will be a non-empty
// ordered slice containing the ids of the uploads of which the final upload
// will consist after concatenation.
PartialUploads []string `protobuf:"bytes,8,rep,name=partialUploads,proto3" json:"partialUploads,omitempty"`
// Storage contains information about where the data storage saves the upload,
// for example a file path. The available values vary depending on what data
// store is used. This map may also be nil.
Storage map[string]string `protobuf:"bytes,9,rep,name=storage,proto3" json:"storage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Upload) Reset() { *m = Upload{} }
func (m *Upload) String() string { return proto.CompactTextString(m) }
func (*Upload) ProtoMessage() {}
func (*Upload) Descriptor() ([]byte, []int) {
return fileDescriptor_581082325ef044c1, []int{0}
}
func (m *Upload) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Upload.Unmarshal(m, b)
}
func (m *Upload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Upload.Marshal(b, m, deterministic)
}
func (m *Upload) XXX_Merge(src proto.Message) {
xxx_messageInfo_Upload.Merge(m, src)
}
func (m *Upload) XXX_Size() int {
return xxx_messageInfo_Upload.Size(m)
}
func (m *Upload) XXX_DiscardUnknown() {
xxx_messageInfo_Upload.DiscardUnknown(m)
}
var xxx_messageInfo_Upload proto.InternalMessageInfo
func (m *Upload) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Upload) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *Upload) GetSizeIsDeferred() bool {
if m != nil {
return m.SizeIsDeferred
}
return false
}
func (m *Upload) GetOffset() int64 {
if m != nil {
return m.Offset
}
return 0
}
func (m *Upload) GetMetaData() map[string]string {
if m != nil {
return m.MetaData
}
return nil
}
func (m *Upload) GetIsPartial() bool {
if m != nil {
return m.IsPartial
}
return false
}
func (m *Upload) GetIsFinal() bool {
if m != nil {
return m.IsFinal
}
return false
}
func (m *Upload) GetPartialUploads() []string {
if m != nil {
return m.PartialUploads
}
return nil
}
func (m *Upload) GetStorage() map[string]string {
if m != nil {
return m.Storage
}
return nil
}
type HTTPRequest struct {
// Method is the HTTP method, e.g. POST or PATCH
Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"`
// URI is the full HTTP request URI, e.g. /files/fooo
Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
// RemoteAddr contains the network address that sent the request
RemoteAddr string `protobuf:"bytes,3,opt,name=remoteAddr,proto3" json:"remoteAddr,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HTTPRequest) Reset() { *m = HTTPRequest{} }
func (m *HTTPRequest) String() string { return proto.CompactTextString(m) }
func (*HTTPRequest) ProtoMessage() {}
func (*HTTPRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_581082325ef044c1, []int{1}
}
func (m *HTTPRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HTTPRequest.Unmarshal(m, b)
}
func (m *HTTPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HTTPRequest.Marshal(b, m, deterministic)
}
func (m *HTTPRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HTTPRequest.Merge(m, src)
}
func (m *HTTPRequest) XXX_Size() int {
return xxx_messageInfo_HTTPRequest.Size(m)
}
func (m *HTTPRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HTTPRequest.DiscardUnknown(m)
}
var xxx_messageInfo_HTTPRequest proto.InternalMessageInfo
func (m *HTTPRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *HTTPRequest) GetUri() string {
if m != nil {
return m.Uri
}
return ""
}
func (m *HTTPRequest) GetRemoteAddr() string {
if m != nil {
return m.RemoteAddr
}
return ""
}
// Hook's data
type Hook struct {
// Upload contains information about the upload that caused this hook
// to be fired.
Upload *Upload `protobuf:"bytes,1,opt,name=upload,proto3" json:"upload,omitempty"`
// HTTPRequest contains details about the HTTP request that reached
// tusd.
HttpRequest *HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"`
// The hook name
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Hook) Reset() { *m = Hook{} }
func (m *Hook) String() string { return proto.CompactTextString(m) }
func (*Hook) ProtoMessage() {}
func (*Hook) Descriptor() ([]byte, []int) {
return fileDescriptor_581082325ef044c1, []int{2}
}
func (m *Hook) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Hook.Unmarshal(m, b)
}
func (m *Hook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Hook.Marshal(b, m, deterministic)
}
func (m *Hook) XXX_Merge(src proto.Message) {
xxx_messageInfo_Hook.Merge(m, src)
}
func (m *Hook) XXX_Size() int {
return xxx_messageInfo_Hook.Size(m)
}
func (m *Hook) XXX_DiscardUnknown() {
xxx_messageInfo_Hook.DiscardUnknown(m)
}
var xxx_messageInfo_Hook proto.InternalMessageInfo
func (m *Hook) GetUpload() *Upload {
if m != nil {
return m.Upload
}
return nil
}
func (m *Hook) GetHttpRequest() *HTTPRequest {
if m != nil {
return m.HttpRequest
}
return nil
}
func (m *Hook) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request data to send hook
type SendRequest struct {
// The hook data
Hook *Hook `protobuf:"bytes,1,opt,name=hook,proto3" json:"hook,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendRequest) Reset() { *m = SendRequest{} }
func (m *SendRequest) String() string { return proto.CompactTextString(m) }
func (*SendRequest) ProtoMessage() {}
func (*SendRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_581082325ef044c1, []int{3}
}
func (m *SendRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendRequest.Unmarshal(m, b)
}
func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic)
}
func (m *SendRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendRequest.Merge(m, src)
}
func (m *SendRequest) XXX_Size() int {
return xxx_messageInfo_SendRequest.Size(m)
}
func (m *SendRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SendRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SendRequest proto.InternalMessageInfo
func (m *SendRequest) GetHook() *Hook {
if m != nil {
return m.Hook
}
return nil
}
// Response that contains data for the sent hook
type SendResponse struct {
// The response of the hook.
Response *any.Any `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendResponse) Reset() { *m = SendResponse{} }
func (m *SendResponse) String() string { return proto.CompactTextString(m) }
func (*SendResponse) ProtoMessage() {}
func (*SendResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_581082325ef044c1, []int{4}
}
func (m *SendResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendResponse.Unmarshal(m, b)
}
func (m *SendResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendResponse.Marshal(b, m, deterministic)
}
func (m *SendResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendResponse.Merge(m, src)
}
func (m *SendResponse) XXX_Size() int {
return xxx_messageInfo_SendResponse.Size(m)
}
func (m *SendResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SendResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SendResponse proto.InternalMessageInfo
func (m *SendResponse) GetResponse() *any.Any {
if m != nil {
return m.Response
}
return nil
}
func init() {
proto.RegisterType((*Upload)(nil), "v1.Upload")
proto.RegisterMapType((map[string]string)(nil), "v1.Upload.MetaDataEntry")
proto.RegisterMapType((map[string]string)(nil), "v1.Upload.StorageEntry")
proto.RegisterType((*HTTPRequest)(nil), "v1.HTTPRequest")
proto.RegisterType((*Hook)(nil), "v1.Hook")
proto.RegisterType((*SendRequest)(nil), "v1.SendRequest")
proto.RegisterType((*SendResponse)(nil), "v1.SendResponse")
}
func init() {
proto.RegisterFile("v1/hook.proto", fileDescriptor_581082325ef044c1)
}
var fileDescriptor_581082325ef044c1 = []byte{
// 477 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4d, 0x6f, 0xd3, 0x40,
0x10, 0x25, 0xb1, 0xeb, 0xd8, 0xe3, 0xb6, 0x54, 0xab, 0x0a, 0x96, 0xa8, 0x42, 0x96, 0x0f, 0xc8,
0x52, 0x25, 0x07, 0x07, 0x0e, 0x28, 0x5c, 0xa8, 0x54, 0x50, 0x39, 0x20, 0xaa, 0x4d, 0x11, 0xe7,
0x2d, 0xde, 0x24, 0x56, 0x1c, 0xaf, 0xbb, 0x5e, 0x5b, 0x0a, 0x3f, 0x8a, 0xdf, 0x88, 0xf6, 0xc3,
0x8d, 0xe9, 0x8d, 0x93, 0x67, 0xde, 0xbc, 0x79, 0xf3, 0x3c, 0x3b, 0x70, 0xd2, 0x65, 0xb3, 0x0d,
0xe7, 0xdb, 0xb4, 0x16, 0x5c, 0x72, 0x34, 0xee, 0xb2, 0xe9, 0xab, 0x35, 0xe7, 0xeb, 0x92, 0xcd,
0x34, 0x72, 0xdf, 0xae, 0x66, 0xb4, 0xda, 0x9b, 0x72, 0xfc, 0xc7, 0x01, 0xef, 0x47, 0x5d, 0x72,
0x9a, 0xa3, 0x53, 0x18, 0x17, 0x39, 0x1e, 0x45, 0xa3, 0x24, 0x20, 0xe3, 0x22, 0x47, 0x08, 0xdc,
0x65, 0xf1, 0x9b, 0xe1, 0x71, 0x34, 0x4a, 0x1c, 0xa2, 0x63, 0xf4, 0x06, 0x4e, 0xd5, 0xf7, 0x6b,
0x73, 0xcd, 0x56, 0x4c, 0x08, 0x96, 0x63, 0x27, 0x1a, 0x25, 0x3e, 0x79, 0x82, 0xa2, 0x17, 0xe0,
0x7d, 0x5f, 0xad, 0x1a, 0x26, 0xb1, 0xab, 0xbb, 0x6d, 0x86, 0xde, 0x83, 0xbf, 0x63, 0x92, 0x5e,
0x53, 0x49, 0xf1, 0x51, 0xe4, 0x24, 0xe1, 0x1c, 0xa7, 0x5d, 0x96, 0x1a, 0x07, 0xe9, 0x37, 0x5b,
0xfa, 0x5c, 0x49, 0xb1, 0x27, 0x8f, 0x4c, 0x74, 0x01, 0x41, 0xd1, 0xdc, 0x52, 0x21, 0x0b, 0x5a,
0x62, 0x4f, 0x0f, 0x3c, 0x00, 0x08, 0xc3, 0xa4, 0x68, 0xbe, 0x14, 0x15, 0x2d, 0xf1, 0x44, 0xd7,
0xfa, 0x54, 0xb9, 0xad, 0x0d, 0xc9, 0x0c, 0x68, 0xb0, 0x1f, 0x39, 0x49, 0x40, 0x9e, 0xa0, 0x28,
0x83, 0x49, 0x23, 0xb9, 0xa0, 0x6b, 0x86, 0x03, 0x6d, 0xea, 0xe5, 0xc0, 0xd4, 0xd2, 0x54, 0x8c,
0xa7, 0x9e, 0x37, 0xfd, 0x08, 0x27, 0xff, 0xb8, 0x45, 0x67, 0xe0, 0x6c, 0xd9, 0xde, 0xae, 0x4f,
0x85, 0xe8, 0x1c, 0x8e, 0x3a, 0x5a, 0xb6, 0x66, 0x81, 0x01, 0x31, 0xc9, 0x62, 0xfc, 0x61, 0x34,
0x5d, 0xc0, 0xf1, 0x50, 0xf5, 0x7f, 0x7a, 0xe3, 0x9f, 0x10, 0xde, 0xdc, 0xdd, 0xdd, 0x12, 0xf6,
0xd0, 0xb2, 0x46, 0xaa, 0x45, 0xef, 0x98, 0xdc, 0xf0, 0xfe, 0xe1, 0x6c, 0xa6, 0x24, 0x5b, 0x51,
0xd8, 0x76, 0x15, 0xa2, 0xd7, 0x00, 0x82, 0xed, 0xb8, 0x64, 0x57, 0x79, 0x2e, 0xf4, 0xb3, 0x05,
0x64, 0x80, 0xc4, 0x0f, 0xe0, 0xde, 0x70, 0xbe, 0x45, 0x31, 0x78, 0xad, 0xfe, 0x73, 0xad, 0x18,
0xce, 0xe1, 0xb0, 0x0b, 0x62, 0x2b, 0x28, 0x83, 0x70, 0x23, 0x65, 0x6d, 0x4d, 0xe8, 0x29, 0xe1,
0xfc, 0xb9, 0x22, 0x0e, 0xbc, 0x91, 0x21, 0x47, 0x5d, 0x53, 0x45, 0x77, 0xcc, 0x0e, 0xd6, 0x71,
0x7c, 0x09, 0xe1, 0x92, 0x55, 0x79, 0x4f, 0xb9, 0x00, 0x57, 0x1d, 0xae, 0x9d, 0xeb, 0x6b, 0x39,
0xce, 0xb7, 0x44, 0xa3, 0xf1, 0x27, 0x38, 0x36, 0xe4, 0xa6, 0xe6, 0x55, 0xc3, 0xd0, 0x5b, 0xf0,
0x85, 0x8d, 0x6d, 0xc7, 0x79, 0x6a, 0xee, 0x3c, 0xed, 0xef, 0x3c, 0xbd, 0xaa, 0xf6, 0xe4, 0x91,
0x35, 0x5f, 0x40, 0xa8, 0xf4, 0x96, 0x4c, 0x74, 0xc5, 0x2f, 0x86, 0x2e, 0xc1, 0x55, 0x82, 0x48,
0xfb, 0x1e, 0xf8, 0x98, 0x9e, 0x1d, 0x00, 0xd3, 0x19, 0x3f, 0xbb, 0xf7, 0xb4, 0xe6, 0xbb, 0xbf,
0x01, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xd4, 0x14, 0x0d, 0x5e, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// HookServiceClient is the client API for HookService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HookServiceClient interface {
// Sends a hook
Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error)
}
type hookServiceClient struct {
cc grpc.ClientConnInterface
}
func NewHookServiceClient(cc grpc.ClientConnInterface) HookServiceClient {
return &hookServiceClient{cc}
}
func (c *hookServiceClient) Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) {
out := new(SendResponse)
err := c.cc.Invoke(ctx, "/v1.HookService/Send", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// HookServiceServer is the server API for HookService service.
type HookServiceServer interface {
// Sends a hook
Send(context.Context, *SendRequest) (*SendResponse, error)
}
// UnimplementedHookServiceServer can be embedded to have forward compatible implementations.
type UnimplementedHookServiceServer struct {
}
func (*UnimplementedHookServiceServer) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}
func RegisterHookServiceServer(s *grpc.Server, srv HookServiceServer) {
s.RegisterService(&_HookService_serviceDesc, srv)
}
func _HookService_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HookServiceServer).Send(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1.HookService/Send",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HookServiceServer).Send(ctx, req.(*SendRequest))
}
return interceptor(ctx, in, info, handler)
}
var _HookService_serviceDesc = grpc.ServiceDesc{
ServiceName: "v1.HookService",
HandlerType: (*HookServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Send",
Handler: _HookService_Send_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "v1/hook.proto",
}

pkg/proto/v2/hook.pb.go Normal file (+555 lines)
View File

@@ -0,0 +1,555 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: v2/hook.proto
package v2
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Hook's data
type HookRequest struct {
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Event *Event `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HookRequest) Reset() { *m = HookRequest{} }
func (m *HookRequest) String() string { return proto.CompactTextString(m) }
func (*HookRequest) ProtoMessage() {}
func (*HookRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_938ab51c60d4b622, []int{0}
}
func (m *HookRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HookRequest.Unmarshal(m, b)
}
func (m *HookRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HookRequest.Marshal(b, m, deterministic)
}
func (m *HookRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HookRequest.Merge(m, src)
}
func (m *HookRequest) XXX_Size() int {
return xxx_messageInfo_HookRequest.Size(m)
}
func (m *HookRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HookRequest.DiscardUnknown(m)
}
var xxx_messageInfo_HookRequest proto.InternalMessageInfo
func (m *HookRequest) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *HookRequest) GetEvent() *Event {
if m != nil {
return m.Event
}
return nil
}
type Event struct {
Upload *FileInfo `protobuf:"bytes,1,opt,name=upload,proto3" json:"upload,omitempty"`
HttpRequest *HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Event) Reset() { *m = Event{} }
func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) {
return fileDescriptor_938ab51c60d4b622, []int{1}
}
func (m *Event) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Event.Unmarshal(m, b)
}
func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Event.Marshal(b, m, deterministic)
}
func (m *Event) XXX_Merge(src proto.Message) {
xxx_messageInfo_Event.Merge(m, src)
}
func (m *Event) XXX_Size() int {
return xxx_messageInfo_Event.Size(m)
}
func (m *Event) XXX_DiscardUnknown() {
xxx_messageInfo_Event.DiscardUnknown(m)
}
var xxx_messageInfo_Event proto.InternalMessageInfo
func (m *Event) GetUpload() *FileInfo {
if m != nil {
return m.Upload
}
return nil
}
func (m *Event) GetHttpRequest() *HTTPRequest {
if m != nil {
return m.HttpRequest
}
return nil
}
// TODO: Keep consistent naming capitalization
// Uploaded data
type FileInfo struct {
// Unique integer identifier of the uploaded file
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Total file size in bytes specified in the NewUpload call
Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
// Indicates whether the total file size is deferred until later
SizeIsDeferred bool `protobuf:"varint,3,opt,name=sizeIsDeferred,proto3" json:"sizeIsDeferred,omitempty"`
// Offset in bytes (zero-based)
Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
MetaData map[string]string `protobuf:"bytes,5,rep,name=metaData,proto3" json:"metaData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Indicates that this is a partial upload which will later be used to form
// a final upload by concatenation. Partial uploads should not be processed
// when they are finished since they are only incomplete chunks of files.
IsPartial bool `protobuf:"varint,6,opt,name=isPartial,proto3" json:"isPartial,omitempty"`
// Indicates that this is a final upload
IsFinal bool `protobuf:"varint,7,opt,name=isFinal,proto3" json:"isFinal,omitempty"`
// If the upload is a final one (see IsFinal) this will be a non-empty
// ordered slice containing the ids of the uploads of which the final upload
// will consist after concatenation.
PartialUploads []string `protobuf:"bytes,8,rep,name=partialUploads,proto3" json:"partialUploads,omitempty"`
// Storage contains information about where the data storage saves the upload,
// for example a file path. The available values vary depending on what data
// store is used. This map may also be nil.
Storage map[string]string `protobuf:"bytes,9,rep,name=storage,proto3" json:"storage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *FileInfo) Reset() { *m = FileInfo{} }
func (m *FileInfo) String() string { return proto.CompactTextString(m) }
func (*FileInfo) ProtoMessage() {}
func (*FileInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_938ab51c60d4b622, []int{2}
}
func (m *FileInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FileInfo.Unmarshal(m, b)
}
func (m *FileInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FileInfo.Marshal(b, m, deterministic)
}
func (m *FileInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_FileInfo.Merge(m, src)
}
func (m *FileInfo) XXX_Size() int {
return xxx_messageInfo_FileInfo.Size(m)
}
func (m *FileInfo) XXX_DiscardUnknown() {
xxx_messageInfo_FileInfo.DiscardUnknown(m)
}
var xxx_messageInfo_FileInfo proto.InternalMessageInfo
func (m *FileInfo) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *FileInfo) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *FileInfo) GetSizeIsDeferred() bool {
if m != nil {
return m.SizeIsDeferred
}
return false
}
func (m *FileInfo) GetOffset() int64 {
if m != nil {
return m.Offset
}
return 0
}
func (m *FileInfo) GetMetaData() map[string]string {
if m != nil {
return m.MetaData
}
return nil
}
func (m *FileInfo) GetIsPartial() bool {
if m != nil {
return m.IsPartial
}
return false
}
func (m *FileInfo) GetIsFinal() bool {
if m != nil {
return m.IsFinal
}
return false
}
func (m *FileInfo) GetPartialUploads() []string {
if m != nil {
return m.PartialUploads
}
return nil
}
func (m *FileInfo) GetStorage() map[string]string {
if m != nil {
return m.Storage
}
return nil
}
type HTTPRequest struct {
// Method is the HTTP method, e.g. POST or PATCH
Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"`
// URI is the full HTTP request URI, e.g. /files/fooo
Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
// RemoteAddr contains the network address that sent the request
RemoteAddr string `protobuf:"bytes,3,opt,name=remoteAddr,proto3" json:"remoteAddr,omitempty"`
Header map[string]string `protobuf:"bytes,4,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HTTPRequest) Reset() { *m = HTTPRequest{} }
func (m *HTTPRequest) String() string { return proto.CompactTextString(m) }
func (*HTTPRequest) ProtoMessage() {}
func (*HTTPRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_938ab51c60d4b622, []int{3}
}
func (m *HTTPRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HTTPRequest.Unmarshal(m, b)
}
func (m *HTTPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HTTPRequest.Marshal(b, m, deterministic)
}
func (m *HTTPRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HTTPRequest.Merge(m, src)
}
func (m *HTTPRequest) XXX_Size() int {
return xxx_messageInfo_HTTPRequest.Size(m)
}
func (m *HTTPRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HTTPRequest.DiscardUnknown(m)
}
var xxx_messageInfo_HTTPRequest proto.InternalMessageInfo
func (m *HTTPRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *HTTPRequest) GetUri() string {
if m != nil {
return m.Uri
}
return ""
}
func (m *HTTPRequest) GetRemoteAddr() string {
if m != nil {
return m.RemoteAddr
}
return ""
}
func (m *HTTPRequest) GetHeader() map[string]string {
if m != nil {
return m.Header
}
return nil
}
type HookResponse struct {
HttpResponse *HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"`
RejectUpload bool `protobuf:"varint,2,opt,name=rejectUpload,proto3" json:"rejectUpload,omitempty"`
StopUpload bool `protobuf:"varint,3,opt,name=stopUpload,proto3" json:"stopUpload,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HookResponse) Reset() { *m = HookResponse{} }
func (m *HookResponse) String() string { return proto.CompactTextString(m) }
func (*HookResponse) ProtoMessage() {}
func (*HookResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_938ab51c60d4b622, []int{4}
}
func (m *HookResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HookResponse.Unmarshal(m, b)
}
func (m *HookResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HookResponse.Marshal(b, m, deterministic)
}
func (m *HookResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_HookResponse.Merge(m, src)
}
func (m *HookResponse) XXX_Size() int {
return xxx_messageInfo_HookResponse.Size(m)
}
func (m *HookResponse) XXX_DiscardUnknown() {
xxx_messageInfo_HookResponse.DiscardUnknown(m)
}
var xxx_messageInfo_HookResponse proto.InternalMessageInfo
func (m *HookResponse) GetHttpResponse() *HTTPResponse {
if m != nil {
return m.HttpResponse
}
return nil
}
func (m *HookResponse) GetRejectUpload() bool {
if m != nil {
return m.RejectUpload
}
return false
}
func (m *HookResponse) GetStopUpload() bool {
if m != nil {
return m.StopUpload
}
return false
}
type HTTPResponse struct {
StatusCode int64 `protobuf:"varint,1,opt,name=statusCode,proto3" json:"statusCode,omitempty"`
Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Body string `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HTTPResponse) Reset() { *m = HTTPResponse{} }
func (m *HTTPResponse) String() string { return proto.CompactTextString(m) }
func (*HTTPResponse) ProtoMessage() {}
func (*HTTPResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_938ab51c60d4b622, []int{5}
}
func (m *HTTPResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HTTPResponse.Unmarshal(m, b)
}
func (m *HTTPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HTTPResponse.Marshal(b, m, deterministic)
}
func (m *HTTPResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_HTTPResponse.Merge(m, src)
}
func (m *HTTPResponse) XXX_Size() int {
return xxx_messageInfo_HTTPResponse.Size(m)
}
func (m *HTTPResponse) XXX_DiscardUnknown() {
xxx_messageInfo_HTTPResponse.DiscardUnknown(m)
}
var xxx_messageInfo_HTTPResponse proto.InternalMessageInfo
func (m *HTTPResponse) GetStatusCode() int64 {
if m != nil {
return m.StatusCode
}
return 0
}
func (m *HTTPResponse) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
func (m *HTTPResponse) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func init() {
proto.RegisterType((*HookRequest)(nil), "v2.HookRequest")
proto.RegisterType((*Event)(nil), "v2.Event")
proto.RegisterType((*FileInfo)(nil), "v2.FileInfo")
proto.RegisterMapType((map[string]string)(nil), "v2.FileInfo.MetaDataEntry")
proto.RegisterMapType((map[string]string)(nil), "v2.FileInfo.StorageEntry")
proto.RegisterType((*HTTPRequest)(nil), "v2.HTTPRequest")
proto.RegisterMapType((map[string]string)(nil), "v2.HTTPRequest.HeaderEntry")
proto.RegisterType((*HookResponse)(nil), "v2.HookResponse")
proto.RegisterType((*HTTPResponse)(nil), "v2.HTTPResponse")
proto.RegisterMapType((map[string]string)(nil), "v2.HTTPResponse.HeadersEntry")
}
func init() {
proto.RegisterFile("v2/hook.proto", fileDescriptor_938ab51c60d4b622)
}
var fileDescriptor_938ab51c60d4b622 = []byte{
// 578 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdb, 0x6e, 0xd3, 0x40,
0x10, 0xc5, 0x71, 0x73, 0xf1, 0x38, 0x2d, 0xd5, 0x0a, 0xa1, 0x25, 0xdc, 0x22, 0x0b, 0xa1, 0x3c,
0x05, 0xd5, 0x45, 0x5c, 0xca, 0x0b, 0x97, 0xb6, 0x4a, 0x1f, 0x90, 0xaa, 0xa5, 0xbc, 0xb3, 0xc5,
0x13, 0x62, 0xe2, 0x7a, 0xcd, 0xee, 0xc6, 0x52, 0xf8, 0x02, 0x3e, 0x08, 0x89, 0x4f, 0xe0, 0xb7,
0xd0, 0x5e, 0x42, 0x9c, 0xbc, 0xe5, 0xc9, 0x3b, 0x67, 0xce, 0xcc, 0x9e, 0x3d, 0x3b, 0x5e, 0xd8,
0xaf, 0xd3, 0x67, 0x33, 0x21, 0xe6, 0xe3, 0x4a, 0x0a, 0x2d, 0x48, 0xab, 0x4e, 0x93, 0xf7, 0x10,
0x4f, 0x84, 0x98, 0x33, 0xfc, 0xb1, 0x40, 0xa5, 0x09, 0x81, 0x3d, 0xbd, 0xac, 0x90, 0x06, 0xc3,
0x60, 0x14, 0x31, 0xbb, 0x26, 0x8f, 0xa1, 0x8d, 0x35, 0x96, 0x9a, 0xb6, 0x86, 0xc1, 0x28, 0x4e,
0xa3, 0x71, 0x9d, 0x8e, 0xcf, 0x0c, 0xc0, 0x1c, 0x9e, 0x7c, 0x81, 0xb6, 0x8d, 0xc9, 0x13, 0xe8,
0x2c, 0xaa, 0x42, 0xf0, 0xcc, 0xd6, 0xc7, 0x69, 0xdf, 0x50, 0xcf, 0xf3, 0x02, 0x2f, 0xca, 0xa9,
0x60, 0x3e, 0x47, 0x8e, 0x20, 0x9e, 0x69, 0x5d, 0xf9, 0x2d, 0x7d, 0xd7, 0xdb, 0x86, 0x3a, 0xb9,
0xba, 0xba, 0xf4, 0x30, 0x6b, 0x72, 0x92, 0xdf, 0x21, 0xf4, 0x56, 0x7d, 0xc8, 0x01, 0xb4, 0xf2,
0xcc, 0x2b, 0x6c, 0xe5, 0x99, 0xd1, 0xac, 0xf2, 0x9f, 0x68, 0x1b, 0x85, 0xcc, 0xae, 0xc9, 0x53,
0x38, 0x30, 0xdf, 0x0b, 0x75, 0x8a, 0x53, 0x94, 0x12, 0x33, 0x1a, 0x0e, 0x83, 0x51, 0x8f, 0x6d,
0xa1, 0xe4, 0x2e, 0x74, 0xc4, 0x74, 0xaa, 0x50, 0xd3, 0x3d, 0x5b, 0xed, 0x23, 0xf2, 0x02, 0x7a,
0x37, 0xa8, 0xf9, 0x29, 0xd7, 0x9c, 0xb6, 0x87, 0xe1, 0x28, 0x4e, 0x07, 0xcd, 0xb3, 0x8c, 0x3f,
0xfa, 0xe4, 0x59, 0xa9, 0xe5, 0x92, 0xfd, 0xe7, 0x92, 0x07, 0x10, 0xe5, 0xea, 0x92, 0x4b, 0x9d,
0xf3, 0x82, 0x76, 0xec, 0x96, 0x6b, 0x80, 0x50, 0xe8, 0xe6, 0xea, 0x3c, 0x2f, 0x79, 0x41, 0xbb,
0x36, 0xb7, 0x0a, 0x8d, 0xde, 0xca, 0x91, 0x3e, 0x5b, 0x93, 0x14, 0xed, 0x0d, 0xc3, 0x51, 0xc4,
0xb6, 0x50, 0x72, 0x0c, 0x5d, 0xa5, 0x85, 0xe4, 0xdf, 0x90, 0x46, 0x56, 0xd6, 0xbd, 0x0d, 0x59,
0x9f, 0x5c, 0xce, 0xa9, 0x5a, 0x31, 0x07, 0x6f, 0x60, 0x7f, 0x43, 0x2f, 0x39, 0x84, 0x70, 0x8e,
0x4b, 0x6f, 0xa1, 0x59, 0x92, 0x3b, 0xd0, 0xae, 0x79, 0xb1, 0x70, 0x26, 0x46, 0xcc, 0x05, 0x27,
0xad, 0x57, 0xc1, 0xe0, 0x04, 0xfa, 0xcd, 0xae, 0xbb, 0xd4, 0x26, 0x7f, 0x03, 0x88, 0x1b, 0x77,
0x6a, 0xdc, 0xbe, 0x41, 0x3d, 0x13, 0xab, 0xdb, 0xf3, 0x91, 0xe9, 0xb9, 0x90, 0xb9, 0xaf, 0x37,
0x4b, 0xf2, 0x08, 0x40, 0xe2, 0x8d, 0xd0, 0xf8, 0x2e, 0xcb, 0xa4, 0xbd, 0xbb, 0x88, 0x35, 0x10,
0x72, 0x0c, 0x9d, 0x19, 0xf2, 0x0c, 0x25, 0xdd, 0xb3, 0x36, 0xdc, 0xdf, 0x1a, 0x9f, 0xf1, 0xc4,
0x66, 0x9d, 0x11, 0x9e, 0x3a, 0x78, 0x0d, 0x71, 0x03, 0xde, 0xe9, 0x24, 0xbf, 0x02, 0xe8, 0xbb,
0xff, 0x44, 0x55, 0xa2, 0x54, 0x48, 0x9e, 0x43, 0xdf, 0x0d, 0xa8, 0x8b, 0xfd, 0xc0, 0x1f, 0xae,
0x65, 0x38, 0x9c, 0x6d, 0xb0, 0x48, 0x02, 0x7d, 0x89, 0xdf, 0xf1, 0xab, 0x76, 0xf7, 0x69, 0xf7,
0xe9, 0xb1, 0x0d, 0xcc, 0x1c, 0x5d, 0x69, 0x51, 0x79, 0x86, 0x1b, 0xdb, 0x06, 0x92, 0xfc, 0x31,
0x52, 0x1a, 0x5b, 0xb8, 0x02, 0xae, 0x17, 0xea, 0x83, 0xc8, 0x9c, 0x90, 0x90, 0x35, 0x10, 0xf2,
0x12, 0xba, 0xce, 0x00, 0x45, 0x5b, 0xd6, 0xac, 0x87, 0xdb, 0x2a, 0xbd, 0x5b, 0xca, 0xcf, 0x8d,
0x67, 0x9b, 0x1f, 0xeb, 0x5a, 0x64, 0x4b, 0x6f, 0xbf, 0x5d, 0x9b, 0x71, 0x68, 0x92, 0x77, 0x31,
0x31, 0x7d, 0xeb, 0xde, 0x9a, 0x09, 0x2f, 0xb3, 0x02, 0x25, 0x39, 0x02, 0xb8, 0x28, 0x6b, 0x31,
0x47, 0x03, 0x12, 0xf7, 0x00, 0xac, 0x9f, 0xa2, 0xc1, 0xe1, 0x1a, 0x70, 0x2a, 0x93, 0x5b, 0xd7,
0x1d, 0xfb, 0x70, 0x1d, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x73, 0x61, 0x1c, 0xc9, 0x04,
0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// HookHandlerClient is the client API for HookHandler service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HookHandlerClient interface {
// Sends a hook
InvokeHook(ctx context.Context, in *HookRequest, opts ...grpc.CallOption) (*HookResponse, error)
}
type hookHandlerClient struct {
cc grpc.ClientConnInterface
}
func NewHookHandlerClient(cc grpc.ClientConnInterface) HookHandlerClient {
return &hookHandlerClient{cc}
}
func (c *hookHandlerClient) InvokeHook(ctx context.Context, in *HookRequest, opts ...grpc.CallOption) (*HookResponse, error) {
out := new(HookResponse)
err := c.cc.Invoke(ctx, "/v2.HookHandler/InvokeHook", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// HookHandlerServer is the server API for HookHandler service.
type HookHandlerServer interface {
// Sends a hook
InvokeHook(context.Context, *HookRequest) (*HookResponse, error)
}
// UnimplementedHookHandlerServer can be embedded to have forward compatible implementations.
type UnimplementedHookHandlerServer struct {
}
func (*UnimplementedHookHandlerServer) InvokeHook(ctx context.Context, req *HookRequest) (*HookResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method InvokeHook not implemented")
}
func RegisterHookHandlerServer(s *grpc.Server, srv HookHandlerServer) {
s.RegisterService(&_HookHandler_serviceDesc, srv)
}
func _HookHandler_InvokeHook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HookRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HookHandlerServer).InvokeHook(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v2.HookHandler/InvokeHook",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HookHandlerServer).InvokeHook(ctx, req.(*HookRequest))
}
return interceptor(ctx, in, info, handler)
}
var _HookHandler_serviceDesc = grpc.ServiceDesc{
ServiceName: "v2.HookHandler",
HandlerType: (*HookHandlerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "InvokeHook",
Handler: _HookHandler_InvokeHook_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "v2/hook.proto",
}
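A hook server built on these v2 bindings could look like the following sketch; the import alias v2 refers to the generated package above, and the pre-create policy is purely illustrative:

type hookServer struct {
	v2.UnimplementedHookHandlerServer
}

func (hookServer) InvokeHook(ctx context.Context, req *v2.HookRequest) (*v2.HookResponse, error) {
	// Example policy: reject uploads that defer their total size.
	if req.Type == "pre-create" && req.Event.Upload.SizeIsDeferred {
		return &v2.HookResponse{RejectUpload: true}, nil
	}
	return &v2.HookResponse{}, nil
}

// registered with: v2.RegisterHookHandlerServer(grpcServer, hookServer{})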

pkg/s3store/minio_s3_api.go Normal file (+180 lines)
View File

@@ -0,0 +1,180 @@
package s3store
import (
"context"
"errors"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/minio/minio-go/v7"
)
type MinioS3API struct {
client *minio.Core
}
func NewMinioS3API(client *minio.Core) S3API {
return MinioS3API{
client: client,
}
}
func (s MinioS3API) PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error) {
var objectSize int64
if input.ContentLength != nil {
objectSize = *input.ContentLength
} else {
size, err := input.Body.Seek(0, os.SEEK_END)
if err != nil {
return nil, err
}
_, err = input.Body.Seek(0, os.SEEK_SET)
if err != nil {
return nil, err
}
objectSize = size
}
// TODO: Should we use the more low-level Core.PutObject here?
_, err := s.client.Client.PutObject(ctx, *input.Bucket, *input.Key, input.Body, objectSize, minio.PutObjectOptions{
DisableMultipart: true,
SendContentMd5: false, // TODO: Make configurable
})
if err != nil {
return nil, err
}
return &s3.PutObjectOutput{}, nil
}
func (s MinioS3API) ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opt ...request.Option) (*s3.ListPartsOutput, error) {
partNumberMarker := 0
if input.PartNumberMarker != nil {
partNumberMarker = int(*input.PartNumberMarker)
}
res, err := s.client.ListObjectParts(ctx, *input.Bucket, *input.Key, *input.UploadId, partNumberMarker, 0)
if err != nil {
return nil, err
}
parts := make([]*s3.Part, len(res.ObjectParts))
for i, p := range res.ObjectParts {
partNumber := int64(p.PartNumber)
parts[i] = &s3.Part{
ETag: &p.ETag,
PartNumber: &partNumber,
Size: &p.Size,
}
}
nextPartNumberMarker := int64(res.NextPartNumberMarker)
return &s3.ListPartsOutput{
IsTruncated: &res.IsTruncated,
NextPartNumberMarker: &nextPartNumberMarker,
Parts: parts,
}, nil
}
func (s MinioS3API) UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opt ...request.Option) (*s3.UploadPartOutput, error) {
var objectSize int64
if input.ContentLength != nil {
objectSize = *input.ContentLength
} else {
return nil, errors.New("missing ContentLength")
}
partNumber := int(*input.PartNumber)
part, err := s.client.PutObjectPart(ctx, *input.Bucket, *input.Key, *input.UploadId, partNumber, input.Body, objectSize, "", "", nil)
if err != nil {
return nil, err
}
return &s3.UploadPartOutput{
ETag: &part.ETag,
}, nil
}
func (s MinioS3API) GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error) {
body, info, _, err := s.client.GetObject(ctx, *input.Bucket, *input.Key, minio.GetObjectOptions{})
if err != nil {
return nil, err
}
return &s3.GetObjectOutput{
Body: body,
ContentLength: &info.Size,
}, nil
}
func (s MinioS3API) HeadObjectWithContext(ctx context.Context, input *s3.HeadObjectInput, opt ...request.Option) (*s3.HeadObjectOutput, error) {
info, err := s.client.StatObject(ctx, *input.Bucket, *input.Key, minio.StatObjectOptions{})
if err != nil {
return nil, err
}
return &s3.HeadObjectOutput{
ContentLength: &info.Size,
}, nil
}
func (s MinioS3API) CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
metadata := make(map[string]string, len(input.Metadata))
for key, value := range input.Metadata {
metadata[key] = *value
}
uploadId, err := s.client.NewMultipartUpload(ctx, *input.Bucket, *input.Key, minio.PutObjectOptions{
UserMetadata: metadata,
})
if err != nil {
return nil, err
}
return &s3.CreateMultipartUploadOutput{
UploadId: &uploadId,
}, nil
}
func (s MinioS3API) AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
return nil, fmt.Errorf("AbortMultipartUploadWithContext not implemented")
}
func (s MinioS3API) DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectOutput, error) {
err := s.client.RemoveObject(ctx, *input.Bucket, *input.Key, minio.RemoveObjectOptions{})
if err != nil {
return nil, err
}
return &s3.DeleteObjectOutput{}, nil
}
func (s MinioS3API) DeleteObjectsWithContext(ctx context.Context, input *s3.DeleteObjectsInput, opt ...request.Option) (*s3.DeleteObjectsOutput, error) {
return nil, fmt.Errorf("DeleteObjectsWithContext not implemented")
}
func (s MinioS3API) CompleteMultipartUploadWithContext(ctx context.Context, input *s3.CompleteMultipartUploadInput, opt ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
parts := make([]minio.CompletePart, len(input.MultipartUpload.Parts))
for i, p := range input.MultipartUpload.Parts {
parts[i] = minio.CompletePart{
PartNumber: int(*p.PartNumber),
ETag: *p.ETag,
}
}
_, err := s.client.CompleteMultipartUpload(ctx, *input.Bucket, *input.Key, *input.UploadId, parts, minio.PutObjectOptions{})
if err != nil {
return nil, err
}
return &s3.CompleteMultipartUploadOutput{}, nil
}
func (s MinioS3API) UploadPartCopyWithContext(ctx context.Context, input *s3.UploadPartCopyInput, opt ...request.Option) (*s3.UploadPartCopyOutput, error) {
return nil, fmt.Errorf("UploadPartCopyWithContext not implemented")
}
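To route tusd's S3 operations through this adapter, construct a minio.Core and hand it to the store. A wiring sketch; the endpoint, bucket, and credentials are placeholders, and the credentials package is github.com/minio/minio-go/v7/pkg/credentials:

core, err := minio.NewCore("minio.example.com:9000", &minio.Options{
	Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
	Secure: true,
})
if err != nil {
	log.Fatal(err)
}
store := s3store.New("example-bucket", s3store.NewMinioS3API(core))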

View File

@@ -83,6 +83,9 @@ import (
"sync"
"time"
"github.com/minio/minio-go/v7"
"github.com/prometheus/client_golang/prometheus"
"github.com/tus/tusd/internal/semaphore"
"github.com/tus/tusd/internal/uid"
"github.com/tus/tusd/pkg/handler"
@@ -156,13 +159,43 @@ type S3Store struct {
// CPU, so it might be desirable to disable them.
// Note that this property is experimental and might be removed in the future!
DisableContentHashes bool
// uploadSemaphore limits the number of concurrent multipart part uploads to S3.
uploadSemaphore semaphore.Semaphore
// requestDurationMetric holds the prometheus instance for storing the request durations.
requestDurationMetric *prometheus.SummaryVec
// diskWriteDurationMetric holds the prometheus instance for storing the time it takes to write chunks to disk.
diskWriteDurationMetric prometheus.Summary
// uploadSemaphoreDemandMetric holds the prometheus instance for storing the demand on the upload semaphore
uploadSemaphoreDemandMetric prometheus.Gauge
// uploadSemaphoreLimitMetric holds the prometheus instance for storing the limit on the upload semaphore
uploadSemaphoreLimitMetric prometheus.Gauge
}
// The labels to use for observing and storing request duration. One label per operation.
const (
metricGetInfoObject = "get_info_object"
metricPutInfoObject = "put_info_object"
metricCreateMultipartUpload = "create_multipart_upload"
metricCompleteMultipartUpload = "complete_multipart_upload"
metricUploadPart = "upload_part"
metricListParts = "list_parts"
metricHeadPartObject = "head_part_object"
metricGetPartObject = "get_part_object"
metricPutPartObject = "put_part_object"
metricDeletePartObject = "delete_part_object"
)
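The internal/semaphore package is not part of this diff; a plausible channel-based implementation consistent with how it is used in this file (an assumption, not the verbatim source):

package semaphore

// Semaphore limits concurrency to the channel's capacity.
type Semaphore chan struct{}

func New(concurrency int) Semaphore {
	return make(Semaphore, concurrency)
}

// Acquire blocks until a slot is free.
func (s Semaphore) Acquire() {
	s <- struct{}{}
}

// Release frees one slot.
func (s Semaphore) Release() {
	<-s
}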
type S3API interface {
PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error)
ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opt ...request.Option) (*s3.ListPartsOutput, error)
UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opt ...request.Option) (*s3.UploadPartOutput, error)
GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error)
HeadObjectWithContext(ctx context.Context, input *s3.HeadObjectInput, opt ...request.Option) (*s3.HeadObjectOutput, error)
CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...request.Option) (*s3.CreateMultipartUploadOutput, error)
AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...request.Option) (*s3.AbortMultipartUploadOutput, error)
DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectOutput, error)
@@ -177,17 +210,52 @@ type s3APIForPresigning interface {
// New constructs a new storage using the supplied bucket and service object.
func New(bucket string, service S3API) S3Store {
return S3Store{
Bucket: bucket,
Service: service,
MaxPartSize: 5 * 1024 * 1024 * 1024,
MinPartSize: 5 * 1024 * 1024,
PreferredPartSize: 50 * 1024 * 1024,
MaxMultipartParts: 10000,
MaxObjectSize: 5 * 1024 * 1024 * 1024 * 1024,
MaxBufferedParts: 20,
TemporaryDirectory: "",
requestDurationMetric := prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "tusd_s3_request_duration_ms",
Help: "Duration of requests sent to S3 in milliseconds per operation",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, []string{"operation"})
diskWriteDurationMetric := prometheus.NewSummary(prometheus.SummaryOpts{
Name: "tusd_s3_disk_write_duration_ms",
Help: "Duration of chunk writes to disk in milliseconds",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
uploadSemaphoreDemandMetric := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "tusd_s3_upload_semaphore_demand",
Help: "Number of goroutines wanting to acquire the upload lock or having it acquired",
})
uploadSemaphoreLimitMetric := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "tusd_s3_upload_semaphore_limit",
Help: "Limit of concurrent acquisitions of upload semaphore",
})
store := S3Store{
Bucket: bucket,
Service: service,
MaxPartSize: 5 * 1024 * 1024 * 1024,
MinPartSize: 5 * 1024 * 1024,
PreferredPartSize: 50 * 1024 * 1024,
MaxMultipartParts: 10000,
MaxObjectSize: 5 * 1024 * 1024 * 1024 * 1024,
MaxBufferedParts: 20,
TemporaryDirectory: "",
requestDurationMetric: requestDurationMetric,
diskWriteDurationMetric: diskWriteDurationMetric,
uploadSemaphoreDemandMetric: uploadSemaphoreDemandMetric,
uploadSemaphoreLimitMetric: uploadSemaphoreLimitMetric,
}
store.SetConcurrentPartUploads(10)
return store
}
// SetConcurrentPartUploads changes the limit on how many concurrent part uploads to S3 are allowed.
func (store *S3Store) SetConcurrentPartUploads(limit int) {
store.uploadSemaphore = semaphore.New(limit)
store.uploadSemaphoreLimitMetric.Set(float64(limit))
}
// UseIn sets this store as the core data store in the passed composer and adds
@@ -199,6 +267,20 @@ func (store S3Store) UseIn(composer *handler.StoreComposer) {
composer.UseLengthDeferrer(store)
}
func (store S3Store) RegisterMetrics(registry prometheus.Registerer) {
registry.MustRegister(store.requestDurationMetric)
registry.MustRegister(store.diskWriteDurationMetric)
registry.MustRegister(store.uploadSemaphoreDemandMetric)
registry.MustRegister(store.uploadSemaphoreLimitMetric)
}
func (store S3Store) observeRequestDuration(start time.Time, label string) {
elapsed := time.Now().Sub(start)
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
store.requestDurationMetric.WithLabelValues(label).Observe(ms)
}
type s3Upload struct {
id string
store *S3Store
@ -207,6 +289,18 @@ type s3Upload struct {
// been fetched yet from S3. Never read or write to it directly but instead use
// the GetInfo and writeInfo functions.
info *handler.FileInfo
// parts collects all parts for this upload. It will be nil if info is nil as well.
parts []*s3Part
// incompletePartSize is the size of an incomplete part object, if one exists. It will be 0 if info is nil as well.
incompletePartSize int64
}
// s3Part represents a single part of an S3 multipart upload.
type s3Part struct {
number int64
size int64
etag string
}
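Aside: getInternalInfo below memoizes the fetched state on the upload struct, so repeated calls within one request hit S3 only once. A minimal sketch of the same pattern (illustrative names, not from the diff):

package main

import "fmt"

type cached struct {
	value *int // nil until the first fetch, mirroring upload.info
}

func (c *cached) get() int {
	if c.value != nil {
		return *c.value // served from the cache
	}
	v := expensiveFetch()
	c.value = &v
	return v
}

func expensiveFetch() int { return 42 }

func main() {
	c := &cached{}
	fmt.Println(c.get(), c.get()) // fetches once, prints: 42 42
}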
func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
@ -233,11 +327,13 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
}
// Create the actual multipart upload
t := time.Now()
res, err := store.Service.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(uploadId),
Metadata: metadata,
})
store.observeRequestDuration(t, metricCreateMultipartUpload)
if err != nil {
return nil, fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
}
@ -251,7 +347,7 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
"Key": *store.keyWithPrefix(uploadId),
}
upload := &s3Upload{id, &store, nil}
upload := &s3Upload{id, &store, nil, []*s3Part{}, 0}
err = upload.writeInfo(ctx, info)
if err != nil {
return nil, fmt.Errorf("s3store: unable to create info file:\n%s", err)
@ -261,7 +357,7 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
}
func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
return &s3Upload{id, &store, nil}, nil
return &s3Upload{id, &store, nil, []*s3Part{}, 0}, nil
}
func (store S3Store) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
@ -290,24 +386,72 @@ func (upload *s3Upload) writeInfo(ctx context.Context, info handler.FileInfo) er
}
// Create object on S3 containing information about the file
t := time.Now()
_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
Body: bytes.NewReader(infoJson),
ContentLength: aws.Int64(int64(len(infoJson))),
})
store.observeRequestDuration(t, metricPutInfoObject)
return err
}
func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
func (upload *s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
id := upload.id
store := upload.store
uploadId, _ := splitIds(id)
// Get the total size of the current upload, the number of parts (to generate the
// next part number), and whether an incomplete part exists.
_, _, incompletePartSize, err := upload.getInternalInfo(ctx)
if err != nil {
return 0, err
}
if incompletePartSize > 0 {
incompletePartFile, err := store.downloadIncompletePartForUpload(ctx, uploadId)
if err != nil {
return 0, err
}
if incompletePartFile == nil {
return 0, fmt.Errorf("s3store: Expected an incomplete part file but did not get any")
}
defer cleanUpTempFile(incompletePartFile)
if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
return 0, err
}
// Prepend an incomplete part, if necessary and adapt the offset
src = io.MultiReader(incompletePartFile, src)
offset = offset - incompletePartSize
}
bytesUploaded, err := upload.uploadParts(ctx, offset, src)
// The size of the incomplete part should not be counted, because the
// handling of the incomplete part should be fully transparent to the user.
bytesUploaded = bytesUploaded - incompletePartSize
if bytesUploaded < 0 {
bytesUploaded = 0
}
upload.info.Offset += bytesUploaded
return bytesUploaded, err
}
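A toy walkthrough of the accounting above, with hypothetical numbers: a previous request left a 3-byte incomplete part and the client now continues with 7 new bytes at offset 3. The stored bytes are prepended, so 10 bytes reach S3, but only the 7 new bytes are reported back.

package main

import "fmt"

func main() {
	incompletePartSize := int64(3)
	offset := int64(3)           // offset announced by the client
	offset -= incompletePartSize // rewind so the stored bytes are re-sent
	uploaded := int64(10)        // bytes written to S3 (3 old + 7 new)
	reported := uploaded - incompletePartSize
	if reported < 0 {
		reported = 0 // clamp, as WriteChunk does
	}
	fmt.Println(offset, reported) // 0 7
}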
func (upload *s3Upload) uploadParts(ctx context.Context, offset int64, src io.Reader) (int64, error) {
id := upload.id
store := upload.store
uploadId, multipartId := splitIds(id)
// Get the total size of the current upload
info, err := upload.GetInfo(ctx)
// Get the total size of the current upload and its parts, to generate the next part number
info, parts, _, err := upload.getInternalInfo(ctx)
if err != nil {
return 0, err
}
@ -319,83 +463,87 @@ func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Read
return 0, err
}
// Get number of parts to generate next number
parts, err := store.listAllParts(ctx, id)
if err != nil {
return 0, err
}
numParts := len(parts)
nextPartNum := int64(numParts + 1)
incompletePartFile, incompletePartSize, err := store.downloadIncompletePartForUpload(ctx, uploadId)
if err != nil {
return 0, err
}
if incompletePartFile != nil {
defer cleanUpTempFile(incompletePartFile)
if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
return 0, err
}
src = io.MultiReader(incompletePartFile, src)
}
fileChan := make(chan *os.File, store.MaxBufferedParts)
doneChan := make(chan struct{})
defer close(doneChan)
// If we panic or return while there are still files in the channel, then
// we may leak file descriptors. Let's ensure that those are cleaned up.
defer func() {
for file := range fileChan {
cleanUpTempFile(file)
}
}()
partProducer := s3PartProducer{
store: store,
done: doneChan,
files: fileChan,
r: src,
}
partProducer, fileChan := newS3PartProducer(src, store.MaxBufferedParts, store.TemporaryDirectory, store.diskWriteDurationMetric)
defer partProducer.stop()
go partProducer.produce(optimalPartSize)
for file := range fileChan {
stat, err := file.Stat()
if err != nil {
return 0, err
}
n := stat.Size()
var wg sync.WaitGroup
var uploadErr error
isFinalChunk := !info.SizeIsDeferred && (size == (offset-incompletePartSize)+n)
if n >= store.MinPartSize || isFinalChunk {
uploadPartInput := &s3.UploadPartInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(uploadId),
UploadId: aws.String(multipartId),
PartNumber: aws.Int64(nextPartNum),
}
if err := upload.putPartForUpload(ctx, uploadPartInput, file, n); err != nil {
return bytesUploaded, err
for {
// We acquire the semaphore before starting the goroutine to avoid
// starting many goroutines, most of which are just waiting for the lock.
// We also acquire the semaphore before reading from the channel to reduce
// the number of part files lying around on disk without being used.
upload.store.acquireUploadSemaphore()
fileChunk, more := <-fileChan
if !more {
upload.store.releaseUploadSemaphore()
break
}
partfile := fileChunk.reader
partsize := fileChunk.size
closePart := fileChunk.closeReader
isFinalChunk := !info.SizeIsDeferred && (size == offset+bytesUploaded+partsize)
if partsize >= store.MinPartSize || isFinalChunk {
part := &s3Part{
etag: "",
size: partsize,
number: nextPartNum,
}
upload.parts = append(upload.parts, part)
wg.Add(1)
go func(file io.ReadSeeker, part *s3Part, closePart func()) {
defer upload.store.releaseUploadSemaphore()
defer wg.Done()
defer closePart()
t := time.Now()
uploadPartInput := &s3.UploadPartInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(uploadId),
UploadId: aws.String(multipartId),
PartNumber: aws.Int64(part.number),
}
etag, err := upload.putPartForUpload(ctx, uploadPartInput, file, part.size)
store.observeRequestDuration(t, metricUploadPart)
if err != nil {
uploadErr = err
} else {
part.etag = etag
}
}(partfile, part, closePart)
} else {
if err := store.putIncompletePartForUpload(ctx, uploadId, file); err != nil {
return bytesUploaded, err
}
wg.Add(1)
go func(file io.ReadSeeker, closePart func()) {
defer upload.store.releaseUploadSemaphore()
defer wg.Done()
defer closePart()
bytesUploaded += n
return (bytesUploaded - incompletePartSize), nil
if err := store.putIncompletePartForUpload(ctx, uploadId, file); err != nil {
uploadErr = err
}
upload.incompletePartSize = partsize
}(partfile, closePart)
}
offset += n
bytesUploaded += n
bytesUploaded += partsize
nextPartNum += 1
}
return bytesUploaded - incompletePartSize, partProducer.err
wg.Wait()
if uploadErr != nil {
return 0, uploadErr
}
return bytesUploaded, partProducer.err
}
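A condensed, self-contained sketch of the loop above: acquiring the semaphore before reading from the channel bounds both the number of in-flight uploads and the number of chunks pulled off the channel. The chunk type and upload step are stand-ins.

package main

import (
	"fmt"
	"sync"
)

func main() {
	sem := make(chan struct{}, 3) // limit of 3 concurrent uploads
	chunks := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			chunks <- i
		}
		close(chunks)
	}()

	var wg sync.WaitGroup
	for {
		sem <- struct{}{} // acquire before <-chunks, as in uploadParts
		chunk, more := <-chunks
		if !more {
			<-sem // release the surplus slot and stop
			break
		}
		wg.Add(1)
		go func(c int) {
			defer func() { <-sem }() // release
			defer wg.Done()
			fmt.Println("uploading chunk", c)
		}(chunk)
	}
	wg.Wait()
}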
func cleanUpTempFile(file *os.File) {
@ -403,14 +551,16 @@ func cleanUpTempFile(file *os.File) {
os.Remove(file.Name())
}
func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s3.UploadPartInput, file *os.File, size int64) error {
defer cleanUpTempFile(file)
func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s3.UploadPartInput, file io.ReadSeeker, size int64) (string, error) {
if !upload.store.DisableContentHashes {
// By default, use the traditional approach to upload data
uploadPartInput.Body = file
_, err := upload.store.Service.UploadPartWithContext(ctx, uploadPartInput)
return err
uploadPartInput.ContentLength = &size
res, err := upload.store.Service.UploadPartWithContext(ctx, uploadPartInput)
if err != nil {
return "", err
}
return *res.ETag, nil
} else {
// Experimental feature to prevent the AWS SDK from calculating the SHA256 hash
// for the parts we upload to S3.
@ -418,19 +568,19 @@ func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s
// on our own. This way, the body is not included in the SHA256 calculation.
s3api, ok := upload.store.Service.(s3APIForPresigning)
if !ok {
return fmt.Errorf("s3store: failed to cast S3 service for presigning")
return "", fmt.Errorf("s3store: failed to cast S3 service for presigning")
}
s3Req, _ := s3api.UploadPartRequest(uploadPartInput)
url, err := s3Req.Presign(15 * time.Minute)
if err != nil {
return err
return "", err
}
req, err := http.NewRequest("PUT", url, file)
if err != nil {
return err
return "", err
}
// Set the Content-Length manually to prevent the usage of Transfer-Encoding: chunked,
@ -439,60 +589,100 @@ func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
return "", err
}
defer res.Body.Close()
if res.StatusCode != 200 {
buf := new(strings.Builder)
io.Copy(buf, res.Body)
return fmt.Errorf("s3store: unexpected response code %d for presigned upload: %s", res.StatusCode, buf.String())
return "", fmt.Errorf("s3store: unexpected response code %d for presigned upload: %s", res.StatusCode, buf.String())
}
return nil
return res.Header.Get("ETag"), nil
}
}
func (upload *s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
info, _, _, err = upload.getInternalInfo(ctx)
return info, err
}
func (upload *s3Upload) getInternalInfo(ctx context.Context) (info handler.FileInfo, parts []*s3Part, incompletePartSize int64, err error) {
if upload.info != nil {
return *upload.info, nil
return *upload.info, upload.parts, upload.incompletePartSize, nil
}
info, err = upload.fetchInfo(ctx)
info, parts, incompletePartSize, err = upload.fetchInfo(ctx)
if err != nil {
return info, err
return info, parts, incompletePartSize, err
}
upload.info = &info
return info, nil
upload.parts = parts
upload.incompletePartSize = incompletePartSize
return info, parts, incompletePartSize, nil
}
func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, err error) {
func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, parts []*s3Part, incompletePartSize int64, err error) {
id := upload.id
store := upload.store
uploadId, _ := splitIds(id)
// Get file info stored in separate object
res, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
})
if err != nil {
if isAwsError(err, "NoSuchKey") {
return info, handler.ErrNotFound
var wg sync.WaitGroup
wg.Add(3)
// We store all errors in here and handle them all together once the wait
// group is done.
var infoErr error
var partsErr error
var incompletePartSizeErr error
go func() {
defer wg.Done()
t := time.Now()
// Get file info stored in separate object
var res *s3.GetObjectOutput
res, infoErr = store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
})
store.observeRequestDuration(t, metricGetInfoObject)
if infoErr == nil {
infoErr = json.NewDecoder(res.Body).Decode(&info)
}
}()
return info, err
go func() {
defer wg.Done()
// Get uploaded parts and their offset
parts, partsErr = store.listAllParts(ctx, id)
}()
go func() {
defer wg.Done()
// Get size of optional incomplete part file.
incompletePartSize, incompletePartSizeErr = store.headIncompletePartForUpload(ctx, uploadId)
}()
wg.Wait()
// Finally, after all requests are complete, let's handle the errors
if infoErr != nil {
err = infoErr
// If the info file is not found, we consider the upload to be non-existent
if isAwsError(err, "NoSuchKey") {
err = handler.ErrNotFound
}
return
}
if err := json.NewDecoder(res.Body).Decode(&info); err != nil {
return info, err
}
// Get uploaded parts and their offset
parts, err := store.listAllParts(ctx, id)
if err != nil {
// Check if the error is caused by the upload not being found. This happens
if partsErr != nil {
err = partsErr
// Check if the error is caused by the multipart upload not being found. This happens
// when the multipart upload has already been completed or aborted. Since
// we already found the info object, we know that the upload has been
// completed and can therefore ensure that the offset equals the size.
@ -500,33 +690,28 @@ func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, er
// Spaces, can also return NoSuchKey.
if isAwsError(err, "NoSuchUpload") || isAwsError(err, "NoSuchKey") {
info.Offset = info.Size
return info, nil
} else {
return info, err
err = nil
}
return
}
offset := int64(0)
if incompletePartSizeErr != nil {
err = incompletePartSizeErr
return
}
// The offset is the sum of all part sizes and the size of the incomplete part file.
offset := incompletePartSize
for _, part := range parts {
offset += *part.Size
}
incompletePartObject, err := store.getIncompletePartForUpload(ctx, uploadId)
if err != nil {
return info, err
}
if incompletePartObject != nil {
defer incompletePartObject.Body.Close()
offset += *incompletePartObject.ContentLength
offset += part.size
}
info.Offset = offset
return
return info, parts, incompletePartSize, nil
}
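The fan-out above uses a WaitGroup with one error variable per request so that benign errors (NoSuchUpload, NoSuchKey) can be inspected individually. For comparison, a sketch of the same shape with golang.org/x/sync/errgroup, which is not what the diff uses: Wait would return only the first error and cancel the shared context.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// Hypothetical stand-ins for the three S3 requests issued by fetchInfo.
func fetchInfoObject(ctx context.Context) error    { return nil }
func listParts(ctx context.Context) error          { return nil }
func headIncompletePart(ctx context.Context) error { return nil }

func fetchAll(ctx context.Context) error {
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error { return fetchInfoObject(ctx) })
	g.Go(func() error { return listParts(ctx) })
	g.Go(func() error { return headIncompletePart(ctx) })
	return g.Wait()
}

func main() {
	fmt.Println(fetchAll(context.Background()))
}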
func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
func (upload s3Upload) GetReader(ctx context.Context) (io.ReadCloser, error) {
id := upload.id
store := upload.store
uploadId, multipartId := splitIds(id)
@ -558,7 +743,7 @@ func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
})
if err == nil {
// The multipart upload still exists, which means we cannot download it yet
return nil, handler.NewHTTPError(errors.New("cannot stream non-finished upload"), http.StatusBadRequest)
return nil, handler.NewError("ERR_INCOMPLETE_UPLOAD", "cannot stream non-finished upload", http.StatusBadRequest)
}
if isAwsError(err, "NoSuchUpload") {
@ -640,7 +825,7 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
uploadId, multipartId := splitIds(id)
// Get uploaded parts
parts, err := store.listAllParts(ctx, id)
_, parts, _, err := upload.getInternalInfo(ctx)
if err != nil {
return err
}
@ -660,10 +845,11 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
return err
}
parts = []*s3.Part{
&s3.Part{
ETag: res.ETag,
PartNumber: aws.Int64(1),
parts = []*s3Part{
&s3Part{
etag: *res.ETag,
number: 1,
size: 0,
},
}
@ -675,11 +861,12 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
for index, part := range parts {
completedParts[index] = &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ETag: aws.String(part.etag),
PartNumber: aws.Int64(part.number),
}
}
t := time.Now()
_, err = store.Service.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(uploadId),
@ -688,6 +875,7 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
Parts: completedParts,
},
})
store.observeRequestDuration(t, metricCompleteMultipartUpload)
return err
}
@ -790,10 +978,16 @@ func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads
partialS3Upload := partialUpload.(*s3Upload)
partialId, _ := splitIds(partialS3Upload.id)
upload.parts = append(upload.parts, &s3Part{
number: int64(i + 1),
size: -1,
etag: "",
})
go func(i int, partialId string) {
defer wg.Done()
_, err := store.Service.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
res, err := store.Service.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(uploadId),
UploadId: aws.String(multipartId),
@ -806,6 +1000,8 @@ func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads
errs = append(errs, err)
return
}
upload.parts[i].etag = *res.CopyPartResult.ETag
}(i, partialId)
}
@ -829,11 +1025,13 @@ func (upload *s3Upload) DeclareLength(ctx context.Context, length int64) error {
return upload.writeInfo(ctx, info)
}
func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.Part, err error) {
func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3Part, err error) {
uploadId, multipartId := splitIds(id)
partMarker := int64(0)
for {
t := time.Now()
// Get uploaded parts
listPtr, err := store.Service.ListPartsWithContext(ctx, &s3.ListPartsInput{
Bucket: aws.String(store.Bucket),
@ -841,11 +1039,19 @@ func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.P
UploadId: aws.String(multipartId),
PartNumberMarker: aws.Int64(partMarker),
})
store.observeRequestDuration(t, metricListParts)
if err != nil {
return nil, err
}
parts = append(parts, (*listPtr).Parts...)
// TODO: Find more efficient way when appending many elements
for _, part := range (*listPtr).Parts {
parts = append(parts, &s3Part{
number: *part.PartNumber,
size: *part.Size,
etag: *part.ETag,
})
}
if listPtr.IsTruncated != nil && *listPtr.IsTruncated {
partMarker = *listPtr.NextPartNumberMarker
@ -856,36 +1062,38 @@ func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.P
return parts, nil
}
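The loop above follows the standard ListParts pagination protocol: re-request with the returned NextPartNumberMarker until IsTruncated is false. The same idiom in a generic, self-contained form (the page type and fetch function are fabricated):

package main

import "fmt"

type page struct {
	items     []int
	next      int
	truncated bool
}

// fetch returns at most two items per call, standing in for ListParts.
func fetch(marker int) page {
	all := []int{1, 2, 3, 4, 5}
	end := marker + 2
	if end >= len(all) {
		return page{items: all[marker:]}
	}
	return page{items: all[marker:end], next: end, truncated: true}
}

func main() {
	var items []int
	marker := 0
	for {
		p := fetch(marker)
		items = append(items, p.items...)
		if !p.truncated {
			break
		}
		marker = p.next
	}
	fmt.Println(items) // [1 2 3 4 5]
}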
func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, uploadId string) (*os.File, int64, error) {
func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, uploadId string) (*os.File, error) {
t := time.Now()
incompleteUploadObject, err := store.getIncompletePartForUpload(ctx, uploadId)
if err != nil {
return nil, 0, err
return nil, err
}
if incompleteUploadObject == nil {
// We did not find an incomplete upload
return nil, 0, nil
return nil, nil
}
defer incompleteUploadObject.Body.Close()
partFile, err := ioutil.TempFile(store.TemporaryDirectory, "tusd-s3-tmp-")
if err != nil {
return nil, 0, err
return nil, err
}
n, err := io.Copy(partFile, incompleteUploadObject.Body)
store.observeRequestDuration(t, metricGetPartObject)
if err != nil {
return nil, 0, err
return nil, err
}
if n < *incompleteUploadObject.ContentLength {
return nil, 0, errors.New("short read of incomplete upload")
return nil, errors.New("short read of incomplete upload")
}
_, err = partFile.Seek(0, 0)
if err != nil {
return nil, 0, err
return nil, err
}
return partFile, n, nil
return partFile, nil
}
func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId string) (*s3.GetObjectOutput, error) {
@ -901,22 +1109,42 @@ func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId st
return obj, err
}
func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, file *os.File) error {
defer cleanUpTempFile(file)
func (store S3Store) headIncompletePartForUpload(ctx context.Context, uploadId string) (int64, error) {
t := time.Now()
obj, err := store.Service.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
})
store.observeRequestDuration(t, metricHeadPartObject)
if err != nil {
if isAwsError(err, s3.ErrCodeNoSuchKey) || isAwsError(err, "NotFound") || isAwsError(err, "AccessDenied") {
err = nil
}
return 0, err
}
return *obj.ContentLength, nil
}
func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, file io.ReadSeeker) error {
t := time.Now()
_, err := store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
Body: file,
})
store.observeRequestDuration(t, metricPutPartObject)
return err
}
func (store S3Store) deleteIncompletePartForUpload(ctx context.Context, uploadId string) error {
t := time.Now()
_, err := store.Service.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
})
store.observeRequestDuration(t, metricDeletePartObject)
return err
}
@ -937,6 +1165,11 @@ func isAwsError(err error, code string) bool {
if err, ok := err.(awserr.Error); ok && err.Code() == code {
return true
}
if err, ok := err.(minio.ErrorResponse); ok && err.Code == code {
return true
}
return false
}
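A usage sketch with a fabricated error value; awserr.New is the standard AWS SDK v1 constructor, and the type assertion matches the first branch above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	err := awserr.New("NoSuchUpload", "the upload is gone", nil)
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchUpload" {
		fmt.Println("multipart upload already completed or aborted")
	}
}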
@ -1004,3 +1237,13 @@ func (store S3Store) metadataKeyWithPrefix(key string) *string {
return aws.String(prefix + key)
}
func (store S3Store) acquireUploadSemaphore() {
store.uploadSemaphoreDemandMetric.Inc()
store.uploadSemaphore.Acquire()
}
func (store S3Store) releaseUploadSemaphore() {
store.uploadSemaphore.Release()
store.uploadSemaphoreDemandMetric.Dec()
}
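Demand is incremented before Acquire blocks, so the demand gauge minus the limit gauge approximates the queue of waiting uploaders. A standalone sketch, under the assumption that the semaphore is a plain buffered channel (the real package may differ):

package main

import (
	"fmt"
	"sync/atomic"
)

type semaphore chan struct{}

func (s semaphore) Acquire() { s <- struct{}{} }
func (s semaphore) Release() { <-s }

var demand int64 // mirrors tusd_s3_upload_semaphore_demand

func acquire(s semaphore) {
	atomic.AddInt64(&demand, 1) // counted before blocking, like the gauge
	s.Acquire()
}

func release(s semaphore) {
	s.Release()
	atomic.AddInt64(&demand, -1)
}

func main() {
	s := make(semaphore, 2)
	acquire(s)
	acquire(s)
	fmt.Println(atomic.LoadInt64(&demand)) // 2
	release(s)
	release(s)
}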


@ -6,36 +6,37 @@ package s3store
import (
context "context"
reflect "reflect"
request "github.com/aws/aws-sdk-go/aws/request"
s3 "github.com/aws/aws-sdk-go/service/s3"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockS3API is a mock of S3API interface
// MockS3API is a mock of S3API interface.
type MockS3API struct {
ctrl *gomock.Controller
recorder *MockS3APIMockRecorder
}
// MockS3APIMockRecorder is the mock recorder for MockS3API
// MockS3APIMockRecorder is the mock recorder for MockS3API.
type MockS3APIMockRecorder struct {
mock *MockS3API
}
// NewMockS3API creates a new mock instance
// NewMockS3API creates a new mock instance.
func NewMockS3API(ctrl *gomock.Controller) *MockS3API {
mock := &MockS3API{ctrl: ctrl}
mock.recorder = &MockS3APIMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {
return m.recorder
}
// AbortMultipartUploadWithContext mocks base method
// AbortMultipartUploadWithContext mocks base method.
func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -48,14 +49,14 @@ func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *
return ret0, ret1
}
// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext
// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext.
func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...)
}
// CompleteMultipartUploadWithContext mocks base method
// CompleteMultipartUploadWithContext mocks base method.
func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -68,14 +69,14 @@ func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg
return ret0, ret1
}
// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext
// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext.
func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...)
}
// CreateMultipartUploadWithContext mocks base method
// CreateMultipartUploadWithContext mocks base method.
func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -88,14 +89,14 @@ func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1
return ret0, ret1
}
// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext
// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext.
func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...)
}
// DeleteObjectWithContext mocks base method
// DeleteObjectWithContext mocks base method.
func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -108,14 +109,14 @@ func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.Delet
return ret0, ret1
}
// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext
// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext.
func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...)
}
// DeleteObjectsWithContext mocks base method
// DeleteObjectsWithContext mocks base method.
func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -128,14 +129,14 @@ func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.Dele
return ret0, ret1
}
// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext
// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext.
func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...)
}
// GetObjectWithContext mocks base method
// GetObjectWithContext mocks base method.
func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -148,14 +149,34 @@ func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjec
return ret0, ret1
}
// GetObjectWithContext indicates an expected call of GetObjectWithContext
// GetObjectWithContext indicates an expected call of GetObjectWithContext.
func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...)
}
// ListPartsWithContext mocks base method
// HeadObjectWithContext mocks base method.
func (m *MockS3API) HeadObjectWithContext(arg0 context.Context, arg1 *s3.HeadObjectInput, arg2 ...request.Option) (*s3.HeadObjectOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "HeadObjectWithContext", varargs...)
ret0, _ := ret[0].(*s3.HeadObjectOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HeadObjectWithContext indicates an expected call of HeadObjectWithContext.
func (mr *MockS3APIMockRecorder) HeadObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectWithContext", reflect.TypeOf((*MockS3API)(nil).HeadObjectWithContext), varargs...)
}
// ListPartsWithContext mocks base method.
func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -168,14 +189,14 @@ func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPart
return ret0, ret1
}
// ListPartsWithContext indicates an expected call of ListPartsWithContext
// ListPartsWithContext indicates an expected call of ListPartsWithContext.
func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...)
}
// PutObjectWithContext mocks base method
// PutObjectWithContext mocks base method.
func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -188,14 +209,14 @@ func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjec
return ret0, ret1
}
// PutObjectWithContext indicates an expected call of PutObjectWithContext
// PutObjectWithContext indicates an expected call of PutObjectWithContext.
func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...)
}
// UploadPartCopyWithContext mocks base method
// UploadPartCopyWithContext mocks base method.
func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -208,14 +229,14 @@ func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, arg1 *s3.Upl
return ret0, ret1
}
// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext
// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext.
func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...)
}
// UploadPartWithContext mocks base method
// UploadPartWithContext mocks base method.
func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
@ -228,7 +249,7 @@ func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadP
return ret0, ret1
}
// UploadPartWithContext indicates an expected call of UploadPartWithContext
// UploadPartWithContext indicates an expected call of UploadPartWithContext.
func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)


@ -1,64 +1,155 @@
package s3store
import (
"bytes"
"io"
"io/ioutil"
"os"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const TEMP_DIR_USE_MEMORY = "_memory"
// s3PartProducer converts a stream of bytes from the reader into a stream of files on disk
type s3PartProducer struct {
store *S3Store
files chan<- *os.File
done chan struct{}
err error
r io.Reader
tmpDir string
files chan fileChunk
done chan struct{}
err error
r io.Reader
diskWriteDurationMetric prometheus.Summary
}
type fileChunk struct {
reader io.ReadSeeker
closeReader func()
size int64
}
func newS3PartProducer(source io.Reader, backlog int64, tmpDir string, diskWriteDurationMetric prometheus.Summary) (s3PartProducer, <-chan fileChunk) {
fileChan := make(chan fileChunk, backlog)
doneChan := make(chan struct{})
if os.Getenv("TUSD_S3STORE_TEMP_MEMORY") == "1" {
tmpDir = TEMP_DIR_USE_MEMORY
}
partProducer := s3PartProducer{
tmpDir: tmpDir,
done: doneChan,
files: fileChan,
r: source,
diskWriteDurationMetric: diskWriteDurationMetric,
}
return partProducer, fileChan
}
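The in-memory mode can therefore be enabled either via the environment or, assuming the exported constant and the tusd import path shown here, directly on the store:

package main

import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/tus/tusd/pkg/s3store"
)

func main() {
	store := s3store.New("bucket", s3.New(session.Must(session.NewSession())))
	// Buffer parts in memory instead of temporary files; equivalent to
	// running the process with TUSD_S3STORE_TEMP_MEMORY=1.
	store.TemporaryDirectory = s3store.TEMP_DIR_USE_MEMORY
}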
// stop should always be called by the consumer to ensure that the channels
// are properly closed and emptied.
func (spp *s3PartProducer) stop() {
close(spp.done)
// If we return while there are still files in the channel, then
// we may leak file descriptors. Let's ensure that those are cleaned up.
for fileChunk := range spp.files {
fileChunk.closeReader()
}
}
func (spp *s3PartProducer) produce(partSize int64) {
outerloop:
for {
file, err := spp.nextPart(partSize)
file, ok, err := spp.nextPart(partSize)
if err != nil {
// An error occurred. Stop producing.
spp.err = err
close(spp.files)
return
break
}
if file == nil {
close(spp.files)
return
if !ok {
// The source was fully read. Stop producing.
break
}
select {
case spp.files <- file:
case <-spp.done:
close(spp.files)
return
// We were told to stop. Stop producing.
break outerloop
}
}
close(spp.files)
}
func (spp *s3PartProducer) nextPart(size int64) (*os.File, error) {
// Create a temporary file to store the part
file, err := ioutil.TempFile(spp.store.TemporaryDirectory, "tusd-s3-tmp-")
if err != nil {
return nil, err
func (spp *s3PartProducer) nextPart(size int64) (fileChunk, bool, error) {
if spp.tmpDir != TEMP_DIR_USE_MEMORY {
// Create a temporary file to store the part
file, err := ioutil.TempFile(spp.tmpDir, "tusd-s3-tmp-")
if err != nil {
return fileChunk{}, false, err
}
limitedReader := io.LimitReader(spp.r, size)
start := time.Now()
n, err := io.Copy(file, limitedReader)
if err != nil {
return fileChunk{}, false, err
}
// If the entire request body is read and no more data is available,
// io.Copy returns 0 since it is unable to read any bytes. In that
// case, we can close the s3PartProducer.
if n == 0 {
cleanUpTempFile(file)
return fileChunk{}, false, nil
}
elapsed := time.Now().Sub(start)
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
spp.diskWriteDurationMetric.Observe(ms)
// Seek to the beginning of the file
file.Seek(0, 0)
return fileChunk{
reader: file,
closeReader: func() {
file.Close()
os.Remove(file.Name())
},
size: n,
}, true, nil
} else {
// Create a temporary buffer to store the part
buf := new(bytes.Buffer)
limitedReader := io.LimitReader(spp.r, size)
start := time.Now()
n, err := io.Copy(buf, limitedReader)
if err != nil {
return fileChunk{}, false, err
}
// If the entire request body is read and no more data is available,
// io.Copy returns 0 since it is unable to read any bytes. In that
// case, we can close the s3PartProducer.
if n == 0 {
return fileChunk{}, false, nil
}
elapsed := time.Now().Sub(start)
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
spp.diskWriteDurationMetric.Observe(ms)
return fileChunk{
// buf does not get written to anymore, so we can turn it into a reader
reader: bytes.NewReader(buf.Bytes()),
closeReader: func() {},
size: n,
}, true, nil
}
limitedReader := io.LimitReader(spp.r, size)
n, err := io.Copy(file, limitedReader)
if err != nil {
return nil, err
}
// If the entire request body is read and no more data is available,
// io.Copy returns 0 since it is unable to read any bytes. In that
// case, we can close the s3PartProducer.
if n == 0 {
cleanUpTempFile(file)
return nil, nil
}
// Seek to the beginning of the file
file.Seek(0, 0)
return file, nil
}
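Stripped of the temp-file and metrics plumbing, the producer boils down to slicing a reader into fixed-size chunks with io.LimitReader and handing them over a channel. A minimal, memory-only sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func produce(r io.Reader, partSize int64, out chan<- []byte) {
	defer close(out) // the consumer ranges until the channel closes
	for {
		buf := new(bytes.Buffer)
		n, err := io.Copy(buf, io.LimitReader(r, partSize))
		if err != nil || n == 0 {
			return // read error, or the source is exhausted
		}
		out <- buf.Bytes()
	}
}

func main() {
	chunks := make(chan []byte, 2)
	go produce(strings.NewReader("hello world"), 4, chunks)
	for c := range chunks {
		fmt.Printf("part of %d bytes: %q\n", len(c), c)
	}
}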


@ -2,10 +2,11 @@ package s3store
import (
"errors"
"os"
"strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
)
type InfiniteZeroReader struct{}
@ -21,33 +22,30 @@ func (ErrorReader) Read(b []byte) (int, error) {
return 0, errors.New("error from ErrorReader")
}
var testSummary = prometheus.NewSummary(prometheus.SummaryOpts{})
func TestPartProducerConsumesEntireReaderWithoutError(t *testing.T) {
fileChan := make(chan *os.File)
doneChan := make(chan struct{})
expectedStr := "test"
r := strings.NewReader(expectedStr)
pp := s3PartProducer{
store: &S3Store{},
done: doneChan,
files: fileChan,
r: r,
}
pp, fileChan := newS3PartProducer(r, 0, "", testSummary)
go pp.produce(1)
actualStr := ""
b := make([]byte, 1)
for f := range fileChan {
n, err := f.Read(b)
for chunk := range fileChan {
n, err := chunk.reader.Read(b)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if n != 1 {
t.Fatalf("incorrect number of bytes read: wanted %d, got %d", 1, n)
}
if chunk.size != 1 {
t.Fatalf("incorrect number of bytes in struct: wanted %d, got %d", 1, chunk.size)
}
actualStr += string(b)
os.Remove(f.Name())
f.Close()
chunk.closeReader()
}
if actualStr != expectedStr {
@ -59,15 +57,8 @@ func TestPartProducerConsumesEntireReaderWithoutError(t *testing.T) {
}
}
func TestPartProducerExitsWhenDoneChannelIsClosed(t *testing.T) {
fileChan := make(chan *os.File)
doneChan := make(chan struct{})
pp := s3PartProducer{
store: &S3Store{},
done: doneChan,
files: fileChan,
r: InfiniteZeroReader{},
}
func TestPartProducerExitsWhenProducerIsStopped(t *testing.T) {
pp, fileChan := newS3PartProducer(InfiniteZeroReader{}, 0, "", testSummary)
completedChan := make(chan struct{})
go func() {
@ -75,35 +66,7 @@ func TestPartProducerExitsWhenDoneChannelIsClosed(t *testing.T) {
completedChan <- struct{}{}
}()
close(doneChan)
select {
case <-completedChan:
// producer exited cleanly
case <-time.After(2 * time.Second):
t.Error("timed out waiting for producer to exit")
}
safelyDrainChannelOrFail(fileChan, t)
}
func TestPartProducerExitsWhenDoneChannelIsClosedBeforeAnyPartIsSent(t *testing.T) {
fileChan := make(chan *os.File)
doneChan := make(chan struct{})
pp := s3PartProducer{
store: &S3Store{},
done: doneChan,
files: fileChan,
r: InfiniteZeroReader{},
}
close(doneChan)
completedChan := make(chan struct{})
go func() {
pp.produce(10)
completedChan <- struct{}{}
}()
pp.stop()
select {
case <-completedChan:
@ -116,14 +79,7 @@ func TestPartProducerExitsWhenDoneChannelIsClosedBeforeAnyPartIsSent(t *testing.
}
func TestPartProducerExitsWhenUnableToReadFromFile(t *testing.T) {
fileChan := make(chan *os.File)
doneChan := make(chan struct{})
pp := s3PartProducer{
store: &S3Store{},
done: doneChan,
files: fileChan,
r: ErrorReader{},
}
pp, fileChan := newS3PartProducer(ErrorReader{}, 0, "", testSummary)
completedChan := make(chan struct{})
go func() {
@ -145,12 +101,12 @@ func TestPartProducerExitsWhenUnableToReadFromFile(t *testing.T) {
}
}
func safelyDrainChannelOrFail(c chan *os.File, t *testing.T) {
func safelyDrainChannelOrFail(c <-chan fileChunk, t *testing.T) {
// At this point, we've signaled that the producer should exit, but it may write a few files
// into the channel before closing it and exiting. Make sure that the channel
// gets closed eventually.
for i := 0; i < 100; i++ {
if f := <-c; f == nil {
if _, more := <-c; !more {
return
}
}


@ -171,6 +171,9 @@ func TestNewUploadWithMetadataObjectPrefix(t *testing.T) {
assert.NotNil(upload)
}
// This test ensures that a newly created upload without any chunks can be
// directly finished. There are no calls to ListParts or HeadObject because
// the upload is not fetched from S3 first.
func TestEmptyUpload(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@ -193,14 +196,6 @@ func TestEmptyUpload(t *testing.T) {
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
ContentLength: aws.Int64(int64(208)),
}),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{},
}, nil),
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
@ -272,6 +267,17 @@ func TestGetInfoNotFound(t *testing.T) {
Key: aws.String("uploadId.info"),
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(nil, awserr.New("NoSuchUpload", "Not found", nil))
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -287,47 +293,52 @@ func TestGetInfo(t *testing.T) {
s3obj := NewMockS3API(mockCtrl)
store := New("bucket", s3obj)
gomock.InOrder(
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
},
{
Size: aws.Int64(200),
},
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
PartNumber: aws.Int64(1),
Size: aws.Int64(100),
ETag: aws.String("etag-1"),
},
NextPartNumberMarker: aws.Int64(2),
IsTruncated: aws.Bool(true),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(2),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
},
{
PartNumber: aws.Int64(2),
Size: aws.Int64(200),
ETag: aws.String("etag-2"),
},
}, nil),
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
)
},
NextPartNumberMarker: aws.Int64(2),
// Simulate a truncated response, so s3store should send a second request
IsTruncated: aws.Bool(true),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(2),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
PartNumber: aws.Int64(3),
Size: aws.Int64(100),
ETag: aws.String("etag-3"),
},
},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -353,47 +364,52 @@ func TestGetInfoWithMetadataObjectPrefix(t *testing.T) {
store := New("bucket", s3obj)
store.MetadataObjectPrefix = "my/metadata"
gomock.InOrder(
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("my/metadata/uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
},
{
Size: aws.Int64(200),
},
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("my/metadata/uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
PartNumber: aws.Int64(1),
Size: aws.Int64(100),
ETag: aws.String("etag-1"),
},
NextPartNumberMarker: aws.Int64(2),
IsTruncated: aws.Bool(true),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(2),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
},
{
PartNumber: aws.Int64(2),
Size: aws.Int64(200),
ETag: aws.String("etag-2"),
},
}, nil),
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("my/metadata/uploadId.part"),
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
)
},
NextPartNumberMarker: aws.Int64(2),
// Simulate a truncated response, so s3store should send a second request
IsTruncated: aws.Bool(true),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(2),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
PartNumber: aws.Int64(3),
Size: aws.Int64(100),
ETag: aws.String("etag-3"),
},
},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("my/metadata/uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -418,27 +434,24 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
s3obj := NewMockS3API(mockCtrl)
store := New("bucket", s3obj)
gomock.InOrder(
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(&s3.GetObjectOutput{
ContentLength: aws.Int64(10),
Body: ioutil.NopCloser(bytes.NewReader([]byte("0123456789"))),
}, nil),
)
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(&s3.HeadObjectOutput{
ContentLength: aws.Int64(10),
}, nil)
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -457,20 +470,22 @@ func TestGetInfoFinished(t *testing.T) {
s3obj := NewMockS3API(mockCtrl)
store := New("bucket", s3obj)
gomock.InOrder(
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
)
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil))
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -561,7 +576,7 @@ func TestGetReaderNotFinished(t *testing.T) {
content, err := upload.GetReader(context.Background())
assert.Nil(content)
assert.Equal("cannot stream non-finished upload", err.Error())
assert.Equal("ERR_INCOMPLETE_UPLOAD: cannot stream non-finished upload", err.Error())
}
func TestDeclareLength(t *testing.T) {
@ -572,32 +587,30 @@ func TestDeclareLength(t *testing.T) {
s3obj := NewMockS3API(mockCtrl)
store := New("bucket", s3obj)
gomock.InOrder(
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":true,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`))),
}, nil),
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{},
}, nil),
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NotFound", "Not Found", nil)),
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
ContentLength: aws.Int64(int64(208)),
}),
)
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":true,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NotFound", "Not Found", nil))
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
ContentLength: aws.Int64(int64(208)),
})
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -617,64 +630,72 @@ func TestFinishUpload(t *testing.T) {
s3obj := NewMockS3API(mockCtrl)
store := New("bucket", s3obj)
gomock.InOrder(
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":400,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
ETag: aws.String("etag-1"),
PartNumber: aws.Int64(1),
},
{
Size: aws.Int64(200),
ETag: aws.String("etag-2"),
PartNumber: aws.Int64(2),
},
},
NextPartNumberMarker: aws.Int64(2),
IsTruncated: aws.Bool(true),
}, nil)
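// The response above is marked as truncated, so the store must fetch a
// second page of parts using the returned NextPartNumberMarker.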
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(2),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
ETag: aws.String("etag-3"),
PartNumber: aws.Int64(3),
},
},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NotFound", "Not Found", nil))
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: []*s3.CompletedPart{
{
ETag: aws.String("etag-1"),
PartNumber: aws.Int64(1),
},
{
ETag: aws.String("etag-2"),
PartNumber: aws.Int64(2),
},
{
ETag: aws.String("etag-3"),
PartNumber: aws.Int64(3),
},
},
},
}).Return(nil, nil)
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -696,6 +717,7 @@ func TestWriteChunk(t *testing.T) {
store.MaxMultipartParts = 10000
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
// From GetInfo
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
@ -710,50 +732,58 @@ func TestWriteChunk(t *testing.T) {
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
ETag: aws.String("etag-1"),
PartNumber: aws.Int64(1),
},
{
Size: aws.Int64(200),
ETag: aws.String("etag-2"),
PartNumber: aws.Int64(2),
},
},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
// From WriteChunk
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(3),
Body: bytes.NewReader([]byte("1234")),
ContentLength: aws.Int64(4),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-3"),
}, nil)
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(4),
Body: bytes.NewReader([]byte("5678")),
ContentLength: aws.Int64(4),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-4"),
}, nil)
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(5),
Body: bytes.NewReader([]byte("90AB")),
ContentLength: aws.Int64(4),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-5"),
}, nil)
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
Body: bytes.NewReader([]byte("CD")),
})).Return(nil, nil)
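// The 14-byte chunk is split into three 4-byte parts (numbers 3 to 5);
// the trailing two bytes ("CD") are below the minimum part size and are
// therefore stored in the incomplete part object uploadId.part.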
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -785,29 +815,27 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
Size: aws.Int64(100),
ETag: aws.String("etag-1"),
PartNumber: aws.Int64(1),
},
{
Size: aws.Int64(200),
ETag: aws.String("etag-2"),
PartNumber: aws.Int64(2),
},
},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist", nil))
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
Body: bytes.NewReader([]byte("1234567890")),
})).Return(nil, nil)
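// As the test name says: the entire 10-byte chunk is smaller than the
// minimum part size, so no multipart part is uploaded at all and the
// data only lands in the incomplete part object.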
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -836,12 +864,19 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(&s3.HeadObjectOutput{
ContentLength: aws.Int64(3),
}, nil)
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
@ -854,29 +889,27 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
Bucket: aws.String(store.Bucket),
Key: aws.String("uploadId.part"),
}).Return(&s3.DeleteObjectOutput{}, nil)
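// The incomplete part ("123") has been fetched and deleted above; it is
// now prepended to the new chunk and re-uploaded as regular parts.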
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(1),
Body: bytes.NewReader([]byte("1234")),
ContentLength: aws.Int64(4),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-1"),
}, nil)
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(2),
Body: bytes.NewReader([]byte("5")),
ContentLength: aws.Int64(1),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-2"),
}, nil)
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -910,33 +943,40 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(&s3.HeadObjectOutput{
ContentLength: aws.Int64(3),
}, nil)
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(&s3.GetObjectOutput{
ContentLength: aws.Int64(3),
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
}, nil)
s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(store.Bucket),
Key: aws.String("uploadId.part"),
}).Return(&s3.DeleteObjectOutput{}, nil)
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(1),
Body: bytes.NewReader([]byte("1234")),
ContentLength: aws.Int64(4),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-1"),
}, nil)
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
Body: bytes.NewReader([]byte("5")),
})).Return(nil, nil)
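// The single trailing byte is again too small for a part of its own and
// becomes the new incomplete part object.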
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -969,28 +1009,31 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{
{
PartNumber: aws.Int64(1),
Size: aws.Int64(400),
ETag: aws.String("etag-1"),
},
{
PartNumber: aws.Int64(2),
Size: aws.Int64(90),
ETag: aws.String("etag-2"),
},
},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("AccessDenied", "Access Denied.", nil))
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(3),
Body: bytes.NewReader([]byte("1234567890")),
})).Return(nil, nil)
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
PartNumber: aws.Int64(3),
Body: bytes.NewReader([]byte("1234567890")),
ContentLength: aws.Int64(10),
})).Return(&s3.UploadPartOutput{
ETag: aws.String("etag-3"),
}, nil)
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
@ -1101,13 +1144,33 @@ func TestConcatUploadsUsingMultipart(t *testing.T) {
store := New("bucket", s3obj)
store.MinPartSize = 100
// Calls from NewUpload
s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
Metadata: map[string]*string{},
}).Return(&s3.CreateMultipartUploadOutput{
UploadId: aws.String("multipartId"),
}, nil)
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":true,"PartialUploads":["aaa+AAA","bbb+BBB","ccc+CCC"],"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
ContentLength: aws.Int64(int64(234)),
})
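// NewUpload is expected to create the multipart upload and persist the
// serialized FileInfo (234 bytes) as uploadId.info.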
// Calls from ConcatUploads
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
CopySource: aws.String("bucket/aaa"),
PartNumber: aws.Int64(1),
}).Return(&s3.UploadPartCopyOutput{
CopyPartResult: &s3.CopyPartResult{
ETag: aws.String("etag-1"),
},
}, nil)
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
Bucket: aws.String("bucket"),
@ -1115,7 +1178,11 @@ func TestConcatUploadsUsingMultipart(t *testing.T) {
UploadId: aws.String("multipartId"),
CopySource: aws.String("bucket/bbb"),
PartNumber: aws.Int64(2),
}).Return(&s3.UploadPartCopyOutput{
CopyPartResult: &s3.CopyPartResult{
ETag: aws.String("etag-2"),
},
}, nil)
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
Bucket: aws.String("bucket"),
@ -1123,55 +1190,45 @@ func TestConcatUploadsUsingMultipart(t *testing.T) {
UploadId: aws.String("multipartId"),
CopySource: aws.String("bucket/ccc"),
PartNumber: aws.Int64(3),
}).Return(&s3.UploadPartCopyOutput{
CopyPartResult: &s3.CopyPartResult{
ETag: aws.String("etag-3"),
},
}, nil)
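// ConcatUploads copies each partial upload into the final multipart
// upload via UploadPartCopy; the returned ETags feed the completion
// request below.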
// Calls from FinishUpload
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId"),
UploadId: aws.String("multipartId"),
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: []*s3.CompletedPart{
{
ETag: aws.String("etag-1"),
PartNumber: aws.Int64(1),
},
{
ETag: aws.String("etag-2"),
PartNumber: aws.Int64(2),
},
{
ETag: aws.String("etag-3"),
PartNumber: aws.Int64(3),
},
},
},
}).Return(nil, nil)
info := handler.FileInfo{
ID: "uploadId",
IsFinal: true,
PartialUploads: []string{
"aaa+AAA",
"bbb+BBB",
"ccc+CCC",
},
}
upload, err := store.NewUpload(context.Background(), info)
assert.Nil(err)
uploadA, err := store.GetUpload(context.Background(), "aaa+AAA")
@ -1269,12 +1326,13 @@ type s3APIWithTempFileAssertion struct {
func (s s3APIWithTempFileAssertion) UploadPartWithContext(context.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) {
assert := s.assert
// Make sure that there are temporary files from tusd in here.
files, err := ioutil.ReadDir(s.tempDir)
assert.Nil(err)
for _, file := range files {
assert.True(strings.HasPrefix(file.Name(), "tusd-s3-tmp-"))
}
assert.GreaterOrEqual(len(files), 1)
assert.LessOrEqual(len(files), 3)
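// Depending on how far the concurrent part uploads have progressed,
// between one and three temporary files may exist at this moment, and
// every one of them must carry the tusd-s3-tmp- prefix.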
@ -1317,7 +1375,7 @@ func TestWriteChunkCleansUpTempFiles(t *testing.T) {
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.info"),
}).Return(&s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":14,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
}, nil)
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
Bucket: aws.String("bucket"),
@ -1325,30 +1383,19 @@ func TestWriteChunkCleansUpTempFiles(t *testing.T) {
UploadId: aws.String("multipartId"),
PartNumberMarker: aws.Int64(0),
}).Return(&s3.ListPartsOutput{
Parts: []*s3.Part{},
}, nil)
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("uploadId.part"),
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
// No calls to s3obj.EXPECT().UploadPartWithContext since that is handled by s3APIWithTempFileAssertion
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
bytesRead, err := upload.WriteChunk(context.Background(), 0, bytes.NewReader([]byte("1234567890ABCD")))
assert.NotNil(err)
assert.Equal(err.Error(), "not now")
assert.Equal(int64(0), bytesRead)


@ -15,7 +15,7 @@ function compile {
local dir="tusd_${os}_${arch}"
rm -rf "$dir"
mkdir -p "$dir"
GOOS=$os GOARCH=$arch CGO_ENABLED=0 go build \
-trimpath \
-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
-o "$dir/tusd$ext" ./cmd/tusd/main.go