Compare commits
58 Commits
Author | SHA1 | Date |
---|---|---|
Marius Kleidl | 59e4ecd927 | |
Marius Kleidl | 689740bac2 | |
Marius | 71765b61e4 | |
Marius Kleidl | bbf9e6011d | |
Marius | 3f042b97a0 | |
Marius Kleidl | 79f848e390 | |
Marius Kleidl | 899b4f9d98 | |
Marius Kleidl | 633a088870 | |
Marius Kleidl | 7b9030c33a | |
Marius Kleidl | b10c1876b1 | |
Marius Kleidl | d5eca08944 | |
Marius | a75c24996f | |
Marius | 158b242fbe | |
Marius | 621de70da4 | |
Marius | edf8238af9 | |
Marius | aff352c413 | |
Marius | 70ba0f5540 | |
Marius | f0faa8e556 | |
jonaskaltenbachz | 707f41be2b | |
Marius | 920deb3df7 | |
Marius | 870c434485 | |
Marius | 5d8c2beed3 | |
Marius | eec6a14d4a | |
Marius | be6f50f14f | |
Marius | cab456900a | |
Marius | f680b9f1ff | |
Marius | efe8c9ce05 | |
Marius | 1038298a79 | |
Marius | 1e69d9ba68 | |
Marius | 9ef0b54c7c | |
Marius | c0f2026e96 | |
Marius | aace4704d7 | |
Marius | 9508fd3910 | |
Marius | afc9f10704 | |
Marius | f8e3337948 | |
Marius | 7eae867ec1 | |
Marius | e77cc64063 | |
Marius | 211feb9ab9 | |
Marius | e52139f977 | |
Marius | 04e786e81a | |
Marius | 12c10bf62f | |
Marius | a05c090d05 | |
Marius | aca18332d1 | |
Marius | b2273d4153 | |
Marius | c1eddef26a | |
Marius | 92d704f43f | |
Marius | bc51cb05c0 | |
Marius | 387b04a2e2 | |
Marius | 675e767ee6 | |
Marius | 934265dd38 | |
Marius | ccdfe8e604 | |
Marius | 36f12b1d18 | |
Marius | 946539c3b9 | |
Marius | f4314dd360 | |
Marius | 0f24a80ea5 | |
Marius | ce54ff8b1f | |
Marius | 578731ab0b | |
Marius | 8fd18364e7 |
|
@ -5,4 +5,6 @@ node_modules/
|
||||||
.DS_Store
|
.DS_Store
|
||||||
./tusd
|
./tusd
|
||||||
tusd_*_*
|
tusd_*_*
|
||||||
|
__pycache__/
|
||||||
|
examples/hooks/plugin/hook_handler
|
||||||
.idea/
|
.idea/
|
||||||
|
|
|
@ -6,17 +6,21 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/azurestore"
|
"github.com/tus/tusd/v2/pkg/azurestore"
|
||||||
"github.com/tus/tusd/pkg/filelocker"
|
"github.com/tus/tusd/v2/pkg/filestore"
|
||||||
"github.com/tus/tusd/pkg/filestore"
|
"github.com/tus/tusd/v2/pkg/gcsstore"
|
||||||
"github.com/tus/tusd/pkg/gcsstore"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/memorylocker"
|
||||||
"github.com/tus/tusd/pkg/memorylocker"
|
"github.com/tus/tusd/v2/pkg/s3store"
|
||||||
"github.com/tus/tusd/pkg/s3store"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
|
||||||
|
"github.com/minio/minio-go/v7"
|
||||||
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
)
|
)
|
||||||
|
|
||||||
var Composer *handler.StoreComposer
|
var Composer *handler.StoreComposer
|
||||||
|
@ -26,47 +30,76 @@ func CreateComposer() {
|
||||||
// If not, we default to storing them locally on disk.
|
// If not, we default to storing them locally on disk.
|
||||||
Composer = handler.NewStoreComposer()
|
Composer = handler.NewStoreComposer()
|
||||||
if Flags.S3Bucket != "" {
|
if Flags.S3Bucket != "" {
|
||||||
s3Config := aws.NewConfig()
|
var s3Api s3store.S3API
|
||||||
|
|
||||||
if Flags.S3TransferAcceleration {
|
if !Flags.S3UseMinioSDK {
|
||||||
s3Config = s3Config.WithS3UseAccelerate(true)
|
s3Config := aws.NewConfig()
|
||||||
}
|
|
||||||
|
|
||||||
if Flags.S3DisableContentHashes {
|
|
||||||
// Prevent the S3 service client from automatically
|
|
||||||
// adding the Content-MD5 header to S3 Object Put and Upload API calls.
|
|
||||||
s3Config = s3Config.WithS3DisableContentMD5Validation(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
if Flags.S3DisableSSL {
|
|
||||||
// Disable HTTPS and only use HTTP (helpful for debugging requests).
|
|
||||||
s3Config = s3Config.WithDisableSSL(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
if Flags.S3Endpoint == "" {
|
|
||||||
|
|
||||||
if Flags.S3TransferAcceleration {
|
if Flags.S3TransferAcceleration {
|
||||||
stdout.Printf("Using 's3://%s' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.\n", Flags.S3Bucket)
|
s3Config = s3Config.WithS3UseAccelerate(true)
|
||||||
} else {
|
|
||||||
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
if Flags.S3DisableContentHashes {
|
||||||
stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)
|
// Prevent the S3 service client from automatically
|
||||||
|
// adding the Content-MD5 header to S3 Object Put and Upload API calls.
|
||||||
|
//
|
||||||
|
// Note: For now, we do not set S3DisableContentMD5Validation because when terminating an upload,
|
||||||
|
// a signature is required. If not present, S3 will complain:
|
||||||
|
// InvalidRequest: Missing required header for this request: Content-MD5 OR x-amz-checksum-*
|
||||||
|
// So for now, this flag will only cause hashes to be disabled for the UploadPart operation (see s3store.go).
|
||||||
|
//s3Config = s3Config.WithS3DisableContentMD5Validation(true)
|
||||||
|
}
|
||||||
|
|
||||||
s3Config = s3Config.WithEndpoint(Flags.S3Endpoint).WithS3ForcePathStyle(true)
|
if Flags.S3DisableSSL {
|
||||||
|
// Disable HTTPS and only use HTTP (helpful for debugging requests).
|
||||||
|
s3Config = s3Config.WithDisableSSL(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
if Flags.S3Endpoint == "" {
|
||||||
|
if Flags.S3TransferAcceleration {
|
||||||
|
stdout.Printf("Using 's3://%s' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.\n", Flags.S3Bucket)
|
||||||
|
} else {
|
||||||
|
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)
|
||||||
|
|
||||||
|
s3Config = s3Config.WithEndpoint(Flags.S3Endpoint).WithS3ForcePathStyle(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Derive credentials from default credential chain (env, shared, ec2 instance role)
|
||||||
|
// as per https://github.com/aws/aws-sdk-go#configuring-credentials
|
||||||
|
s3Api = s3.New(session.Must(session.NewSession()), s3Config)
|
||||||
|
} else {
|
||||||
|
core, err := minio.NewCore(Flags.S3Endpoint, &minio.Options{
|
||||||
|
Creds: credentials.NewEnvAWS(),
|
||||||
|
Secure: !Flags.S3DisableSSL,
|
||||||
|
Region: os.Getenv("AWS_REGION"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
stderr.Fatalf("Unable to create Minio SDK: %s\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Flags.S3TransferAcceleration
|
||||||
|
// TODO: Flags.S3DisableContentHashes
|
||||||
|
|
||||||
|
s3Api = s3store.NewMinioS3API(core)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Derive credentials from default credential chain (env, shared, ec2 instance role)
|
store := s3store.New(Flags.S3Bucket, s3Api)
|
||||||
// as per https://github.com/aws/aws-sdk-go#configuring-credentials
|
|
||||||
store := s3store.New(Flags.S3Bucket, s3.New(session.Must(session.NewSession()), s3Config))
|
|
||||||
store.ObjectPrefix = Flags.S3ObjectPrefix
|
store.ObjectPrefix = Flags.S3ObjectPrefix
|
||||||
store.PreferredPartSize = Flags.S3PartSize
|
store.PreferredPartSize = Flags.S3PartSize
|
||||||
|
store.MaxBufferedParts = Flags.S3MaxBufferedParts
|
||||||
store.DisableContentHashes = Flags.S3DisableContentHashes
|
store.DisableContentHashes = Flags.S3DisableContentHashes
|
||||||
|
store.SetConcurrentPartUploads(Flags.S3ConcurrentPartUploads)
|
||||||
store.UseIn(Composer)
|
store.UseIn(Composer)
|
||||||
|
|
||||||
locker := memorylocker.New()
|
locker := memorylocker.New()
|
||||||
locker.UseIn(Composer)
|
locker.UseIn(Composer)
|
||||||
|
|
||||||
|
// Attach the metrics from S3 store to the global Prometheus registry
|
||||||
|
// TODO: Do not use the global registry here.
|
||||||
|
store.RegisterMetrics(prometheus.DefaultRegisterer)
|
||||||
} else if Flags.GCSBucket != "" {
|
} else if Flags.GCSBucket != "" {
|
||||||
if Flags.GCSObjectPrefix != "" && strings.Contains(Flags.GCSObjectPrefix, "_") {
|
if Flags.GCSObjectPrefix != "" && strings.Contains(Flags.GCSObjectPrefix, "_") {
|
||||||
stderr.Fatalf("gcs-object-prefix value (%s) can't contain underscore. "+
|
stderr.Fatalf("gcs-object-prefix value (%s) can't contain underscore. "+
|
||||||
|
@ -151,7 +184,9 @@ func CreateComposer() {
|
||||||
store := filestore.New(dir)
|
store := filestore.New(dir)
|
||||||
store.UseIn(Composer)
|
store.UseIn(Composer)
|
||||||
|
|
||||||
locker := filelocker.New(dir)
|
// TODO: Do not use filelocker here, because it does not implement the lock
|
||||||
|
// release mechanism yet.
|
||||||
|
locker := memorylocker.New()
|
||||||
locker.UseIn(Composer)
|
locker.UseIn(Composer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2,15 +2,10 @@ package cli
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime/pprof"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/tus/tusd/cmd/tusd/cli/hooks"
|
"github.com/tus/tusd/v2/cmd/tusd/cli/hooks"
|
||||||
)
|
)
|
||||||
|
|
||||||
var Flags struct {
|
var Flags struct {
|
||||||
|
@ -29,8 +24,11 @@ var Flags struct {
|
||||||
S3ObjectPrefix string
|
S3ObjectPrefix string
|
||||||
S3Endpoint string
|
S3Endpoint string
|
||||||
S3PartSize int64
|
S3PartSize int64
|
||||||
|
S3MaxBufferedParts int64
|
||||||
S3DisableContentHashes bool
|
S3DisableContentHashes bool
|
||||||
S3DisableSSL bool
|
S3DisableSSL bool
|
||||||
|
S3ConcurrentPartUploads int
|
||||||
|
S3UseMinioSDK bool
|
||||||
GCSBucket string
|
GCSBucket string
|
||||||
GCSObjectPrefix string
|
GCSObjectPrefix string
|
||||||
AzStorage string
|
AzStorage string
|
||||||
|
@ -39,6 +37,7 @@ var Flags struct {
|
||||||
AzObjectPrefix string
|
AzObjectPrefix string
|
||||||
AzEndpoint string
|
AzEndpoint string
|
||||||
EnabledHooksString string
|
EnabledHooksString string
|
||||||
|
PluginHookPath string
|
||||||
FileHooksDir string
|
FileHooksDir string
|
||||||
HttpHooksEndpoint string
|
HttpHooksEndpoint string
|
||||||
HttpHooksForwardHeaders string
|
HttpHooksForwardHeaders string
|
||||||
|
@ -47,20 +46,22 @@ var Flags struct {
|
||||||
GrpcHooksEndpoint string
|
GrpcHooksEndpoint string
|
||||||
GrpcHooksRetry int
|
GrpcHooksRetry int
|
||||||
GrpcHooksBackoff int
|
GrpcHooksBackoff int
|
||||||
HooksStopUploadCode int
|
|
||||||
PluginHookPath string
|
|
||||||
EnabledHooks []hooks.HookType
|
EnabledHooks []hooks.HookType
|
||||||
|
ProgressHooksInterval int64
|
||||||
ShowVersion bool
|
ShowVersion bool
|
||||||
ExposeMetrics bool
|
ExposeMetrics bool
|
||||||
MetricsPath string
|
MetricsPath string
|
||||||
|
ExposePprof bool
|
||||||
|
PprofPath string
|
||||||
|
PprofBlockProfileRate int
|
||||||
|
PprofMutexProfileRate int
|
||||||
BehindProxy bool
|
BehindProxy bool
|
||||||
VerboseOutput bool
|
VerboseOutput bool
|
||||||
S3TransferAcceleration bool
|
S3TransferAcceleration bool
|
||||||
TLSCertFile string
|
TLSCertFile string
|
||||||
TLSKeyFile string
|
TLSKeyFile string
|
||||||
TLSMode string
|
TLSMode string
|
||||||
|
ShutdownTimeout int64
|
||||||
CPUProfile string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseFlags() {
|
func ParseFlags() {
|
||||||
|
@ -79,8 +80,11 @@ func ParseFlags() {
|
||||||
flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
|
flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
|
||||||
flag.StringVar(&Flags.S3Endpoint, "s3-endpoint", "", "Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be pass)")
|
flag.StringVar(&Flags.S3Endpoint, "s3-endpoint", "", "Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be pass)")
|
||||||
flag.Int64Var(&Flags.S3PartSize, "s3-part-size", 50*1024*1024, "Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future)")
|
flag.Int64Var(&Flags.S3PartSize, "s3-part-size", 50*1024*1024, "Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future)")
|
||||||
|
flag.Int64Var(&Flags.S3MaxBufferedParts, "s3-max-buffered-parts", 20, "Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future)")
|
||||||
flag.BoolVar(&Flags.S3DisableContentHashes, "s3-disable-content-hashes", false, "Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 for minimized CPU usage (experimental and may be removed in the future)")
|
flag.BoolVar(&Flags.S3DisableContentHashes, "s3-disable-content-hashes", false, "Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 for minimized CPU usage (experimental and may be removed in the future)")
|
||||||
flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)")
|
flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)")
|
||||||
|
flag.IntVar(&Flags.S3ConcurrentPartUploads, "s3-concurrent-part-uploads", 10, "Number of concurrent part uploads to S3 (experimental and may be removed in the future)")
|
||||||
|
flag.BoolVar(&Flags.S3UseMinioSDK, "s3-use-minio-sdk", false, "Use the Minio SDK interally (experimental)")
|
||||||
flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)")
|
flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)")
|
||||||
flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names")
|
flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names")
|
||||||
flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)")
|
flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)")
|
||||||
|
@ -89,6 +93,8 @@ func ParseFlags() {
|
||||||
flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names")
|
flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names")
|
||||||
flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)")
|
flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)")
|
||||||
flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events")
|
flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events")
|
||||||
|
flag.Int64Var(&Flags.ProgressHooksInterval, "progress-hooks-interval", 1000, "Interval in milliseconds at which the post-receive progress hooks are emitted for each active upload")
|
||||||
|
flag.StringVar(&Flags.PluginHookPath, "hooks-plugin", "", "Path to a Go plugin for loading hook functions")
|
||||||
flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
|
flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
|
||||||
flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent to")
|
flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent to")
|
||||||
flag.StringVar(&Flags.HttpHooksForwardHeaders, "hooks-http-forward-headers", "", "List of HTTP request headers to be forwarded from the client request to the hook endpoint")
|
flag.StringVar(&Flags.HttpHooksForwardHeaders, "hooks-http-forward-headers", "", "List of HTTP request headers to be forwarded from the client request to the hook endpoint")
|
||||||
|
@ -97,18 +103,20 @@ func ParseFlags() {
|
||||||
flag.StringVar(&Flags.GrpcHooksEndpoint, "hooks-grpc", "", "An gRPC endpoint to which hook events will be sent to")
|
flag.StringVar(&Flags.GrpcHooksEndpoint, "hooks-grpc", "", "An gRPC endpoint to which hook events will be sent to")
|
||||||
flag.IntVar(&Flags.GrpcHooksRetry, "hooks-grpc-retry", 3, "Number of times to retry on a server error or network timeout")
|
flag.IntVar(&Flags.GrpcHooksRetry, "hooks-grpc-retry", 3, "Number of times to retry on a server error or network timeout")
|
||||||
flag.IntVar(&Flags.GrpcHooksBackoff, "hooks-grpc-backoff", 1, "Number of seconds to wait before retrying each retry")
|
flag.IntVar(&Flags.GrpcHooksBackoff, "hooks-grpc-backoff", 1, "Number of seconds to wait before retrying each retry")
|
||||||
flag.IntVar(&Flags.HooksStopUploadCode, "hooks-stop-code", 0, "Return code from post-receive hook which causes tusd to stop and delete the current upload. A zero value means that no uploads will be stopped")
|
|
||||||
flag.StringVar(&Flags.PluginHookPath, "hooks-plugin", "", "Path to a Go plugin for loading hook functions (only supported on Linux and macOS; highly EXPERIMENTAL and may BREAK in the future)")
|
|
||||||
flag.BoolVar(&Flags.ShowVersion, "version", false, "Print tusd version information")
|
flag.BoolVar(&Flags.ShowVersion, "version", false, "Print tusd version information")
|
||||||
flag.BoolVar(&Flags.ExposeMetrics, "expose-metrics", true, "Expose metrics about tusd usage")
|
flag.BoolVar(&Flags.ExposeMetrics, "expose-metrics", true, "Expose metrics about tusd usage")
|
||||||
flag.StringVar(&Flags.MetricsPath, "metrics-path", "/metrics", "Path under which the metrics endpoint will be accessible")
|
flag.StringVar(&Flags.MetricsPath, "metrics-path", "/metrics", "Path under which the metrics endpoint will be accessible")
|
||||||
|
flag.BoolVar(&Flags.ExposePprof, "expose-pprof", false, "Expose the pprof interface over HTTP for profiling tusd")
|
||||||
|
flag.StringVar(&Flags.PprofPath, "pprof-path", "/debug/pprof/", "Path under which the pprof endpoint will be accessible")
|
||||||
|
flag.IntVar(&Flags.PprofBlockProfileRate, "pprof-block-profile-rate", 0, "Fraction of goroutine blocking events that are reported in the blocking profile")
|
||||||
|
flag.IntVar(&Flags.PprofMutexProfileRate, "pprof-mutex-profile-rate", 0, "Fraction of mutex contention events that are reported in the mutex profile")
|
||||||
flag.BoolVar(&Flags.BehindProxy, "behind-proxy", false, "Respect X-Forwarded-* and similar headers which may be set by proxies")
|
flag.BoolVar(&Flags.BehindProxy, "behind-proxy", false, "Respect X-Forwarded-* and similar headers which may be set by proxies")
|
||||||
flag.BoolVar(&Flags.VerboseOutput, "verbose", true, "Enable verbose logging output")
|
flag.BoolVar(&Flags.VerboseOutput, "verbose", true, "Enable verbose logging output")
|
||||||
flag.BoolVar(&Flags.S3TransferAcceleration, "s3-transfer-acceleration", false, "Use AWS S3 transfer acceleration endpoint (requires -s3-bucket option and Transfer Acceleration property on S3 bucket to be set)")
|
flag.BoolVar(&Flags.S3TransferAcceleration, "s3-transfer-acceleration", false, "Use AWS S3 transfer acceleration endpoint (requires -s3-bucket option and Transfer Acceleration property on S3 bucket to be set)")
|
||||||
flag.StringVar(&Flags.TLSCertFile, "tls-certificate", "", "Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.")
|
flag.StringVar(&Flags.TLSCertFile, "tls-certificate", "", "Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.")
|
||||||
flag.StringVar(&Flags.TLSKeyFile, "tls-key", "", "Path to the file containing the key for the TLS certificate.")
|
flag.StringVar(&Flags.TLSKeyFile, "tls-key", "", "Path to the file containing the key for the TLS certificate.")
|
||||||
flag.StringVar(&Flags.TLSMode, "tls-mode", "tls12", "Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong.")
|
flag.StringVar(&Flags.TLSMode, "tls-mode", "tls12", "Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong.")
|
||||||
flag.StringVar(&Flags.CPUProfile, "cpuprofile", "", "write cpu profile to file")
|
flag.Int64Var(&Flags.ShutdownTimeout, "shutdown-timeout", 10*1000, "Timeout in milliseconds for closing connections gracefully during shutdown. After the timeout, tusd will exit regardless of any open connection.")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
SetEnabledHooks()
|
SetEnabledHooks()
|
||||||
|
@ -116,20 +124,6 @@ func ParseFlags() {
|
||||||
if Flags.FileHooksDir != "" {
|
if Flags.FileHooksDir != "" {
|
||||||
Flags.FileHooksDir, _ = filepath.Abs(Flags.FileHooksDir)
|
Flags.FileHooksDir, _ = filepath.Abs(Flags.FileHooksDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
if Flags.CPUProfile != "" {
|
|
||||||
f, err := os.Create(Flags.CPUProfile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
pprof.StartCPUProfile(f)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
<-time.After(20 * time.Second)
|
|
||||||
pprof.StopCPUProfile()
|
|
||||||
fmt.Println("Stopped CPU profile")
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetEnabledHooks() {
|
func SetEnabledHooks() {
|
||||||
|
|
|
@ -1,12 +1,10 @@
|
||||||
package cli
|
package cli
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/tus/tusd/cmd/tusd/cli/hooks"
|
"github.com/tus/tusd/v2/cmd/tusd/cli/hooks"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
var hookHandler hooks.HookHandler = nil
|
var hookHandler hooks.HookHandler = nil
|
||||||
|
@ -20,27 +18,52 @@ func hookTypeInSlice(a hooks.HookType, list []hooks.HookType) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func hookCallback(typ hooks.HookType, info handler.HookEvent) error {
|
func preCreateCallback(event handler.HookEvent) (handler.HTTPResponse, handler.FileInfoChanges, error) {
|
||||||
if output, err := invokeHookSync(typ, info, true); err != nil {
|
ok, hookRes, err := invokeHookSync(hooks.HookPreCreate, event)
|
||||||
if hookErr, ok := err.(hooks.HookError); ok {
|
if !ok || err != nil {
|
||||||
return hooks.NewHookError(
|
return handler.HTTPResponse{}, handler.FileInfoChanges{}, err
|
||||||
fmt.Errorf("%s hook failed: %s", typ, err),
|
|
||||||
hookErr.StatusCode(),
|
|
||||||
hookErr.Body(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("%s hook failed: %s\n%s", typ, err, string(output))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
httpRes := hookRes.HTTPResponse
|
||||||
|
|
||||||
|
// If the hook response includes the instruction to reject the upload, reuse the error code
|
||||||
|
// and message from ErrUploadRejectedByServer, but also include custom HTTP response values.
|
||||||
|
if hookRes.RejectUpload {
|
||||||
|
err := handler.ErrUploadRejectedByServer
|
||||||
|
err.HTTPResponse = err.HTTPResponse.MergeWith(httpRes)
|
||||||
|
|
||||||
|
return handler.HTTPResponse{}, handler.FileInfoChanges{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pass any changes regarding file info from the hook to the handler.
|
||||||
|
changes := hookRes.ChangeFileInfo
|
||||||
|
return httpRes, changes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func preCreateCallback(info handler.HookEvent) error {
|
func preFinishCallback(event handler.HookEvent) (handler.HTTPResponse, error) {
|
||||||
return hookCallback(hooks.HookPreCreate, info)
|
ok, hookRes, err := invokeHookSync(hooks.HookPreFinish, event)
|
||||||
|
if !ok || err != nil {
|
||||||
|
return handler.HTTPResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
httpRes := hookRes.HTTPResponse
|
||||||
|
return httpRes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func preFinishCallback(info handler.HookEvent) error {
|
func postReceiveCallback(event handler.HookEvent) {
|
||||||
return hookCallback(hooks.HookPreFinish, info)
|
ok, hookRes, _ := invokeHookSync(hooks.HookPostReceive, event)
|
||||||
|
// invokeHookSync already logs the error, if any occurs. So by checking `ok`, we can ensure
|
||||||
|
// that the hook finished successfully
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if hookRes.StopUpload {
|
||||||
|
logEv(stdout, "HookStopUpload", "id", event.Upload.ID)
|
||||||
|
|
||||||
|
// TODO: Control response for PATCH request
|
||||||
|
event.Upload.StopUpload()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetupHookMetrics() {
|
func SetupHookMetrics() {
|
||||||
|
@ -50,6 +73,12 @@ func SetupHookMetrics() {
|
||||||
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0)
|
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0)
|
||||||
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
|
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
|
||||||
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0)
|
MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0)
|
||||||
|
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostFinish)).Add(0)
|
||||||
|
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostTerminate)).Add(0)
|
||||||
|
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostReceive)).Add(0)
|
||||||
|
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0)
|
||||||
|
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
|
||||||
|
MetricsHookInvocationsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetupPreHooks(config *handler.Config) error {
|
func SetupPreHooks(config *handler.Config) error {
|
||||||
|
@ -107,64 +136,61 @@ func SetupPostHooks(handler *handler.Handler) {
|
||||||
go func() {
|
go func() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case info := <-handler.CompleteUploads:
|
case event := <-handler.CompleteUploads:
|
||||||
invokeHookAsync(hooks.HookPostFinish, info)
|
invokeHookAsync(hooks.HookPostFinish, event)
|
||||||
case info := <-handler.TerminatedUploads:
|
case event := <-handler.TerminatedUploads:
|
||||||
invokeHookAsync(hooks.HookPostTerminate, info)
|
invokeHookAsync(hooks.HookPostTerminate, event)
|
||||||
case info := <-handler.UploadProgress:
|
case event := <-handler.CreatedUploads:
|
||||||
invokeHookAsync(hooks.HookPostReceive, info)
|
invokeHookAsync(hooks.HookPostCreate, event)
|
||||||
case info := <-handler.CreatedUploads:
|
case event := <-handler.UploadProgress:
|
||||||
invokeHookAsync(hooks.HookPostCreate, info)
|
go postReceiveCallback(event)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func invokeHookAsync(typ hooks.HookType, info handler.HookEvent) {
|
func invokeHookAsync(typ hooks.HookType, event handler.HookEvent) {
|
||||||
go func() {
|
go func() {
|
||||||
// Error handling is taken care by the function.
|
// Error handling is taken care by the function.
|
||||||
_, _ = invokeHookSync(typ, info, false)
|
_, _, _ = invokeHookSync(typ, event)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func invokeHookSync(typ hooks.HookType, info handler.HookEvent, captureOutput bool) ([]byte, error) {
|
// invokeHookSync executes a hook of the given type with the given event data. If
|
||||||
if !hookTypeInSlice(typ, Flags.EnabledHooks) {
|
// the hook was not executed properly (e.g. an error occurred or not handler is installed),
|
||||||
return nil, nil
|
// `ok` will be false and `res` is not filled. `err` can contain the underlying error.
|
||||||
|
// If `ok` is true, `res` contains the response as retrieved from the hook.
|
||||||
|
// Therefore, a caller should always check `ok` and `err` before assuming that the
|
||||||
|
// hook completed successfully.
|
||||||
|
func invokeHookSync(typ hooks.HookType, event handler.HookEvent) (ok bool, res hooks.HookResponse, err error) {
|
||||||
|
// Stop, if no hook handler is installed or this hook event is not enabled
|
||||||
|
if hookHandler == nil || !hookTypeInSlice(typ, Flags.EnabledHooks) {
|
||||||
|
return false, hooks.HookResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
id := info.Upload.ID
|
MetricsHookInvocationsTotal.WithLabelValues(string(typ)).Add(1)
|
||||||
size := info.Upload.Size
|
|
||||||
|
|
||||||
switch typ {
|
id := event.Upload.ID
|
||||||
case hooks.HookPostFinish:
|
|
||||||
logEv(stdout, "UploadFinished", "id", id, "size", strconv.FormatInt(size, 10))
|
|
||||||
case hooks.HookPostTerminate:
|
|
||||||
logEv(stdout, "UploadTerminated", "id", id)
|
|
||||||
}
|
|
||||||
|
|
||||||
if hookHandler == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
name := string(typ)
|
|
||||||
if Flags.VerboseOutput {
|
if Flags.VerboseOutput {
|
||||||
logEv(stdout, "HookInvocationStart", "type", name, "id", id)
|
logEv(stdout, "HookInvocationStart", "type", string(typ), "id", id)
|
||||||
}
|
}
|
||||||
|
|
||||||
output, returnCode, err := hookHandler.InvokeHook(typ, info, captureOutput)
|
res, err = hookHandler.InvokeHook(hooks.HookRequest{
|
||||||
|
Type: typ,
|
||||||
|
Event: event,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// If an error occurs during the hook execution, we log and track the error, but do not
|
||||||
|
// return a hook response.
|
||||||
logEv(stderr, "HookInvocationError", "type", string(typ), "id", id, "error", err.Error())
|
logEv(stderr, "HookInvocationError", "type", string(typ), "id", id, "error", err.Error())
|
||||||
MetricsHookErrorsTotal.WithLabelValues(string(typ)).Add(1)
|
MetricsHookErrorsTotal.WithLabelValues(string(typ)).Add(1)
|
||||||
} else if Flags.VerboseOutput {
|
return false, hooks.HookResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if Flags.VerboseOutput {
|
||||||
logEv(stdout, "HookInvocationFinish", "type", string(typ), "id", id)
|
logEv(stdout, "HookInvocationFinish", "type", string(typ), "id", id)
|
||||||
}
|
}
|
||||||
|
|
||||||
if typ == hooks.HookPostReceive && Flags.HooksStopUploadCode != 0 && Flags.HooksStopUploadCode == returnCode {
|
return true, res, nil
|
||||||
logEv(stdout, "HookStopUpload", "id", id)
|
|
||||||
|
|
||||||
info.Upload.StopUpload()
|
|
||||||
}
|
|
||||||
|
|
||||||
return output, err
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,11 +3,10 @@ package hooks
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type FileHook struct {
|
type FileHook struct {
|
||||||
|
@ -18,43 +17,50 @@ func (_ FileHook) Setup() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h FileHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
|
func (h FileHook) InvokeHook(req HookRequest) (res HookResponse, err error) {
|
||||||
hookPath := h.Directory + string(os.PathSeparator) + string(typ)
|
hookPath := h.Directory + string(os.PathSeparator) + string(req.Type)
|
||||||
cmd := exec.Command(hookPath)
|
cmd := exec.Command(hookPath)
|
||||||
env := os.Environ()
|
env := os.Environ()
|
||||||
env = append(env, "TUS_ID="+info.Upload.ID)
|
env = append(env, "TUS_ID="+req.Event.Upload.ID)
|
||||||
env = append(env, "TUS_SIZE="+strconv.FormatInt(info.Upload.Size, 10))
|
env = append(env, "TUS_SIZE="+strconv.FormatInt(req.Event.Upload.Size, 10))
|
||||||
env = append(env, "TUS_OFFSET="+strconv.FormatInt(info.Upload.Offset, 10))
|
env = append(env, "TUS_OFFSET="+strconv.FormatInt(req.Event.Upload.Offset, 10))
|
||||||
|
|
||||||
jsonInfo, err := json.Marshal(info)
|
jsonReq, err := json.Marshal(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return res, err
|
||||||
}
|
}
|
||||||
|
|
||||||
reader := bytes.NewReader(jsonInfo)
|
reader := bytes.NewReader(jsonReq)
|
||||||
cmd.Stdin = reader
|
cmd.Stdin = reader
|
||||||
|
|
||||||
cmd.Env = env
|
cmd.Env = env
|
||||||
cmd.Dir = h.Directory
|
cmd.Dir = h.Directory
|
||||||
cmd.Stderr = os.Stderr
|
cmd.Stderr = os.Stderr
|
||||||
|
|
||||||
// If `captureOutput` is true, this function will return the output (both,
|
output, err := cmd.Output()
|
||||||
// stderr and stdout), else it will use this process' stdout
|
|
||||||
var output []byte
|
|
||||||
if !captureOutput {
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
err = cmd.Run()
|
|
||||||
} else {
|
|
||||||
output, err = cmd.Output()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore the error, only, if the hook's file could not be found. This usually
|
// Ignore the error if the hook's file could not be found. This usually
|
||||||
// means that the user is only using a subset of the available hooks.
|
// means that the user is only using a subset of the available hooks.
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
err = nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
returnCode := cmd.ProcessState.ExitCode()
|
// Report error if the exit code was non-zero
|
||||||
|
if err, ok := err.(*exec.ExitError); ok {
|
||||||
|
return res, fmt.Errorf("unexpected return code %d from hook endpoint: %s", err.ProcessState.ExitCode(), string(output))
|
||||||
|
}
|
||||||
|
|
||||||
return output, returnCode, err
|
if err != nil {
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do not parse the output as JSON, if we received no output to reduce possible
|
||||||
|
// errors.
|
||||||
|
if len(output) > 0 {
|
||||||
|
if err = json.Unmarshal(output, &res); err != nil {
|
||||||
|
return res, fmt.Errorf("failed to parse hook response: %w, response was: %s", err, string(output))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,20 +2,19 @@ package hooks
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
|
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
pb "github.com/tus/tusd/v2/pkg/proto/v2"
|
||||||
pb "github.com/tus/tusd/pkg/proto/v1"
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type GrpcHook struct {
|
type GrpcHook struct {
|
||||||
Endpoint string
|
Endpoint string
|
||||||
MaxRetries int
|
MaxRetries int
|
||||||
Backoff int
|
Backoff int
|
||||||
Client pb.HookServiceClient
|
Client pb.HookHandlerClient
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *GrpcHook) Setup() error {
|
func (g *GrpcHook) Setup() error {
|
||||||
|
@ -31,44 +30,76 @@ func (g *GrpcHook) Setup() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
g.Client = pb.NewHookServiceClient(conn)
|
g.Client = pb.NewHookHandlerClient(conn)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *GrpcHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
|
func (g *GrpcHook) InvokeHook(hookReq HookRequest) (hookRes HookResponse, err error) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
req := &pb.SendRequest{Hook: marshal(typ, info)}
|
req := marshal(hookReq)
|
||||||
resp, err := g.Client.Send(ctx, req)
|
res, err := g.Client.InvokeHook(ctx, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if e, ok := status.FromError(err); ok {
|
return hookRes, err
|
||||||
return nil, int(e.Code()), err
|
|
||||||
}
|
|
||||||
return nil, 2, err
|
|
||||||
}
|
}
|
||||||
if captureOutput {
|
|
||||||
return resp.Response.GetValue(), 0, err
|
hookRes = unmarshal(res)
|
||||||
}
|
return hookRes, nil
|
||||||
return nil, 0, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func marshal(typ HookType, info handler.HookEvent) *pb.Hook {
|
func marshal(hookReq HookRequest) *pb.HookRequest {
|
||||||
return &pb.Hook{
|
event := hookReq.Event
|
||||||
Upload: &pb.Upload{
|
|
||||||
Id: info.Upload.ID,
|
return &pb.HookRequest{
|
||||||
Size: info.Upload.Size,
|
Type: string(hookReq.Type),
|
||||||
SizeIsDeferred: info.Upload.SizeIsDeferred,
|
Event: &pb.Event{
|
||||||
Offset: info.Upload.Offset,
|
Upload: &pb.FileInfo{
|
||||||
MetaData: info.Upload.MetaData,
|
Id: event.Upload.ID,
|
||||||
IsPartial: info.Upload.IsPartial,
|
Size: event.Upload.Size,
|
||||||
IsFinal: info.Upload.IsFinal,
|
SizeIsDeferred: event.Upload.SizeIsDeferred,
|
||||||
PartialUploads: info.Upload.PartialUploads,
|
Offset: event.Upload.Offset,
|
||||||
Storage: info.Upload.Storage,
|
MetaData: event.Upload.MetaData,
|
||||||
|
IsPartial: event.Upload.IsPartial,
|
||||||
|
IsFinal: event.Upload.IsFinal,
|
||||||
|
PartialUploads: event.Upload.PartialUploads,
|
||||||
|
Storage: event.Upload.Storage,
|
||||||
|
},
|
||||||
|
HttpRequest: &pb.HTTPRequest{
|
||||||
|
Method: event.HTTPRequest.Method,
|
||||||
|
Uri: event.HTTPRequest.URI,
|
||||||
|
RemoteAddr: event.HTTPRequest.RemoteAddr,
|
||||||
|
Header: getHeaders(event.HTTPRequest.Header),
|
||||||
|
},
|
||||||
},
|
},
|
||||||
HttpRequest: &pb.HTTPRequest{
|
|
||||||
Method: info.HTTPRequest.Method,
|
|
||||||
Uri: info.HTTPRequest.URI,
|
|
||||||
RemoteAddr: info.HTTPRequest.RemoteAddr,
|
|
||||||
},
|
|
||||||
Name: string(typ),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getHeaders(httpHeader http.Header) (hookHeader map[string]string) {
|
||||||
|
hookHeader = make(map[string]string)
|
||||||
|
for key, val := range httpHeader {
|
||||||
|
if key != "" && val != nil && len(val) > 0 {
|
||||||
|
hookHeader[key] = val[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hookHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshal(res *pb.HookResponse) (hookRes HookResponse) {
|
||||||
|
hookRes.RejectUpload = res.RejectUpload
|
||||||
|
hookRes.StopUpload = res.StopUpload
|
||||||
|
|
||||||
|
httpRes := res.HttpResponse
|
||||||
|
if httpRes != nil {
|
||||||
|
hookRes.HTTPResponse.StatusCode = int(httpRes.StatusCode)
|
||||||
|
hookRes.HTTPResponse.Headers = httpRes.Headers
|
||||||
|
hookRes.HTTPResponse.Body = httpRes.Body
|
||||||
|
}
|
||||||
|
|
||||||
|
changes := res.ChangeFileInfo
|
||||||
|
if changes != nil {
|
||||||
|
hookRes.ChangeFileInfo.ID = changes.Id
|
||||||
|
hookRes.ChangeFileInfo.MetaData = changes.MetaData
|
||||||
|
hookRes.ChangeFileInfo.Storage = changes.Storage
|
||||||
|
}
|
||||||
|
|
||||||
|
return hookRes
|
||||||
|
}
|
||||||
|
|
|
@ -1,12 +1,65 @@
|
||||||
package hooks
|
package hooks
|
||||||
|
|
||||||
|
// TODO: Move hooks into a package in /pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// HookHandler is the main inferface to be implemented by all hook backends.
|
||||||
type HookHandler interface {
|
type HookHandler interface {
|
||||||
|
// Setup is invoked once the hook backend is initalized.
|
||||||
Setup() error
|
Setup() error
|
||||||
InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error)
|
// InvokeHook is invoked for every hook that is executed. req contains the
|
||||||
|
// corresponding information about the hook type, the involved upload, and
|
||||||
|
// causing HTTP request.
|
||||||
|
// The return value res allows to stop or reject an upload, as well as modifying
|
||||||
|
// the HTTP response. See the documentation for HookResponse for more details.
|
||||||
|
// If err is not nil, the value of res will be ignored. err should only be
|
||||||
|
// non-nil if the hook failed to complete successfully.
|
||||||
|
InvokeHook(req HookRequest) (res HookResponse, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HookRequest contains the information about the hook type, the involved upload,
|
||||||
|
// and causing HTTP request.
|
||||||
|
type HookRequest struct {
|
||||||
|
// Type is the name of the hook.
|
||||||
|
Type HookType
|
||||||
|
// Event contains the involved upload and causing HTTP request.
|
||||||
|
Event handler.HookEvent
|
||||||
|
}
|
||||||
|
|
||||||
|
// HookResponse is the response after a hook is executed.
|
||||||
|
type HookResponse struct {
|
||||||
|
// HTTPResponse's fields can be filled to modify the HTTP response.
|
||||||
|
// This is only possible for pre-create, pre-finish and post-receive hooks.
|
||||||
|
// For other hooks this value is ignored.
|
||||||
|
// If multiple hooks modify the HTTP response, a later hook may overwrite the
|
||||||
|
// modified values from a previous hook (e.g. if multiple post-receive hooks
|
||||||
|
// are executed).
|
||||||
|
// Example usages: Send an error to the client if RejectUpload/StopUpload are
|
||||||
|
// set in the pre-create/post-receive hook. Send more information to the client
|
||||||
|
// in the pre-finish hook.
|
||||||
|
HTTPResponse handler.HTTPResponse
|
||||||
|
|
||||||
|
// RejectUpload will cause the upload to be rejected and not be created during
|
||||||
|
// POST request. This value is only respected for pre-create hooks. For other hooks,
|
||||||
|
// it is ignored. Use the HTTPResponse field to send details about the rejection
|
||||||
|
// to the client.
|
||||||
|
RejectUpload bool
|
||||||
|
|
||||||
|
// ChangeFileInfo can be set to change selected properties of an upload before
|
||||||
|
// it has been created. See the handler.FileInfoChanges type for more details.
|
||||||
|
// Changes are applied on a per-property basis, meaning that specifying just
|
||||||
|
// one property leaves all others unchanged.
|
||||||
|
// This value is only respected for pre-create hooks.
|
||||||
|
ChangeFileInfo handler.FileInfoChanges
|
||||||
|
|
||||||
|
// StopUpload will cause the upload to be stopped during a PATCH request.
|
||||||
|
// This value is only respected for post-receive hooks. For other hooks,
|
||||||
|
// it is ignored. Use the HTTPResponse field to send details about the stop
|
||||||
|
// to the client.
|
||||||
|
StopUpload bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type HookType string
|
type HookType string
|
||||||
|
@ -21,29 +74,3 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish}
|
var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish}
|
||||||
|
|
||||||
type hookDataStore struct {
|
|
||||||
handler.DataStore
|
|
||||||
}
|
|
||||||
|
|
||||||
type HookError struct {
|
|
||||||
error
|
|
||||||
statusCode int
|
|
||||||
body []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewHookError(err error, statusCode int, body []byte) HookError {
|
|
||||||
return HookError{err, statusCode, body}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (herr HookError) StatusCode() int {
|
|
||||||
return herr.statusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (herr HookError) Body() []byte {
|
|
||||||
return herr.body
|
|
||||||
}
|
|
||||||
|
|
||||||
func (herr HookError) Error() string {
|
|
||||||
return herr.error.Error()
|
|
||||||
}
|
|
||||||
|
|
|
@ -8,8 +8,6 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
|
||||||
|
|
||||||
"github.com/sethgrid/pester"
|
"github.com/sethgrid/pester"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -18,35 +16,11 @@ type HttpHook struct {
|
||||||
MaxRetries int
|
MaxRetries int
|
||||||
Backoff int
|
Backoff int
|
||||||
ForwardHeaders []string
|
ForwardHeaders []string
|
||||||
|
|
||||||
|
client *pester.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_ HttpHook) Setup() error {
|
func (h *HttpHook) Setup() error {
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h HttpHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
|
|
||||||
jsonInfo, err := json.Marshal(info)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", h.Endpoint, bytes.NewBuffer(jsonInfo))
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, k := range h.ForwardHeaders {
|
|
||||||
// Lookup the Canonicalised version of the specified header
|
|
||||||
if vals, ok := info.HTTPRequest.Header[http.CanonicalHeaderKey(k)]; ok {
|
|
||||||
// but set the case specified by the user
|
|
||||||
req.Header[k] = vals
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("Hook-Name", string(typ))
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
// TODO: Can we initialize this in Setup()?
|
|
||||||
// Use linear backoff strategy with the user defined values.
|
// Use linear backoff strategy with the user defined values.
|
||||||
client := pester.New()
|
client := pester.New()
|
||||||
client.KeepLog = true
|
client.KeepLog = true
|
||||||
|
@ -55,24 +29,51 @@ func (h HttpHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput
|
||||||
return time.Duration(h.Backoff) * time.Second
|
return time.Duration(h.Backoff) * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := client.Do(req)
|
h.client = client
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
return nil
|
||||||
if err != nil {
|
}
|
||||||
return nil, 0, err
|
|
||||||
}
|
func (h HttpHook) InvokeHook(hookReq HookRequest) (hookRes HookResponse, err error) {
|
||||||
|
jsonInfo, err := json.Marshal(hookReq)
|
||||||
if resp.StatusCode >= http.StatusBadRequest {
|
if err != nil {
|
||||||
return body, resp.StatusCode, NewHookError(fmt.Errorf("endpoint returned: %s", resp.Status), resp.StatusCode, body)
|
return hookRes, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if captureOutput {
|
httpReq, err := http.NewRequest("POST", h.Endpoint, bytes.NewBuffer(jsonInfo))
|
||||||
return body, resp.StatusCode, err
|
if err != nil {
|
||||||
}
|
return hookRes, err
|
||||||
|
}
|
||||||
return nil, resp.StatusCode, err
|
|
||||||
|
for _, k := range h.ForwardHeaders {
|
||||||
|
// Lookup the Canonicalised version of the specified header
|
||||||
|
if vals, ok := hookReq.Event.HTTPRequest.Header[http.CanonicalHeaderKey(k)]; ok {
|
||||||
|
// but set the case specified by the user
|
||||||
|
httpReq.Header[k] = vals
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
httpReq.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
httpRes, err := h.client.Do(httpReq)
|
||||||
|
if err != nil {
|
||||||
|
return hookRes, err
|
||||||
|
}
|
||||||
|
defer httpRes.Body.Close()
|
||||||
|
|
||||||
|
httpBody, err := ioutil.ReadAll(httpRes.Body)
|
||||||
|
if err != nil {
|
||||||
|
return hookRes, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Report an error, if the response has a non-2XX status code
|
||||||
|
if httpRes.StatusCode < http.StatusOK || httpRes.StatusCode >= http.StatusMultipleChoices {
|
||||||
|
return hookRes, fmt.Errorf("unexpected response code from hook endpoint (%d): %s", httpRes.StatusCode, string(httpBody))
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = json.Unmarshal(httpBody, &hookRes); err != nil {
|
||||||
|
return hookRes, fmt.Errorf("failed to parse hook response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return hookRes, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,69 +1,122 @@
|
||||||
package hooks
|
package hooks
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"log"
|
||||||
"plugin"
|
"net/rpc"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/hashicorp/go-plugin"
|
||||||
)
|
)
|
||||||
|
|
||||||
type PluginHookHandler interface {
|
// TODO: When the tusd process stops, the plugin does not get properly killed
|
||||||
PreCreate(info handler.HookEvent) error
|
// and lives on as a zombie process.
|
||||||
PostCreate(info handler.HookEvent) error
|
|
||||||
PostReceive(info handler.HookEvent) error
|
|
||||||
PostFinish(info handler.HookEvent) error
|
|
||||||
PostTerminate(info handler.HookEvent) error
|
|
||||||
PreFinish(info handler.HookEvent) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type PluginHook struct {
|
type PluginHook struct {
|
||||||
Path string
|
Path string
|
||||||
|
|
||||||
handler PluginHookHandler
|
handlerImpl HookHandler
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *PluginHook) Setup() error {
|
func (h *PluginHook) Setup() error {
|
||||||
p, err := plugin.Open(h.Path)
|
// We're a host! Start by launching the plugin process.
|
||||||
|
client := plugin.NewClient(&plugin.ClientConfig{
|
||||||
|
HandshakeConfig: handshakeConfig,
|
||||||
|
Plugins: pluginMap,
|
||||||
|
Cmd: exec.Command(h.Path),
|
||||||
|
SyncStdout: os.Stdout,
|
||||||
|
SyncStderr: os.Stderr,
|
||||||
|
//Logger: logger,
|
||||||
|
})
|
||||||
|
//defer client.Kill()
|
||||||
|
|
||||||
|
// Connect via RPC
|
||||||
|
rpcClient, err := client.Client()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
symbol, err := p.Lookup("TusdHookHandler")
|
// Request the plugin
|
||||||
|
raw, err := rpcClient.Dispense("hookHandler")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler, ok := symbol.(*PluginHookHandler)
|
// We should have a HookHandler now! This feels like a normal interface
|
||||||
if !ok {
|
// implementation but is in fact over an RPC connection.
|
||||||
return fmt.Errorf("hooks: could not cast TusdHookHandler from %s into PluginHookHandler interface", h.Path)
|
h.handlerImpl = raw.(HookHandler)
|
||||||
}
|
|
||||||
|
|
||||||
h.handler = *handler
|
return h.handlerImpl.Setup()
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h PluginHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
|
func (h *PluginHook) InvokeHook(req HookRequest) (HookResponse, error) {
|
||||||
var err error
|
return h.handlerImpl.InvokeHook(req)
|
||||||
switch typ {
|
}
|
||||||
case HookPostFinish:
|
|
||||||
err = h.handler.PostFinish(info)
|
// handshakeConfigs are used to just do a basic handshake between
|
||||||
case HookPostTerminate:
|
// a plugin and host. If the handshake fails, a user friendly error is shown.
|
||||||
err = h.handler.PostTerminate(info)
|
// This prevents users from executing bad plugins or executing a plugin
|
||||||
case HookPostReceive:
|
// directory. It is a UX feature, not a security feature.
|
||||||
err = h.handler.PostReceive(info)
|
var handshakeConfig = plugin.HandshakeConfig{
|
||||||
case HookPostCreate:
|
ProtocolVersion: 1,
|
||||||
err = h.handler.PostCreate(info)
|
MagicCookieKey: "TUSD_PLUGIN",
|
||||||
case HookPreCreate:
|
MagicCookieValue: "yes",
|
||||||
err = h.handler.PreCreate(info)
|
}
|
||||||
case HookPreFinish:
|
|
||||||
err = h.handler.PreFinish(info)
|
// pluginMap is the map of plugins we can dispense.
|
||||||
default:
|
var pluginMap = map[string]plugin.Plugin{
|
||||||
err = fmt.Errorf("hooks: unknown hook named %s", typ)
|
"hookHandler": &HookHandlerPlugin{},
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
// Here is an implementation that talks over RPC
|
||||||
return nil, 1, err
|
type HookHandlerRPC struct{ client *rpc.Client }
|
||||||
}
|
|
||||||
|
func (g *HookHandlerRPC) Setup() error {
|
||||||
return nil, 0, nil
|
var res interface{}
|
||||||
|
err := g.client.Call("Plugin.Setup", new(interface{}), &res)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *HookHandlerRPC) InvokeHook(req HookRequest) (res HookResponse, err error) {
|
||||||
|
err = g.client.Call("Plugin.InvokeHook", req, &res)
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Here is the RPC server that HookHandlerRPC talks to, conforming to
|
||||||
|
// the requirements of net/rpc
|
||||||
|
type HookHandlerRPCServer struct {
|
||||||
|
// This is the real implementation
|
||||||
|
Impl HookHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *HookHandlerRPCServer) Setup(args interface{}, resp *interface{}) error {
|
||||||
|
return s.Impl.Setup()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *HookHandlerRPCServer) InvokeHook(args HookRequest, resp *HookResponse) (err error) {
|
||||||
|
*resp, err = s.Impl.InvokeHook(args)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is the implementation of plugin.Plugin so we can serve/consume this
|
||||||
|
//
|
||||||
|
// This has two methods: Server must return an RPC server for this plugin
|
||||||
|
// type. We construct a HookHandlerRPCServer for this.
|
||||||
|
//
|
||||||
|
// Client must return an implementation of our interface that communicates
|
||||||
|
// over an RPC client. We return HookHandlerRPC for this.
|
||||||
|
//
|
||||||
|
// Ignore MuxBroker. That is used to create more multiplexed streams on our
|
||||||
|
// plugin connection and is a more advanced use case.
|
||||||
|
type HookHandlerPlugin struct {
|
||||||
|
// Impl Injection
|
||||||
|
Impl HookHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *HookHandlerPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
|
||||||
|
return &HookHandlerRPCServer{Impl: p.Impl}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (HookHandlerPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
|
||||||
|
return &HookHandlerRPC{client: c}, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,77 +0,0 @@
|
||||||
// If this file gets changed, you must recompile the generate package in pkg/proto.
|
|
||||||
// To do this, install the Go protobuf toolchain as mentioned in
|
|
||||||
// https://github.com/golang/protobuf#installation.
|
|
||||||
// Then use following command to recompile it with gRPC support:
|
|
||||||
// protoc --go_out=plugins=grpc:../../../../../pkg/proto/ v1/hook.proto
|
|
||||||
// In addition, it may be necessary to update the protobuf or gRPC dependencies as well.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
package v1;
|
|
||||||
|
|
||||||
import "google/protobuf/any.proto";
|
|
||||||
|
|
||||||
// Uploaded data
|
|
||||||
message Upload {
|
|
||||||
// Unique integer identifier of the uploaded file
|
|
||||||
string id = 1;
|
|
||||||
// Total file size in bytes specified in the NewUpload call
|
|
||||||
int64 Size = 2;
|
|
||||||
// Indicates whether the total file size is deferred until later
|
|
||||||
bool SizeIsDeferred = 3;
|
|
||||||
// Offset in bytes (zero-based)
|
|
||||||
int64 Offset = 4;
|
|
||||||
map<string, string> metaData = 5;
|
|
||||||
// Indicates that this is a partial upload which will later be used to form
|
|
||||||
// a final upload by concatenation. Partial uploads should not be processed
|
|
||||||
// when they are finished since they are only incomplete chunks of files.
|
|
||||||
bool isPartial = 6;
|
|
||||||
// Indicates that this is a final upload
|
|
||||||
bool isFinal = 7;
|
|
||||||
// If the upload is a final one (see IsFinal) this will be a non-empty
|
|
||||||
// ordered slice containing the ids of the uploads of which the final upload
|
|
||||||
// will consist after concatenation.
|
|
||||||
repeated string partialUploads = 8;
|
|
||||||
// Storage contains information about where the data storage saves the upload,
|
|
||||||
// for example a file path. The available values vary depending on what data
|
|
||||||
// store is used. This map may also be nil.
|
|
||||||
map <string, string> storage = 9;
|
|
||||||
}
|
|
||||||
|
|
||||||
message HTTPRequest {
|
|
||||||
// Method is the HTTP method, e.g. POST or PATCH
|
|
||||||
string method = 1;
|
|
||||||
// URI is the full HTTP request URI, e.g. /files/fooo
|
|
||||||
string uri = 2;
|
|
||||||
// RemoteAddr contains the network address that sent the request
|
|
||||||
string remoteAddr = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hook's data
|
|
||||||
message Hook {
|
|
||||||
// Upload contains information about the upload that caused this hook
|
|
||||||
// to be fired.
|
|
||||||
Upload upload = 1;
|
|
||||||
// HTTPRequest contains details about the HTTP request that reached
|
|
||||||
// tusd.
|
|
||||||
HTTPRequest httpRequest = 2;
|
|
||||||
// The hook name
|
|
||||||
string name = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request data to send hook
|
|
||||||
message SendRequest {
|
|
||||||
// The hook data
|
|
||||||
Hook hook = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response that contains data for sended hook
|
|
||||||
message SendResponse {
|
|
||||||
// The response of the hook.
|
|
||||||
google.protobuf.Any response = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The hook service definition.
|
|
||||||
service HookService {
|
|
||||||
// Sends a hook
|
|
||||||
rpc Send (SendRequest) returns (SendResponse) {}
|
|
||||||
}
|
|
|
@ -0,0 +1,153 @@
|
||||||
|
// If this file gets changed, you must recompile the generate package in pkg/proto.
|
||||||
|
// To do this, install the Go protobuf toolchain as mentioned in
|
||||||
|
// https://grpc.io/docs/languages/go/quickstart/#prerequisites.
|
||||||
|
// Then use following command from the repository's root to recompile it with gRPC support:
|
||||||
|
// protoc --go-grpc_out=./pkg/ --go_out=./pkg/ ./cmd/tusd/cli/hooks/proto/v2/hook.proto
|
||||||
|
// In addition, it may be necessary to update the protobuf or gRPC dependencies as well.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
package v2;
|
||||||
|
|
||||||
|
option go_package = "proto/v2";
|
||||||
|
|
||||||
|
// HookRequest contains the information about the hook type, the involved upload,
|
||||||
|
// and causing HTTP request.
|
||||||
|
message HookRequest {
|
||||||
|
// Type is the name of the hook.
|
||||||
|
string type = 1;
|
||||||
|
|
||||||
|
// Event contains the involved upload and causing HTTP request.
|
||||||
|
Event event = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Event represents an event from tusd which can be handled by the application.
|
||||||
|
message Event {
|
||||||
|
// Upload contains information about the upload that caused this hook
|
||||||
|
// to be fired.
|
||||||
|
FileInfo upload = 1;
|
||||||
|
|
||||||
|
// HTTPRequest contains details about the HTTP request that reached
|
||||||
|
// tusd.
|
||||||
|
HTTPRequest httpRequest = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo contains information about a single upload resource.
|
||||||
|
message FileInfo {
|
||||||
|
// ID is the unique identifier of the upload resource.
|
||||||
|
string id = 1;
|
||||||
|
// Total file size in bytes specified in the NewUpload call
|
||||||
|
int64 size = 2;
|
||||||
|
// Indicates whether the total file size is deferred until later
|
||||||
|
bool sizeIsDeferred = 3;
|
||||||
|
// Offset in bytes (zero-based)
|
||||||
|
int64 offset = 4;
|
||||||
|
map<string, string> metaData = 5;
|
||||||
|
// Indicates that this is a partial upload which will later be used to form
|
||||||
|
// a final upload by concatenation. Partial uploads should not be processed
|
||||||
|
// when they are finished since they are only incomplete chunks of files.
|
||||||
|
bool isPartial = 6;
|
||||||
|
// Indicates that this is a final upload
|
||||||
|
bool isFinal = 7;
|
||||||
|
// If the upload is a final one (see IsFinal) this will be a non-empty
|
||||||
|
// ordered slice containing the ids of the uploads of which the final upload
|
||||||
|
// will consist after concatenation.
|
||||||
|
repeated string partialUploads = 8;
|
||||||
|
// Storage contains information about where the data storage saves the upload,
|
||||||
|
// for example a file path. The available values vary depending on what data
|
||||||
|
// store is used. This map may also be nil.
|
||||||
|
map <string, string> storage = 9;
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfoChanges collects changes the should be made to a FileInfo object. This
|
||||||
|
// can be done using the PreUploadCreateCallback to modify certain properties before
|
||||||
|
// an upload is created. Properties which should not be modified (e.g. Size or Offset)
|
||||||
|
// are intentionally left out here.
|
||||||
|
message FileInfoChanges {
|
||||||
|
// If ID is not empty, it will be passed to the data store, allowing
|
||||||
|
// hooks to influence the upload ID. Be aware that a data store is not required to
|
||||||
|
// respect a pre-defined upload ID and might overwrite or modify it. However,
|
||||||
|
// all data stores in the github.com/tus/tusd package do respect pre-defined IDs.
|
||||||
|
string id = 1;
|
||||||
|
|
||||||
|
// If MetaData is not nil, it replaces the entire user-defined meta data from
|
||||||
|
// the upload creation request. You can add custom meta data fields this way
|
||||||
|
// or ensure that only certain fields from the user-defined meta data are saved.
|
||||||
|
// If you want to retain only specific entries from the user-defined meta data, you must
|
||||||
|
// manually copy them into this MetaData field.
|
||||||
|
// If you do not want to store any meta data, set this field to an empty map (`MetaData{}`).
|
||||||
|
// If you want to keep the entire user-defined meta data, set this field to nil.
|
||||||
|
map <string, string> metaData = 2;
|
||||||
|
|
||||||
|
// If Storage is not nil, it is passed to the data store to allow for minor adjustments
|
||||||
|
// to the upload storage (e.g. destination file name). The details are specific for each
|
||||||
|
// data store and should be looked up in their respective documentation.
|
||||||
|
// Please be aware that this behavior is currently not supported by any data store in
|
||||||
|
// the github.com/tus/tusd package.
|
||||||
|
map <string, string> storage = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// HTTPRequest contains basic details of an incoming HTTP request.
|
||||||
|
message HTTPRequest {
|
||||||
|
// Method is the HTTP method, e.g. POST or PATCH.
|
||||||
|
string method = 1;
|
||||||
|
// URI is the full HTTP request URI, e.g. /files/fooo.
|
||||||
|
string uri = 2;
|
||||||
|
// RemoteAddr contains the network address that sent the request.
|
||||||
|
string remoteAddr = 3;
|
||||||
|
// Header contains all HTTP headers as present in the HTTP request.
|
||||||
|
map <string, string> header = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
// HookResponse is the response after a hook is executed.
|
||||||
|
message HookResponse {
|
||||||
|
// HTTPResponse's fields can be filled to modify the HTTP response.
|
||||||
|
// This is only possible for pre-create, pre-finish and post-receive hooks.
|
||||||
|
// For other hooks this value is ignored.
|
||||||
|
// If multiple hooks modify the HTTP response, a later hook may overwrite the
|
||||||
|
// modified values from a previous hook (e.g. if multiple post-receive hooks
|
||||||
|
// are executed).
|
||||||
|
// Example usages: Send an error to the client if RejectUpload/StopUpload are
|
||||||
|
// set in the pre-create/post-receive hook. Send more information to the client
|
||||||
|
// in the pre-finish hook.
|
||||||
|
HTTPResponse httpResponse = 1;
|
||||||
|
|
||||||
|
// RejectUpload will cause the upload to be rejected and not be created during
|
||||||
|
// POST request. This value is only respected for pre-create hooks. For other hooks,
|
||||||
|
// it is ignored. Use the HTTPResponse field to send details about the rejection
|
||||||
|
// to the client.
|
||||||
|
bool rejectUpload = 2;
|
||||||
|
|
||||||
|
// ChangeFileInfo can be set to change selected properties of an upload before
|
||||||
|
// it has been created. See the handler.FileInfoChanges type for more details.
|
||||||
|
// Changes are applied on a per-property basis, meaning that specifying just
|
||||||
|
// one property leaves all others unchanged.
|
||||||
|
// This value is only respected for pre-create hooks.
|
||||||
|
FileInfoChanges changeFileInfo = 4;
|
||||||
|
|
||||||
|
// StopUpload will cause the upload to be stopped during a PATCH request.
|
||||||
|
// This value is only respected for post-receive hooks. For other hooks,
|
||||||
|
// it is ignored. Use the HTTPResponse field to send details about the stop
|
||||||
|
// to the client.
|
||||||
|
bool stopUpload = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPResponse contains basic details of an outgoing HTTP response.
|
||||||
|
message HTTPResponse {
|
||||||
|
// StatusCode is status code, e.g. 200 or 400.
|
||||||
|
int64 statusCode = 1;
|
||||||
|
// Headers contains additional HTTP headers for the response.
|
||||||
|
map <string, string> headers = 2;
|
||||||
|
// Body is the response body.
|
||||||
|
string body = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The hook service definition.
|
||||||
|
service HookHandler {
|
||||||
|
// InvokeHook is invoked for every hook that is executed. HookRequest contains the
|
||||||
|
// corresponding information about the hook type, the involved upload, and
|
||||||
|
// causing HTTP request.
|
||||||
|
// The return value HookResponse allows to stop or reject an upload, as well as modifying
|
||||||
|
// the HTTP response. See the documentation for HookResponse for more details.
|
||||||
|
rpc InvokeHook (HookRequest) returns (HookResponse) {}
|
||||||
|
}
|
|
@ -4,7 +4,7 @@ import (
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
var stdout = log.New(os.Stdout, "[tusd] ", log.LstdFlags|log.Lmicroseconds)
|
var stdout = log.New(os.Stdout, "[tusd] ", log.LstdFlags|log.Lmicroseconds)
|
||||||
|
|
|
@ -3,8 +3,8 @@ package cli
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
"github.com/tus/tusd/pkg/prometheuscollector"
|
"github.com/tus/tusd/v2/pkg/prometheuscollector"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
|
@ -23,11 +23,20 @@ var MetricsHookErrorsTotal = prometheus.NewCounterVec(
|
||||||
[]string{"hooktype"},
|
[]string{"hooktype"},
|
||||||
)
|
)
|
||||||
|
|
||||||
func SetupMetrics(handler *handler.Handler) {
|
var MetricsHookInvocationsTotal = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "tusd_hook_invocations_total",
|
||||||
|
Help: "Total number of invocations per hook type.",
|
||||||
|
},
|
||||||
|
[]string{"hooktype"},
|
||||||
|
)
|
||||||
|
|
||||||
|
func SetupMetrics(mux *http.ServeMux, handler *handler.Handler) {
|
||||||
prometheus.MustRegister(MetricsOpenConnections)
|
prometheus.MustRegister(MetricsOpenConnections)
|
||||||
prometheus.MustRegister(MetricsHookErrorsTotal)
|
prometheus.MustRegister(MetricsHookErrorsTotal)
|
||||||
|
prometheus.MustRegister(MetricsHookInvocationsTotal)
|
||||||
prometheus.MustRegister(prometheuscollector.New(handler.Metrics))
|
prometheus.MustRegister(prometheuscollector.New(handler.Metrics))
|
||||||
|
|
||||||
stdout.Printf("Using %s as the metrics path.\n", Flags.MetricsPath)
|
stdout.Printf("Using %s as the metrics path.\n", Flags.MetricsPath)
|
||||||
http.Handle(Flags.MetricsPath, promhttp.Handler())
|
mux.Handle(Flags.MetricsPath, promhttp.Handler())
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,40 @@
|
||||||
|
package cli
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/pprof"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/bmizerany/pat"
|
||||||
|
"github.com/felixge/fgprof"
|
||||||
|
"github.com/goji/httpauth"
|
||||||
|
)
|
||||||
|
|
||||||
|
func SetupPprof(globalMux *http.ServeMux) {
|
||||||
|
runtime.SetBlockProfileRate(Flags.PprofBlockProfileRate)
|
||||||
|
runtime.SetMutexProfileFraction(Flags.PprofMutexProfileRate)
|
||||||
|
|
||||||
|
mux := pat.New()
|
||||||
|
mux.Get("", http.HandlerFunc(pprof.Index))
|
||||||
|
mux.Get("cmdline", http.HandlerFunc(pprof.Cmdline))
|
||||||
|
mux.Get("profile", http.HandlerFunc(pprof.Profile))
|
||||||
|
mux.Get("symbol", http.HandlerFunc(pprof.Symbol))
|
||||||
|
mux.Get("trace", http.HandlerFunc(pprof.Trace))
|
||||||
|
mux.Get("fgprof", fgprof.Handler())
|
||||||
|
|
||||||
|
var handler http.Handler = mux
|
||||||
|
auth := os.Getenv("TUSD_PPROF_AUTH")
|
||||||
|
if auth != "" {
|
||||||
|
parts := strings.SplitN(auth, ":", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
stderr.Fatalf("TUSD_PPROF_AUTH must be two values separated by a colon")
|
||||||
|
}
|
||||||
|
|
||||||
|
handler = httpauth.SimpleBasicAuth(parts[0], parts[1])(mux)
|
||||||
|
}
|
||||||
|
|
||||||
|
globalMux.Handle(Flags.PprofPath, http.StripPrefix(Flags.PprofPath, handler))
|
||||||
|
|
||||||
|
}
|
|
@ -1,13 +1,18 @@
|
||||||
package cli
|
package cli
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -35,6 +40,7 @@ func Serve() {
|
||||||
NotifyTerminatedUploads: true,
|
NotifyTerminatedUploads: true,
|
||||||
NotifyUploadProgress: true,
|
NotifyUploadProgress: true,
|
||||||
NotifyCreatedUploads: true,
|
NotifyCreatedUploads: true,
|
||||||
|
UploadProgressInterval: time.Duration(Flags.ProgressHooksInterval) * time.Millisecond,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := SetupPreHooks(&config); err != nil {
|
if err := SetupPreHooks(&config); err != nil {
|
||||||
|
@ -61,21 +67,17 @@ func Serve() {
|
||||||
|
|
||||||
SetupPostHooks(handler)
|
SetupPostHooks(handler)
|
||||||
|
|
||||||
if Flags.ExposeMetrics {
|
|
||||||
SetupMetrics(handler)
|
|
||||||
SetupHookMetrics()
|
|
||||||
}
|
|
||||||
|
|
||||||
stdout.Printf("Supported tus extensions: %s\n", handler.SupportedExtensions())
|
stdout.Printf("Supported tus extensions: %s\n", handler.SupportedExtensions())
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
if basepath == "/" {
|
if basepath == "/" {
|
||||||
// If the basepath is set to the root path, only install the tusd handler
|
// If the basepath is set to the root path, only install the tusd handler
|
||||||
// and do not show a greeting.
|
// and do not show a greeting.
|
||||||
http.Handle("/", http.StripPrefix("/", handler))
|
mux.Handle("/", http.StripPrefix("/", handler))
|
||||||
} else {
|
} else {
|
||||||
// If a custom basepath is defined, we show a greeting at the root path...
|
// If a custom basepath is defined, we show a greeting at the root path...
|
||||||
if Flags.ShowGreeting {
|
if Flags.ShowGreeting {
|
||||||
http.HandleFunc("/", DisplayGreeting)
|
mux.HandleFunc("/", DisplayGreeting)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ... and register a route with and without the trailing slash, so we can
|
// ... and register a route with and without the trailing slash, so we can
|
||||||
|
@ -83,8 +85,17 @@ func Serve() {
|
||||||
basepathWithoutSlash := strings.TrimSuffix(basepath, "/")
|
basepathWithoutSlash := strings.TrimSuffix(basepath, "/")
|
||||||
basepathWithSlash := basepathWithoutSlash + "/"
|
basepathWithSlash := basepathWithoutSlash + "/"
|
||||||
|
|
||||||
http.Handle(basepathWithSlash, http.StripPrefix(basepathWithSlash, handler))
|
mux.Handle(basepathWithSlash, http.StripPrefix(basepathWithSlash, handler))
|
||||||
http.Handle(basepathWithoutSlash, http.StripPrefix(basepathWithoutSlash, handler))
|
mux.Handle(basepathWithoutSlash, http.StripPrefix(basepathWithoutSlash, handler))
|
||||||
|
}
|
||||||
|
|
||||||
|
if Flags.ExposeMetrics {
|
||||||
|
SetupMetrics(mux, handler)
|
||||||
|
SetupHookMetrics()
|
||||||
|
}
|
||||||
|
|
||||||
|
if Flags.ExposePprof {
|
||||||
|
SetupPprof(mux)
|
||||||
}
|
}
|
||||||
|
|
||||||
var listener net.Listener
|
var listener net.Listener
|
||||||
|
@ -109,59 +120,117 @@ func Serve() {
|
||||||
stdout.Printf("You can now upload files to: %s://%s%s", protocol, address, basepath)
|
stdout.Printf("You can now upload files to: %s://%s%s", protocol, address, basepath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we're not using TLS just start the server and, if http.Serve() returns, just return.
|
server := &http.Server{
|
||||||
|
Handler: mux,
|
||||||
|
}
|
||||||
|
|
||||||
|
shutdownComplete := setupSignalHandler(server, handler)
|
||||||
|
|
||||||
if protocol == "http" {
|
if protocol == "http" {
|
||||||
if err = http.Serve(listener, nil); err != nil {
|
// Non-TLS mode
|
||||||
stderr.Fatalf("Unable to serve: %s", err)
|
err = server.Serve(listener)
|
||||||
|
} else {
|
||||||
|
// TODO: Move TLS handling into own file.
|
||||||
|
// TLS mode
|
||||||
|
|
||||||
|
switch Flags.TLSMode {
|
||||||
|
case TLS13:
|
||||||
|
server.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS13}
|
||||||
|
|
||||||
|
case TLS12:
|
||||||
|
// Ciphersuite selection comes from
|
||||||
|
// https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6
|
||||||
|
// 128-bit AES modes remain as TLSv1.3 is enabled in this mode, and TLSv1.3 compatibility requires an AES-128 ciphersuite.
|
||||||
|
server.TLSConfig = &tls.Config{
|
||||||
|
MinVersion: tls.VersionTLS12,
|
||||||
|
PreferServerCipherSuites: true,
|
||||||
|
CipherSuites: []uint16{
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
case TLS12STRONG:
|
||||||
|
// Ciphersuite selection as above, but intersected with
|
||||||
|
// https://github.com/denji/golang-tls#perfect-ssl-labs-score-with-go
|
||||||
|
// TLSv1.3 is disabled as it requires an AES-128 ciphersuite.
|
||||||
|
server.TLSConfig = &tls.Config{
|
||||||
|
MinVersion: tls.VersionTLS12,
|
||||||
|
MaxVersion: tls.VersionTLS12,
|
||||||
|
PreferServerCipherSuites: true,
|
||||||
|
CipherSuites: []uint16{
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
stderr.Fatalf("Invalid TLS mode chosen. Recommended valid modes are tls13, tls12 (default), and tls12-strong")
|
||||||
}
|
}
|
||||||
return
|
|
||||||
|
// Disable HTTP/2; the default non-TLS mode doesn't support it
|
||||||
|
server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0)
|
||||||
|
|
||||||
|
err = server.ServeTLS(listener, Flags.TLSCertFile, Flags.TLSKeyFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fall-through for TLS mode.
|
// Note: http.Server.Serve and http.Server.ServeTLS always return a non-nil error code. So
|
||||||
server := &http.Server{}
|
// we can assume from here that `err != nil`
|
||||||
switch Flags.TLSMode {
|
if err == http.ErrServerClosed {
|
||||||
case TLS13:
|
// ErrServerClosed means that http.Server.Shutdown was called due to an interruption signal.
|
||||||
server.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS13}
|
// We wait until the interruption procedure is complete or times out and then exit main.
|
||||||
|
<-shutdownComplete
|
||||||
case TLS12:
|
} else {
|
||||||
// Ciphersuite selection comes from
|
// Any other error is relayed to the user.
|
||||||
// https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6
|
|
||||||
// 128-bit AES modes remain as TLSv1.3 is enabled in this mode, and TLSv1.3 compatibility requires an AES-128 ciphersuite.
|
|
||||||
server.TLSConfig = &tls.Config{
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
PreferServerCipherSuites: true,
|
|
||||||
CipherSuites: []uint16{
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
case TLS12STRONG:
|
|
||||||
// Ciphersuite selection as above, but intersected with
|
|
||||||
// https://github.com/denji/golang-tls#perfect-ssl-labs-score-with-go
|
|
||||||
// TLSv1.3 is disabled as it requires an AES-128 ciphersuite.
|
|
||||||
server.TLSConfig = &tls.Config{
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
MaxVersion: tls.VersionTLS12,
|
|
||||||
PreferServerCipherSuites: true,
|
|
||||||
CipherSuites: []uint16{
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
stderr.Fatalf("Invalid TLS mode chosen. Recommended valid modes are tls13, tls12 (default), and tls12-strong")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable HTTP/2; the default non-TLS mode doesn't support it
|
|
||||||
server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0)
|
|
||||||
|
|
||||||
if err = server.ServeTLS(listener, Flags.TLSCertFile, Flags.TLSKeyFile); err != nil {
|
|
||||||
stderr.Fatalf("Unable to serve: %s", err)
|
stderr.Fatalf("Unable to serve: %s", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func setupSignalHandler(server *http.Server, handler *handler.Handler) <-chan struct{} {
|
||||||
|
shutdownComplete := make(chan struct{})
|
||||||
|
|
||||||
|
// We read up to two signals, so use a capacity of 2 here to not miss any signal
|
||||||
|
c := make(chan os.Signal, 2)
|
||||||
|
|
||||||
|
// os.Interrupt is mapped to SIGINT on Unix and to the termination instructions on Windows.
|
||||||
|
// On Unix we also listen to SIGTERM.
|
||||||
|
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
|
// Signal to the handler that it should stop all long running requests if we shut down
|
||||||
|
server.RegisterOnShutdown(handler.InterruptRequestHandling)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
// First interrupt signal
|
||||||
|
<-c
|
||||||
|
stdout.Println("Received interrupt signal. Shutting down tusd...")
|
||||||
|
|
||||||
|
// Wait for second interrupt signal, while also shutting down the existing server
|
||||||
|
go func() {
|
||||||
|
<-c
|
||||||
|
stdout.Println("Received second interrupt signal. Exiting immediately!")
|
||||||
|
os.Exit(1)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Shutdown the server, but with a user-specified timeout
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(Flags.ShutdownTimeout)*time.Millisecond)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
err := server.Shutdown(ctx)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
stdout.Println("Shutdown completed. Goodbye!")
|
||||||
|
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||||
|
stderr.Println("Shutdown timeout exceeded. Exiting immediately!")
|
||||||
|
} else {
|
||||||
|
stderr.Printf("Failed to shutdown gracefully: %s\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
close(shutdownComplete)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return shutdownComplete
|
||||||
|
}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/tus/tusd/cmd/tusd/cli"
|
"github.com/tus/tusd/v2/cmd/tusd/cli"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
|
@ -1,6 +1,8 @@
|
||||||
# Hooks
|
# Hooks
|
||||||
|
|
||||||
When integrating tusd into an application, it is important to establish a communication channel between the two components. The tusd binary accomplishes this by providing a system which triggers actions when certain events happen, such as an upload being created or finished. This simple-but-powerful system enables use cases ranging from logging over validation and authorization to processing the uploaded files.
|
TODO: Update with new details
|
||||||
|
|
||||||
|
When integrating tusd into an application, it is important to establish a communication channel between the two components. The tusd binary accomplishes this by providing a system which triggers actions when certain events happen, such as an upload being created or finished. This simple-but-powerful system enables uses ranging from logging over validation and authorization to processing the uploaded files.
|
||||||
|
|
||||||
When a specific action happens during an upload (pre-create, post-receive, post-finish, or post-terminate), the hook system enables tusd to fire off a specific event. Tusd provides two ways of doing this:
|
When a specific action happens during an upload (pre-create, post-receive, post-finish, or post-terminate), the hook system enables tusd to fire off a specific event. Tusd provides two ways of doing this:
|
||||||
|
|
||||||
|
@ -9,11 +11,11 @@ When a specific action happens during an upload (pre-create, post-receive, post-
|
||||||
|
|
||||||
## Non-Blocking Hooks
|
## Non-Blocking Hooks
|
||||||
|
|
||||||
If not otherwise noted, all hooks are invoked in a *non-blocking* way, meaning that tusd will not wait until the hook process has finished and exited. Therefore, the hook process is not able to influence how tusd may continue handling the current request, regardless of which exit code it may set. Furthermore, the hook process' stdout and stderr will be piped to tusd's stdout and stderr correspondingly, allowing one to use these channels for additional logging.
|
If not otherwise noted, all hooks are invoked in a _non-blocking_ way, meaning that tusd will not wait until the hook process has finished and exited. Therefore, the hook process is not able to influence how tusd may continue handling the current request, regardless of which exit code it may set. Furthermore, the hook process' stdout and stderr will be piped to tusd's stdout and stderr correspondingly, allowing one to use these channels for additional logging.
|
||||||
|
|
||||||
## Blocking Hooks
|
## Blocking Hooks
|
||||||
|
|
||||||
On the other hand, there are a few *blocking* hooks, such as caused by the `pre-create` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep the response times low, one should avoid to make time-consuming operations inside the processes for blocking hooks.
|
On the other hand, there are a few _blocking_ hooks, such as caused by the `pre-create` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep the response times low, one should avoid to make time-consuming operations inside the processes for blocking hooks.
|
||||||
|
|
||||||
### Blocking File Hooks
|
### Blocking File Hooks
|
||||||
|
|
||||||
|
@ -31,7 +33,7 @@ This event will be triggered before an upload is created, allowing you to run ce
|
||||||
|
|
||||||
### post-create
|
### post-create
|
||||||
|
|
||||||
This event will be triggered after an upload is created, allowing you to run certain routines. For example, notifying other parts of your system that a new upload has to be handled. At this point the upload may have received some data already since the invocation of these hooks may be delayed by a short duration.
|
This event will be triggered after an upload is created, allowing you to run certain routines. For example, notifying other parts of your system that a new upload has to be handled. At this point the upload may have received some data already since the invocation of these hooks may be delayed by a short duration.
|
||||||
|
|
||||||
### pre-finish
|
### pre-finish
|
||||||
|
|
||||||
|
@ -56,7 +58,9 @@ This event will be triggered for every running upload to indicate its current pr
|
||||||
The `--hooks-enabled-events` option for the tusd binary works as a whitelist for hook events and takes a comma separated list of hook events (for instance: `pre-create,post-create`). This can be useful to limit the number of hook executions and save resources if you are only interested in some events. If the `--hooks-enabled-events` option is omitted, all default hook events are enabled (pre-create, post-create, post-receive, post-terminate, post-finish).
|
The `--hooks-enabled-events` option for the tusd binary works as a whitelist for hook events and takes a comma separated list of hook events (for instance: `pre-create,post-create`). This can be useful to limit the number of hook executions and save resources if you are only interested in some events. If the `--hooks-enabled-events` option is omitted, all default hook events are enabled (pre-create, post-create, post-receive, post-terminate, post-finish).
|
||||||
|
|
||||||
## File Hooks
|
## File Hooks
|
||||||
|
|
||||||
### The Hook Directory
|
### The Hook Directory
|
||||||
|
|
||||||
By default, the file hook system is disabled. To enable it, pass the `--hooks-dir` option to the tusd binary. The flag's value will be a path, the **hook directory**, relative to the current working directory, pointing to the folder containing the executable **hook files**:
|
By default, the file hook system is disabled. To enable it, pass the `--hooks-dir` option to the tusd binary. The flag's value will be a path, the **hook directory**, relative to the current working directory, pointing to the folder containing the executable **hook files**:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -67,13 +71,14 @@ $ tusd --hooks-dir ./path/to/hooks/
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
If an event occurs, the tusd binary will look for a file, named exactly as the event, which will then be executed, as long as the object exists. In the example above, the binary `./path/to/hooks/pre-create` will be invoked, before an upload is created, which can be used to e.g. validate certain metadata. Please note, that in UNIX environments the hook file *must not* have an extension, such as `.sh` or `.py`, or else tusd will not recognize and ignore it. On Windows, however, the hook file *must* have an extension, such as `.bat` or `.exe`.
|
If an event occurs, the tusd binary will look for a file, named exactly as the event, which will then be executed, as long as the object exists. In the example above, the binary `./path/to/hooks/pre-create` will be invoked, before an upload is created, which can be used to e.g. validate certain metadata. Please note, that in UNIX environments the hook file _must not_ have an extension, such as `.sh` or `.py`, or else tusd will not recognize and ignore it. On Windows, however, the hook file _must_ have an extension, such as `.bat` or `.exe`.
|
||||||
|
|
||||||
### The Hook's Environment
|
### The Hook's Environment
|
||||||
|
|
||||||
The process of the hook files are provided with information about the event and the upload using to two methods:
|
The process of the hook files are provided with information about the event and the upload using to two methods:
|
||||||
* The `TUS_ID` and `TUS_SIZE` environment variables will contain the upload ID and its size in bytes, which triggered the event. Please be aware, that in the `pre-create` hook the upload ID will be an empty string as the entity has not been created and therefore this piece of information is not yet available.
|
|
||||||
* On `stdin` a JSON-encoded object can be read which contains more details about the corresponding event in following format:
|
- The `TUS_ID` and `TUS_SIZE` environment variables will contain the upload ID and its size in bytes, which triggered the event. Please be aware, that in the `pre-create` hook the upload ID will be an empty string as the entity has not been created and therefore this piece of information is not yet available.
|
||||||
|
- On `stdin` a JSON-encoded object can be read which contains more details about the corresponding event in following format:
|
||||||
|
|
||||||
```js
|
```js
|
||||||
{
|
{
|
||||||
|
@ -211,9 +216,9 @@ $ # Retrying 5 times with a 2 second backoff
|
||||||
$ tusd --hooks-http http://localhost:8081/write --hooks-http-retry 5 --hooks-http-backoff 2
|
$ tusd --hooks-http http://localhost:8081/write --hooks-http-retry 5 --hooks-http-backoff 2
|
||||||
```
|
```
|
||||||
|
|
||||||
## GRPC Hooks
|
## gRPC Hooks
|
||||||
|
|
||||||
GRPC Hooks are the third type of hooks supported by tusd. Like the others hooks, it is disabled by default. To enable it, pass the `--hooks-grpc` option to the tusd binary. The flag's value will be a gRPC endpoint, which the tusd binary will be sent to:
|
gRPC Hooks are the third type of hooks supported by tusd. Like the others hooks, it is disabled by default. To enable it, pass the `--hooks-grpc` option to the tusd binary. The flag's value will be a gRPC endpoint, which the tusd binary will be sent to:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ tusd --hooks-grpc localhost:8080
|
$ tusd --hooks-grpc localhost:8080
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ./minio server data
|
||||||
|
|
||||||
|
AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY go run cmd/tusd/main.go -s3-bucket tusdtest.transloadit.com -s3-endpoint http://127.0.0.1:9000 -expose-pprof
|
|
@ -67,7 +67,7 @@ $ tusd -gcs-bucket=my-test-bucket.com
|
||||||
[tusd] Using /metrics as the metrics path.
|
[tusd] Using /metrics as the metrics path.
|
||||||
```
|
```
|
||||||
|
|
||||||
Tusd also supports storing uploads on Microsoft Azure Blob Storage. In order to enable this feature, provide the
|
Tusd also supports storing uploads on Microsoft Azure Blob Storage. In order to enable this feature, provide the
|
||||||
corresponding access credentials using environment variables.
|
corresponding access credentials using environment variables.
|
||||||
|
|
||||||
```
|
```
|
||||||
|
@ -111,7 +111,7 @@ Using endpoint https://xxxxx.blob.core.windows.net
|
||||||
[tusd] Using /metrics as the metrics path.
|
[tusd] Using /metrics as the metrics path.
|
||||||
```
|
```
|
||||||
|
|
||||||
TLS support for HTTPS connections can be enabled by supplying a certificate and private key. Note that the certificate file must include the entire chain of certificates up to the CA certificate. The default configuration supports TLSv1.2 and TLSv1.3. It is possible to use only TLSv1.3 with `-tls-mode=tls13`; alternately, it is possible to disable TLSv1.3 and use only 256-bit AES ciphersuites with `-tls-mode=tls12-strong`. The following example generates a self-signed certificate for `localhost` and then uses it to serve files on the loopback address; that this certificate is not appropriate for production use. Note also that the key file must not be encrypted/require a passphrase.
|
TLS support for HTTPS connections can be enabled by supplying a certificate and private key. Note that the certificate file must include the entire chain of certificates up to the CA certificate. The default configuration supports TLSv1.2 and TLSv1.3. It is possible to use only TLSv1.3 with `-tls-mode=tls13`; alternately, it is possible to disable TLSv1.3 and use only 256-bit AES ciphersuites with `-tls-mode=tls12-strong`. The following example generates a self-signed certificate for `localhost` and then uses it to serve files on the loopback address; that this certificate is not appropriate for production use. Note also that the key file must not be encrypted/require a passphrase.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ openssl req -x509 -new -newkey rsa:4096 -nodes -sha256 -days 3650 -keyout localhost.key -out localhost.pem -subj "/CN=localhost"
|
$ openssl req -x509 -new -newkey rsa:4096 -nodes -sha256 -days 3650 -keyout localhost.key -out localhost.pem -subj "/CN=localhost"
|
||||||
|
@ -130,7 +130,6 @@ $ tusd -upload-dir=./data -host=127.0.0.1 -port=8443 -tls-certificate=localhost.
|
||||||
[tusd] You can now upload files to: https://127.0.0.1:8443/files/
|
[tusd] You can now upload files to: https://127.0.0.1:8443/files/
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
Besides these simple examples, tusd can be easily configured using a variety of command line
|
Besides these simple examples, tusd can be easily configured using a variety of command line
|
||||||
options:
|
options:
|
||||||
|
|
||||||
|
@ -224,3 +223,13 @@ $ tusd -help
|
||||||
Print tusd version information
|
Print tusd version information
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Graceful shutdown
|
||||||
|
|
||||||
|
If tusd receives a SIGINT or SIGTERM signal, it will initiate a graceful shutdown. SIGINT is usually emitted by pressing Ctrl+C inside the terminal that is running tusd. SIGINT and SIGTERM can also be emitted using the [`kill(1)`](https://man7.org/linux/man-pages/man1/kill.1.html) utility on Unix. Signals in that sense do not exist on Windows, so please refer to the [Go documentation](https://pkg.go.dev/os/signal#hdr-Windows) on how different events are translated into signals on Windows.
|
||||||
|
|
||||||
|
Once the graceful shutdown is started, tusd will stop listening on its port and won't accept new connections anymore. Idle connections are closed down. Already running requests will be given a grace period to complete before their connections are closed as well. PATCH and POST requests with a request body are interrupted, so that data stores can gracefully finish saving all the received data until that point. If all requests have been completed, tusd will exit.
|
||||||
|
|
||||||
|
If not all requests have been completed in the period defined by the `-shutdown-timeout` flag, tusd will exit regardless. By default, tusd will give all requests 10 seconds to complete their processing. If you do not want to wait for requests, use `-shutdown-timeout=0`.
|
||||||
|
|
||||||
|
tusd will also immediately exit if it receives a second SIGINT or SIGTERM signal. It will also always exit immediately if a SIGKILL is received.
|
||||||
|
|
|
@ -9,8 +9,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/filestore"
|
"github.com/tus/tusd/v2/pkg/filestore"
|
||||||
tusd "github.com/tus/tusd/pkg/handler"
|
tusd "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
|
@ -0,0 +1,11 @@
|
||||||
|
# Examples
|
||||||
|
|
||||||
|
This directory contains following examples:
|
||||||
|
|
||||||
|
- `apache2.conf` is the recommended minimum configuration for an Apache2 proxy in front of tusd.
|
||||||
|
- `nginx.conf` is the recommended minimum configuration for an Nginx proxy in front of tusd.
|
||||||
|
- `server/` is an example of how to the tusd package embedded in your own Go application.
|
||||||
|
- `hooks/file/` are Bash scripts for file hook implementations.
|
||||||
|
- `hooks/http/` is a Python HTTP server as the HTTP hook implementation.
|
||||||
|
- `hooks/grpc/` is a Python gRPC server as the gRPC hook implementation.
|
||||||
|
- `hooks/plugin/` is a Go plugin usable with the plugin hooks.
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This example demonstrates how to read the hook event details
|
||||||
|
# from stdout and output debug messages.
|
||||||
|
|
||||||
|
id="$TUS_ID"
|
||||||
|
size="$TUS_SIZE"
|
||||||
|
|
||||||
|
# We use >&2 to write debugging output to stderr. tusd
|
||||||
|
# will forward these to its stderr. Any output from the
|
||||||
|
# hook on stdout will be captured by tusd and interpreted
|
||||||
|
# as a response.
|
||||||
|
echo "Upload created with ID ${id} and size ${size}" >&2
|
||||||
|
cat /dev/stdin | jq . >&2
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This example demonstrates how to read the hook event details
|
||||||
|
# from environment variables, stdin, and output debug messages.
|
||||||
|
|
||||||
|
# We use >&2 to write debugging output to stderr. tusd
|
||||||
|
# will forward these to its stderr. Any output from the
|
||||||
|
# hook on stdout will be captured by tusd and interpreted
|
||||||
|
# as a response.
|
||||||
|
echo "Upload $TUS_ID ($TUS_SIZE bytes) finished" >&2
|
||||||
|
cat /dev/stdin | jq . >&2
|
|
@ -0,0 +1,15 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This example demonstrates how to read the hook event details
|
||||||
|
# from environment variables and output debug messages.
|
||||||
|
|
||||||
|
id="$TUS_ID"
|
||||||
|
offset="$TUS_OFFSET"
|
||||||
|
size="$TUS_SIZE"
|
||||||
|
progress=$((100 * $offset/$size))
|
||||||
|
|
||||||
|
# We use >&2 to write debugging output to stderr. tusd
|
||||||
|
# will forward these to its stderr. Any output from the
|
||||||
|
# hook on stdout will be captured by tusd and interpreted
|
||||||
|
# as a response.
|
||||||
|
echo "Upload ${id} is at ${progress}% (${offset}/${size})" >&2
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This example demonstrates how to read the hook event details
|
||||||
|
# from environment variables, stdin, and output debug messages.
|
||||||
|
|
||||||
|
# We use >&2 to write debugging output to stderr. tusd
|
||||||
|
# will forward these to its stderr. Any output from the
|
||||||
|
# hook on stdout will be captured by tusd and interpreted
|
||||||
|
# as a response.
|
||||||
|
echo "Upload $TUS_ID terminated" >&2
|
||||||
|
cat /dev/stdin | jq . >&2
|
|
@ -0,0 +1,37 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This example demonstrates how to read the hook event details
|
||||||
|
# from stdout, output debug messages, and reject a new upload based
|
||||||
|
# on custom constraints. Here, an upload will be rejected if the
|
||||||
|
# filename metadata is missing. Remove the following `exit 0` line
|
||||||
|
# to activate the constraint:
|
||||||
|
exit 0
|
||||||
|
|
||||||
|
hasFilename="$(cat /dev/stdin | jq '.Event.Upload.MetaData | has("filename")')"
|
||||||
|
|
||||||
|
# We use >&2 to write debugging output to stderr. tusd
|
||||||
|
# will forward these to its stderr. Any output from the
|
||||||
|
# hook on stdout will be captured by tusd and interpreted
|
||||||
|
# as a response.
|
||||||
|
echo "Filename exists: $hasFilename" >&2
|
||||||
|
|
||||||
|
if [ "$hasFilename" == "false" ]; then
|
||||||
|
|
||||||
|
# If the condition is not met, output a JSON object on stdout,
|
||||||
|
# that instructs tusd to reject the upload and respond with a custom
|
||||||
|
# HTTP error response.
|
||||||
|
cat <<END
|
||||||
|
{
|
||||||
|
"RejectUpload": true,
|
||||||
|
"HTTPResponse": {
|
||||||
|
"StatusCode": 400,
|
||||||
|
"Body": "no filename provided"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
END
|
||||||
|
|
||||||
|
# It is important that the hook exits with code 0. Otherwise, tusd
|
||||||
|
# assumes the hook has failed and will print an error message about
|
||||||
|
# the hook failure.
|
||||||
|
exit 0
|
||||||
|
fi
|
|
@ -0,0 +1,10 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This example demonstrates how to read the hook event details
|
||||||
|
# from stdin, and output debug messages.
|
||||||
|
|
||||||
|
# We use >&2 to write debugging output to stderr. tusd
|
||||||
|
# will forward these to its stderr. Any output from the
|
||||||
|
# hook on stdout will be captured by tusd and interpreted
|
||||||
|
# as a response.
|
||||||
|
cat /dev/stdin | jq . >&2
|
|
@ -0,0 +1,2 @@
|
||||||
|
hook_pb2.py: ../../../cmd/tusd/cli/hooks/proto/v2/hook.proto
|
||||||
|
python3 -m grpc_tools.protoc --proto_path=../../../cmd/tusd/cli/hooks/proto/v2/ hook.proto --python_out=. --grpc_python_out=.
|
|
@ -0,0 +1,63 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||||
|
# source: hook.proto
|
||||||
|
"""Generated protocol buffer code."""
|
||||||
|
from google.protobuf.internal import builder as _builder
|
||||||
|
from google.protobuf import descriptor as _descriptor
|
||||||
|
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||||
|
from google.protobuf import symbol_database as _symbol_database
|
||||||
|
# @@protoc_insertion_point(imports)
|
||||||
|
|
||||||
|
_sym_db = _symbol_database.Default()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nhook.proto\x12\x02v2\"5\n\x0bHookRequest\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x18\n\x05\x65vent\x18\x02 \x01(\x0b\x32\t.v2.Event\"K\n\x05\x45vent\x12\x1c\n\x06upload\x18\x01 \x01(\x0b\x32\x0c.v2.FileInfo\x12$\n\x0bhttpRequest\x18\x02 \x01(\x0b\x32\x0f.v2.HTTPRequest\"\xc3\x02\n\x08\x46ileInfo\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x16\n\x0esizeIsDeferred\x18\x03 \x01(\x08\x12\x0e\n\x06offset\x18\x04 \x01(\x03\x12,\n\x08metaData\x18\x05 \x03(\x0b\x32\x1a.v2.FileInfo.MetaDataEntry\x12\x11\n\tisPartial\x18\x06 \x01(\x08\x12\x0f\n\x07isFinal\x18\x07 \x01(\x08\x12\x16\n\x0epartialUploads\x18\x08 \x03(\t\x12*\n\x07storage\x18\t \x03(\x0b\x32\x19.v2.FileInfo.StorageEntry\x1a/\n\rMetaDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cStorageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe6\x01\n\x0f\x46ileInfoChanges\x12\n\n\x02id\x18\x01 \x01(\t\x12\x33\n\x08metaData\x18\x02 \x03(\x0b\x32!.v2.FileInfoChanges.MetaDataEntry\x12\x31\n\x07storage\x18\x03 \x03(\x0b\x32 .v2.FileInfoChanges.StorageEntry\x1a/\n\rMetaDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cStorageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x9a\x01\n\x0bHTTPRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12\x12\n\nremoteAddr\x18\x03 \x01(\t\x12+\n\x06header\x18\x04 \x03(\x0b\x32\x1b.v2.HTTPRequest.HeaderEntry\x1a-\n\x0bHeaderEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8d\x01\n\x0cHookResponse\x12&\n\x0chttpResponse\x18\x01 \x01(\x0b\x32\x10.v2.HTTPResponse\x12\x14\n\x0crejectUpload\x18\x02 \x01(\x08\x12+\n\x0e\x63hangeFileInfo\x18\x04 \x01(\x0b\x32\x13.v2.FileInfoChanges\x12\x12\n\nstopUpload\x18\x03 
\x01(\x08\"\x90\x01\n\x0cHTTPResponse\x12\x12\n\nstatusCode\x18\x01 \x01(\x03\x12.\n\x07headers\x18\x02 \x03(\x0b\x32\x1d.v2.HTTPResponse.HeadersEntry\x12\x0c\n\x04\x62ody\x18\x03 \x01(\t\x1a.\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x32@\n\x0bHookHandler\x12\x31\n\nInvokeHook\x12\x0f.v2.HookRequest\x1a\x10.v2.HookResponse\"\x00\x62\x06proto3')
|
||||||
|
|
||||||
|
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||||
|
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'hook_pb2', globals())
|
||||||
|
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||||
|
|
||||||
|
DESCRIPTOR._options = None
|
||||||
|
_FILEINFO_METADATAENTRY._options = None
|
||||||
|
_FILEINFO_METADATAENTRY._serialized_options = b'8\001'
|
||||||
|
_FILEINFO_STORAGEENTRY._options = None
|
||||||
|
_FILEINFO_STORAGEENTRY._serialized_options = b'8\001'
|
||||||
|
_FILEINFOCHANGES_METADATAENTRY._options = None
|
||||||
|
_FILEINFOCHANGES_METADATAENTRY._serialized_options = b'8\001'
|
||||||
|
_FILEINFOCHANGES_STORAGEENTRY._options = None
|
||||||
|
_FILEINFOCHANGES_STORAGEENTRY._serialized_options = b'8\001'
|
||||||
|
_HTTPREQUEST_HEADERENTRY._options = None
|
||||||
|
_HTTPREQUEST_HEADERENTRY._serialized_options = b'8\001'
|
||||||
|
_HTTPRESPONSE_HEADERSENTRY._options = None
|
||||||
|
_HTTPRESPONSE_HEADERSENTRY._serialized_options = b'8\001'
|
||||||
|
_HOOKREQUEST._serialized_start=18
|
||||||
|
_HOOKREQUEST._serialized_end=71
|
||||||
|
_EVENT._serialized_start=73
|
||||||
|
_EVENT._serialized_end=148
|
||||||
|
_FILEINFO._serialized_start=151
|
||||||
|
_FILEINFO._serialized_end=474
|
||||||
|
_FILEINFO_METADATAENTRY._serialized_start=379
|
||||||
|
_FILEINFO_METADATAENTRY._serialized_end=426
|
||||||
|
_FILEINFO_STORAGEENTRY._serialized_start=428
|
||||||
|
_FILEINFO_STORAGEENTRY._serialized_end=474
|
||||||
|
_FILEINFOCHANGES._serialized_start=477
|
||||||
|
_FILEINFOCHANGES._serialized_end=707
|
||||||
|
_FILEINFOCHANGES_METADATAENTRY._serialized_start=379
|
||||||
|
_FILEINFOCHANGES_METADATAENTRY._serialized_end=426
|
||||||
|
_FILEINFOCHANGES_STORAGEENTRY._serialized_start=428
|
||||||
|
_FILEINFOCHANGES_STORAGEENTRY._serialized_end=474
|
||||||
|
_HTTPREQUEST._serialized_start=710
|
||||||
|
_HTTPREQUEST._serialized_end=864
|
||||||
|
_HTTPREQUEST_HEADERENTRY._serialized_start=819
|
||||||
|
_HTTPREQUEST_HEADERENTRY._serialized_end=864
|
||||||
|
_HOOKRESPONSE._serialized_start=867
|
||||||
|
_HOOKRESPONSE._serialized_end=1008
|
||||||
|
_HTTPRESPONSE._serialized_start=1011
|
||||||
|
_HTTPRESPONSE._serialized_end=1155
|
||||||
|
_HTTPRESPONSE_HEADERSENTRY._serialized_start=1109
|
||||||
|
_HTTPRESPONSE_HEADERSENTRY._serialized_end=1155
|
||||||
|
_HOOKHANDLER._serialized_start=1157
|
||||||
|
_HOOKHANDLER._serialized_end=1221
|
||||||
|
# @@protoc_insertion_point(module_scope)
|
|
@ -0,0 +1,74 @@
|
||||||
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||||
|
"""Client and server classes corresponding to protobuf-defined services."""
|
||||||
|
import grpc
|
||||||
|
|
||||||
|
import hook_pb2 as hook__pb2
|
||||||
|
|
||||||
|
|
||||||
|
class HookHandlerStub(object):
|
||||||
|
"""The hook service definition.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, channel):
|
||||||
|
"""Constructor.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
channel: A grpc.Channel.
|
||||||
|
"""
|
||||||
|
self.InvokeHook = channel.unary_unary(
|
||||||
|
'/v2.HookHandler/InvokeHook',
|
||||||
|
request_serializer=hook__pb2.HookRequest.SerializeToString,
|
||||||
|
response_deserializer=hook__pb2.HookResponse.FromString,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class HookHandlerServicer(object):
|
||||||
|
"""The hook service definition.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def InvokeHook(self, request, context):
|
||||||
|
"""InvokeHook is invoked for every hook that is executed. HookRequest contains the
|
||||||
|
corresponding information about the hook type, the involved upload, and
|
||||||
|
causing HTTP request.
|
||||||
|
The return value HookResponse allows to stop or reject an upload, as well as modifying
|
||||||
|
the HTTP response. See the documentation for HookResponse for more details.
|
||||||
|
"""
|
||||||
|
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||||
|
context.set_details('Method not implemented!')
|
||||||
|
raise NotImplementedError('Method not implemented!')
|
||||||
|
|
||||||
|
|
||||||
|
def add_HookHandlerServicer_to_server(servicer, server):
|
||||||
|
rpc_method_handlers = {
|
||||||
|
'InvokeHook': grpc.unary_unary_rpc_method_handler(
|
||||||
|
servicer.InvokeHook,
|
||||||
|
request_deserializer=hook__pb2.HookRequest.FromString,
|
||||||
|
response_serializer=hook__pb2.HookResponse.SerializeToString,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
generic_handler = grpc.method_handlers_generic_handler(
|
||||||
|
'v2.HookHandler', rpc_method_handlers)
|
||||||
|
server.add_generic_rpc_handlers((generic_handler,))
|
||||||
|
|
||||||
|
|
||||||
|
# This class is part of an EXPERIMENTAL API.
|
||||||
|
class HookHandler(object):
|
||||||
|
"""The hook service definition.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def InvokeHook(request,
|
||||||
|
target,
|
||||||
|
options=(),
|
||||||
|
channel_credentials=None,
|
||||||
|
call_credentials=None,
|
||||||
|
insecure=False,
|
||||||
|
compression=None,
|
||||||
|
wait_for_ready=None,
|
||||||
|
timeout=None,
|
||||||
|
metadata=None):
|
||||||
|
return grpc.experimental.unary_unary(request, target, '/v2.HookHandler/InvokeHook',
|
||||||
|
hook__pb2.HookRequest.SerializeToString,
|
||||||
|
hook__pb2.HookResponse.FromString,
|
||||||
|
options, channel_credentials,
|
||||||
|
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
|
@ -0,0 +1,67 @@
|
||||||
|
import grpc
|
||||||
|
from concurrent import futures
|
||||||
|
import time
|
||||||
|
import uuid
|
||||||
|
import hook_pb2_grpc as pb2_grpc
|
||||||
|
import hook_pb2 as pb2
|
||||||
|
|
||||||
|
class HookHandler(pb2_grpc.HookHandlerServicer):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def InvokeHook(self, hook_request, context):
|
||||||
|
# Print data from hook request for debugging
|
||||||
|
print('Received hook request:')
|
||||||
|
print(hook_request)
|
||||||
|
|
||||||
|
# Prepare hook response structure
|
||||||
|
hook_response = pb2.HookResponse()
|
||||||
|
|
||||||
|
# Example: Use the pre-create hook to check if a filename has been supplied
|
||||||
|
# using metadata. If not, the upload is rejected with a custom HTTP response.
|
||||||
|
# In addition, a custom upload ID with a choosable prefix is supplied.
|
||||||
|
# Metadata is configured, so that it only retains the filename meta data
|
||||||
|
# and the creation time.
|
||||||
|
if hook_request.type == 'pre-create':
|
||||||
|
metaData = hook_request.event.upload.metaData
|
||||||
|
isValid = 'filename' in metaData
|
||||||
|
if not isValid:
|
||||||
|
hook_response.rejectUpload = True
|
||||||
|
hook_response.httpResponse.statusCode = 400
|
||||||
|
hook_response.httpResponse.body = 'no filename provided'
|
||||||
|
hook_response.httpResponse.headers['X-Some-Header'] = 'yes'
|
||||||
|
else:
|
||||||
|
hook_response.changeFileInfo.id = f'prefix-{uuid.uuid4()}'
|
||||||
|
hook_response.changeFileInfo.metaData
|
||||||
|
hook_response.changeFileInfo.metaData['filename'] = metaData['filename']
|
||||||
|
hook_response.changeFileInfo.metaData['creation_time'] = time.ctime()
|
||||||
|
|
||||||
|
# Example: Use the post-finish hook to print information about a completed upload,
|
||||||
|
# including its storage location.
|
||||||
|
if hook_request.type == 'post-finish':
|
||||||
|
id = hook_request.event.upload.id
|
||||||
|
size = hook_request.event.upload.size
|
||||||
|
storage = hook_request.event.upload.storage
|
||||||
|
|
||||||
|
print(f'Upload {id} ({size} bytes) is finished. Find the file at:')
|
||||||
|
print(storage)
|
||||||
|
|
||||||
|
# Print data of hook response for debugging
|
||||||
|
print('Responding with hook response:')
|
||||||
|
print(hook_response)
|
||||||
|
print('------')
|
||||||
|
print('')
|
||||||
|
|
||||||
|
# Return the hook response to send back to tusd
|
||||||
|
return hook_response
|
||||||
|
|
||||||
|
def serve():
|
||||||
|
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
|
||||||
|
pb2_grpc.add_HookHandlerServicer_to_server(HookHandler(), server)
|
||||||
|
server.add_insecure_port('[::]:8000')
|
||||||
|
server.start()
|
||||||
|
server.wait_for_termination()
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
serve()
|
|
@ -0,0 +1,79 @@
|
||||||
|
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||||
|
from io import BytesIO
|
||||||
|
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
class HTTPHookHandler(BaseHTTPRequestHandler):
|
||||||
|
|
||||||
|
def do_GET(self):
|
||||||
|
self.send_response(200)
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(b'Hello! This server only responds to POST requests')
|
||||||
|
|
||||||
|
def do_POST(self):
|
||||||
|
# Read entire body as JSON object
|
||||||
|
content_length = int(self.headers['Content-Length'])
|
||||||
|
request_body = self.rfile.read(content_length)
|
||||||
|
hook_request = json.loads(request_body)
|
||||||
|
|
||||||
|
# Print data from hook request for debugging
|
||||||
|
print('Received hook request:')
|
||||||
|
print(hook_request)
|
||||||
|
|
||||||
|
# Prepare hook response structure
|
||||||
|
hook_response = {
|
||||||
|
'HTTPResponse': {
|
||||||
|
'Headers': {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Example: Use the pre-create hook to check if a filename has been supplied
|
||||||
|
# using metadata. If not, the upload is rejected with a custom HTTP response.
|
||||||
|
# In addition, a custom upload ID with a choosable prefix is supplied.
|
||||||
|
# Metadata is configured, so that it only retains the filename meta data
|
||||||
|
# and the creation time.
|
||||||
|
if hook_request['Type'] == 'pre-create':
|
||||||
|
metaData = hook_request['Event']['Upload']['MetaData']
|
||||||
|
isValid = 'filename' in metaData
|
||||||
|
if not isValid:
|
||||||
|
hook_response['RejectUpload'] = True
|
||||||
|
hook_response['HTTPResponse']['StatusCode'] = 400
|
||||||
|
hook_response['HTTPResponse']['Body'] = 'no filename provided'
|
||||||
|
hook_response['HTTPResponse']['Headers']['X-Some-Header'] = 'yes'
|
||||||
|
else:
|
||||||
|
hook_response['ChangeFileInfo'] = {}
|
||||||
|
hook_response['ChangeFileInfo']['ID'] = f'prefix-{uuid.uuid4()}'
|
||||||
|
hook_response['ChangeFileInfo']['MetaData'] = {
|
||||||
|
'filename': metaData['filename'],
|
||||||
|
'creation_time': time.ctime(),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Example: Use the post-finish hook to print information about a completed upload,
|
||||||
|
# including its storage location.
|
||||||
|
if hook_request['Type'] == 'post-finish':
|
||||||
|
id = hook_request['Event']['Upload']['ID']
|
||||||
|
size = hook_request['Event']['Upload']['Size']
|
||||||
|
storage = hook_request['Event']['Upload']['Storage']
|
||||||
|
|
||||||
|
print(f'Upload {id} ({size} bytes) is finished. Find the file at:')
|
||||||
|
print(storage)
|
||||||
|
|
||||||
|
|
||||||
|
# Print data of hook response for debugging
|
||||||
|
print('Responding with hook response:')
|
||||||
|
print(hook_response)
|
||||||
|
print('------')
|
||||||
|
print('')
|
||||||
|
|
||||||
|
# Send the data from the hook response as JSON output
|
||||||
|
response_body = json.dumps(hook_response)
|
||||||
|
self.send_response(200)
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(response_body.encode())
|
||||||
|
|
||||||
|
|
||||||
|
httpd = HTTPServer(('localhost', 8000), HTTPHookHandler)
|
||||||
|
httpd.serve_forever()
|
|
@ -0,0 +1,2 @@
|
||||||
|
hook_handler: hook_handler.go
|
||||||
|
go build -o hook_handler ./hook_handler.go
|
|
@ -0,0 +1,87 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
|
"github.com/hashicorp/go-plugin"
|
||||||
|
"github.com/tus/tusd/v2/cmd/tusd/cli/hooks"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Here is the implementation of our hook handler
|
||||||
|
type MyHookHandler struct {
|
||||||
|
logger hclog.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup is called once the plugin has been loaded by tusd.
|
||||||
|
func (g *MyHookHandler) Setup() error {
|
||||||
|
// Use the log package or the g.logger field to write debug messages.
|
||||||
|
// Do not write to stdout directly, as this is used for communication between
|
||||||
|
// tusd and the plugin.
|
||||||
|
log.Println("MyHookHandler.Setup is invoked")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvokeHook is called for every hook that tusd fires.
|
||||||
|
func (g *MyHookHandler) InvokeHook(req hooks.HookRequest) (res hooks.HookResponse, err error) {
|
||||||
|
log.Println("MyHookHandler.InvokeHook is invoked")
|
||||||
|
|
||||||
|
// Prepare hook response structure
|
||||||
|
res.HTTPResponse.Headers = make(map[string]string)
|
||||||
|
|
||||||
|
// Example: Use the pre-create hook to check if a filename has been supplied
|
||||||
|
// using metadata. If not, the upload is rejected with a custom HTTP response.
|
||||||
|
|
||||||
|
if req.Type == hooks.HookPreCreate {
|
||||||
|
if _, ok := req.Event.Upload.MetaData["filename"]; !ok {
|
||||||
|
res.RejectUpload = true
|
||||||
|
res.HTTPResponse.StatusCode = 400
|
||||||
|
res.HTTPResponse.Body = "no filename provided"
|
||||||
|
res.HTTPResponse.Headers["X-Some-Header"] = "yes"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Example: Use the post-finish hook to print information about a completed upload,
|
||||||
|
// including its storage location.
|
||||||
|
if req.Type == hooks.HookPreFinish {
|
||||||
|
id := req.Event.Upload.ID
|
||||||
|
size := req.Event.Upload.Size
|
||||||
|
storage := req.Event.Upload.Storage
|
||||||
|
|
||||||
|
log.Printf("Upload %s (%d bytes) is finished. Find the file at:\n", id, size)
|
||||||
|
log.Println(storage)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the hook response to tusd.
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handshakeConfigs are used to just do a basic handshake between
|
||||||
|
// a plugin and tusd. If the handshake fails, a user friendly error is shown.
|
||||||
|
// This prevents users from executing bad plugins or executing a plugin
|
||||||
|
// directory. It is a UX feature, not a security feature.
|
||||||
|
var handshakeConfig = plugin.HandshakeConfig{
|
||||||
|
ProtocolVersion: 1,
|
||||||
|
MagicCookieKey: "TUSD_PLUGIN",
|
||||||
|
MagicCookieValue: "yes",
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// 1. Initialize our handler.
|
||||||
|
myHandler := &MyHookHandler{}
|
||||||
|
|
||||||
|
// 2. Construct the plugin map. The key must be "hookHandler".
|
||||||
|
var pluginMap = map[string]plugin.Plugin{
|
||||||
|
"hookHandler": &hooks.HookHandlerPlugin{Impl: myHandler},
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Expose the plugin to tusd.
|
||||||
|
plugin.Serve(&plugin.ServeConfig{
|
||||||
|
HandshakeConfig: handshakeConfig,
|
||||||
|
Plugins: pluginMap,
|
||||||
|
})
|
||||||
|
|
||||||
|
fmt.Println("DOONE")
|
||||||
|
}
|
|
@ -1,8 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
id="$TUS_ID"
|
|
||||||
offset="$TUS_OFFSET"
|
|
||||||
size="$TUS_SIZE"
|
|
||||||
|
|
||||||
echo "Upload created with ID ${id} and size ${size}"
|
|
||||||
cat /dev/stdin | jq .
|
|
|
@ -1,4 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
echo "Upload $TUS_ID ($TUS_SIZE bytes) finished"
|
|
||||||
cat /dev/stdin | jq .
|
|
|
@ -1,8 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
id="$TUS_ID"
|
|
||||||
offset="$TUS_OFFSET"
|
|
||||||
size="$TUS_SIZE"
|
|
||||||
progress=$((100 * $offset/$size))
|
|
||||||
|
|
||||||
echo "Upload ${id} is at ${progress}% (${offset}/${size})"
|
|
|
@ -1,4 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
echo "Upload $TUS_ID terminated"
|
|
||||||
cat /dev/stdin | jq .
|
|
|
@ -1,7 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
filename=$(cat /dev/stdin | jq .Upload.MetaData.filename)
|
|
||||||
if [ -z "$filename" ]; then
|
|
||||||
echo "Error: no filename provided"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
|
@ -4,8 +4,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/filestore"
|
"github.com/tus/tusd/v2/pkg/filestore"
|
||||||
tusd "github.com/tus/tusd/pkg/handler"
|
tusd "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
9
go.mod
9
go.mod
|
@ -1,4 +1,4 @@
|
||||||
module github.com/tus/tusd
|
module github.com/tus/tusd/v2
|
||||||
|
|
||||||
// Specify the Go version needed for the Heroku deployment
|
// Specify the Go version needed for the Heroku deployment
|
||||||
// See https://github.com/heroku/heroku-buildpack-go#go-module-specifics
|
// See https://github.com/heroku/heroku-buildpack-go#go-module-specifics
|
||||||
|
@ -10,15 +10,20 @@ require (
|
||||||
github.com/Azure/azure-storage-blob-go v0.14.0
|
github.com/Azure/azure-storage-blob-go v0.14.0
|
||||||
github.com/aws/aws-sdk-go v1.44.273
|
github.com/aws/aws-sdk-go v1.44.273
|
||||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
|
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
|
||||||
|
github.com/felixge/fgprof v0.9.2
|
||||||
|
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d
|
||||||
github.com/golang/mock v1.6.0
|
github.com/golang/mock v1.6.0
|
||||||
github.com/golang/protobuf v1.5.3
|
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
|
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
|
||||||
|
github.com/hashicorp/go-hclog v0.14.1
|
||||||
|
github.com/hashicorp/go-plugin v1.4.3
|
||||||
|
github.com/minio/minio-go/v7 v7.0.31
|
||||||
github.com/prometheus/client_golang v1.15.1
|
github.com/prometheus/client_golang v1.15.1
|
||||||
github.com/sethgrid/pester v1.2.0
|
github.com/sethgrid/pester v1.2.0
|
||||||
github.com/stretchr/testify v1.8.4
|
github.com/stretchr/testify v1.8.4
|
||||||
github.com/vimeo/go-util v1.4.1
|
github.com/vimeo/go-util v1.4.1
|
||||||
google.golang.org/api v0.125.0
|
google.golang.org/api v0.125.0
|
||||||
google.golang.org/grpc v1.55.0
|
google.golang.org/grpc v1.55.0
|
||||||
|
google.golang.org/protobuf v1.30.0
|
||||||
gopkg.in/Acconut/lockfile.v1 v1.1.0
|
gopkg.in/Acconut/lockfile.v1 v1.1.0
|
||||||
gopkg.in/h2non/gock.v1 v1.1.2
|
gopkg.in/h2non/gock.v1 v1.1.2
|
||||||
)
|
)
|
||||||
|
|
65
go.sum
65
go.sum
|
@ -678,6 +678,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||||
|
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
@ -694,6 +695,10 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
|
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
|
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
|
github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
|
||||||
|
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||||
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc=
|
||||||
|
github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRealT5sg=
|
||||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||||
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
||||||
|
@ -724,6 +729,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
|
||||||
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
|
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d h1:lBXNCxVENCipq4D1Is42JVOP4eQjlB8TQ6H69Yx5J9Q=
|
||||||
|
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
|
||||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||||
|
@ -806,10 +813,13 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
|
||||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
|
||||||
|
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
||||||
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
|
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
|
||||||
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
||||||
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
|
@ -836,6 +846,8 @@ github.com/googleapis/gax-go/v2 v2.10.0 h1:ebSgKfMxynOdxw8QQuFOKMgomqeLGPqNLQox2
|
||||||
github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
|
github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
|
||||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||||
|
@ -843,11 +855,20 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4Zs
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
|
||||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
|
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
|
||||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||||
|
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
|
||||||
|
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||||
|
github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM=
|
||||||
|
github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
|
||||||
|
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||||
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
|
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||||
|
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
|
||||||
|
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
|
||||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||||
|
@ -856,9 +877,12 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||||
|
@ -867,7 +891,12 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
|
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
|
||||||
|
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||||
|
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
|
||||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||||
|
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
|
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
|
||||||
|
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
@ -885,9 +914,14 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||||
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||||
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
|
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
|
||||||
|
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||||
|
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||||
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
|
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
|
||||||
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
||||||
|
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||||
|
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
@ -895,15 +929,27 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||||
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
|
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
|
||||||
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
|
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
|
||||||
|
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
|
||||||
|
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
|
||||||
|
github.com/minio/minio-go/v7 v7.0.31 h1:zsJ3qPDeU3bC5UMVi9HJ4ED0lyEzrNd3iQguglZS5FE=
|
||||||
|
github.com/minio/minio-go/v7 v7.0.31/go.mod h1:/sjRKkKIA75CKh1iu8E3qBy7ktBmCCDGII0zbXGwbUk=
|
||||||
|
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||||
|
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||||
|
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg=
|
||||||
|
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
|
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
|
||||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
||||||
|
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||||
|
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||||
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
|
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
|
||||||
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
|
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
|
||||||
|
@ -953,6 +999,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
||||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
|
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
||||||
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
|
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
|
||||||
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
|
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
|
||||||
github.com/sethgrid/pester v1.2.0 h1:adC9RS29rRUef3rIKWPOuP1Jm3/MmB6ke+OhE5giENI=
|
github.com/sethgrid/pester v1.2.0 h1:adC9RS29rRUef3rIKWPOuP1Jm3/MmB6ke+OhE5giENI=
|
||||||
|
@ -960,6 +1008,12 @@ github.com/sethgrid/pester v1.2.0/go.mod h1:hEUINb4RqvDxtoCaU0BNT/HV4ig5kfgOasrf
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
|
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||||
|
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||||
|
@ -1015,6 +1069,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
@ -1080,6 +1135,7 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
|
||||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
|
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
@ -1198,6 +1254,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
@ -1207,6 +1264,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
@ -1286,6 +1345,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
@ -1327,6 +1387,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
@ -1469,6 +1530,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
@ -1616,6 +1678,7 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||||
|
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
@ -1688,6 +1751,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
|
gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
|
||||||
gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
|
gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
|
||||||
|
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||||
|
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
|
|
@ -0,0 +1,20 @@
|
||||||
|
// Package semaphore implements a basic semaphore for coordinating and limiting
|
||||||
|
// non-exclusive, concurrent access.
|
||||||
|
package semaphore
|
||||||
|
|
||||||
|
type Semaphore chan struct{}
|
||||||
|
|
||||||
|
// New creates a semaphore with the given concurrency limit.
|
||||||
|
func New(concurrency int) Semaphore {
|
||||||
|
return make(chan struct{}, concurrency)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Acquire will block until the semaphore can be acquired.
|
||||||
|
func (s Semaphore) Acquire() {
|
||||||
|
s <- struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release frees the acquired slot in the semaphore.
|
||||||
|
func (s Semaphore) Release() {
|
||||||
|
<-s
|
||||||
|
}
|
|
@ -15,7 +15,6 @@
|
||||||
package azurestore
|
package azurestore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
@ -26,7 +25,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -59,8 +58,8 @@ type AzBlob interface {
|
||||||
Delete(ctx context.Context) error
|
Delete(ctx context.Context) error
|
||||||
// Upload the blob
|
// Upload the blob
|
||||||
Upload(ctx context.Context, body io.ReadSeeker) error
|
Upload(ctx context.Context, body io.ReadSeeker) error
|
||||||
// Download the contents of the blob
|
// Download returns a readcloser to download the contents of the blob
|
||||||
Download(ctx context.Context) ([]byte, error)
|
Download(ctx context.Context) (io.ReadCloser, error)
|
||||||
// Get the offset of the blob and its indexes
|
// Get the offset of the blob and its indexes
|
||||||
GetOffset(ctx context.Context) (int64, error)
|
GetOffset(ctx context.Context) (int64, error)
|
||||||
// Commit the uploaded blocks to the BlockBlob
|
// Commit the uploaded blocks to the BlockBlob
|
||||||
|
@ -171,7 +170,7 @@ func (blockBlob *BlockBlob) Upload(ctx context.Context, body io.ReadSeeker) erro
|
||||||
}
|
}
|
||||||
|
|
||||||
// Download the blockBlob from Azure Blob Storage
|
// Download the blockBlob from Azure Blob Storage
|
||||||
func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err error) {
|
func (blockBlob *BlockBlob) Download(ctx context.Context) (io.ReadCloser, error) {
|
||||||
downloadResponse, err := blockBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
downloadResponse, err := blockBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
||||||
|
|
||||||
// If the file does not exist, it will not return an error, but a 404 status and body
|
// If the file does not exist, it will not return an error, but a 404 status and body
|
||||||
|
@ -186,15 +185,7 @@ func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err erro
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
|
return downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}), nil
|
||||||
downloadedData := bytes.Buffer{}
|
|
||||||
|
|
||||||
_, err = downloadedData.ReadFrom(bodyStream)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return downloadedData.Bytes(), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) {
|
func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) {
|
||||||
|
@ -258,7 +249,7 @@ func (infoBlob *InfoBlob) Upload(ctx context.Context, body io.ReadSeeker) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Download the infoBlob from Azure Blob Storage
|
// Download the infoBlob from Azure Blob Storage
|
||||||
func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
|
func (infoBlob *InfoBlob) Download(ctx context.Context) (io.ReadCloser, error) {
|
||||||
downloadResponse, err := infoBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
downloadResponse, err := infoBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
||||||
|
|
||||||
// If the file does not exist, it will not return an error, but a 404 status and body
|
// If the file does not exist, it will not return an error, but a 404 status and body
|
||||||
|
@ -272,15 +263,7 @@ func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
|
return downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}), nil
|
||||||
downloadedData := bytes.Buffer{}
|
|
||||||
|
|
||||||
_, err = downloadedData.ReadFrom(bodyStream)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return downloadedData.Bytes(), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// infoBlob does not utilise offset, so just return 0, nil
|
// infoBlob does not utilise offset, so just return 0, nil
|
||||||
|
|
|
@ -10,8 +10,8 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/tus/tusd/internal/uid"
|
"github.com/tus/tusd/v2/internal/uid"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
type AzureStore struct {
|
type AzureStore struct {
|
||||||
|
@ -96,8 +96,9 @@ func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Uploa
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
defer data.Close()
|
||||||
|
|
||||||
if err := json.Unmarshal(data, &info); err != nil {
|
if err := json.NewDecoder(data).Decode(&info); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -112,8 +113,12 @@ func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Uploa
|
||||||
}
|
}
|
||||||
|
|
||||||
offset, err := blockBlob.GetOffset(ctx)
|
offset, err := blockBlob.GetOffset(ctx)
|
||||||
if err != nil && err != handler.ErrNotFound {
|
if err != nil {
|
||||||
return nil, err
|
// Unpack the error and see if it is a handler.ErrNotFound by comparing the
|
||||||
|
// error code. If it matches, we ignore the error, otherwise we return the error.
|
||||||
|
if handlerErr, ok := err.(handler.Error); !ok || handlerErr.ErrorCode != handler.ErrNotFound.ErrorCode {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
info.Offset = offset
|
info.Offset = offset
|
||||||
|
@ -169,7 +174,7 @@ func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
|
||||||
return info, err
|
return info, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := json.Unmarshal(data, &info); err != nil {
|
if err := json.NewDecoder(data).Decode(&info); err != nil {
|
||||||
return info, err
|
return info, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,12 +183,8 @@ func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the uploaded file from the Azure storage
|
// Get the uploaded file from the Azure storage
|
||||||
func (upload *AzUpload) GetReader(ctx context.Context) (io.Reader, error) {
|
func (upload *AzUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
|
||||||
b, err := upload.BlockBlob.Download(ctx)
|
return upload.BlockBlob.Download(ctx)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return bytes.NewReader(b), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finish the file upload and commit the block list
|
// Finish the file upload and commit the block list
|
||||||
|
|
|
@ -6,36 +6,37 @@ package azurestore_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
azurestore "github.com/tus/tusd/pkg/azurestore"
|
|
||||||
io "io"
|
io "io"
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
azurestore "github.com/tus/tusd/v2/pkg/azurestore"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockAzService is a mock of AzService interface
|
// MockAzService is a mock of AzService interface.
|
||||||
type MockAzService struct {
|
type MockAzService struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockAzServiceMockRecorder
|
recorder *MockAzServiceMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockAzServiceMockRecorder is the mock recorder for MockAzService
|
// MockAzServiceMockRecorder is the mock recorder for MockAzService.
|
||||||
type MockAzServiceMockRecorder struct {
|
type MockAzServiceMockRecorder struct {
|
||||||
mock *MockAzService
|
mock *MockAzService
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockAzService creates a new mock instance
|
// NewMockAzService creates a new mock instance.
|
||||||
func NewMockAzService(ctrl *gomock.Controller) *MockAzService {
|
func NewMockAzService(ctrl *gomock.Controller) *MockAzService {
|
||||||
mock := &MockAzService{ctrl: ctrl}
|
mock := &MockAzService{ctrl: ctrl}
|
||||||
mock.recorder = &MockAzServiceMockRecorder{mock}
|
mock.recorder = &MockAzServiceMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockAzService) EXPECT() *MockAzServiceMockRecorder {
|
func (m *MockAzService) EXPECT() *MockAzServiceMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBlob mocks base method
|
// NewBlob mocks base method.
|
||||||
func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.AzBlob, error) {
|
func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.AzBlob, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "NewBlob", arg0, arg1)
|
ret := m.ctrl.Call(m, "NewBlob", arg0, arg1)
|
||||||
|
@ -44,36 +45,36 @@ func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.A
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBlob indicates an expected call of NewBlob
|
// NewBlob indicates an expected call of NewBlob.
|
||||||
func (mr *MockAzServiceMockRecorder) NewBlob(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockAzServiceMockRecorder) NewBlob(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlob", reflect.TypeOf((*MockAzService)(nil).NewBlob), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlob", reflect.TypeOf((*MockAzService)(nil).NewBlob), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockAzBlob is a mock of AzBlob interface
|
// MockAzBlob is a mock of AzBlob interface.
|
||||||
type MockAzBlob struct {
|
type MockAzBlob struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockAzBlobMockRecorder
|
recorder *MockAzBlobMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockAzBlobMockRecorder is the mock recorder for MockAzBlob
|
// MockAzBlobMockRecorder is the mock recorder for MockAzBlob.
|
||||||
type MockAzBlobMockRecorder struct {
|
type MockAzBlobMockRecorder struct {
|
||||||
mock *MockAzBlob
|
mock *MockAzBlob
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockAzBlob creates a new mock instance
|
// NewMockAzBlob creates a new mock instance.
|
||||||
func NewMockAzBlob(ctrl *gomock.Controller) *MockAzBlob {
|
func NewMockAzBlob(ctrl *gomock.Controller) *MockAzBlob {
|
||||||
mock := &MockAzBlob{ctrl: ctrl}
|
mock := &MockAzBlob{ctrl: ctrl}
|
||||||
mock.recorder = &MockAzBlobMockRecorder{mock}
|
mock.recorder = &MockAzBlobMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockAzBlob) EXPECT() *MockAzBlobMockRecorder {
|
func (m *MockAzBlob) EXPECT() *MockAzBlobMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// Commit mocks base method
|
// Commit mocks base method.
|
||||||
func (m *MockAzBlob) Commit(arg0 context.Context) error {
|
func (m *MockAzBlob) Commit(arg0 context.Context) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Commit", arg0)
|
ret := m.ctrl.Call(m, "Commit", arg0)
|
||||||
|
@ -81,13 +82,13 @@ func (m *MockAzBlob) Commit(arg0 context.Context) error {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Commit indicates an expected call of Commit
|
// Commit indicates an expected call of Commit.
|
||||||
func (mr *MockAzBlobMockRecorder) Commit(arg0 interface{}) *gomock.Call {
|
func (mr *MockAzBlobMockRecorder) Commit(arg0 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockAzBlob)(nil).Commit), arg0)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockAzBlob)(nil).Commit), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete mocks base method
|
// Delete mocks base method.
|
||||||
func (m *MockAzBlob) Delete(arg0 context.Context) error {
|
func (m *MockAzBlob) Delete(arg0 context.Context) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Delete", arg0)
|
ret := m.ctrl.Call(m, "Delete", arg0)
|
||||||
|
@ -95,28 +96,28 @@ func (m *MockAzBlob) Delete(arg0 context.Context) error {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete indicates an expected call of Delete
|
// Delete indicates an expected call of Delete.
|
||||||
func (mr *MockAzBlobMockRecorder) Delete(arg0 interface{}) *gomock.Call {
|
func (mr *MockAzBlobMockRecorder) Delete(arg0 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAzBlob)(nil).Delete), arg0)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAzBlob)(nil).Delete), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Download mocks base method
|
// Download mocks base method.
|
||||||
func (m *MockAzBlob) Download(arg0 context.Context) ([]byte, error) {
|
func (m *MockAzBlob) Download(arg0 context.Context) (io.ReadCloser, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Download", arg0)
|
ret := m.ctrl.Call(m, "Download", arg0)
|
||||||
ret0, _ := ret[0].([]byte)
|
ret0, _ := ret[0].(io.ReadCloser)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Download indicates an expected call of Download
|
// Download indicates an expected call of Download.
|
||||||
func (mr *MockAzBlobMockRecorder) Download(arg0 interface{}) *gomock.Call {
|
func (mr *MockAzBlobMockRecorder) Download(arg0 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockAzBlob)(nil).Download), arg0)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockAzBlob)(nil).Download), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetOffset mocks base method
|
// GetOffset mocks base method.
|
||||||
func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
|
func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetOffset", arg0)
|
ret := m.ctrl.Call(m, "GetOffset", arg0)
|
||||||
|
@ -125,13 +126,13 @@ func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetOffset indicates an expected call of GetOffset
|
// GetOffset indicates an expected call of GetOffset.
|
||||||
func (mr *MockAzBlobMockRecorder) GetOffset(arg0 interface{}) *gomock.Call {
|
func (mr *MockAzBlobMockRecorder) GetOffset(arg0 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOffset", reflect.TypeOf((*MockAzBlob)(nil).GetOffset), arg0)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOffset", reflect.TypeOf((*MockAzBlob)(nil).GetOffset), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upload mocks base method
|
// Upload mocks base method.
|
||||||
func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
|
func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Upload", arg0, arg1)
|
ret := m.ctrl.Call(m, "Upload", arg0, arg1)
|
||||||
|
@ -139,7 +140,7 @@ func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upload indicates an expected call of Upload
|
// Upload indicates an expected call of Upload.
|
||||||
func (mr *MockAzBlobMockRecorder) Upload(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockAzBlobMockRecorder) Upload(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockAzBlob)(nil).Upload), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockAzBlob)(nil).Upload), arg0, arg1)
|
||||||
|
|
|
@ -5,13 +5,14 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
"io"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tus/tusd/pkg/azurestore"
|
"github.com/tus/tusd/v2/pkg/azurestore"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate mockgen -destination=./azurestore_mock_test.go -package=azurestore_test github.com/tus/tusd/pkg/azurestore AzService,AzBlob
|
//go:generate mockgen -destination=./azurestore_mock_test.go -package=azurestore_test github.com/tus/tusd/pkg/azurestore AzService,AzBlob
|
||||||
|
@ -153,7 +154,7 @@ func TestGetUpload(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
)
|
)
|
||||||
|
@ -189,7 +190,7 @@ func TestGetUploadTooLargeBlob(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload(ctx, mockID)
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
@ -246,10 +247,10 @@ func TestGetReader(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
blockBlob.EXPECT().Download(ctx).Return([]byte(mockReaderData), nil).Times(1),
|
blockBlob.EXPECT().Download(ctx).Return(newReadCloser([]byte(mockReaderData)), nil).Times(1),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload(ctx, mockID)
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
@ -286,7 +287,7 @@ func TestWriteChunk(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
|
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
|
||||||
blockBlob.EXPECT().Upload(ctx, bytes.NewReader([]byte(mockReaderData))).Return(nil).Times(1),
|
blockBlob.EXPECT().Upload(ctx, bytes.NewReader([]byte(mockReaderData))).Return(nil).Times(1),
|
||||||
|
@ -325,7 +326,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
|
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
|
||||||
blockBlob.EXPECT().Commit(ctx).Return(nil).Times(1),
|
blockBlob.EXPECT().Commit(ctx).Return(nil).Times(1),
|
||||||
|
@ -362,7 +363,7 @@ func TestTerminate(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
infoBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
|
infoBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
|
||||||
|
@ -405,7 +406,7 @@ func TestDeclareLength(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
infoBlob.EXPECT().Download(ctx).Return(newReadCloser(data), nil).Times(1),
|
||||||
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
|
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
|
||||||
|
@ -424,3 +425,7 @@ func TestDeclareLength(t *testing.T) {
|
||||||
|
|
||||||
cancel()
|
cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newReadCloser(b []byte) io.ReadCloser {
|
||||||
|
return io.NopCloser(bytes.NewReader(b))
|
||||||
|
}
|
||||||
|
|
|
@ -9,10 +9,11 @@
|
||||||
package filelocker
|
package filelocker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
|
|
||||||
"gopkg.in/Acconut/lockfile.v1"
|
"gopkg.in/Acconut/lockfile.v1"
|
||||||
)
|
)
|
||||||
|
@ -58,7 +59,8 @@ type fileUploadLock struct {
|
||||||
file lockfile.Lockfile
|
file lockfile.Lockfile
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lock fileUploadLock) Lock() error {
|
// TODO: Implement functionality for ctx and requestRelease.
|
||||||
|
func (lock fileUploadLock) Lock(ctx context.Context, requestRelease func()) error {
|
||||||
err := lock.file.TryLock()
|
err := lock.file.TryLock()
|
||||||
if err == lockfile.ErrBusy {
|
if err == lockfile.ErrBusy {
|
||||||
return handler.ErrFileLocked
|
return handler.ErrFileLocked
|
||||||
|
|
|
@ -1,11 +1,12 @@
|
||||||
package filelocker
|
package filelocker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ handler.Locker = &FileLocker{}
|
var _ handler.Locker = &FileLocker{}
|
||||||
|
@ -21,12 +22,12 @@ func TestFileLocker(t *testing.T) {
|
||||||
lock1, err := locker.NewLock("one")
|
lock1, err := locker.NewLock("one")
|
||||||
a.NoError(err)
|
a.NoError(err)
|
||||||
|
|
||||||
a.NoError(lock1.Lock())
|
a.NoError(lock1.Lock(context.TODO(), nil))
|
||||||
a.Equal(handler.ErrFileLocked, lock1.Lock())
|
a.Equal(handler.ErrFileLocked, lock1.Lock(context.TODO(), nil))
|
||||||
|
|
||||||
lock2, err := locker.NewLock("one")
|
lock2, err := locker.NewLock("one")
|
||||||
a.NoError(err)
|
a.NoError(err)
|
||||||
a.Equal(handler.ErrFileLocked, lock2.Lock())
|
a.Equal(handler.ErrFileLocked, lock2.Lock(context.TODO(), nil))
|
||||||
|
|
||||||
a.NoError(lock1.Unlock())
|
a.NoError(lock1.Unlock())
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,8 +17,8 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/tus/tusd/internal/uid"
|
"github.com/tus/tusd/v2/internal/uid"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
var defaultFilePerm = os.FileMode(0664)
|
var defaultFilePerm = os.FileMode(0664)
|
||||||
|
@ -49,7 +49,7 @@ func (store FileStore) UseIn(composer *handler.StoreComposer) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
||||||
if info.ID == "" {
|
if info.ID == "" {
|
||||||
info.ID = uid.Uid()
|
info.ID = uid.Uid()
|
||||||
}
|
}
|
||||||
binPath := store.binPath(info.ID)
|
binPath := store.binPath(info.ID)
|
||||||
|
@ -168,7 +168,7 @@ func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.R
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) {
|
func (upload *fileUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
|
||||||
return os.Open(upload.binPath)
|
return os.Open(upload.binPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Test interface implementation of Filestore
|
// Test interface implementation of Filestore
|
||||||
|
|
|
@ -9,7 +9,7 @@ import (
|
||||||
"gopkg.in/h2non/gock.v1"
|
"gopkg.in/h2non/gock.v1"
|
||||||
|
|
||||||
"cloud.google.com/go/storage"
|
"cloud.google.com/go/storage"
|
||||||
. "github.com/tus/tusd/pkg/gcsstore"
|
. "github.com/tus/tusd/v2/pkg/gcsstore"
|
||||||
"google.golang.org/api/option"
|
"google.golang.org/api/option"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -22,8 +22,8 @@ import (
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"cloud.google.com/go/storage"
|
"cloud.google.com/go/storage"
|
||||||
"github.com/tus/tusd/internal/uid"
|
"github.com/tus/tusd/v2/internal/uid"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
// See the handler.DataStore interface for documentation about the different
|
// See the handler.DataStore interface for documentation about the different
|
||||||
|
@ -325,7 +325,7 @@ func (upload gcsUpload) Terminate(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
|
func (upload gcsUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
|
|
||||||
|
@ -334,12 +334,7 @@ func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
|
||||||
ID: store.keyWithPrefix(id),
|
ID: store.keyWithPrefix(id),
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err := store.Service.ReadObject(ctx, params)
|
return store.Service.ReadObject(ctx, params)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return r, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store GCSStore) keyWithPrefix(key string) string {
|
func (store GCSStore) keyWithPrefix(key string) string {
|
||||||
|
|
|
@ -6,36 +6,37 @@ package gcsstore_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
gcsstore "github.com/tus/tusd/pkg/gcsstore"
|
|
||||||
io "io"
|
io "io"
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
gcsstore "github.com/tus/tusd/v2/pkg/gcsstore"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockGCSReader is a mock of GCSReader interface
|
// MockGCSReader is a mock of GCSReader interface.
|
||||||
type MockGCSReader struct {
|
type MockGCSReader struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockGCSReaderMockRecorder
|
recorder *MockGCSReaderMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockGCSReaderMockRecorder is the mock recorder for MockGCSReader
|
// MockGCSReaderMockRecorder is the mock recorder for MockGCSReader.
|
||||||
type MockGCSReaderMockRecorder struct {
|
type MockGCSReaderMockRecorder struct {
|
||||||
mock *MockGCSReader
|
mock *MockGCSReader
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockGCSReader creates a new mock instance
|
// NewMockGCSReader creates a new mock instance.
|
||||||
func NewMockGCSReader(ctrl *gomock.Controller) *MockGCSReader {
|
func NewMockGCSReader(ctrl *gomock.Controller) *MockGCSReader {
|
||||||
mock := &MockGCSReader{ctrl: ctrl}
|
mock := &MockGCSReader{ctrl: ctrl}
|
||||||
mock.recorder = &MockGCSReaderMockRecorder{mock}
|
mock.recorder = &MockGCSReaderMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockGCSReader) EXPECT() *MockGCSReaderMockRecorder {
|
func (m *MockGCSReader) EXPECT() *MockGCSReaderMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close mocks base method
|
// Close mocks base method.
|
||||||
func (m *MockGCSReader) Close() error {
|
func (m *MockGCSReader) Close() error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Close")
|
ret := m.ctrl.Call(m, "Close")
|
||||||
|
@ -43,13 +44,13 @@ func (m *MockGCSReader) Close() error {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close indicates an expected call of Close
|
// Close indicates an expected call of Close.
|
||||||
func (mr *MockGCSReaderMockRecorder) Close() *gomock.Call {
|
func (mr *MockGCSReaderMockRecorder) Close() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockGCSReader)(nil).Close))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockGCSReader)(nil).Close))
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContentType mocks base method
|
// ContentType mocks base method.
|
||||||
func (m *MockGCSReader) ContentType() string {
|
func (m *MockGCSReader) ContentType() string {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "ContentType")
|
ret := m.ctrl.Call(m, "ContentType")
|
||||||
|
@ -57,13 +58,13 @@ func (m *MockGCSReader) ContentType() string {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContentType indicates an expected call of ContentType
|
// ContentType indicates an expected call of ContentType.
|
||||||
func (mr *MockGCSReaderMockRecorder) ContentType() *gomock.Call {
|
func (mr *MockGCSReaderMockRecorder) ContentType() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContentType", reflect.TypeOf((*MockGCSReader)(nil).ContentType))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContentType", reflect.TypeOf((*MockGCSReader)(nil).ContentType))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read mocks base method
|
// Read mocks base method.
|
||||||
func (m *MockGCSReader) Read(arg0 []byte) (int, error) {
|
func (m *MockGCSReader) Read(arg0 []byte) (int, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Read", arg0)
|
ret := m.ctrl.Call(m, "Read", arg0)
|
||||||
|
@ -72,13 +73,13 @@ func (m *MockGCSReader) Read(arg0 []byte) (int, error) {
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read indicates an expected call of Read
|
// Read indicates an expected call of Read.
|
||||||
func (mr *MockGCSReaderMockRecorder) Read(arg0 interface{}) *gomock.Call {
|
func (mr *MockGCSReaderMockRecorder) Read(arg0 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockGCSReader)(nil).Read), arg0)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockGCSReader)(nil).Read), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remain mocks base method
|
// Remain mocks base method.
|
||||||
func (m *MockGCSReader) Remain() int64 {
|
func (m *MockGCSReader) Remain() int64 {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Remain")
|
ret := m.ctrl.Call(m, "Remain")
|
||||||
|
@ -86,13 +87,13 @@ func (m *MockGCSReader) Remain() int64 {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remain indicates an expected call of Remain
|
// Remain indicates an expected call of Remain.
|
||||||
func (mr *MockGCSReaderMockRecorder) Remain() *gomock.Call {
|
func (mr *MockGCSReaderMockRecorder) Remain() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remain", reflect.TypeOf((*MockGCSReader)(nil).Remain))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remain", reflect.TypeOf((*MockGCSReader)(nil).Remain))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Size mocks base method
|
// Size mocks base method.
|
||||||
func (m *MockGCSReader) Size() int64 {
|
func (m *MockGCSReader) Size() int64 {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Size")
|
ret := m.ctrl.Call(m, "Size")
|
||||||
|
@ -100,36 +101,36 @@ func (m *MockGCSReader) Size() int64 {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Size indicates an expected call of Size
|
// Size indicates an expected call of Size.
|
||||||
func (mr *MockGCSReaderMockRecorder) Size() *gomock.Call {
|
func (mr *MockGCSReaderMockRecorder) Size() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockGCSReader)(nil).Size))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockGCSReader)(nil).Size))
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockGCSAPI is a mock of GCSAPI interface
|
// MockGCSAPI is a mock of GCSAPI interface.
|
||||||
type MockGCSAPI struct {
|
type MockGCSAPI struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockGCSAPIMockRecorder
|
recorder *MockGCSAPIMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockGCSAPIMockRecorder is the mock recorder for MockGCSAPI
|
// MockGCSAPIMockRecorder is the mock recorder for MockGCSAPI.
|
||||||
type MockGCSAPIMockRecorder struct {
|
type MockGCSAPIMockRecorder struct {
|
||||||
mock *MockGCSAPI
|
mock *MockGCSAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockGCSAPI creates a new mock instance
|
// NewMockGCSAPI creates a new mock instance.
|
||||||
func NewMockGCSAPI(ctrl *gomock.Controller) *MockGCSAPI {
|
func NewMockGCSAPI(ctrl *gomock.Controller) *MockGCSAPI {
|
||||||
mock := &MockGCSAPI{ctrl: ctrl}
|
mock := &MockGCSAPI{ctrl: ctrl}
|
||||||
mock.recorder = &MockGCSAPIMockRecorder{mock}
|
mock.recorder = &MockGCSAPIMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockGCSAPI) EXPECT() *MockGCSAPIMockRecorder {
|
func (m *MockGCSAPI) EXPECT() *MockGCSAPIMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// ComposeObjects mocks base method
|
// ComposeObjects mocks base method.
|
||||||
func (m *MockGCSAPI) ComposeObjects(arg0 context.Context, arg1 gcsstore.GCSComposeParams) error {
|
func (m *MockGCSAPI) ComposeObjects(arg0 context.Context, arg1 gcsstore.GCSComposeParams) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "ComposeObjects", arg0, arg1)
|
ret := m.ctrl.Call(m, "ComposeObjects", arg0, arg1)
|
||||||
|
@ -137,13 +138,13 @@ func (m *MockGCSAPI) ComposeObjects(arg0 context.Context, arg1 gcsstore.GCSCompo
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// ComposeObjects indicates an expected call of ComposeObjects
|
// ComposeObjects indicates an expected call of ComposeObjects.
|
||||||
func (mr *MockGCSAPIMockRecorder) ComposeObjects(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) ComposeObjects(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComposeObjects", reflect.TypeOf((*MockGCSAPI)(nil).ComposeObjects), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComposeObjects", reflect.TypeOf((*MockGCSAPI)(nil).ComposeObjects), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObject mocks base method
|
// DeleteObject mocks base method.
|
||||||
func (m *MockGCSAPI) DeleteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) error {
|
func (m *MockGCSAPI) DeleteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "DeleteObject", arg0, arg1)
|
ret := m.ctrl.Call(m, "DeleteObject", arg0, arg1)
|
||||||
|
@ -151,13 +152,13 @@ func (m *MockGCSAPI) DeleteObject(arg0 context.Context, arg1 gcsstore.GCSObjectP
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObject indicates an expected call of DeleteObject
|
// DeleteObject indicates an expected call of DeleteObject.
|
||||||
func (mr *MockGCSAPIMockRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObject), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObject), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectsWithFilter mocks base method
|
// DeleteObjectsWithFilter mocks base method.
|
||||||
func (m *MockGCSAPI) DeleteObjectsWithFilter(arg0 context.Context, arg1 gcsstore.GCSFilterParams) error {
|
func (m *MockGCSAPI) DeleteObjectsWithFilter(arg0 context.Context, arg1 gcsstore.GCSFilterParams) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "DeleteObjectsWithFilter", arg0, arg1)
|
ret := m.ctrl.Call(m, "DeleteObjectsWithFilter", arg0, arg1)
|
||||||
|
@ -165,13 +166,13 @@ func (m *MockGCSAPI) DeleteObjectsWithFilter(arg0 context.Context, arg1 gcsstore
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectsWithFilter indicates an expected call of DeleteObjectsWithFilter
|
// DeleteObjectsWithFilter indicates an expected call of DeleteObjectsWithFilter.
|
||||||
func (mr *MockGCSAPIMockRecorder) DeleteObjectsWithFilter(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) DeleteObjectsWithFilter(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithFilter", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObjectsWithFilter), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithFilter", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObjectsWithFilter), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FilterObjects mocks base method
|
// FilterObjects mocks base method.
|
||||||
func (m *MockGCSAPI) FilterObjects(arg0 context.Context, arg1 gcsstore.GCSFilterParams) ([]string, error) {
|
func (m *MockGCSAPI) FilterObjects(arg0 context.Context, arg1 gcsstore.GCSFilterParams) ([]string, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "FilterObjects", arg0, arg1)
|
ret := m.ctrl.Call(m, "FilterObjects", arg0, arg1)
|
||||||
|
@ -180,13 +181,13 @@ func (m *MockGCSAPI) FilterObjects(arg0 context.Context, arg1 gcsstore.GCSFilter
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// FilterObjects indicates an expected call of FilterObjects
|
// FilterObjects indicates an expected call of FilterObjects.
|
||||||
func (mr *MockGCSAPIMockRecorder) FilterObjects(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) FilterObjects(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterObjects", reflect.TypeOf((*MockGCSAPI)(nil).FilterObjects), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterObjects", reflect.TypeOf((*MockGCSAPI)(nil).FilterObjects), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectSize mocks base method
|
// GetObjectSize mocks base method.
|
||||||
func (m *MockGCSAPI) GetObjectSize(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (int64, error) {
|
func (m *MockGCSAPI) GetObjectSize(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (int64, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetObjectSize", arg0, arg1)
|
ret := m.ctrl.Call(m, "GetObjectSize", arg0, arg1)
|
||||||
|
@ -195,13 +196,13 @@ func (m *MockGCSAPI) GetObjectSize(arg0 context.Context, arg1 gcsstore.GCSObject
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectSize indicates an expected call of GetObjectSize
|
// GetObjectSize indicates an expected call of GetObjectSize.
|
||||||
func (mr *MockGCSAPIMockRecorder) GetObjectSize(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) GetObjectSize(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectSize", reflect.TypeOf((*MockGCSAPI)(nil).GetObjectSize), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectSize", reflect.TypeOf((*MockGCSAPI)(nil).GetObjectSize), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadObject mocks base method
|
// ReadObject mocks base method.
|
||||||
func (m *MockGCSAPI) ReadObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (gcsstore.GCSReader, error) {
|
func (m *MockGCSAPI) ReadObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (gcsstore.GCSReader, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "ReadObject", arg0, arg1)
|
ret := m.ctrl.Call(m, "ReadObject", arg0, arg1)
|
||||||
|
@ -210,13 +211,13 @@ func (m *MockGCSAPI) ReadObject(arg0 context.Context, arg1 gcsstore.GCSObjectPar
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadObject indicates an expected call of ReadObject
|
// ReadObject indicates an expected call of ReadObject.
|
||||||
func (mr *MockGCSAPIMockRecorder) ReadObject(arg0, arg1 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) ReadObject(arg0, arg1 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadObject", reflect.TypeOf((*MockGCSAPI)(nil).ReadObject), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadObject", reflect.TypeOf((*MockGCSAPI)(nil).ReadObject), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetObjectMetadata mocks base method
|
// SetObjectMetadata mocks base method.
|
||||||
func (m *MockGCSAPI) SetObjectMetadata(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 map[string]string) error {
|
func (m *MockGCSAPI) SetObjectMetadata(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 map[string]string) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "SetObjectMetadata", arg0, arg1, arg2)
|
ret := m.ctrl.Call(m, "SetObjectMetadata", arg0, arg1, arg2)
|
||||||
|
@ -224,13 +225,13 @@ func (m *MockGCSAPI) SetObjectMetadata(arg0 context.Context, arg1 gcsstore.GCSOb
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetObjectMetadata indicates an expected call of SetObjectMetadata
|
// SetObjectMetadata indicates an expected call of SetObjectMetadata.
|
||||||
func (mr *MockGCSAPIMockRecorder) SetObjectMetadata(arg0, arg1, arg2 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) SetObjectMetadata(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetObjectMetadata", reflect.TypeOf((*MockGCSAPI)(nil).SetObjectMetadata), arg0, arg1, arg2)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetObjectMetadata", reflect.TypeOf((*MockGCSAPI)(nil).SetObjectMetadata), arg0, arg1, arg2)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteObject mocks base method
|
// WriteObject mocks base method.
|
||||||
func (m *MockGCSAPI) WriteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 io.Reader) (int64, error) {
|
func (m *MockGCSAPI) WriteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 io.Reader) (int64, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "WriteObject", arg0, arg1, arg2)
|
ret := m.ctrl.Call(m, "WriteObject", arg0, arg1, arg2)
|
||||||
|
@ -239,7 +240,7 @@ func (m *MockGCSAPI) WriteObject(arg0 context.Context, arg1 gcsstore.GCSObjectPa
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteObject indicates an expected call of WriteObject
|
// WriteObject indicates an expected call of WriteObject.
|
||||||
func (mr *MockGCSAPIMockRecorder) WriteObject(arg0, arg1, arg2 interface{}) *gomock.Call {
|
func (mr *MockGCSAPIMockRecorder) WriteObject(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteObject", reflect.TypeOf((*MockGCSAPI)(nil).WriteObject), arg0, arg1, arg2)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteObject", reflect.TypeOf((*MockGCSAPI)(nil).WriteObject), arg0, arg1, arg2)
|
||||||
|
|
|
@ -11,8 +11,8 @@ import (
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/gcsstore"
|
"github.com/tus/tusd/v2/pkg/gcsstore"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate mockgen -destination=./gcsstore_mock_test.go -package=gcsstore_test github.com/tus/tusd/pkg/gcsstore GCSReader,GCSAPI
|
//go:generate mockgen -destination=./gcsstore_mock_test.go -package=gcsstore_test github.com/tus/tusd/pkg/gcsstore GCSReader,GCSAPI
|
||||||
|
|
|
@ -14,13 +14,15 @@ import (
|
||||||
// In addition, the bodyReader keeps track of how many bytes were read.
|
// In addition, the bodyReader keeps track of how many bytes were read.
|
||||||
type bodyReader struct {
|
type bodyReader struct {
|
||||||
reader io.Reader
|
reader io.Reader
|
||||||
|
closer io.Closer
|
||||||
err error
|
err error
|
||||||
bytesCounter int64
|
bytesCounter int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func newBodyReader(r io.Reader) *bodyReader {
|
func newBodyReader(r io.ReadCloser, maxSize int64) *bodyReader {
|
||||||
return &bodyReader{
|
return &bodyReader{
|
||||||
reader: r,
|
reader: io.LimitReader(r, maxSize),
|
||||||
|
closer: r,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -29,6 +31,10 @@ func (r *bodyReader) Read(b []byte) (int, error) {
|
||||||
return 0, io.EOF
|
return 0, io.EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Mask certain errors that we can safely ignore later on:
|
||||||
|
// io.EOF, io.UnexpectedEOF, io.ErrClosedPipe,
|
||||||
|
// read tcp 127.0.0.1:1080->127.0.0.1:56953: read: connection reset by peer,
|
||||||
|
// read tcp 127.0.0.1:1080->127.0.0.1:9375: i/o timeout
|
||||||
n, err := r.reader.Read(b)
|
n, err := r.reader.Read(b)
|
||||||
atomic.AddInt64(&r.bytesCounter, int64(n))
|
atomic.AddInt64(&r.bytesCounter, int64(n))
|
||||||
r.err = err
|
r.err = err
|
||||||
|
@ -51,3 +57,8 @@ func (r bodyReader) hasError() error {
|
||||||
func (r *bodyReader) bytesRead() int64 {
|
func (r *bodyReader) bytesRead() int64 {
|
||||||
return atomic.LoadInt64(&r.bytesCounter)
|
return atomic.LoadInt64(&r.bytesCounter)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *bodyReader) closeWithError(err error) {
|
||||||
|
r.closer.Close()
|
||||||
|
r.err = err
|
||||||
|
}
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/tus/tusd/pkg/filestore"
|
"github.com/tus/tusd/v2/pkg/filestore"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
"github.com/tus/tusd/pkg/memorylocker"
|
"github.com/tus/tusd/v2/pkg/memorylocker"
|
||||||
)
|
)
|
||||||
|
|
||||||
func ExampleNewStoreComposer() {
|
func ExampleNewStoreComposer() {
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -9,7 +8,7 @@ import (
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestConcat(t *testing.T) {
|
func TestConcat(t *testing.T) {
|
||||||
|
@ -38,14 +37,14 @@ func TestConcat(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
IsPartial: true,
|
IsPartial: true,
|
||||||
IsFinal: false,
|
IsFinal: false,
|
||||||
PartialUploads: nil,
|
PartialUploads: nil,
|
||||||
MetaData: make(map[string]string),
|
MetaData: make(map[string]string),
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
IsPartial: true,
|
IsPartial: true,
|
||||||
|
@ -77,8 +76,8 @@ func TestConcat(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
IsPartial: true,
|
IsPartial: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
|
@ -114,26 +113,26 @@ func TestConcat(t *testing.T) {
|
||||||
uploadC := NewMockFullUpload(ctrl)
|
uploadC := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "a").Return(uploadA, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "a").Return(uploadA, nil),
|
||||||
uploadA.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
uploadA.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
IsPartial: true,
|
IsPartial: true,
|
||||||
Size: 5,
|
Size: 5,
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().GetUpload(context.Background(), "b").Return(uploadB, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "b").Return(uploadB, nil),
|
||||||
uploadB.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
uploadB.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
IsPartial: true,
|
IsPartial: true,
|
||||||
Size: 5,
|
Size: 5,
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 10,
|
Size: 10,
|
||||||
IsPartial: false,
|
IsPartial: false,
|
||||||
IsFinal: true,
|
IsFinal: true,
|
||||||
PartialUploads: []string{"a", "b"},
|
PartialUploads: []string{"a", "b"},
|
||||||
MetaData: make(map[string]string),
|
MetaData: make(map[string]string),
|
||||||
}).Return(uploadC, nil),
|
}).Return(uploadC, nil),
|
||||||
uploadC.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
uploadC.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 10,
|
Size: 10,
|
||||||
IsPartial: false,
|
IsPartial: false,
|
||||||
|
@ -142,7 +141,7 @@ func TestConcat(t *testing.T) {
|
||||||
MetaData: make(map[string]string),
|
MetaData: make(map[string]string),
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsConcatableUpload(uploadC).Return(uploadC),
|
store.EXPECT().AsConcatableUpload(uploadC).Return(uploadC),
|
||||||
uploadC.EXPECT().ConcatUploads(context.Background(), []Upload{uploadA, uploadB}).Return(nil),
|
uploadC.EXPECT().ConcatUploads(gomock.Any(), []Upload{uploadA, uploadB}).Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -188,8 +187,8 @@ func TestConcat(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
IsFinal: true,
|
IsFinal: true,
|
||||||
PartialUploads: []string{"a", "b"},
|
PartialUploads: []string{"a", "b"},
|
||||||
|
@ -226,8 +225,8 @@ func TestConcat(t *testing.T) {
|
||||||
// This upload is still unfinished (mismatching offset and size) and
|
// This upload is still unfinished (mismatching offset and size) and
|
||||||
// will therefore cause the POST request to fail.
|
// will therefore cause the POST request to fail.
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "c").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "c").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "c",
|
ID: "c",
|
||||||
IsPartial: true,
|
IsPartial: true,
|
||||||
Size: 5,
|
Size: 5,
|
||||||
|
@ -256,8 +255,8 @@ func TestConcat(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "huge").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "huge").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "huge",
|
ID: "huge",
|
||||||
Size: 1000,
|
Size: 1000,
|
||||||
Offset: 1000,
|
Offset: 1000,
|
||||||
|
@ -286,8 +285,8 @@ func TestConcat(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 10,
|
Size: 10,
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"log"
|
"log"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Config provides a way to configure the Handler depending on your needs.
|
// Config provides a way to configure the Handler depending on your needs.
|
||||||
|
@ -43,6 +44,10 @@ type Config struct {
|
||||||
// NotifyCreatedUploads indicates whether sending notifications about
|
// NotifyCreatedUploads indicates whether sending notifications about
|
||||||
// the upload having been created using the CreatedUploads channel should be enabled.
|
// the upload having been created using the CreatedUploads channel should be enabled.
|
||||||
NotifyCreatedUploads bool
|
NotifyCreatedUploads bool
|
||||||
|
// UploadProgressInterval specifies the interval at which the upload progress
|
||||||
|
// notifications are sent to the UploadProgress channel, if enabled.
|
||||||
|
// Defaults to 1s.
|
||||||
|
UploadProgressInterval time.Duration
|
||||||
// Logger is the logger to use internally, mostly for printing requests.
|
// Logger is the logger to use internally, mostly for printing requests.
|
||||||
Logger *log.Logger
|
Logger *log.Logger
|
||||||
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
|
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
|
||||||
|
@ -50,14 +55,21 @@ type Config struct {
|
||||||
// response to POST requests.
|
// response to POST requests.
|
||||||
RespectForwardedHeaders bool
|
RespectForwardedHeaders bool
|
||||||
// PreUploadCreateCallback will be invoked before a new upload is created, if the
|
// PreUploadCreateCallback will be invoked before a new upload is created, if the
|
||||||
// property is supplied. If the callback returns nil, the upload will be created.
|
// property is supplied. If the callback returns no error, the upload will be created
|
||||||
// Otherwise the HTTP request will be aborted. This can be used to implement
|
// and optional values from HTTPResponse will be contained in the HTTP response.
|
||||||
// validation of upload metadata etc.
|
// If the error is non-nil, the upload will not be created. This can be used to implement
|
||||||
PreUploadCreateCallback func(hook HookEvent) error
|
// validation of upload metadata etc. Furthermore, HTTPResponse will be ignored and
|
||||||
|
// the error value can contain values for the HTTP response.
|
||||||
|
// If the error is nil, FileInfoChanges can be filled out to specify individual properties
|
||||||
|
// that should be overwriten before the upload is create. See its type definition for
|
||||||
|
// more details on its behavior. If you do not want to make any changes, return an empty struct.
|
||||||
|
PreUploadCreateCallback func(hook HookEvent) (HTTPResponse, FileInfoChanges, error)
|
||||||
// PreFinishResponseCallback will be invoked after an upload is completed but before
|
// PreFinishResponseCallback will be invoked after an upload is completed but before
|
||||||
// a response is returned to the client. Error responses from the callback will be passed
|
// a response is returned to the client. This can be used to implement post-processing validation.
|
||||||
// back to the client. This can be used to implement post-processing validation.
|
// If the callback returns no error, optional values from HTTPResponse will be contained in the HTTP response.
|
||||||
PreFinishResponseCallback func(hook HookEvent) error
|
// If the error is non-nil, the error will be forwarded to the client. Furthermore,
|
||||||
|
// HTTPResponse will be ignored and the error value can contain values for the HTTP response.
|
||||||
|
PreFinishResponseCallback func(hook HookEvent) (HTTPResponse, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *Config) validate() error {
|
func (config *Config) validate() error {
|
||||||
|
@ -91,5 +103,9 @@ func (config *Config) validate() error {
|
||||||
return errors.New("tusd: StoreComposer in Config needs to contain a non-nil core")
|
return errors.New("tusd: StoreComposer in Config needs to contain a non-nil core")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if config.UploadProgressInterval <= 0 {
|
||||||
|
config.UploadProgressInterval = 1 * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,28 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// httpContext is wrapper around context.Context that also carries the
|
||||||
|
// corresponding HTTP request and response writer, as well as an
|
||||||
|
// optional body reader
|
||||||
|
// TODO: Consider including HTTPResponse as well
|
||||||
|
type httpContext struct {
|
||||||
|
context.Context
|
||||||
|
|
||||||
|
res http.ResponseWriter
|
||||||
|
req *http.Request
|
||||||
|
body *bodyReader
|
||||||
|
}
|
||||||
|
|
||||||
|
func newContext(w http.ResponseWriter, r *http.Request) *httpContext {
|
||||||
|
return &httpContext{
|
||||||
|
// TODO: Try to reuse the request's context in the future
|
||||||
|
Context: context.Background(),
|
||||||
|
res: w,
|
||||||
|
req: r,
|
||||||
|
body: nil, // body can be filled later for PATCH requests
|
||||||
|
}
|
||||||
|
}
|
|
@ -5,7 +5,7 @@ import (
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCORS(t *testing.T) {
|
func TestCORS(t *testing.T) {
|
||||||
|
|
|
@ -7,7 +7,9 @@ import (
|
||||||
|
|
||||||
type MetaData map[string]string
|
type MetaData map[string]string
|
||||||
|
|
||||||
|
// FileInfo contains information about a single upload resource.
|
||||||
type FileInfo struct {
|
type FileInfo struct {
|
||||||
|
// ID is the unique identifier of the upload resource.
|
||||||
ID string
|
ID string
|
||||||
// Total file size in bytes specified in the NewUpload call
|
// Total file size in bytes specified in the NewUpload call
|
||||||
Size int64
|
Size int64
|
||||||
|
@ -41,12 +43,41 @@ type FileInfo struct {
|
||||||
// more data. Furthermore, a response is sent to notify the client of the
|
// more data. Furthermore, a response is sent to notify the client of the
|
||||||
// interrupting and the upload is terminated (if supported by the data store),
|
// interrupting and the upload is terminated (if supported by the data store),
|
||||||
// so the upload cannot be resumed anymore.
|
// so the upload cannot be resumed anymore.
|
||||||
|
// TODO: Allow passing in a HTTP Response
|
||||||
func (f FileInfo) StopUpload() {
|
func (f FileInfo) StopUpload() {
|
||||||
if f.stopUpload != nil {
|
if f.stopUpload != nil {
|
||||||
f.stopUpload()
|
f.stopUpload()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FileInfoChanges collects changes the should be made to a FileInfo struct. This
|
||||||
|
// can be done using the PreUploadCreateCallback to modify certain properties before
|
||||||
|
// an upload is created. Properties which should not be modified (e.g. Size or Offset)
|
||||||
|
// are intentionally left out here.
|
||||||
|
type FileInfoChanges struct {
|
||||||
|
// If ID is not empty, it will be passed to the data store, allowing
|
||||||
|
// hooks to influence the upload ID. Be aware that a data store is not required to
|
||||||
|
// respect a pre-defined upload ID and might overwrite or modify it. However,
|
||||||
|
// all data stores in the github.com/tus/tusd package do respect pre-defined IDs.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// If MetaData is not nil, it replaces the entire user-defined meta data from
|
||||||
|
// the upload creation request. You can add custom meta data fields this way
|
||||||
|
// or ensure that only certain fields from the user-defined meta data are saved.
|
||||||
|
// If you want to retain only specific entries from the user-defined meta data, you must
|
||||||
|
// manually copy them into this MetaData field.
|
||||||
|
// If you do not want to store any meta data, set this field to an empty map (`MetaData{}`).
|
||||||
|
// If you want to keep the entire user-defined meta data, set this field to nil.
|
||||||
|
MetaData MetaData
|
||||||
|
|
||||||
|
// If Storage is not nil, it is passed to the data store to allow for minor adjustments
|
||||||
|
// to the upload storage (e.g. destination file name). The details are specific for each
|
||||||
|
// data store and should be looked up in their respective documentation.
|
||||||
|
// Please be aware that this behavior is currently not supported by any data store in
|
||||||
|
// the github.com/tus/tusd package.
|
||||||
|
Storage map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
type Upload interface {
|
type Upload interface {
|
||||||
// Write the chunk read from src into the file specified by the id at the
|
// Write the chunk read from src into the file specified by the id at the
|
||||||
// given offset. The handler will take care of validating the offset and
|
// given offset. The handler will take care of validating the offset and
|
||||||
|
@ -60,14 +91,12 @@ type Upload interface {
|
||||||
// requests. It may return an os.ErrNotExist which will be interpreted as a
|
// requests. It may return an os.ErrNotExist which will be interpreted as a
|
||||||
// 404 Not Found.
|
// 404 Not Found.
|
||||||
GetInfo(ctx context.Context) (FileInfo, error)
|
GetInfo(ctx context.Context) (FileInfo, error)
|
||||||
// GetReader returns a reader which allows iterating of the content of an
|
// GetReader returns an io.ReadCloser which allows iterating of the content of an
|
||||||
// upload specified by its ID. It should attempt to provide a reader even if
|
// upload specified by its ID. It should attempt to provide a reader even if
|
||||||
// the upload has not been finished yet but it's not required.
|
// the upload has not been finished yet but it's not required.
|
||||||
// If the returned reader also implements the io.Closer interface, the
|
|
||||||
// Close() method will be invoked once everything has been read.
|
|
||||||
// If the given upload could not be found, the error tusd.ErrNotFound should
|
// If the given upload could not be found, the error tusd.ErrNotFound should
|
||||||
// be returned.
|
// be returned.
|
||||||
GetReader(ctx context.Context) (io.Reader, error)
|
GetReader(ctx context.Context) (io.ReadCloser, error)
|
||||||
// FinisherDataStore is the interface which can be implemented by DataStores
|
// FinisherDataStore is the interface which can be implemented by DataStores
|
||||||
// which need to do additional operations once an entire upload has been
|
// which need to do additional operations once an entire upload has been
|
||||||
// completed. These tasks may include but are not limited to freeing unused
|
// completed. These tasks may include but are not limited to freeing unused
|
||||||
|
@ -146,11 +175,15 @@ type Locker interface {
|
||||||
type Lock interface {
|
type Lock interface {
|
||||||
// Lock attempts to obtain an exclusive lock for the upload specified
|
// Lock attempts to obtain an exclusive lock for the upload specified
|
||||||
// by its id.
|
// by its id.
|
||||||
// If this operation fails because the resource is already locked, the
|
// If the lock can be acquired, it will return without error. The requestUnlock
|
||||||
// tusd.ErrFileLocked must be returned. If no error is returned, the attempt
|
// callback is invoked when another caller attempts to create a lock. In this
|
||||||
// is consider to be successful and the upload to be locked until UnlockUpload
|
// case, the holder of the lock should attempt to release the lock as soon
|
||||||
// is invoked for the same upload.
|
// as possible
|
||||||
Lock() error
|
// If the lock is already held, the holder's requestUnlock function will be
|
||||||
|
// invoked to request the lock to be released. If the context is cancelled before
|
||||||
|
// the lock can be acquired, ErrLockTimeout will be returned without acquiring
|
||||||
|
// the lock.
|
||||||
|
Lock(ctx context.Context, requestUnlock func()) error
|
||||||
// Unlock releases an existing lock for the given upload.
|
// Unlock releases an existing lock for the given upload.
|
||||||
Unlock() error
|
Unlock() error
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,34 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
// Error represents an error with the intent to be sent in the HTTP
|
||||||
|
// response to the client. Therefore, it also contains a HTTPResponse,
|
||||||
|
// next to an error code and error message.
|
||||||
|
// TODO: Error is not comparable anymore because HTTPResponse
|
||||||
|
// contains a map. See if we should change this.
|
||||||
|
type Error struct {
|
||||||
|
ErrorCode string
|
||||||
|
Message string
|
||||||
|
HTTPResponse HTTPResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e Error) Error() string {
|
||||||
|
return e.ErrorCode + ": " + e.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewError constructs a new Error object with the given error code and message.
|
||||||
|
// The corresponding HTTP response will have the provided status code
|
||||||
|
// and a body consisting of the error details.
|
||||||
|
// responses. See the net/http package for standardized status codes.
|
||||||
|
func NewError(errCode string, message string, statusCode int) Error {
|
||||||
|
return Error{
|
||||||
|
ErrorCode: errCode,
|
||||||
|
Message: message,
|
||||||
|
HTTPResponse: HTTPResponse{
|
||||||
|
StatusCode: statusCode,
|
||||||
|
Body: errCode + ": " + message + "\n",
|
||||||
|
Headers: HTTPHeaders{
|
||||||
|
"Content-Type": "text/plain; charset=utf-8",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,13 +1,12 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
type closingStringReader struct {
|
type closingStringReader struct {
|
||||||
|
@ -34,9 +33,9 @@ func TestGet(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -44,7 +43,7 @@ func TestGet(t *testing.T) {
|
||||||
"filetype": "image/jpeg",
|
"filetype": "image/jpeg",
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().GetReader(context.Background()).Return(reader, nil),
|
upload.EXPECT().GetReader(gomock.Any()).Return(reader, nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -79,8 +78,8 @@ func TestGet(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
}, nil),
|
}, nil),
|
||||||
)
|
)
|
||||||
|
@ -107,8 +106,8 @@ func TestGet(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
"filetype": "non-a-valid-mime-type",
|
"filetype": "non-a-valid-mime-type",
|
||||||
|
@ -139,8 +138,8 @@ func TestGet(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.v1",
|
"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.v1",
|
||||||
|
|
|
@ -6,51 +6,79 @@ package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
handler "github.com/tus/tusd/pkg/handler"
|
|
||||||
io "io"
|
io "io"
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
handler "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockFullDataStore is a mock of FullDataStore interface
|
// MockFullDataStore is a mock of FullDataStore interface.
|
||||||
type MockFullDataStore struct {
|
type MockFullDataStore struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockFullDataStoreMockRecorder
|
recorder *MockFullDataStoreMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockFullDataStoreMockRecorder is the mock recorder for MockFullDataStore
|
// MockFullDataStoreMockRecorder is the mock recorder for MockFullDataStore.
|
||||||
type MockFullDataStoreMockRecorder struct {
|
type MockFullDataStoreMockRecorder struct {
|
||||||
mock *MockFullDataStore
|
mock *MockFullDataStore
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockFullDataStore creates a new mock instance
|
// NewMockFullDataStore creates a new mock instance.
|
||||||
func NewMockFullDataStore(ctrl *gomock.Controller) *MockFullDataStore {
|
func NewMockFullDataStore(ctrl *gomock.Controller) *MockFullDataStore {
|
||||||
mock := &MockFullDataStore{ctrl: ctrl}
|
mock := &MockFullDataStore{ctrl: ctrl}
|
||||||
mock.recorder = &MockFullDataStoreMockRecorder{mock}
|
mock.recorder = &MockFullDataStoreMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockFullDataStore) EXPECT() *MockFullDataStoreMockRecorder {
|
func (m *MockFullDataStore) EXPECT() *MockFullDataStoreMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUpload mocks base method
|
// AsConcatableUpload mocks base method.
|
||||||
func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
func (m *MockFullDataStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "NewUpload", ctx, info)
|
ret := m.ctrl.Call(m, "AsConcatableUpload", upload)
|
||||||
ret0, _ := ret[0].(handler.Upload)
|
ret0, _ := ret[0].(handler.ConcatableUpload)
|
||||||
ret1, _ := ret[1].(error)
|
return ret0
|
||||||
return ret0, ret1
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUpload indicates an expected call of NewUpload
|
// AsConcatableUpload indicates an expected call of AsConcatableUpload.
|
||||||
func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
|
func (mr *MockFullDataStoreMockRecorder) AsConcatableUpload(upload interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsConcatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsConcatableUpload), upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUpload mocks base method
|
// AsLengthDeclarableUpload mocks base method.
|
||||||
|
func (m *MockFullDataStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "AsLengthDeclarableUpload", upload)
|
||||||
|
ret0, _ := ret[0].(handler.LengthDeclarableUpload)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsLengthDeclarableUpload indicates an expected call of AsLengthDeclarableUpload.
|
||||||
|
func (mr *MockFullDataStoreMockRecorder) AsLengthDeclarableUpload(upload interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsLengthDeclarableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsLengthDeclarableUpload), upload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsTerminatableUpload mocks base method.
|
||||||
|
func (m *MockFullDataStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "AsTerminatableUpload", upload)
|
||||||
|
ret0, _ := ret[0].(handler.TerminatableUpload)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsTerminatableUpload indicates an expected call of AsTerminatableUpload.
|
||||||
|
func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsTerminatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsTerminatableUpload), upload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUpload mocks base method.
|
||||||
func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
|
func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetUpload", ctx, id)
|
ret := m.ctrl.Call(m, "GetUpload", ctx, id)
|
||||||
|
@ -59,93 +87,93 @@ func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.U
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUpload indicates an expected call of GetUpload
|
// GetUpload indicates an expected call of GetUpload.
|
||||||
func (mr *MockFullDataStoreMockRecorder) GetUpload(ctx, id interface{}) *gomock.Call {
|
func (mr *MockFullDataStoreMockRecorder) GetUpload(ctx, id interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), ctx, id)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), ctx, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsTerminatableUpload mocks base method
|
// NewUpload mocks base method.
|
||||||
func (m *MockFullDataStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
|
func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "AsTerminatableUpload", upload)
|
ret := m.ctrl.Call(m, "NewUpload", ctx, info)
|
||||||
ret0, _ := ret[0].(handler.TerminatableUpload)
|
ret0, _ := ret[0].(handler.Upload)
|
||||||
return ret0
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsTerminatableUpload indicates an expected call of AsTerminatableUpload
|
// NewUpload indicates an expected call of NewUpload.
|
||||||
func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}) *gomock.Call {
|
func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsTerminatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsTerminatableUpload), upload)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsConcatableUpload mocks base method
|
// MockFullUpload is a mock of FullUpload interface.
|
||||||
func (m *MockFullDataStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "AsConcatableUpload", upload)
|
|
||||||
ret0, _ := ret[0].(handler.ConcatableUpload)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsConcatableUpload indicates an expected call of AsConcatableUpload
|
|
||||||
func (mr *MockFullDataStoreMockRecorder) AsConcatableUpload(upload interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsConcatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsConcatableUpload), upload)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsLengthDeclarableUpload mocks base method
|
|
||||||
func (m *MockFullDataStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "AsLengthDeclarableUpload", upload)
|
|
||||||
ret0, _ := ret[0].(handler.LengthDeclarableUpload)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsLengthDeclarableUpload indicates an expected call of AsLengthDeclarableUpload
|
|
||||||
func (mr *MockFullDataStoreMockRecorder) AsLengthDeclarableUpload(upload interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsLengthDeclarableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsLengthDeclarableUpload), upload)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockFullUpload is a mock of FullUpload interface
|
|
||||||
type MockFullUpload struct {
|
type MockFullUpload struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockFullUploadMockRecorder
|
recorder *MockFullUploadMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockFullUploadMockRecorder is the mock recorder for MockFullUpload
|
// MockFullUploadMockRecorder is the mock recorder for MockFullUpload.
|
||||||
type MockFullUploadMockRecorder struct {
|
type MockFullUploadMockRecorder struct {
|
||||||
mock *MockFullUpload
|
mock *MockFullUpload
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockFullUpload creates a new mock instance
|
// NewMockFullUpload creates a new mock instance.
|
||||||
func NewMockFullUpload(ctrl *gomock.Controller) *MockFullUpload {
|
func NewMockFullUpload(ctrl *gomock.Controller) *MockFullUpload {
|
||||||
mock := &MockFullUpload{ctrl: ctrl}
|
mock := &MockFullUpload{ctrl: ctrl}
|
||||||
mock.recorder = &MockFullUploadMockRecorder{mock}
|
mock.recorder = &MockFullUploadMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockFullUpload) EXPECT() *MockFullUploadMockRecorder {
|
func (m *MockFullUpload) EXPECT() *MockFullUploadMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteChunk mocks base method
|
// ConcatUploads mocks base method.
|
||||||
func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
func (m *MockFullUpload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
|
ret := m.ctrl.Call(m, "ConcatUploads", ctx, partialUploads)
|
||||||
ret0, _ := ret[0].(int64)
|
ret0, _ := ret[0].(error)
|
||||||
ret1, _ := ret[1].(error)
|
return ret0
|
||||||
return ret0, ret1
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteChunk indicates an expected call of WriteChunk
|
// ConcatUploads indicates an expected call of ConcatUploads.
|
||||||
func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
|
func (mr *MockFullUploadMockRecorder) ConcatUploads(ctx, partialUploads interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullUpload)(nil).ConcatUploads), ctx, partialUploads)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetInfo mocks base method
|
// DeclareLength mocks base method.
|
||||||
|
func (m *MockFullUpload) DeclareLength(ctx context.Context, length int64) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "DeclareLength", ctx, length)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeclareLength indicates an expected call of DeclareLength.
|
||||||
|
func (mr *MockFullUploadMockRecorder) DeclareLength(ctx, length interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), ctx, length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FinishUpload mocks base method.
|
||||||
|
func (m *MockFullUpload) FinishUpload(ctx context.Context) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "FinishUpload", ctx)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// FinishUpload indicates an expected call of FinishUpload.
|
||||||
|
func (mr *MockFullUploadMockRecorder) FinishUpload(ctx interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload), ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetInfo mocks base method.
|
||||||
func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
|
func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetInfo", ctx)
|
ret := m.ctrl.Call(m, "GetInfo", ctx)
|
||||||
|
@ -154,42 +182,28 @@ func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetInfo indicates an expected call of GetInfo
|
// GetInfo indicates an expected call of GetInfo.
|
||||||
func (mr *MockFullUploadMockRecorder) GetInfo(ctx interface{}) *gomock.Call {
|
func (mr *MockFullUploadMockRecorder) GetInfo(ctx interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo), ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetReader mocks base method
|
// GetReader mocks base method.
|
||||||
func (m *MockFullUpload) GetReader(ctx context.Context) (io.Reader, error) {
|
func (m *MockFullUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetReader", ctx)
|
ret := m.ctrl.Call(m, "GetReader", ctx)
|
||||||
ret0, _ := ret[0].(io.Reader)
|
ret0, _ := ret[0].(io.ReadCloser)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetReader indicates an expected call of GetReader
|
// GetReader indicates an expected call of GetReader.
|
||||||
func (mr *MockFullUploadMockRecorder) GetReader(ctx interface{}) *gomock.Call {
|
func (mr *MockFullUploadMockRecorder) GetReader(ctx interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader), ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FinishUpload mocks base method
|
// Terminate mocks base method.
|
||||||
func (m *MockFullUpload) FinishUpload(ctx context.Context) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "FinishUpload", ctx)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// FinishUpload indicates an expected call of FinishUpload
|
|
||||||
func (mr *MockFullUploadMockRecorder) FinishUpload(ctx interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload), ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Terminate mocks base method
|
|
||||||
func (m *MockFullUpload) Terminate(ctx context.Context) error {
|
func (m *MockFullUpload) Terminate(ctx context.Context) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Terminate", ctx)
|
ret := m.ctrl.Call(m, "Terminate", ctx)
|
||||||
|
@ -197,64 +211,51 @@ func (m *MockFullUpload) Terminate(ctx context.Context) error {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Terminate indicates an expected call of Terminate
|
// Terminate indicates an expected call of Terminate.
|
||||||
func (mr *MockFullUploadMockRecorder) Terminate(ctx interface{}) *gomock.Call {
|
func (mr *MockFullUploadMockRecorder) Terminate(ctx interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate), ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeclareLength mocks base method
|
// WriteChunk mocks base method.
|
||||||
func (m *MockFullUpload) DeclareLength(ctx context.Context, length int64) error {
|
func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "DeclareLength", ctx, length)
|
ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(int64)
|
||||||
return ret0
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeclareLength indicates an expected call of DeclareLength
|
// WriteChunk indicates an expected call of WriteChunk.
|
||||||
func (mr *MockFullUploadMockRecorder) DeclareLength(ctx, length interface{}) *gomock.Call {
|
func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), ctx, length)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConcatUploads mocks base method
|
// MockFullLocker is a mock of FullLocker interface.
|
||||||
func (m *MockFullUpload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "ConcatUploads", ctx, partialUploads)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConcatUploads indicates an expected call of ConcatUploads
|
|
||||||
func (mr *MockFullUploadMockRecorder) ConcatUploads(ctx, partialUploads interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullUpload)(nil).ConcatUploads), ctx, partialUploads)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockFullLocker is a mock of FullLocker interface
|
|
||||||
type MockFullLocker struct {
|
type MockFullLocker struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockFullLockerMockRecorder
|
recorder *MockFullLockerMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockFullLockerMockRecorder is the mock recorder for MockFullLocker
|
// MockFullLockerMockRecorder is the mock recorder for MockFullLocker.
|
||||||
type MockFullLockerMockRecorder struct {
|
type MockFullLockerMockRecorder struct {
|
||||||
mock *MockFullLocker
|
mock *MockFullLocker
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockFullLocker creates a new mock instance
|
// NewMockFullLocker creates a new mock instance.
|
||||||
func NewMockFullLocker(ctrl *gomock.Controller) *MockFullLocker {
|
func NewMockFullLocker(ctrl *gomock.Controller) *MockFullLocker {
|
||||||
mock := &MockFullLocker{ctrl: ctrl}
|
mock := &MockFullLocker{ctrl: ctrl}
|
||||||
mock.recorder = &MockFullLockerMockRecorder{mock}
|
mock.recorder = &MockFullLockerMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockFullLocker) EXPECT() *MockFullLockerMockRecorder {
|
func (m *MockFullLocker) EXPECT() *MockFullLockerMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewLock mocks base method
|
// NewLock mocks base method.
|
||||||
func (m *MockFullLocker) NewLock(id string) (handler.Lock, error) {
|
func (m *MockFullLocker) NewLock(id string) (handler.Lock, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "NewLock", id)
|
ret := m.ctrl.Call(m, "NewLock", id)
|
||||||
|
@ -263,50 +264,50 @@ func (m *MockFullLocker) NewLock(id string) (handler.Lock, error) {
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewLock indicates an expected call of NewLock
|
// NewLock indicates an expected call of NewLock.
|
||||||
func (mr *MockFullLockerMockRecorder) NewLock(id interface{}) *gomock.Call {
|
func (mr *MockFullLockerMockRecorder) NewLock(id interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLock", reflect.TypeOf((*MockFullLocker)(nil).NewLock), id)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLock", reflect.TypeOf((*MockFullLocker)(nil).NewLock), id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockFullLock is a mock of FullLock interface
|
// MockFullLock is a mock of FullLock interface.
|
||||||
type MockFullLock struct {
|
type MockFullLock struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockFullLockMockRecorder
|
recorder *MockFullLockMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockFullLockMockRecorder is the mock recorder for MockFullLock
|
// MockFullLockMockRecorder is the mock recorder for MockFullLock.
|
||||||
type MockFullLockMockRecorder struct {
|
type MockFullLockMockRecorder struct {
|
||||||
mock *MockFullLock
|
mock *MockFullLock
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockFullLock creates a new mock instance
|
// NewMockFullLock creates a new mock instance.
|
||||||
func NewMockFullLock(ctrl *gomock.Controller) *MockFullLock {
|
func NewMockFullLock(ctrl *gomock.Controller) *MockFullLock {
|
||||||
mock := &MockFullLock{ctrl: ctrl}
|
mock := &MockFullLock{ctrl: ctrl}
|
||||||
mock.recorder = &MockFullLockMockRecorder{mock}
|
mock.recorder = &MockFullLockMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockFullLock) EXPECT() *MockFullLockMockRecorder {
|
func (m *MockFullLock) EXPECT() *MockFullLockMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lock mocks base method
|
// Lock mocks base method.
|
||||||
func (m *MockFullLock) Lock() error {
|
func (m *MockFullLock) Lock(ctx context.Context, requestUnlock func()) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Lock")
|
ret := m.ctrl.Call(m, "Lock", ctx, requestUnlock)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lock indicates an expected call of Lock
|
// Lock indicates an expected call of Lock.
|
||||||
func (mr *MockFullLockMockRecorder) Lock() *gomock.Call {
|
func (mr *MockFullLockMockRecorder) Lock(ctx, requestUnlock interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockFullLock)(nil).Lock))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockFullLock)(nil).Lock), ctx, requestUnlock)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unlock mocks base method
|
// Unlock mocks base method.
|
||||||
func (m *MockFullLock) Unlock() error {
|
func (m *MockFullLock) Unlock() error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "Unlock")
|
ret := m.ctrl.Call(m, "Unlock")
|
||||||
|
@ -314,7 +315,7 @@ func (m *MockFullLock) Unlock() error {
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unlock indicates an expected call of Unlock
|
// Unlock indicates an expected call of Unlock.
|
||||||
func (mr *MockFullLockMockRecorder) Unlock() *gomock.Call {
|
func (mr *MockFullLockMockRecorder) Unlock() *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockFullLock)(nil).Unlock))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockFullLock)(nil).Unlock))
|
||||||
|
|
|
@ -1,12 +1,11 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestHead(t *testing.T) {
|
func TestHead(t *testing.T) {
|
||||||
|
@ -19,9 +18,9 @@ func TestHead(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
Offset: 11,
|
Offset: 11,
|
||||||
Size: 44,
|
Size: 44,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -64,7 +63,7 @@ func TestHead(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
||||||
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, ErrNotFound)
|
store.EXPECT().GetUpload(gomock.Any(), "no").Return(nil, ErrNotFound)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
StoreComposer: composer,
|
StoreComposer: composer,
|
||||||
|
@ -76,10 +75,8 @@ func TestHead(t *testing.T) {
|
||||||
ReqHeader: map[string]string{
|
ReqHeader: map[string]string{
|
||||||
"Tus-Resumable": "1.0.0",
|
"Tus-Resumable": "1.0.0",
|
||||||
},
|
},
|
||||||
Code: http.StatusNotFound,
|
Code: http.StatusNotFound,
|
||||||
ResHeader: map[string]string{
|
ResHeader: map[string]string{},
|
||||||
"Content-Length": "0",
|
|
||||||
},
|
|
||||||
}).Run(handler, t)
|
}).Run(handler, t)
|
||||||
|
|
||||||
if res.Body.String() != "" {
|
if res.Body.String() != "" {
|
||||||
|
@ -93,8 +90,8 @@ func TestHead(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
}, nil),
|
}, nil),
|
||||||
|
@ -123,8 +120,8 @@ func TestHead(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
SizeIsDeferred: false,
|
SizeIsDeferred: false,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
// HookEvent represents an event from tusd which can be handled by the application.
|
||||||
|
type HookEvent struct {
|
||||||
|
// Upload contains information about the upload that caused this hook
|
||||||
|
// to be fired.
|
||||||
|
Upload FileInfo
|
||||||
|
// HTTPRequest contains details about the HTTP request that reached
|
||||||
|
// tusd.
|
||||||
|
HTTPRequest HTTPRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHookEvent(info FileInfo, r *http.Request) HookEvent {
|
||||||
|
// The Host header field is not present in the header map, see https://pkg.go.dev/net/http#Request:
|
||||||
|
// > For incoming requests, the Host header is promoted to the
|
||||||
|
// > Request.Host field and removed from the Header map.
|
||||||
|
// That's why we add it back manually.
|
||||||
|
r.Header.Set("Host", r.Host)
|
||||||
|
|
||||||
|
return HookEvent{
|
||||||
|
Upload: info,
|
||||||
|
HTTPRequest: HTTPRequest{
|
||||||
|
Method: r.Method,
|
||||||
|
URI: r.RequestURI,
|
||||||
|
RemoteAddr: r.RemoteAddr,
|
||||||
|
Header: r.Header,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,80 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPRequest contains basic details of an incoming HTTP request.
|
||||||
|
type HTTPRequest struct {
|
||||||
|
// Method is the HTTP method, e.g. POST or PATCH.
|
||||||
|
Method string
|
||||||
|
// URI is the full HTTP request URI, e.g. /files/fooo.
|
||||||
|
URI string
|
||||||
|
// RemoteAddr contains the network address that sent the request.
|
||||||
|
RemoteAddr string
|
||||||
|
// Header contains all HTTP headers as present in the HTTP request.
|
||||||
|
Header http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
type HTTPHeaders map[string]string
|
||||||
|
|
||||||
|
// HTTPResponse contains basic details of an outgoing HTTP response.
|
||||||
|
type HTTPResponse struct {
|
||||||
|
// StatusCode is status code, e.g. 200 or 400.
|
||||||
|
StatusCode int
|
||||||
|
// Body is the response body.
|
||||||
|
Body string
|
||||||
|
// Headers contains additional HTTP headers for the response.
|
||||||
|
// TODO: Uniform naming with HTTPRequest.Header
|
||||||
|
Headers HTTPHeaders
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeTo writes the HTTP response into w, as specified by the fields in resp.
|
||||||
|
func (resp HTTPResponse) writeTo(w http.ResponseWriter) {
|
||||||
|
headers := w.Header()
|
||||||
|
for key, value := range resp.Headers {
|
||||||
|
headers.Set(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(resp.Body) > 0 {
|
||||||
|
headers.Set("Content-Length", strconv.Itoa(len(resp.Body)))
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(resp.StatusCode)
|
||||||
|
|
||||||
|
if len(resp.Body) > 0 {
|
||||||
|
w.Write([]byte(resp.Body))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeWith returns a copy of resp1, where non-default values from resp2 overwrite
|
||||||
|
// values from resp1.
|
||||||
|
func (resp1 HTTPResponse) MergeWith(resp2 HTTPResponse) HTTPResponse {
|
||||||
|
// Clone the response 1 and use it as a basis
|
||||||
|
newResp := resp1
|
||||||
|
|
||||||
|
// Take the status code and body from response 2 to
|
||||||
|
// overwrite values from response 1.
|
||||||
|
if resp2.StatusCode != 0 {
|
||||||
|
newResp.StatusCode = resp2.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(resp2.Body) > 0 {
|
||||||
|
newResp.Body = resp2.Body
|
||||||
|
}
|
||||||
|
|
||||||
|
// For the headers, me must make a new map to avoid writing
|
||||||
|
// into the header map from response 1.
|
||||||
|
newResp.Headers = make(HTTPHeaders, len(resp1.Headers)+len(resp2.Headers))
|
||||||
|
|
||||||
|
for key, value := range resp1.Headers {
|
||||||
|
newResp.Headers[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, value := range resp2.Headers {
|
||||||
|
newResp.Headers[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
return newResp
|
||||||
|
}
|
|
@ -1,7 +1,6 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
)
|
)
|
||||||
|
@ -30,8 +29,9 @@ func (m Metrics) incRequestsTotal(method string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Rework to only store error code
|
||||||
// incErrorsTotal increases the counter for this error atomically by one.
|
// incErrorsTotal increases the counter for this error atomically by one.
|
||||||
func (m Metrics) incErrorsTotal(err HTTPError) {
|
func (m Metrics) incErrorsTotal(err Error) {
|
||||||
ptr := m.ErrorsTotal.retrievePointerFor(err)
|
ptr := m.ErrorsTotal.retrievePointerFor(err)
|
||||||
atomic.AddUint64(ptr, 1)
|
atomic.AddUint64(ptr, 1)
|
||||||
}
|
}
|
||||||
|
@ -78,23 +78,16 @@ func newMetrics() Metrics {
|
||||||
// ErrorsTotalMap stores the counters for the different HTTP errors.
|
// ErrorsTotalMap stores the counters for the different HTTP errors.
|
||||||
type ErrorsTotalMap struct {
|
type ErrorsTotalMap struct {
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
counter map[simpleHTTPError]*uint64
|
counter map[ErrorsTotalMapEntry]*uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
type simpleHTTPError struct {
|
type ErrorsTotalMapEntry struct {
|
||||||
Message string
|
ErrorCode string
|
||||||
StatusCode int
|
StatusCode int
|
||||||
}
|
}
|
||||||
|
|
||||||
func simplifyHTTPError(err HTTPError) simpleHTTPError {
|
|
||||||
return simpleHTTPError{
|
|
||||||
Message: err.Error(),
|
|
||||||
StatusCode: err.StatusCode(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newErrorsTotalMap() *ErrorsTotalMap {
|
func newErrorsTotalMap() *ErrorsTotalMap {
|
||||||
m := make(map[simpleHTTPError]*uint64, 20)
|
m := make(map[ErrorsTotalMapEntry]*uint64, 20)
|
||||||
return &ErrorsTotalMap{
|
return &ErrorsTotalMap{
|
||||||
counter: m,
|
counter: m,
|
||||||
}
|
}
|
||||||
|
@ -102,8 +95,12 @@ func newErrorsTotalMap() *ErrorsTotalMap {
|
||||||
|
|
||||||
// retrievePointerFor returns (after creating it if necessary) the pointer to
|
// retrievePointerFor returns (after creating it if necessary) the pointer to
|
||||||
// the counter for the error.
|
// the counter for the error.
|
||||||
func (e *ErrorsTotalMap) retrievePointerFor(err HTTPError) *uint64 {
|
func (e *ErrorsTotalMap) retrievePointerFor(err Error) *uint64 {
|
||||||
serr := simplifyHTTPError(err)
|
serr := ErrorsTotalMapEntry{
|
||||||
|
ErrorCode: err.ErrorCode,
|
||||||
|
StatusCode: err.HTTPResponse.StatusCode,
|
||||||
|
}
|
||||||
|
|
||||||
e.lock.RLock()
|
e.lock.RLock()
|
||||||
ptr, ok := e.counter[serr]
|
ptr, ok := e.counter[serr]
|
||||||
e.lock.RUnlock()
|
e.lock.RUnlock()
|
||||||
|
@ -124,12 +121,11 @@ func (e *ErrorsTotalMap) retrievePointerFor(err HTTPError) *uint64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load retrieves the map of the counter pointers atomically
|
// Load retrieves the map of the counter pointers atomically
|
||||||
func (e *ErrorsTotalMap) Load() map[HTTPError]*uint64 {
|
func (e *ErrorsTotalMap) Load() map[ErrorsTotalMapEntry]*uint64 {
|
||||||
m := make(map[HTTPError]*uint64, len(e.counter))
|
m := make(map[ErrorsTotalMapEntry]*uint64, len(e.counter))
|
||||||
e.lock.RLock()
|
e.lock.RLock()
|
||||||
for err, ptr := range e.counter {
|
for err, ptr := range e.counter {
|
||||||
httpErr := NewHTTPError(errors.New(err.Message), err.StatusCode)
|
m[err] = ptr
|
||||||
m[httpErr] = ptr
|
|
||||||
}
|
}
|
||||||
e.lock.RUnlock()
|
e.lock.RUnlock()
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestOptions(t *testing.T) {
|
func TestOptions(t *testing.T) {
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -13,7 +12,7 @@ import (
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestPatch(t *testing.T) {
|
func TestPatch(t *testing.T) {
|
||||||
|
@ -23,14 +22,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
upload.EXPECT().FinishUpload(context.Background()),
|
upload.EXPECT().FinishUpload(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -75,14 +74,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
upload.EXPECT().FinishUpload(context.Background()),
|
upload.EXPECT().FinishUpload(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -112,8 +111,8 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 20,
|
Offset: 20,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
|
@ -141,7 +140,7 @@ func TestPatch(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
||||||
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, ErrNotFound)
|
store.EXPECT().GetUpload(gomock.Any(), "no").Return(nil, ErrNotFound)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
StoreComposer: composer,
|
StoreComposer: composer,
|
||||||
|
@ -165,8 +164,8 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
}, nil),
|
}, nil),
|
||||||
|
@ -194,8 +193,8 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
|
@ -268,14 +267,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
||||||
upload.EXPECT().FinishUpload(context.Background()),
|
upload.EXPECT().FinishUpload(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -310,17 +309,17 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().DeclareLength(context.Background(), int64(20)),
|
upload.EXPECT().DeclareLength(gomock.Any(), int64(20)),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
||||||
upload.EXPECT().FinishUpload(context.Background()),
|
upload.EXPECT().FinishUpload(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -353,16 +352,16 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 20,
|
Offset: 20,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().DeclareLength(context.Background(), int64(20)),
|
upload.EXPECT().DeclareLength(gomock.Any(), int64(20)),
|
||||||
upload.EXPECT().FinishUpload(context.Background()),
|
upload.EXPECT().FinishUpload(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -392,26 +391,26 @@ func TestPatch(t *testing.T) {
|
||||||
upload2 := NewMockFullUpload(ctrl)
|
upload2 := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload1, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload1, nil),
|
||||||
upload1.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload1.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
|
store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
|
||||||
upload1.EXPECT().DeclareLength(context.Background(), int64(20)),
|
upload1.EXPECT().DeclareLength(gomock.Any(), int64(20)),
|
||||||
upload1.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload1.EXPECT().WriteChunk(gomock.Any(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
|
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload2, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload2, nil),
|
||||||
upload2.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload2.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 10,
|
Offset: 10,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
SizeIsDeferred: false,
|
SizeIsDeferred: false,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload2.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
|
upload2.EXPECT().WriteChunk(gomock.Any(), int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
|
||||||
upload2.EXPECT().FinishUpload(context.Background()),
|
upload2.EXPECT().FinishUpload(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -460,14 +459,14 @@ func TestPatch(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -500,13 +499,13 @@ func TestPatch(t *testing.T) {
|
||||||
// We simulate that the upload has already an offset of 10 bytes. Therefore, the progress notifications
|
// We simulate that the upload has already an offset of 10 bytes. Therefore, the progress notifications
|
||||||
// must be the sum of the exisiting offset and the newly read bytes.
|
// must be the sum of the exisiting offset and the newly read bytes.
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 10,
|
Offset: 10,
|
||||||
Size: 100,
|
Size: 100,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("first second third")).Return(int64(18), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(10), NewReaderMatcher("first second third")).Return(int64(18), nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -574,15 +573,15 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
Size: 100,
|
Size: 100,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
|
||||||
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().Terminate(context.Background()),
|
upload.EXPECT().Terminate(gomock.Any()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -629,7 +628,7 @@ func TestPatch(t *testing.T) {
|
||||||
ResHeader: map[string]string{
|
ResHeader: map[string]string{
|
||||||
"Upload-Offset": "",
|
"Upload-Offset": "",
|
||||||
},
|
},
|
||||||
ResBody: "upload has been stopped by server\n",
|
ResBody: "ERR_UPLOAD_STOPPED: upload has been stopped by server\n",
|
||||||
}).Run(handler, t)
|
}).Run(handler, t)
|
||||||
|
|
||||||
_, more := <-c
|
_, more := <-c
|
||||||
|
@ -644,14 +643,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
Size: 100,
|
Size: 100,
|
||||||
}, nil),
|
}, nil),
|
||||||
// The reader for WriteChunk must not return an error.
|
// The reader for WriteChunk must not return an error.
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -680,7 +679,60 @@ func TestPatch(t *testing.T) {
|
||||||
ResHeader: map[string]string{
|
ResHeader: map[string]string{
|
||||||
"Upload-Offset": "",
|
"Upload-Offset": "",
|
||||||
},
|
},
|
||||||
ResBody: "an error while reading the body\n",
|
ResBody: "ERR_INTERNAL_SERVER_ERROR: an error while reading the body\n",
|
||||||
|
}).Run(handler, t)
|
||||||
|
})
|
||||||
|
|
||||||
|
SubTest(t, "InterruptRequestHandling", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
defer ctrl.Finish()
|
||||||
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil),
|
||||||
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
|
ID: "yes",
|
||||||
|
Offset: 0,
|
||||||
|
Size: 100,
|
||||||
|
}, nil),
|
||||||
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
handler, _ := NewHandler(Config{
|
||||||
|
StoreComposer: composer,
|
||||||
|
})
|
||||||
|
|
||||||
|
reader, writer := io.Pipe()
|
||||||
|
a := assert.New(t)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
writer.Write([]byte("first "))
|
||||||
|
|
||||||
|
handler.InterruptRequestHandling()
|
||||||
|
|
||||||
|
// Wait a short time to ensure that the goroutine in the PATCH
|
||||||
|
// handler has received and processed the stop event.
|
||||||
|
<-time.After(10 * time.Millisecond)
|
||||||
|
|
||||||
|
// Assert that the "request body" has been closed.
|
||||||
|
_, err := writer.Write([]byte("second "))
|
||||||
|
a.Equal(err, io.ErrClosedPipe)
|
||||||
|
}()
|
||||||
|
|
||||||
|
(&httpTest{
|
||||||
|
Method: "PATCH",
|
||||||
|
URL: "yes",
|
||||||
|
ReqHeader: map[string]string{
|
||||||
|
"Tus-Resumable": "1.0.0",
|
||||||
|
"Content-Type": "application/offset+octet-stream",
|
||||||
|
"Upload-Offset": "0",
|
||||||
|
},
|
||||||
|
ReqBody: reader,
|
||||||
|
Code: http.StatusInternalServerError,
|
||||||
|
ResHeader: map[string]string{
|
||||||
|
"Upload-Offset": "",
|
||||||
|
},
|
||||||
|
ResBody: "ERR_SERVER_SHUTDOWN: request has been interrupted because the server is shutting down\n",
|
||||||
}).Run(handler, t)
|
}).Run(handler, t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,7 +2,6 @@ package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -10,7 +9,7 @@ import (
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestPost(t *testing.T) {
|
func TestPost(t *testing.T) {
|
||||||
|
@ -20,7 +19,7 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
"foo": "hello",
|
"foo": "hello",
|
||||||
|
@ -28,7 +27,7 @@ func TestPost(t *testing.T) {
|
||||||
"empty": "",
|
"empty": "",
|
||||||
},
|
},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -76,16 +75,16 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 0,
|
Size: 0,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 0,
|
Size: 0,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().FinishUpload(context.Background()).Return(nil),
|
upload.EXPECT().FinishUpload(gomock.Any()).Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -211,11 +210,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -248,11 +247,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -286,11 +285,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -326,11 +325,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -363,11 +362,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -405,14 +404,14 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
"foo": "hello",
|
"foo": "hello",
|
||||||
"bar": "world",
|
"bar": "world",
|
||||||
},
|
},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -421,8 +420,8 @@ func TestPost(t *testing.T) {
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
|
||||||
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(gomock.Any(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -458,11 +457,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -492,11 +491,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
store.EXPECT().NewUpload(gomock.Any(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
|
|
@ -3,7 +3,7 @@ package handler_test
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,12 +1,11 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
@ -39,14 +38,14 @@ func TestTerminate(t *testing.T) {
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock(gomock.Any(), gomock.Any()).Return(nil),
|
||||||
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
|
store.EXPECT().GetUpload(gomock.Any(), "foo").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
upload.EXPECT().GetInfo(gomock.Any()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().Terminate(context.Background()).Return(nil),
|
upload.EXPECT().Terminate(gomock.Any()).Return(nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,6 @@ package handler
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"math"
|
"math"
|
||||||
|
@ -24,97 +23,34 @@ var (
|
||||||
reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
|
reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
|
||||||
)
|
)
|
||||||
|
|
||||||
// HTTPError represents an error with an additional status code attached
|
|
||||||
// which may be used when this error is sent in a HTTP response.
|
|
||||||
// See the net/http package for standardized status codes.
|
|
||||||
type HTTPError interface {
|
|
||||||
error
|
|
||||||
StatusCode() int
|
|
||||||
Body() []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type httpError struct {
|
|
||||||
error
|
|
||||||
statusCode int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (err httpError) StatusCode() int {
|
|
||||||
return err.statusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (err httpError) Body() []byte {
|
|
||||||
return []byte(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHTTPError adds the given status code to the provided error and returns
|
|
||||||
// the new error instance. The status code may be used in corresponding HTTP
|
|
||||||
// responses. See the net/http package for standardized status codes.
|
|
||||||
func NewHTTPError(err error, statusCode int) HTTPError {
|
|
||||||
return httpError{err, statusCode}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrUnsupportedVersion = NewHTTPError(errors.New("unsupported version"), http.StatusPreconditionFailed)
|
ErrUnsupportedVersion = NewError("ERR_UNSUPPORTED_VERSION", "missing, invalid or unsupported Tus-Resumable header", http.StatusPreconditionFailed)
|
||||||
ErrMaxSizeExceeded = NewHTTPError(errors.New("maximum size exceeded"), http.StatusRequestEntityTooLarge)
|
ErrMaxSizeExceeded = NewError("ERR_MAX_SIZE_EXCEEDED", "maximum size exceeded", http.StatusRequestEntityTooLarge)
|
||||||
ErrInvalidContentType = NewHTTPError(errors.New("missing or invalid Content-Type header"), http.StatusBadRequest)
|
ErrInvalidContentType = NewError("ERR_INVALID_CONTENT_TYPE", "missing or invalid Content-Type header", http.StatusBadRequest)
|
||||||
ErrInvalidUploadLength = NewHTTPError(errors.New("missing or invalid Upload-Length header"), http.StatusBadRequest)
|
ErrInvalidUploadLength = NewError("ERR_INVALID_UPLOAD_LENGTH", "missing or invalid Upload-Length header", http.StatusBadRequest)
|
||||||
ErrInvalidOffset = NewHTTPError(errors.New("missing or invalid Upload-Offset header"), http.StatusBadRequest)
|
ErrInvalidOffset = NewError("ERR_INVALID_OFFSET", "missing or invalid Upload-Offset header", http.StatusBadRequest)
|
||||||
ErrNotFound = NewHTTPError(errors.New("upload not found"), http.StatusNotFound)
|
ErrNotFound = NewError("ERR_UPLOAD_NOT_FOUND", "upload not found", http.StatusNotFound)
|
||||||
ErrFileLocked = NewHTTPError(errors.New("file currently locked"), 423) // Locked (WebDAV) (RFC 4918)
|
ErrFileLocked = NewError("ERR_UPLOAD_LOCKED", "file currently locked", http.StatusLocked)
|
||||||
ErrMismatchOffset = NewHTTPError(errors.New("mismatched offset"), http.StatusConflict)
|
ErrLockTimeout = NewError("ERR_LOCK_TIMEOUT", "failed to acquire lock before timeout", http.StatusInternalServerError)
|
||||||
ErrSizeExceeded = NewHTTPError(errors.New("resource's size exceeded"), http.StatusRequestEntityTooLarge)
|
ErrMismatchOffset = NewError("ERR_MISMATCHED_OFFSET", "mismatched offset", http.StatusConflict)
|
||||||
ErrNotImplemented = NewHTTPError(errors.New("feature not implemented"), http.StatusNotImplemented)
|
ErrSizeExceeded = NewError("ERR_UPLOAD_SIZE_EXCEEDED", "upload's size exceeded", http.StatusRequestEntityTooLarge)
|
||||||
ErrUploadNotFinished = NewHTTPError(errors.New("one of the partial uploads is not finished"), http.StatusBadRequest)
|
ErrNotImplemented = NewError("ERR_NOT_IMPLEMENTED", "feature not implemented", http.StatusNotImplemented)
|
||||||
ErrInvalidConcat = NewHTTPError(errors.New("invalid Upload-Concat header"), http.StatusBadRequest)
|
ErrUploadNotFinished = NewError("ERR_UPLOAD_NOT_FINISHED", "one of the partial uploads is not finished", http.StatusBadRequest)
|
||||||
ErrModifyFinal = NewHTTPError(errors.New("modifying a final upload is not allowed"), http.StatusForbidden)
|
ErrInvalidConcat = NewError("ERR_INVALID_CONCAT", "invalid Upload-Concat header", http.StatusBadRequest)
|
||||||
ErrUploadLengthAndUploadDeferLength = NewHTTPError(errors.New("provided both Upload-Length and Upload-Defer-Length"), http.StatusBadRequest)
|
ErrModifyFinal = NewError("ERR_MODIFY_FINAL", "modifying a final upload is not allowed", http.StatusForbidden)
|
||||||
ErrInvalidUploadDeferLength = NewHTTPError(errors.New("invalid Upload-Defer-Length header"), http.StatusBadRequest)
|
ErrUploadLengthAndUploadDeferLength = NewError("ERR_AMBIGUOUS_UPLOAD_LENGTH", "provided both Upload-Length and Upload-Defer-Length", http.StatusBadRequest)
|
||||||
ErrUploadStoppedByServer = NewHTTPError(errors.New("upload has been stopped by server"), http.StatusBadRequest)
|
ErrInvalidUploadDeferLength = NewError("ERR_INVALID_UPLOAD_LENGTH_DEFER", "invalid Upload-Defer-Length header", http.StatusBadRequest)
|
||||||
|
ErrUploadStoppedByServer = NewError("ERR_UPLOAD_STOPPED", "upload has been stopped by server", http.StatusBadRequest)
|
||||||
|
ErrUploadRejectedByServer = NewError("ERR_UPLOAD_REJECTED", "upload creation has been rejected by server", http.StatusBadRequest)
|
||||||
|
ErrUploadInterrupted = NewError("ERR_UPLOAD_INTERRUPTED", "upload has been interrupted by another request for this upload resource", http.StatusBadRequest)
|
||||||
|
ErrServerShutdown = NewError("ERR_SERVER_SHUTDOWN", "request has been interrupted because the server is shutting down", http.StatusInternalServerError)
|
||||||
|
|
||||||
errReadTimeout = errors.New("read tcp: i/o timeout")
|
// TODO: These two responses are 500 for backwards compatability. We should discuss
|
||||||
errConnectionReset = errors.New("read tcp: connection reset by peer")
|
// whether it is better to more them to 4XX status codes.
|
||||||
|
ErrReadTimeout = NewError("ERR_READ_TIMEOUT", "timeout while reading request body", http.StatusInternalServerError)
|
||||||
|
ErrConnectionReset = NewError("ERR_CONNECTION_RESET", "TCP connection reset by peer", http.StatusInternalServerError)
|
||||||
)
|
)
|
||||||
|
|
||||||
// HTTPRequest contains basic details of an incoming HTTP request.
|
|
||||||
type HTTPRequest struct {
|
|
||||||
// Method is the HTTP method, e.g. POST or PATCH
|
|
||||||
Method string
|
|
||||||
// URI is the full HTTP request URI, e.g. /files/fooo
|
|
||||||
URI string
|
|
||||||
// RemoteAddr contains the network address that sent the request
|
|
||||||
RemoteAddr string
|
|
||||||
// Header contains all HTTP headers as present in the HTTP request.
|
|
||||||
Header http.Header
|
|
||||||
}
|
|
||||||
|
|
||||||
// HookEvent represents an event from tusd which can be handled by the application.
|
|
||||||
type HookEvent struct {
|
|
||||||
// Upload contains information about the upload that caused this hook
|
|
||||||
// to be fired.
|
|
||||||
Upload FileInfo
|
|
||||||
// HTTPRequest contains details about the HTTP request that reached
|
|
||||||
// tusd.
|
|
||||||
HTTPRequest HTTPRequest
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHookEvent(info FileInfo, r *http.Request) HookEvent {
|
|
||||||
// The Host header field is not present in the header map, see https://pkg.go.dev/net/http#Request:
|
|
||||||
// > For incoming requests, the Host header is promoted to the
|
|
||||||
// > Request.Host field and removed from the Header map.
|
|
||||||
// That's why we add it back manually.
|
|
||||||
r.Header.Set("Host", r.Host)
|
|
||||||
|
|
||||||
return HookEvent{
|
|
||||||
Upload: info,
|
|
||||||
HTTPRequest: HTTPRequest{
|
|
||||||
Method: r.Method,
|
|
||||||
URI: r.RequestURI,
|
|
||||||
RemoteAddr: r.RemoteAddr,
|
|
||||||
Header: r.Header,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
|
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
|
||||||
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
|
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
|
||||||
// is provided which is, however, not part of the specification.
|
// is provided which is, however, not part of the specification.
|
||||||
|
@ -125,6 +61,7 @@ type UnroutedHandler struct {
|
||||||
basePath string
|
basePath string
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
extensions string
|
extensions string
|
||||||
|
serverCtx chan struct{}
|
||||||
|
|
||||||
// CompleteUploads is used to send notifications whenever an upload is
|
// CompleteUploads is used to send notifications whenever an upload is
|
||||||
// completed by a user. The HookEvent will contain information about this
|
// completed by a user. The HookEvent will contain information about this
|
||||||
|
@ -191,11 +128,29 @@ func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
|
||||||
logger: config.Logger,
|
logger: config.Logger,
|
||||||
extensions: extensions,
|
extensions: extensions,
|
||||||
Metrics: newMetrics(),
|
Metrics: newMetrics(),
|
||||||
|
serverCtx: make(chan struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
return handler, nil
|
return handler, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InterruptRequestHandling attempts to interrupt long running requests, so
|
||||||
|
// the server can shutdown gracefully. This function should not be used on
|
||||||
|
// its own, but as part of http.Server.Shutdown. For example:
|
||||||
|
//
|
||||||
|
// server := &http.Server{
|
||||||
|
// Handler: handler,
|
||||||
|
// }
|
||||||
|
// server.RegisterOnShutdown(handler.InterruptRequestHandling)
|
||||||
|
// server.Shutdown(ctx)
|
||||||
|
//
|
||||||
|
// Note: currently, this function only interrupts POST and PATCH requests
|
||||||
|
// with a request body. In the future, this might be extended to HEAD, DELETE
|
||||||
|
// and GET requests.
|
||||||
|
func (handler UnroutedHandler) InterruptRequestHandling() {
|
||||||
|
close(handler.serverCtx)
|
||||||
|
}
|
||||||
|
|
||||||
// SupportedExtensions returns a comma-separated list of the supported tus extensions.
|
// SupportedExtensions returns a comma-separated list of the supported tus extensions.
|
||||||
// The availability of an extension usually depends on whether the provided data store
|
// The availability of an extension usually depends on whether the provided data store
|
||||||
// implements some additional interfaces.
|
// implements some additional interfaces.
|
||||||
|
@ -270,7 +225,10 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
|
||||||
// will be ignored or interpreted as a rejection.
|
// will be ignored or interpreted as a rejection.
|
||||||
// For example, the Presto engine, which is used in older versions of
|
// For example, the Presto engine, which is used in older versions of
|
||||||
// Opera, Opera Mobile and Opera Mini, handles CORS this way.
|
// Opera, Opera Mobile and Opera Mini, handles CORS this way.
|
||||||
handler.sendResp(w, r, http.StatusOK)
|
c := newContext(w, r)
|
||||||
|
handler.sendResp(c, HTTPResponse{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -278,7 +236,8 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
|
||||||
// GET and HEAD methods are not checked since a browser may visit this URL and does
|
// GET and HEAD methods are not checked since a browser may visit this URL and does
|
||||||
// not include this header. GET requests are not part of the specification.
|
// not include this header. GET requests are not part of the specification.
|
||||||
if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" {
|
if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" {
|
||||||
handler.sendError(w, r, ErrUnsupportedVersion)
|
c := newContext(w, r)
|
||||||
|
handler.sendError(c, ErrUnsupportedVersion)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -290,7 +249,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
|
||||||
// PostFile creates a new file upload using the datastore after validating the
|
// PostFile creates a new file upload using the datastore after validating the
|
||||||
// length and parsing the metadata.
|
// length and parsing the metadata.
|
||||||
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
c := newContext(w, r)
|
||||||
|
|
||||||
// Check for presence of application/offset+octet-stream. If another content
|
// Check for presence of application/offset+octet-stream. If another content
|
||||||
// type is defined, it will be ignored and treated as none was set because
|
// type is defined, it will be ignored and treated as none was set because
|
||||||
|
@ -307,7 +266,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
// Parse Upload-Concat header
|
// Parse Upload-Concat header
|
||||||
isPartial, isFinal, partialUploadIDs, err := parseConcat(concatHeader)
|
isPartial, isFinal, partialUploadIDs, err := parseConcat(concatHeader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -320,13 +279,13 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
if isFinal {
|
if isFinal {
|
||||||
// A final upload must not contain a chunk within the creation request
|
// A final upload must not contain a chunk within the creation request
|
||||||
if containsChunk {
|
if containsChunk {
|
||||||
handler.sendError(w, r, ErrModifyFinal)
|
handler.sendError(c, ErrModifyFinal)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
partialUploads, size, err = handler.sizeOfUploads(ctx, partialUploadIDs)
|
partialUploads, size, err = handler.sizeOfUploads(c, partialUploadIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -334,14 +293,14 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
uploadDeferLengthHeader := r.Header.Get("Upload-Defer-Length")
|
uploadDeferLengthHeader := r.Header.Get("Upload-Defer-Length")
|
||||||
size, sizeIsDeferred, err = handler.validateNewUploadLengthHeaders(uploadLengthHeader, uploadDeferLengthHeader)
|
size, sizeIsDeferred, err = handler.validateNewUploadLengthHeaders(uploadLengthHeader, uploadDeferLengthHeader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test whether the size is still allowed
|
// Test whether the size is still allowed
|
||||||
if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
|
if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
|
||||||
handler.sendError(w, r, ErrMaxSizeExceeded)
|
handler.sendError(c, ErrMaxSizeExceeded)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -357,22 +316,42 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
PartialUploads: partialUploadIDs,
|
PartialUploads: partialUploadIDs,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resp := HTTPResponse{
|
||||||
|
StatusCode: http.StatusCreated,
|
||||||
|
Headers: HTTPHeaders{},
|
||||||
|
}
|
||||||
|
|
||||||
if handler.config.PreUploadCreateCallback != nil {
|
if handler.config.PreUploadCreateCallback != nil {
|
||||||
if err := handler.config.PreUploadCreateCallback(newHookEvent(info, r)); err != nil {
|
resp2, changes, err := handler.config.PreUploadCreateCallback(newHookEvent(info, r))
|
||||||
handler.sendError(w, r, err)
|
if err != nil {
|
||||||
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
resp = resp.MergeWith(resp2)
|
||||||
|
|
||||||
|
// Apply changes returned from the pre-create hook.
|
||||||
|
if changes.ID != "" {
|
||||||
|
info.ID = changes.ID
|
||||||
|
}
|
||||||
|
|
||||||
|
if changes.MetaData != nil {
|
||||||
|
info.MetaData = changes.MetaData
|
||||||
|
}
|
||||||
|
|
||||||
|
if changes.Storage != nil {
|
||||||
|
info.Storage = changes.Storage
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.NewUpload(ctx, info)
|
upload, err := handler.composer.Core.NewUpload(c, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err = upload.GetInfo(ctx)
|
info, err = upload.GetInfo(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -381,7 +360,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
// Add the Location header directly after creating the new resource to even
|
// Add the Location header directly after creating the new resource to even
|
||||||
// include it in cases of failure when an error is returned
|
// include it in cases of failure when an error is returned
|
||||||
url := handler.absFileURL(r, id)
|
url := handler.absFileURL(r, id)
|
||||||
w.Header().Set("Location", url)
|
resp.Headers["Location"] = url
|
||||||
|
|
||||||
handler.Metrics.incUploadsCreated()
|
handler.Metrics.incUploadsCreated()
|
||||||
handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)
|
handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)
|
||||||
|
@ -392,8 +371,8 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
|
|
||||||
if isFinal {
|
if isFinal {
|
||||||
concatableUpload := handler.composer.Concater.AsConcatableUpload(upload)
|
concatableUpload := handler.composer.Concater.AsConcatableUpload(upload)
|
||||||
if err := concatableUpload.ConcatUploads(ctx, partialUploads); err != nil {
|
if err := concatableUpload.ConcatUploads(c, partialUploads); err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
info.Offset = size
|
info.Offset = size
|
||||||
|
@ -405,67 +384,74 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
|
|
||||||
if containsChunk {
|
if containsChunk {
|
||||||
if handler.composer.UsesLocker {
|
if handler.composer.UsesLocker {
|
||||||
lock, err := handler.lockUpload(id)
|
lock, err := handler.lockUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := handler.writeChunk(ctx, upload, info, w, r); err != nil {
|
resp, err = handler.writeChunk(c, resp, upload, info)
|
||||||
handler.sendError(w, r, err)
|
if err != nil {
|
||||||
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else if !sizeIsDeferred && size == 0 {
|
} else if !sizeIsDeferred && size == 0 {
|
||||||
// Directly finish the upload if the upload is empty (i.e. has a size of 0).
|
// Directly finish the upload if the upload is empty (i.e. has a size of 0).
|
||||||
// This statement is in an else-if block to avoid causing duplicate calls
|
// This statement is in an else-if block to avoid causing duplicate calls
|
||||||
// to finishUploadIfComplete if an upload is empty and contains a chunk.
|
// to finishUploadIfComplete if an upload is empty and contains a chunk.
|
||||||
if err := handler.finishUploadIfComplete(ctx, upload, info, r); err != nil {
|
resp, err = handler.finishUploadIfComplete(c, resp, upload, info)
|
||||||
handler.sendError(w, r, err)
|
if err != nil {
|
||||||
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
handler.sendResp(w, r, http.StatusCreated)
|
handler.sendResp(c, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeadFile returns the length and offset for the HEAD request
|
// HeadFile returns the length and offset for the HEAD request
|
||||||
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
c := newContext(w, r)
|
||||||
|
|
||||||
id, err := extractIDFromPath(r.URL.Path)
|
id, err := extractIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if handler.composer.UsesLocker {
|
if handler.composer.UsesLocker {
|
||||||
lock, err := handler.lockUpload(id)
|
lock, err := handler.lockUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
upload, err := handler.composer.Core.GetUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo(ctx)
|
info, err := upload.GetInfo(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resp := HTTPResponse{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Headers: make(HTTPHeaders),
|
||||||
|
}
|
||||||
|
|
||||||
// Add Upload-Concat header if possible
|
// Add Upload-Concat header if possible
|
||||||
if info.IsPartial {
|
if info.IsPartial {
|
||||||
w.Header().Set("Upload-Concat", "partial")
|
resp.Headers["Upload-Concat"] = "partial"
|
||||||
}
|
}
|
||||||
|
|
||||||
if info.IsFinal {
|
if info.IsFinal {
|
||||||
|
@ -476,107 +462,112 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
|
||||||
// Remove trailing space
|
// Remove trailing space
|
||||||
v = v[:len(v)-1]
|
v = v[:len(v)-1]
|
||||||
|
|
||||||
w.Header().Set("Upload-Concat", v)
|
resp.Headers["Upload-Concat"] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(info.MetaData) != 0 {
|
if len(info.MetaData) != 0 {
|
||||||
w.Header().Set("Upload-Metadata", SerializeMetadataHeader(info.MetaData))
|
resp.Headers["Upload-Metadata"] = SerializeMetadataHeader(info.MetaData)
|
||||||
}
|
}
|
||||||
|
|
||||||
if info.SizeIsDeferred {
|
if info.SizeIsDeferred {
|
||||||
w.Header().Set("Upload-Defer-Length", UploadLengthDeferred)
|
resp.Headers["Upload-Defer-Length"] = UploadLengthDeferred
|
||||||
} else {
|
} else {
|
||||||
w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
|
resp.Headers["Upload-Length"] = strconv.FormatInt(info.Size, 10)
|
||||||
w.Header().Set("Content-Length", strconv.FormatInt(info.Size, 10))
|
resp.Headers["Content-Length"] = strconv.FormatInt(info.Size, 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Header().Set("Cache-Control", "no-store")
|
resp.Headers["Cache-Control"] = "no-store"
|
||||||
w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10))
|
resp.Headers["Upload-Offset"] = strconv.FormatInt(info.Offset, 10)
|
||||||
handler.sendResp(w, r, http.StatusOK)
|
handler.sendResp(c, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PatchFile adds a chunk to an upload. This operation is only allowed
|
// PatchFile adds a chunk to an upload. This operation is only allowed
|
||||||
// if enough space in the upload is left.
|
// if enough space in the upload is left.
|
||||||
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
c := newContext(w, r)
|
||||||
|
|
||||||
// Check for presence of application/offset+octet-stream
|
// Check for presence of application/offset+octet-stream
|
||||||
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
|
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
|
||||||
handler.sendError(w, r, ErrInvalidContentType)
|
handler.sendError(c, ErrInvalidContentType)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for presence of a valid Upload-Offset Header
|
// Check for presence of a valid Upload-Offset Header
|
||||||
offset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
|
offset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
|
||||||
if err != nil || offset < 0 {
|
if err != nil || offset < 0 {
|
||||||
handler.sendError(w, r, ErrInvalidOffset)
|
handler.sendError(c, ErrInvalidOffset)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
id, err := extractIDFromPath(r.URL.Path)
|
id, err := extractIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if handler.composer.UsesLocker {
|
if handler.composer.UsesLocker {
|
||||||
lock, err := handler.lockUpload(id)
|
lock, err := handler.lockUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
upload, err := handler.composer.Core.GetUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo(ctx)
|
info, err := upload.GetInfo(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Modifying a final upload is not allowed
|
// Modifying a final upload is not allowed
|
||||||
if info.IsFinal {
|
if info.IsFinal {
|
||||||
handler.sendError(w, r, ErrModifyFinal)
|
handler.sendError(c, ErrModifyFinal)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if offset != info.Offset {
|
if offset != info.Offset {
|
||||||
handler.sendError(w, r, ErrMismatchOffset)
|
handler.sendError(c, ErrMismatchOffset)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resp := HTTPResponse{
|
||||||
|
StatusCode: http.StatusNoContent,
|
||||||
|
Headers: make(HTTPHeaders, 1), // Initialize map, so writeChunk can set the Upload-Offset header.
|
||||||
|
}
|
||||||
|
|
||||||
// Do not proxy the call to the data store if the upload is already completed
|
// Do not proxy the call to the data store if the upload is already completed
|
||||||
if !info.SizeIsDeferred && info.Offset == info.Size {
|
if !info.SizeIsDeferred && info.Offset == info.Size {
|
||||||
w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
|
resp.Headers["Upload-Offset"] = strconv.FormatInt(offset, 10)
|
||||||
handler.sendResp(w, r, http.StatusNoContent)
|
handler.sendResp(c, resp)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Header.Get("Upload-Length") != "" {
|
if r.Header.Get("Upload-Length") != "" {
|
||||||
if !handler.composer.UsesLengthDeferrer {
|
if !handler.composer.UsesLengthDeferrer {
|
||||||
handler.sendError(w, r, ErrNotImplemented)
|
handler.sendError(c, ErrNotImplemented)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !info.SizeIsDeferred {
|
if !info.SizeIsDeferred {
|
||||||
handler.sendError(w, r, ErrInvalidUploadLength)
|
handler.sendError(c, ErrInvalidUploadLength)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
uploadLength, err := strconv.ParseInt(r.Header.Get("Upload-Length"), 10, 64)
|
uploadLength, err := strconv.ParseInt(r.Header.Get("Upload-Length"), 10, 64)
|
||||||
if err != nil || uploadLength < 0 || uploadLength < info.Offset || (handler.config.MaxSize > 0 && uploadLength > handler.config.MaxSize) {
|
if err != nil || uploadLength < 0 || uploadLength < info.Offset || (handler.config.MaxSize > 0 && uploadLength > handler.config.MaxSize) {
|
||||||
handler.sendError(w, r, ErrInvalidUploadLength)
|
handler.sendError(c, ErrInvalidUploadLength)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
|
lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
|
||||||
if err := lengthDeclarableUpload.DeclareLength(ctx, uploadLength); err != nil {
|
if err := lengthDeclarableUpload.DeclareLength(c, uploadLength); err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -584,26 +575,28 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
|
||||||
info.SizeIsDeferred = false
|
info.SizeIsDeferred = false
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := handler.writeChunk(ctx, upload, info, w, r); err != nil {
|
resp, err = handler.writeChunk(c, resp, upload, info)
|
||||||
handler.sendError(w, r, err)
|
if err != nil {
|
||||||
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
handler.sendResp(w, r, http.StatusNoContent)
|
handler.sendResp(c, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeChunk reads the body from the requests r and appends it to the upload
|
// writeChunk reads the body from the requests r and appends it to the upload
|
||||||
// with the corresponding id. Afterwards, it will set the necessary response
|
// with the corresponding id. Afterwards, it will set the necessary response
|
||||||
// headers but will not send the response.
|
// headers but will not send the response.
|
||||||
func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, info FileInfo, w http.ResponseWriter, r *http.Request) error {
|
func (handler *UnroutedHandler) writeChunk(c *httpContext, resp HTTPResponse, upload Upload, info FileInfo) (HTTPResponse, error) {
|
||||||
// Get Content-Length if possible
|
// Get Content-Length if possible
|
||||||
|
r := c.req
|
||||||
length := r.ContentLength
|
length := r.ContentLength
|
||||||
offset := info.Offset
|
offset := info.Offset
|
||||||
id := info.ID
|
id := info.ID
|
||||||
|
|
||||||
// Test if this upload fits into the file's size
|
// Test if this upload fits into the file's size
|
||||||
if !info.SizeIsDeferred && offset+length > info.Size {
|
if !info.SizeIsDeferred && offset+length > info.Size {
|
||||||
return ErrSizeExceeded
|
return resp, ErrSizeExceeded
|
||||||
}
|
}
|
||||||
|
|
||||||
maxSize := info.Size - offset
|
maxSize := info.Size - offset
|
||||||
|
@ -631,33 +624,44 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
|
||||||
// available in the case of a malicious request.
|
// available in the case of a malicious request.
|
||||||
if r.Body != nil {
|
if r.Body != nil {
|
||||||
// Limit the data read from the request's body to the allowed maximum
|
// Limit the data read from the request's body to the allowed maximum
|
||||||
reader := newBodyReader(io.LimitReader(r.Body, maxSize))
|
c.body = newBodyReader(r.Body, maxSize)
|
||||||
|
|
||||||
// We use a context object to allow the hook system to cancel an upload
|
// We use a context object to allow the hook system to cancel an upload
|
||||||
uploadCtx, stopUpload := context.WithCancel(context.Background())
|
uploadCtx, stopUpload := context.WithCancel(context.Background())
|
||||||
info.stopUpload = stopUpload
|
info.stopUpload = stopUpload
|
||||||
|
|
||||||
// terminateUpload specifies whether the upload should be deleted after
|
// terminateUpload specifies whether the upload should be deleted after
|
||||||
// the write has finished
|
// the write has finished
|
||||||
terminateUpload := false
|
terminateUpload := false
|
||||||
|
|
||||||
|
serverShutDown := false
|
||||||
|
|
||||||
// Cancel the context when the function exits to ensure that the goroutine
|
// Cancel the context when the function exits to ensure that the goroutine
|
||||||
// is properly cleaned up
|
// is properly cleaned up
|
||||||
defer stopUpload()
|
defer stopUpload()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
// Interrupt the Read() call from the request body
|
select {
|
||||||
<-uploadCtx.Done()
|
case <-uploadCtx.Done():
|
||||||
terminateUpload = true
|
// uploadCtx is done if the upload is stopped by a post-receive hook
|
||||||
|
terminateUpload = true
|
||||||
|
case <-handler.serverCtx:
|
||||||
|
// serverCtx is closed if the server is being shut down
|
||||||
|
serverShutDown = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// interrupt the Read() calls from the request body
|
||||||
r.Body.Close()
|
r.Body.Close()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if handler.config.NotifyUploadProgress {
|
if handler.config.NotifyUploadProgress {
|
||||||
stopProgressEvents := handler.sendProgressMessages(newHookEvent(info, r), reader)
|
stopProgressEvents := handler.sendProgressMessages(newHookEvent(info, r), c.body)
|
||||||
defer close(stopProgressEvents)
|
defer close(stopProgressEvents)
|
||||||
}
|
}
|
||||||
|
|
||||||
bytesWritten, err = upload.WriteChunk(ctx, offset, reader)
|
bytesWritten, err = upload.WriteChunk(c, offset, c.body)
|
||||||
if terminateUpload && handler.composer.UsesTerminater {
|
if terminateUpload && handler.composer.UsesTerminater {
|
||||||
if terminateErr := handler.terminateUpload(ctx, upload, info, r); terminateErr != nil {
|
if terminateErr := handler.terminateUpload(c, upload, info); terminateErr != nil {
|
||||||
// We only log this error and not show it to the user since this
|
// We only log this error and not show it to the user since this
|
||||||
// termination error is not relevant to the uploading client
|
// termination error is not relevant to the uploading client
|
||||||
handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
|
handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
|
||||||
|
@ -666,7 +670,7 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
|
||||||
|
|
||||||
// If we encountered an error while reading the body from the HTTP request, log it, but only include
|
// If we encountered an error while reading the body from the HTTP request, log it, but only include
|
||||||
// it in the response, if the store did not also return an error.
|
// it in the response, if the store did not also return an error.
|
||||||
if bodyErr := reader.hasError(); bodyErr != nil {
|
if bodyErr := c.body.hasError(); bodyErr != nil {
|
||||||
handler.log("BodyReadError", "id", id, "error", bodyErr.Error())
|
handler.log("BodyReadError", "id", id, "error", bodyErr.Error())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = bodyErr
|
err = bodyErr
|
||||||
|
@ -678,41 +682,51 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
|
||||||
if terminateUpload {
|
if terminateUpload {
|
||||||
err = ErrUploadStoppedByServer
|
err = ErrUploadStoppedByServer
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the server is closing down, send an error response indicating this.
|
||||||
|
if serverShutDown {
|
||||||
|
err = ErrServerShutdown
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))
|
handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send new offset to client
|
// Send new offset to client
|
||||||
newOffset := offset + bytesWritten
|
newOffset := offset + bytesWritten
|
||||||
w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10))
|
resp.Headers["Upload-Offset"] = strconv.FormatInt(newOffset, 10)
|
||||||
handler.Metrics.incBytesReceived(uint64(bytesWritten))
|
handler.Metrics.incBytesReceived(uint64(bytesWritten))
|
||||||
info.Offset = newOffset
|
info.Offset = newOffset
|
||||||
|
|
||||||
return handler.finishUploadIfComplete(ctx, upload, info, r)
|
return handler.finishUploadIfComplete(c, resp, upload, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
|
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
|
||||||
// matches upload size) and if so, it will call the data store's FinishUpload
|
// matches upload size) and if so, it will call the data store's FinishUpload
|
||||||
// function and send the necessary message on the CompleteUpload channel.
|
// function and send the necessary message on the CompleteUpload channel.
|
||||||
func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
|
func (handler *UnroutedHandler) finishUploadIfComplete(c *httpContext, resp HTTPResponse, upload Upload, info FileInfo) (HTTPResponse, error) {
|
||||||
|
r := c.req
|
||||||
|
|
||||||
// If the upload is completed, ...
|
// If the upload is completed, ...
|
||||||
if !info.SizeIsDeferred && info.Offset == info.Size {
|
if !info.SizeIsDeferred && info.Offset == info.Size {
|
||||||
// ... allow the data storage to finish and cleanup the upload
|
// ... allow the data storage to finish and cleanup the upload
|
||||||
if err := upload.FinishUpload(ctx); err != nil {
|
if err := upload.FinishUpload(c); err != nil {
|
||||||
return err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ... allow the hook callback to run before sending the response
|
// ... allow the hook callback to run before sending the response
|
||||||
if handler.config.PreFinishResponseCallback != nil {
|
if handler.config.PreFinishResponseCallback != nil {
|
||||||
if err := handler.config.PreFinishResponseCallback(newHookEvent(info, r)); err != nil {
|
resp2, err := handler.config.PreFinishResponseCallback(newHookEvent(info, r))
|
||||||
return err
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
}
|
}
|
||||||
|
resp = resp.MergeWith(resp2)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
handler.log("UploadFinished", "id", info.ID, "size", strconv.FormatInt(info.Size, 10))
|
||||||
handler.Metrics.incUploadsFinished()
|
handler.Metrics.incUploadsFinished()
|
||||||
|
|
||||||
// ... send the info out to the channel
|
// ... send the info out to the channel
|
||||||
|
@ -721,68 +735,70 @@ func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, uplo
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetFile handles requests to download a file using a GET request. This is not
|
// GetFile handles requests to download a file using a GET request. This is not
|
||||||
// part of the specification.
|
// part of the specification.
|
||||||
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
c := newContext(w, r)
|
||||||
|
|
||||||
id, err := extractIDFromPath(r.URL.Path)
|
id, err := extractIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if handler.composer.UsesLocker {
|
if handler.composer.UsesLocker {
|
||||||
lock, err := handler.lockUpload(id)
|
lock, err := handler.lockUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
upload, err := handler.composer.Core.GetUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo(ctx)
|
info, err := upload.GetInfo(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set headers before sending responses
|
|
||||||
w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10))
|
|
||||||
|
|
||||||
contentType, contentDisposition := filterContentType(info)
|
contentType, contentDisposition := filterContentType(info)
|
||||||
w.Header().Set("Content-Type", contentType)
|
resp := HTTPResponse{
|
||||||
w.Header().Set("Content-Disposition", contentDisposition)
|
StatusCode: http.StatusOK,
|
||||||
|
Headers: HTTPHeaders{
|
||||||
|
"Content-Length": strconv.FormatInt(info.Offset, 10),
|
||||||
|
"Content-Type": contentType,
|
||||||
|
"Content-Disposition": contentDisposition,
|
||||||
|
},
|
||||||
|
Body: "", // Body is intentionally left empty, and we copy it manually in later.
|
||||||
|
}
|
||||||
|
|
||||||
// If no data has been uploaded yet, respond with an empty "204 No Content" status.
|
// If no data has been uploaded yet, respond with an empty "204 No Content" status.
|
||||||
if info.Offset == 0 {
|
if info.Offset == 0 {
|
||||||
handler.sendResp(w, r, http.StatusNoContent)
|
resp.StatusCode = http.StatusNoContent
|
||||||
|
handler.sendResp(c, resp)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
src, err := upload.GetReader(ctx)
|
src, err := upload.GetReader(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
handler.sendResp(w, r, http.StatusOK)
|
handler.sendResp(c, resp)
|
||||||
io.Copy(w, src)
|
io.Copy(w, src)
|
||||||
|
|
||||||
// Try to close the reader if the io.Closer interface is implemented
|
src.Close()
|
||||||
if closer, ok := src.(io.Closer); ok {
|
|
||||||
closer.Close()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// mimeInlineBrowserWhitelist is a map containing MIME types which should be
|
// mimeInlineBrowserWhitelist is a map containing MIME types which should be
|
||||||
|
@ -793,23 +809,23 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
|
||||||
// The values of this map does not convey any meaning and are therefore just
|
// The values of this map does not convey any meaning and are therefore just
|
||||||
// empty structs.
|
// empty structs.
|
||||||
var mimeInlineBrowserWhitelist = map[string]struct{}{
|
var mimeInlineBrowserWhitelist = map[string]struct{}{
|
||||||
"text/plain": struct{}{},
|
"text/plain": {},
|
||||||
|
|
||||||
"image/png": struct{}{},
|
"image/png": {},
|
||||||
"image/jpeg": struct{}{},
|
"image/jpeg": {},
|
||||||
"image/gif": struct{}{},
|
"image/gif": {},
|
||||||
"image/bmp": struct{}{},
|
"image/bmp": {},
|
||||||
"image/webp": struct{}{},
|
"image/webp": {},
|
||||||
|
|
||||||
"audio/wave": struct{}{},
|
"audio/wave": {},
|
||||||
"audio/wav": struct{}{},
|
"audio/wav": {},
|
||||||
"audio/x-wav": struct{}{},
|
"audio/x-wav": {},
|
||||||
"audio/x-pn-wav": struct{}{},
|
"audio/x-pn-wav": {},
|
||||||
"audio/webm": struct{}{},
|
"audio/webm": {},
|
||||||
"video/webm": struct{}{},
|
"video/webm": {},
|
||||||
"audio/ogg": struct{}{},
|
"audio/ogg": {},
|
||||||
"video/ogg": struct{}{},
|
"video/ogg": {},
|
||||||
"application/ogg": struct{}{},
|
"application/ogg": {},
|
||||||
}
|
}
|
||||||
|
|
||||||
// filterContentType returns the values for the Content-Type and
|
// filterContentType returns the values for the Content-Type and
|
||||||
|
@ -848,52 +864,54 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st
|
||||||
|
|
||||||
// DelFile terminates an upload permanently.
|
// DelFile terminates an upload permanently.
|
||||||
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
c := newContext(w, r)
|
||||||
|
|
||||||
// Abort the request handling if the required interface is not implemented
|
// Abort the request handling if the required interface is not implemented
|
||||||
if !handler.composer.UsesTerminater {
|
if !handler.composer.UsesTerminater {
|
||||||
handler.sendError(w, r, ErrNotImplemented)
|
handler.sendError(c, ErrNotImplemented)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
id, err := extractIDFromPath(r.URL.Path)
|
id, err := extractIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if handler.composer.UsesLocker {
|
if handler.composer.UsesLocker {
|
||||||
lock, err := handler.lockUpload(id)
|
lock, err := handler.lockUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
upload, err := handler.composer.Core.GetUpload(c, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var info FileInfo
|
var info FileInfo
|
||||||
if handler.config.NotifyTerminatedUploads {
|
if handler.config.NotifyTerminatedUploads {
|
||||||
info, err = upload.GetInfo(ctx)
|
info, err = upload.GetInfo(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = handler.terminateUpload(ctx, upload, info, r)
|
err = handler.terminateUpload(c, upload, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
handler.sendResp(w, r, http.StatusNoContent)
|
handler.sendResp(c, HTTPResponse{
|
||||||
|
StatusCode: http.StatusNoContent,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// terminateUpload passes a given upload to the DataStore's Terminater,
|
// terminateUpload passes a given upload to the DataStore's Terminater,
|
||||||
|
@ -901,18 +919,19 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
|
||||||
// and updates the statistics.
|
// and updates the statistics.
|
||||||
// Note the the info argument is only needed if the terminated uploads
|
// Note the the info argument is only needed if the terminated uploads
|
||||||
// notifications are enabled.
|
// notifications are enabled.
|
||||||
func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
|
func (handler *UnroutedHandler) terminateUpload(c *httpContext, upload Upload, info FileInfo) error {
|
||||||
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
|
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
|
||||||
|
|
||||||
err := terminatableUpload.Terminate(ctx)
|
err := terminatableUpload.Terminate(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if handler.config.NotifyTerminatedUploads {
|
if handler.config.NotifyTerminatedUploads {
|
||||||
handler.TerminatedUploads <- newHookEvent(info, r)
|
handler.TerminatedUploads <- newHookEvent(info, c.req)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
handler.log("UploadTerminated", "id", info.ID)
|
||||||
handler.Metrics.incUploadsTerminated()
|
handler.Metrics.incUploadsTerminated()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -920,20 +939,20 @@ func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Uplo
|
||||||
|
|
||||||
// Send the error in the response body. The status code will be looked up in
|
// Send the error in the response body. The status code will be looked up in
|
||||||
// ErrStatusCodes. If none is found 500 Internal Error will be used.
|
// ErrStatusCodes. If none is found 500 Internal Error will be used.
|
||||||
func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request, err error) {
|
func (handler *UnroutedHandler) sendError(c *httpContext, err error) {
|
||||||
// Errors for read timeouts contain too much information which is not
|
// Errors for read timeouts contain too much information which is not
|
||||||
// necessary for us and makes grouping for the metrics harder. The error
|
// necessary for us and makes grouping for the metrics harder. The error
|
||||||
// message looks like: read tcp 127.0.0.1:1080->127.0.0.1:53673: i/o timeout
|
// message looks like: read tcp 127.0.0.1:1080->127.0.0.1:53673: i/o timeout
|
||||||
// Therefore, we use a common error message for all of them.
|
// Therefore, we use a common error message for all of them.
|
||||||
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
||||||
err = errReadTimeout
|
err = ErrReadTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errors for connnection resets also contain TCP details, we don't need, e.g:
|
// Errors for connnection resets also contain TCP details, we don't need, e.g:
|
||||||
// read tcp 127.0.0.1:1080->127.0.0.1:10023: read: connection reset by peer
|
// read tcp 127.0.0.1:1080->127.0.0.1:10023: read: connection reset by peer
|
||||||
// Therefore, we also trim those down.
|
// Therefore, we also trim those down.
|
||||||
if strings.HasSuffix(err.Error(), "read: connection reset by peer") {
|
if strings.HasSuffix(err.Error(), "read: connection reset by peer") {
|
||||||
err = errConnectionReset
|
err = ErrConnectionReset
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Decide if we should handle this in here, in body_reader or not at all.
|
// TODO: Decide if we should handle this in here, in body_reader or not at all.
|
||||||
|
@ -955,31 +974,29 @@ func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request
|
||||||
// err = nil
|
// err = nil
|
||||||
//}
|
//}
|
||||||
|
|
||||||
statusErr, ok := err.(HTTPError)
|
r := c.req
|
||||||
|
|
||||||
|
detailedErr, ok := err.(Error)
|
||||||
if !ok {
|
if !ok {
|
||||||
statusErr = NewHTTPError(err, http.StatusInternalServerError)
|
handler.log("InternalServerError", "message", err.Error(), "method", r.Method, "path", r.URL.Path, "requestId", getRequestId(r))
|
||||||
|
detailedErr = NewError("ERR_INTERNAL_SERVER_ERROR", err.Error(), http.StatusInternalServerError)
|
||||||
}
|
}
|
||||||
|
|
||||||
reason := append(statusErr.Body(), '\n')
|
// If we are sending the response for a HEAD request, ensure that we are not including
|
||||||
|
// any response body.
|
||||||
if r.Method == "HEAD" {
|
if r.Method == "HEAD" {
|
||||||
reason = nil
|
detailedErr.HTTPResponse.Body = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
handler.sendResp(c, detailedErr.HTTPResponse)
|
||||||
w.Header().Set("Content-Length", strconv.Itoa(len(reason)))
|
handler.Metrics.incErrorsTotal(detailedErr)
|
||||||
w.WriteHeader(statusErr.StatusCode())
|
|
||||||
w.Write(reason)
|
|
||||||
|
|
||||||
handler.log("ResponseOutgoing", "status", strconv.Itoa(statusErr.StatusCode()), "method", r.Method, "path", r.URL.Path, "error", err.Error(), "requestId", getRequestId(r))
|
|
||||||
|
|
||||||
handler.Metrics.incErrorsTotal(statusErr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendResp writes the header to w with the specified status code.
|
// sendResp writes the header to w with the specified status code.
|
||||||
func (handler *UnroutedHandler) sendResp(w http.ResponseWriter, r *http.Request, status int) {
|
func (handler *UnroutedHandler) sendResp(c *httpContext, resp HTTPResponse) {
|
||||||
w.WriteHeader(status)
|
resp.writeTo(c.res)
|
||||||
|
|
||||||
handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path, "requestId", getRequestId(r))
|
handler.log("ResponseOutgoing", "status", strconv.Itoa(resp.StatusCode), "method", c.req.Method, "path", c.req.URL.Path, "requestId", getRequestId(c.req), "body", resp.Body)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make an absolute URLs to the given upload id. If the base path is absolute
|
// Make an absolute URLs to the given upload id. If the base path is absolute
|
||||||
|
@ -1016,7 +1033,7 @@ func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader *bod
|
||||||
previousOffset = hook.Upload.Offset
|
previousOffset = hook.Upload.Offset
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case <-time.After(1 * time.Second):
|
case <-time.After(handler.config.UploadProgressInterval):
|
||||||
hook.Upload.Offset = originalOffset + reader.bytesRead()
|
hook.Upload.Offset = originalOffset + reader.bytesRead()
|
||||||
if hook.Upload.Offset != previousOffset {
|
if hook.Upload.Offset != previousOffset {
|
||||||
handler.UploadProgress <- hook
|
handler.UploadProgress <- hook
|
||||||
|
@ -1123,13 +1140,24 @@ func (handler *UnroutedHandler) validateNewUploadLengthHeaders(uploadLengthHeade
|
||||||
|
|
||||||
// lockUpload creates a new lock for the given upload ID and attempts to lock it.
|
// lockUpload creates a new lock for the given upload ID and attempts to lock it.
|
||||||
// The created lock is returned if it was aquired successfully.
|
// The created lock is returned if it was aquired successfully.
|
||||||
func (handler *UnroutedHandler) lockUpload(id string) (Lock, error) {
|
func (handler *UnroutedHandler) lockUpload(c *httpContext, id string) (Lock, error) {
|
||||||
lock, err := handler.composer.Locker.NewLock(id)
|
lock, err := handler.composer.Locker.NewLock(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := lock.Lock(); err != nil {
|
// TODO: Make lock timeout configurable
|
||||||
|
ctx, cancelContext := context.WithTimeout(context.Background(), 3*time.Second)
|
||||||
|
defer cancelContext()
|
||||||
|
releaseLock := func() {
|
||||||
|
if c.body != nil {
|
||||||
|
handler.log("UploadInterrupted", "id", id, "requestId", getRequestId(c.req))
|
||||||
|
// TODO: Consider replacing this with a channel or a context
|
||||||
|
c.body.closeWithError(ErrUploadInterrupted)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := lock.Lock(ctx, releaseLock); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,7 @@ import (
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
. "github.com/tus/tusd/pkg/handler"
|
. "github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseMetadataHeader(t *testing.T) {
|
func TestParseMetadataHeader(t *testing.T) {
|
||||||
|
|
|
@ -10,10 +10,10 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate mockgen -package handler_test -source utils_test.go -aux_files handler=datastore.go -destination=handler_mock_test.go
|
//go:generate mockgen -package handler_test -source utils_test.go -destination=handler_mock_test.go
|
||||||
|
|
||||||
// FullDataStore is an interface combining most interfaces for data stores.
|
// FullDataStore is an interface combining most interfaces for data stores.
|
||||||
// This is used by mockgen(1) to generate a mocked data store used for testing
|
// This is used by mockgen(1) to generate a mocked data store used for testing
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
// Package memorylocker provides an in-memory locking mechanism.
|
// Package memorylocker provides an in-memory locking mechanism.
|
||||||
//
|
//
|
||||||
|
// TODO: Update comment
|
||||||
// When multiple processes are attempting to access an upload, whether it be
|
// When multiple processes are attempting to access an upload, whether it be
|
||||||
// by reading or writing, a synchronization mechanism is required to prevent
|
// by reading or writing, a synchronization mechanism is required to prevent
|
||||||
// data corruption, especially to ensure correct offset values and the proper
|
// data corruption, especially to ensure correct offset values and the proper
|
||||||
|
@ -11,23 +12,29 @@
|
||||||
package memorylocker
|
package memorylocker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MemoryLocker persists locks using memory and therefore allowing a simple and
|
// MemoryLocker persists locks using memory and therefore allowing a simple and
|
||||||
// cheap mechanism. Locks will only exist as long as this object is kept in
|
// cheap mechanism. Locks will only exist as long as this object is kept in
|
||||||
// reference and will be erased if the program exits.
|
// reference and will be erased if the program exits.
|
||||||
type MemoryLocker struct {
|
type MemoryLocker struct {
|
||||||
locks map[string]struct{}
|
locks map[string]lockEntry
|
||||||
mutex sync.Mutex
|
mutex sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type lockEntry struct {
|
||||||
|
lockReleased chan struct{}
|
||||||
|
requestRelease func()
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new in-memory locker.
|
// New creates a new in-memory locker.
|
||||||
func New() *MemoryLocker {
|
func New() *MemoryLocker {
|
||||||
return &MemoryLocker{
|
return &MemoryLocker{
|
||||||
locks: make(map[string]struct{}),
|
locks: make(map[string]lockEntry),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,16 +53,40 @@ type memoryLock struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lock tries to obtain the exclusive lock.
|
// Lock tries to obtain the exclusive lock.
|
||||||
func (lock memoryLock) Lock() error {
|
func (lock memoryLock) Lock(ctx context.Context, requestRelease func()) error {
|
||||||
lock.locker.mutex.Lock()
|
lock.locker.mutex.RLock()
|
||||||
defer lock.locker.mutex.Unlock()
|
entry, ok := lock.locker.locks[lock.id]
|
||||||
|
lock.locker.mutex.RUnlock()
|
||||||
|
|
||||||
// Ensure file is not locked
|
requestRelease:
|
||||||
if _, ok := lock.locker.locks[lock.id]; ok {
|
if ok {
|
||||||
return handler.ErrFileLocked
|
// TODO: Make this channel?
|
||||||
|
// TODO: Should we ensure this is only called once?
|
||||||
|
entry.requestRelease()
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return handler.ErrLockTimeout
|
||||||
|
case <-entry.lockReleased:
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
lock.locker.locks[lock.id] = struct{}{}
|
lock.locker.mutex.Lock()
|
||||||
|
// Check that the lock has not already been created in the meantime
|
||||||
|
entry, ok = lock.locker.locks[lock.id]
|
||||||
|
if ok {
|
||||||
|
// Lock has been created in the meantime, so we must wait again until it is free
|
||||||
|
lock.locker.mutex.Unlock()
|
||||||
|
goto requestRelease
|
||||||
|
}
|
||||||
|
|
||||||
|
// No lock exists, so we can create it
|
||||||
|
entry = lockEntry{
|
||||||
|
lockReleased: make(chan struct{}),
|
||||||
|
requestRelease: requestRelease,
|
||||||
|
}
|
||||||
|
|
||||||
|
lock.locker.locks[lock.id] = entry
|
||||||
|
lock.locker.mutex.Unlock()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -64,10 +95,14 @@ func (lock memoryLock) Lock() error {
|
||||||
func (lock memoryLock) Unlock() error {
|
func (lock memoryLock) Unlock() error {
|
||||||
lock.locker.mutex.Lock()
|
lock.locker.mutex.Lock()
|
||||||
|
|
||||||
// Deleting a non-existing key does not end in unexpected errors or panic
|
lockReleased := lock.locker.locks[lock.id].lockReleased
|
||||||
// since this operation results in a no-op
|
|
||||||
|
// Delete the lock entry entirely
|
||||||
delete(lock.locker.locks, lock.id)
|
delete(lock.locker.locks, lock.id)
|
||||||
|
|
||||||
lock.locker.mutex.Unlock()
|
lock.locker.mutex.Unlock()
|
||||||
|
|
||||||
|
close(lockReleased)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,16 +1,17 @@
|
||||||
package memorylocker
|
package memorylocker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ handler.Locker = &MemoryLocker{}
|
var _ handler.Locker = &MemoryLocker{}
|
||||||
|
|
||||||
func TestMemoryLocker(t *testing.T) {
|
func TestMemoryLocker_LockAndUnlock(t *testing.T) {
|
||||||
a := assert.New(t)
|
a := assert.New(t)
|
||||||
|
|
||||||
locker := New()
|
locker := New()
|
||||||
|
@ -18,13 +19,62 @@ func TestMemoryLocker(t *testing.T) {
|
||||||
lock1, err := locker.NewLock("one")
|
lock1, err := locker.NewLock("one")
|
||||||
a.NoError(err)
|
a.NoError(err)
|
||||||
|
|
||||||
a.NoError(lock1.Lock())
|
a.NoError(lock1.Lock(context.Background(), func() {
|
||||||
a.Equal(handler.ErrFileLocked, lock1.Lock())
|
panic("must not be called")
|
||||||
|
}))
|
||||||
|
a.NoError(lock1.Unlock())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMemoryLocker_Timeout(t *testing.T) {
|
||||||
|
a := assert.New(t)
|
||||||
|
|
||||||
|
locker := New()
|
||||||
|
releaseRequestCalled := false
|
||||||
|
|
||||||
|
lock1, err := locker.NewLock("one")
|
||||||
|
a.NoError(err)
|
||||||
|
a.NoError(lock1.Lock(context.Background(), func() {
|
||||||
|
releaseRequestCalled = true
|
||||||
|
// We note that the function has been called, but do not
|
||||||
|
// release the lock
|
||||||
|
}))
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
lock2, err := locker.NewLock("one")
|
lock2, err := locker.NewLock("one")
|
||||||
a.NoError(err)
|
a.NoError(err)
|
||||||
a.Equal(handler.ErrFileLocked, lock2.Lock())
|
err = lock2.Lock(ctx, func() {
|
||||||
|
panic("must not be called")
|
||||||
|
})
|
||||||
|
|
||||||
a.NoError(lock1.Unlock())
|
a.Equal(err, handler.ErrLockTimeout)
|
||||||
a.NoError(lock1.Unlock())
|
a.True(releaseRequestCalled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMemoryLocker_RequestUnlock(t *testing.T) {
|
||||||
|
a := assert.New(t)
|
||||||
|
|
||||||
|
locker := New()
|
||||||
|
releaseRequestCalled := false
|
||||||
|
|
||||||
|
lock1, err := locker.NewLock("one")
|
||||||
|
a.NoError(err)
|
||||||
|
a.NoError(lock1.Lock(context.Background(), func() {
|
||||||
|
releaseRequestCalled = true
|
||||||
|
<-time.After(10 * time.Millisecond)
|
||||||
|
a.NoError(lock1.Unlock())
|
||||||
|
}))
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
lock2, err := locker.NewLock("one")
|
||||||
|
a.NoError(err)
|
||||||
|
a.NoError(lock2.Lock(ctx, func() {
|
||||||
|
panic("must not be called")
|
||||||
|
}))
|
||||||
|
a.NoError(lock2.Unlock())
|
||||||
|
|
||||||
|
a.True(releaseRequestCalled)
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,7 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
)
|
)
|
||||||
|
@ -25,7 +25,7 @@ var (
|
||||||
errorsTotalDesc = prometheus.NewDesc(
|
errorsTotalDesc = prometheus.NewDesc(
|
||||||
"tusd_errors_total",
|
"tusd_errors_total",
|
||||||
"Total number of errors per status.",
|
"Total number of errors per status.",
|
||||||
[]string{"status", "message"}, nil)
|
[]string{"status", "code"}, nil)
|
||||||
bytesReceivedDesc = prometheus.NewDesc(
|
bytesReceivedDesc = prometheus.NewDesc(
|
||||||
"tusd_bytes_received",
|
"tusd_bytes_received",
|
||||||
"Number of bytes received for uploads.",
|
"Number of bytes received for uploads.",
|
||||||
|
@ -79,8 +79,8 @@ func (c Collector) Collect(metrics chan<- prometheus.Metric) {
|
||||||
errorsTotalDesc,
|
errorsTotalDesc,
|
||||||
prometheus.CounterValue,
|
prometheus.CounterValue,
|
||||||
float64(atomic.LoadUint64(valuePtr)),
|
float64(atomic.LoadUint64(valuePtr)),
|
||||||
strconv.Itoa(httpError.StatusCode()),
|
strconv.Itoa(httpError.StatusCode),
|
||||||
httpError.Error(),
|
httpError.ErrorCode,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,475 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: v1/hook.proto
|
|
||||||
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
any "github.com/golang/protobuf/ptypes/any"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
codes "google.golang.org/grpc/codes"
|
|
||||||
status "google.golang.org/grpc/status"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// Uploaded data
|
|
||||||
type Upload struct {
|
|
||||||
// Unique integer identifier of the uploaded file
|
|
||||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
|
||||||
// Total file size in bytes specified in the NewUpload call
|
|
||||||
Size int64 `protobuf:"varint,2,opt,name=Size,proto3" json:"Size,omitempty"`
|
|
||||||
// Indicates whether the total file size is deferred until later
|
|
||||||
SizeIsDeferred bool `protobuf:"varint,3,opt,name=SizeIsDeferred,proto3" json:"SizeIsDeferred,omitempty"`
|
|
||||||
// Offset in bytes (zero-based)
|
|
||||||
Offset int64 `protobuf:"varint,4,opt,name=Offset,proto3" json:"Offset,omitempty"`
|
|
||||||
MetaData map[string]string `protobuf:"bytes,5,rep,name=metaData,proto3" json:"metaData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
|
||||||
// Indicates that this is a partial upload which will later be used to form
|
|
||||||
// a final upload by concatenation. Partial uploads should not be processed
|
|
||||||
// when they are finished since they are only incomplete chunks of files.
|
|
||||||
IsPartial bool `protobuf:"varint,6,opt,name=isPartial,proto3" json:"isPartial,omitempty"`
|
|
||||||
// Indicates that this is a final upload
|
|
||||||
IsFinal bool `protobuf:"varint,7,opt,name=isFinal,proto3" json:"isFinal,omitempty"`
|
|
||||||
// If the upload is a final one (see IsFinal) this will be a non-empty
|
|
||||||
// ordered slice containing the ids of the uploads of which the final upload
|
|
||||||
// will consist after concatenation.
|
|
||||||
PartialUploads []string `protobuf:"bytes,8,rep,name=partialUploads,proto3" json:"partialUploads,omitempty"`
|
|
||||||
// Storage contains information about where the data storage saves the upload,
|
|
||||||
// for example a file path. The available values vary depending on what data
|
|
||||||
// store is used. This map may also be nil.
|
|
||||||
Storage map[string]string `protobuf:"bytes,9,rep,name=storage,proto3" json:"storage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) Reset() { *m = Upload{} }
|
|
||||||
func (m *Upload) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Upload) ProtoMessage() {}
|
|
||||||
func (*Upload) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_581082325ef044c1, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Upload.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Upload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Upload.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Upload) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Upload.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Upload) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Upload.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Upload) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Upload.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Upload proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Upload) GetId() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Id
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetSize() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Size
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetSizeIsDeferred() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.SizeIsDeferred
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetOffset() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Offset
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetMetaData() map[string]string {
|
|
||||||
if m != nil {
|
|
||||||
return m.MetaData
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetIsPartial() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.IsPartial
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetIsFinal() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.IsFinal
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetPartialUploads() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.PartialUploads
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Upload) GetStorage() map[string]string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Storage
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type HTTPRequest struct {
|
|
||||||
// Method is the HTTP method, e.g. POST or PATCH
|
|
||||||
Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"`
|
|
||||||
// URI is the full HTTP request URI, e.g. /files/fooo
|
|
||||||
Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
|
|
||||||
// RemoteAddr contains the network address that sent the request
|
|
||||||
RemoteAddr string `protobuf:"bytes,3,opt,name=remoteAddr,proto3" json:"remoteAddr,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *HTTPRequest) Reset() { *m = HTTPRequest{} }
|
|
||||||
func (m *HTTPRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*HTTPRequest) ProtoMessage() {}
|
|
||||||
func (*HTTPRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_581082325ef044c1, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *HTTPRequest) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_HTTPRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *HTTPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_HTTPRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *HTTPRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_HTTPRequest.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *HTTPRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_HTTPRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *HTTPRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_HTTPRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_HTTPRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *HTTPRequest) GetMethod() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Method
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *HTTPRequest) GetUri() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Uri
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *HTTPRequest) GetRemoteAddr() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.RemoteAddr
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hook's data
|
|
||||||
type Hook struct {
|
|
||||||
// Upload contains information about the upload that caused this hook
|
|
||||||
// to be fired.
|
|
||||||
Upload *Upload `protobuf:"bytes,1,opt,name=upload,proto3" json:"upload,omitempty"`
|
|
||||||
// HTTPRequest contains details about the HTTP request that reached
|
|
||||||
// tusd.
|
|
||||||
HttpRequest *HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"`
|
|
||||||
// The hook name
|
|
||||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Hook) Reset() { *m = Hook{} }
|
|
||||||
func (m *Hook) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Hook) ProtoMessage() {}
|
|
||||||
func (*Hook) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_581082325ef044c1, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Hook) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Hook.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Hook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Hook.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Hook) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Hook.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Hook) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Hook.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Hook) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Hook.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Hook proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Hook) GetUpload() *Upload {
|
|
||||||
if m != nil {
|
|
||||||
return m.Upload
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Hook) GetHttpRequest() *HTTPRequest {
|
|
||||||
if m != nil {
|
|
||||||
return m.HttpRequest
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Hook) GetName() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request data to send hook
|
|
||||||
type SendRequest struct {
|
|
||||||
// The hook data
|
|
||||||
Hook *Hook `protobuf:"bytes,1,opt,name=hook,proto3" json:"hook,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SendRequest) Reset() { *m = SendRequest{} }
|
|
||||||
func (m *SendRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*SendRequest) ProtoMessage() {}
|
|
||||||
func (*SendRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_581082325ef044c1, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SendRequest) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_SendRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *SendRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_SendRequest.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *SendRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_SendRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *SendRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_SendRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_SendRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *SendRequest) GetHook() *Hook {
|
|
||||||
if m != nil {
|
|
||||||
return m.Hook
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response that contains data for sended hook
|
|
||||||
type SendResponse struct {
|
|
||||||
// The response of the hook.
|
|
||||||
Response *any.Any `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SendResponse) Reset() { *m = SendResponse{} }
|
|
||||||
func (m *SendResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*SendResponse) ProtoMessage() {}
|
|
||||||
func (*SendResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_581082325ef044c1, []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SendResponse) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_SendResponse.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *SendResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_SendResponse.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *SendResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_SendResponse.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *SendResponse) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_SendResponse.Size(m)
|
|
||||||
}
|
|
||||||
func (m *SendResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_SendResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_SendResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *SendResponse) GetResponse() *any.Any {
|
|
||||||
if m != nil {
|
|
||||||
return m.Response
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*Upload)(nil), "v1.Upload")
|
|
||||||
proto.RegisterMapType((map[string]string)(nil), "v1.Upload.MetaDataEntry")
|
|
||||||
proto.RegisterMapType((map[string]string)(nil), "v1.Upload.StorageEntry")
|
|
||||||
proto.RegisterType((*HTTPRequest)(nil), "v1.HTTPRequest")
|
|
||||||
proto.RegisterType((*Hook)(nil), "v1.Hook")
|
|
||||||
proto.RegisterType((*SendRequest)(nil), "v1.SendRequest")
|
|
||||||
proto.RegisterType((*SendResponse)(nil), "v1.SendResponse")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("v1/hook.proto", fileDescriptor_581082325ef044c1)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_581082325ef044c1 = []byte{
|
|
||||||
// 477 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4d, 0x6f, 0xd3, 0x40,
|
|
||||||
0x10, 0x25, 0xb1, 0xeb, 0xd8, 0xe3, 0xb6, 0x54, 0xab, 0x0a, 0x96, 0xa8, 0x42, 0x96, 0x0f, 0xc8,
|
|
||||||
0x52, 0x25, 0x07, 0x07, 0x0e, 0x28, 0x5c, 0xa8, 0x54, 0x50, 0x39, 0x20, 0xaa, 0x4d, 0x11, 0xe7,
|
|
||||||
0x2d, 0xde, 0x24, 0x56, 0x1c, 0xaf, 0xbb, 0x5e, 0x5b, 0x0a, 0x3f, 0x8a, 0xdf, 0x88, 0xf6, 0xc3,
|
|
||||||
0x8d, 0xe9, 0x8d, 0x93, 0x67, 0xde, 0xbc, 0x79, 0xf3, 0x3c, 0x3b, 0x70, 0xd2, 0x65, 0xb3, 0x0d,
|
|
||||||
0xe7, 0xdb, 0xb4, 0x16, 0x5c, 0x72, 0x34, 0xee, 0xb2, 0xe9, 0xab, 0x35, 0xe7, 0xeb, 0x92, 0xcd,
|
|
||||||
0x34, 0x72, 0xdf, 0xae, 0x66, 0xb4, 0xda, 0x9b, 0x72, 0xfc, 0xc7, 0x01, 0xef, 0x47, 0x5d, 0x72,
|
|
||||||
0x9a, 0xa3, 0x53, 0x18, 0x17, 0x39, 0x1e, 0x45, 0xa3, 0x24, 0x20, 0xe3, 0x22, 0x47, 0x08, 0xdc,
|
|
||||||
0x65, 0xf1, 0x9b, 0xe1, 0x71, 0x34, 0x4a, 0x1c, 0xa2, 0x63, 0xf4, 0x06, 0x4e, 0xd5, 0xf7, 0x6b,
|
|
||||||
0x73, 0xcd, 0x56, 0x4c, 0x08, 0x96, 0x63, 0x27, 0x1a, 0x25, 0x3e, 0x79, 0x82, 0xa2, 0x17, 0xe0,
|
|
||||||
0x7d, 0x5f, 0xad, 0x1a, 0x26, 0xb1, 0xab, 0xbb, 0x6d, 0x86, 0xde, 0x83, 0xbf, 0x63, 0x92, 0x5e,
|
|
||||||
0x53, 0x49, 0xf1, 0x51, 0xe4, 0x24, 0xe1, 0x1c, 0xa7, 0x5d, 0x96, 0x1a, 0x07, 0xe9, 0x37, 0x5b,
|
|
||||||
0xfa, 0x5c, 0x49, 0xb1, 0x27, 0x8f, 0x4c, 0x74, 0x01, 0x41, 0xd1, 0xdc, 0x52, 0x21, 0x0b, 0x5a,
|
|
||||||
0x62, 0x4f, 0x0f, 0x3c, 0x00, 0x08, 0xc3, 0xa4, 0x68, 0xbe, 0x14, 0x15, 0x2d, 0xf1, 0x44, 0xd7,
|
|
||||||
0xfa, 0x54, 0xb9, 0xad, 0x0d, 0xc9, 0x0c, 0x68, 0xb0, 0x1f, 0x39, 0x49, 0x40, 0x9e, 0xa0, 0x28,
|
|
||||||
0x83, 0x49, 0x23, 0xb9, 0xa0, 0x6b, 0x86, 0x03, 0x6d, 0xea, 0xe5, 0xc0, 0xd4, 0xd2, 0x54, 0x8c,
|
|
||||||
0xa7, 0x9e, 0x37, 0xfd, 0x08, 0x27, 0xff, 0xb8, 0x45, 0x67, 0xe0, 0x6c, 0xd9, 0xde, 0xae, 0x4f,
|
|
||||||
0x85, 0xe8, 0x1c, 0x8e, 0x3a, 0x5a, 0xb6, 0x66, 0x81, 0x01, 0x31, 0xc9, 0x62, 0xfc, 0x61, 0x34,
|
|
||||||
0x5d, 0xc0, 0xf1, 0x50, 0xf5, 0x7f, 0x7a, 0xe3, 0x9f, 0x10, 0xde, 0xdc, 0xdd, 0xdd, 0x12, 0xf6,
|
|
||||||
0xd0, 0xb2, 0x46, 0xaa, 0x45, 0xef, 0x98, 0xdc, 0xf0, 0xfe, 0xe1, 0x6c, 0xa6, 0x24, 0x5b, 0x51,
|
|
||||||
0xd8, 0x76, 0x15, 0xa2, 0xd7, 0x00, 0x82, 0xed, 0xb8, 0x64, 0x57, 0x79, 0x2e, 0xf4, 0xb3, 0x05,
|
|
||||||
0x64, 0x80, 0xc4, 0x0f, 0xe0, 0xde, 0x70, 0xbe, 0x45, 0x31, 0x78, 0xad, 0xfe, 0x73, 0xad, 0x18,
|
|
||||||
0xce, 0xe1, 0xb0, 0x0b, 0x62, 0x2b, 0x28, 0x83, 0x70, 0x23, 0x65, 0x6d, 0x4d, 0xe8, 0x29, 0xe1,
|
|
||||||
0xfc, 0xb9, 0x22, 0x0e, 0xbc, 0x91, 0x21, 0x47, 0x5d, 0x53, 0x45, 0x77, 0xcc, 0x0e, 0xd6, 0x71,
|
|
||||||
0x7c, 0x09, 0xe1, 0x92, 0x55, 0x79, 0x4f, 0xb9, 0x00, 0x57, 0x1d, 0xae, 0x9d, 0xeb, 0x6b, 0x39,
|
|
||||||
0xce, 0xb7, 0x44, 0xa3, 0xf1, 0x27, 0x38, 0x36, 0xe4, 0xa6, 0xe6, 0x55, 0xc3, 0xd0, 0x5b, 0xf0,
|
|
||||||
0x85, 0x8d, 0x6d, 0xc7, 0x79, 0x6a, 0xee, 0x3c, 0xed, 0xef, 0x3c, 0xbd, 0xaa, 0xf6, 0xe4, 0x91,
|
|
||||||
0x35, 0x5f, 0x40, 0xa8, 0xf4, 0x96, 0x4c, 0x74, 0xc5, 0x2f, 0x86, 0x2e, 0xc1, 0x55, 0x82, 0x48,
|
|
||||||
0xfb, 0x1e, 0xf8, 0x98, 0x9e, 0x1d, 0x00, 0xd3, 0x19, 0x3f, 0xbb, 0xf7, 0xb4, 0xe6, 0xbb, 0xbf,
|
|
||||||
0x01, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xd4, 0x14, 0x0d, 0x5e, 0x03, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ context.Context
|
|
||||||
var _ grpc.ClientConnInterface
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the grpc package it is being compiled against.
|
|
||||||
const _ = grpc.SupportPackageIsVersion6
|
|
||||||
|
|
||||||
// HookServiceClient is the client API for HookService service.
|
|
||||||
//
|
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
|
||||||
type HookServiceClient interface {
|
|
||||||
// Sends a hook
|
|
||||||
Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type hookServiceClient struct {
|
|
||||||
cc grpc.ClientConnInterface
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewHookServiceClient(cc grpc.ClientConnInterface) HookServiceClient {
|
|
||||||
return &hookServiceClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *hookServiceClient) Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) {
|
|
||||||
out := new(SendResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/v1.HookService/Send", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HookServiceServer is the server API for HookService service.
|
|
||||||
type HookServiceServer interface {
|
|
||||||
// Sends a hook
|
|
||||||
Send(context.Context, *SendRequest) (*SendResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnimplementedHookServiceServer can be embedded to have forward compatible implementations.
|
|
||||||
type UnimplementedHookServiceServer struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UnimplementedHookServiceServer) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterHookServiceServer(s *grpc.Server, srv HookServiceServer) {
|
|
||||||
s.RegisterService(&_HookService_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _HookService_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(SendRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(HookServiceServer).Send(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/v1.HookService/Send",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(HookServiceServer).Send(ctx, req.(*SendRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _HookService_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "v1.HookService",
|
|
||||||
HandlerType: (*HookServiceServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{
|
|
||||||
{
|
|
||||||
MethodName: "Send",
|
|
||||||
Handler: _HookService_Send_Handler,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Streams: []grpc.StreamDesc{},
|
|
||||||
Metadata: "v1/hook.proto",
|
|
||||||
}
|
|
|
@ -0,0 +1,859 @@
|
||||||
|
// If this file gets changed, you must recompile the generate package in pkg/proto.
|
||||||
|
// To do this, install the Go protobuf toolchain as mentioned in
|
||||||
|
// https://grpc.io/docs/languages/go/quickstart/#prerequisites.
|
||||||
|
// Then use following command from the repository's root to recompile it with gRPC support:
|
||||||
|
// protoc --go-grpc_out=./pkg/ ./cmd/tusd/cli/hooks/proto/v2/hook.protoo
|
||||||
|
// In addition, it may be necessary to update the protobuf or gRPC dependencies as well.
|
||||||
|
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.28.1
|
||||||
|
// protoc v3.21.12
|
||||||
|
// source: cmd/tusd/cli/hooks/proto/v2/hook.proto
|
||||||
|
|
||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
|
)
|
||||||
|
|
||||||
|
// HookRequest contains the information about the hook type, the involved upload,
|
||||||
|
// and causing HTTP request.
|
||||||
|
type HookRequest struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Type is the name of the hook.
|
||||||
|
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||||
|
// Event contains the involved upload and causing HTTP request.
|
||||||
|
Event *Event `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HookRequest) Reset() {
|
||||||
|
*x = HookRequest{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HookRequest) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*HookRequest) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *HookRequest) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use HookRequest.ProtoReflect.Descriptor instead.
|
||||||
|
func (*HookRequest) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HookRequest) GetType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Type
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HookRequest) GetEvent() *Event {
|
||||||
|
if x != nil {
|
||||||
|
return x.Event
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Event represents an event from tusd which can be handled by the application.
|
||||||
|
type Event struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Upload contains information about the upload that caused this hook
|
||||||
|
// to be fired.
|
||||||
|
Upload *FileInfo `protobuf:"bytes,1,opt,name=upload,proto3" json:"upload,omitempty"`
|
||||||
|
// HTTPRequest contains details about the HTTP request that reached
|
||||||
|
// tusd.
|
||||||
|
HttpRequest *HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *Event) Reset() {
|
||||||
|
*x = Event{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[1]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *Event) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Event) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *Event) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[1]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use Event.ProtoReflect.Descriptor instead.
|
||||||
|
func (*Event) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *Event) GetUpload() *FileInfo {
|
||||||
|
if x != nil {
|
||||||
|
return x.Upload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *Event) GetHttpRequest() *HTTPRequest {
|
||||||
|
if x != nil {
|
||||||
|
return x.HttpRequest
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo contains information about a single upload resource.
|
||||||
|
type FileInfo struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// ID is the unique identifier of the upload resource.
|
||||||
|
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
// Total file size in bytes specified in the NewUpload call
|
||||||
|
Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
|
||||||
|
// Indicates whether the total file size is deferred until later
|
||||||
|
SizeIsDeferred bool `protobuf:"varint,3,opt,name=sizeIsDeferred,proto3" json:"sizeIsDeferred,omitempty"`
|
||||||
|
// Offset in bytes (zero-based)
|
||||||
|
Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
|
||||||
|
MetaData map[string]string `protobuf:"bytes,5,rep,name=metaData,proto3" json:"metaData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
// Indicates that this is a partial upload which will later be used to form
|
||||||
|
// a final upload by concatenation. Partial uploads should not be processed
|
||||||
|
// when they are finished since they are only incomplete chunks of files.
|
||||||
|
IsPartial bool `protobuf:"varint,6,opt,name=isPartial,proto3" json:"isPartial,omitempty"`
|
||||||
|
// Indicates that this is a final upload
|
||||||
|
IsFinal bool `protobuf:"varint,7,opt,name=isFinal,proto3" json:"isFinal,omitempty"`
|
||||||
|
// If the upload is a final one (see IsFinal) this will be a non-empty
|
||||||
|
// ordered slice containing the ids of the uploads of which the final upload
|
||||||
|
// will consist after concatenation.
|
||||||
|
PartialUploads []string `protobuf:"bytes,8,rep,name=partialUploads,proto3" json:"partialUploads,omitempty"`
|
||||||
|
// Storage contains information about where the data storage saves the upload,
|
||||||
|
// for example a file path. The available values vary depending on what data
|
||||||
|
// store is used. This map may also be nil.
|
||||||
|
Storage map[string]string `protobuf:"bytes,9,rep,name=storage,proto3" json:"storage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) Reset() {
|
||||||
|
*x = FileInfo{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[2]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*FileInfo) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *FileInfo) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[2]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use FileInfo.ProtoReflect.Descriptor instead.
|
||||||
|
func (*FileInfo) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Id
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetSize() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Size
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetSizeIsDeferred() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.SizeIsDeferred
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetOffset() int64 {
|
||||||
|
if x != nil {
|
||||||
|
return x.Offset
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetMetaData() map[string]string {
|
||||||
|
if x != nil {
|
||||||
|
return x.MetaData
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetIsPartial() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.IsPartial
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetIsFinal() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.IsFinal
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetPartialUploads() []string {
|
||||||
|
if x != nil {
|
||||||
|
return x.PartialUploads
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfo) GetStorage() map[string]string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Storage
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfoChanges collects changes the should be made to a FileInfo object. This
|
||||||
|
// can be done using the PreUploadCreateCallback to modify certain properties before
|
||||||
|
// an upload is created. Properties which should not be modified (e.g. Size or Offset)
|
||||||
|
// are intentionally left out here.
|
||||||
|
type FileInfoChanges struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// If ID is not empty, it will be passed to the data store, allowing
|
||||||
|
// hooks to influence the upload ID. Be aware that a data store is not required to
|
||||||
|
// respect a pre-defined upload ID and might overwrite or modify it. However,
|
||||||
|
// all data stores in the github.com/tus/tusd package do respect pre-defined IDs.
|
||||||
|
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
// If MetaData is not nil, it replaces the entire user-defined meta data from
|
||||||
|
// the upload creation request. You can add custom meta data fields this way
|
||||||
|
// or ensure that only certain fields from the user-defined meta data are saved.
|
||||||
|
// If you want to retain only specific entries from the user-defined meta data, you must
|
||||||
|
// manually copy them into this MetaData field.
|
||||||
|
// If you do not want to store any meta data, set this field to an empty map (`MetaData{}`).
|
||||||
|
// If you want to keep the entire user-defined meta data, set this field to nil.
|
||||||
|
MetaData map[string]string `protobuf:"bytes,2,rep,name=metaData,proto3" json:"metaData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
// If Storage is not nil, it is passed to the data store to allow for minor adjustments
|
||||||
|
// to the upload storage (e.g. destination file name). The details are specific for each
|
||||||
|
// data store and should be looked up in their respective documentation.
|
||||||
|
// Please be aware that this behavior is currently not supported by any data store in
|
||||||
|
// the github.com/tus/tusd package.
|
||||||
|
Storage map[string]string `protobuf:"bytes,3,rep,name=storage,proto3" json:"storage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfoChanges) Reset() {
|
||||||
|
*x = FileInfoChanges{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[3]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfoChanges) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*FileInfoChanges) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *FileInfoChanges) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[3]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use FileInfoChanges.ProtoReflect.Descriptor instead.
|
||||||
|
func (*FileInfoChanges) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{3}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfoChanges) GetId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Id
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfoChanges) GetMetaData() map[string]string {
|
||||||
|
if x != nil {
|
||||||
|
return x.MetaData
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *FileInfoChanges) GetStorage() map[string]string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Storage
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPRequest contains basic details of an incoming HTTP request.
|
||||||
|
type HTTPRequest struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
// Method is the HTTP method, e.g. POST or PATCH.
|
||||||
|
Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"`
|
||||||
|
// URI is the full HTTP request URI, e.g. /files/fooo.
|
||||||
|
Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
|
||||||
|
// RemoteAddr contains the network address that sent the request.
|
||||||
|
RemoteAddr string `protobuf:"bytes,3,opt,name=remoteAddr,proto3" json:"remoteAddr,omitempty"`
|
||||||
|
// Header contains all HTTP headers as present in the HTTP request.
|
||||||
|
Header map[string]string `protobuf:"bytes,4,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) Reset() {
|
||||||
|
*x = HTTPRequest{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[4]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*HTTPRequest) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[4]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use HTTPRequest.ProtoReflect.Descriptor instead.
|
||||||
|
func (*HTTPRequest) Descriptor() ([]byte, []int) {
|
||||||
|
return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{4}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) GetMethod() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Method
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) GetUri() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Uri
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) GetRemoteAddr() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.RemoteAddr
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *HTTPRequest) GetHeader() map[string]string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Header
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HookResponse is the response after a hook is executed.
type HookResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// HTTPResponse's fields can be filled to modify the HTTP response.
	// This is only possible for pre-create, pre-finish and post-receive hooks.
	// For other hooks this value is ignored.
	// If multiple hooks modify the HTTP response, a later hook may overwrite the
	// modified values from a previous hook (e.g. if multiple post-receive hooks
	// are executed).
	// Example usages: Send an error to the client if RejectUpload/StopUpload are
	// set in the pre-create/post-receive hook. Send more information to the client
	// in the pre-finish hook.
	HttpResponse *HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"`
	// RejectUpload will cause the upload to be rejected and not be created during
	// POST request. This value is only respected for pre-create hooks. For other hooks,
	// it is ignored. Use the HTTPResponse field to send details about the rejection
	// to the client.
	RejectUpload bool `protobuf:"varint,2,opt,name=rejectUpload,proto3" json:"rejectUpload,omitempty"`
	// ChangeFileInfo can be set to change selected properties of an upload before
	// it has been created. See the handler.FileInfoChanges type for more details.
	// Changes are applied on a per-property basis, meaning that specifying just
	// one property leaves all others unchanged.
	// This value is only respected for pre-create hooks.
	ChangeFileInfo *FileInfoChanges `protobuf:"bytes,4,opt,name=changeFileInfo,proto3" json:"changeFileInfo,omitempty"`
	// StopUpload will cause the upload to be stopped during a PATCH request.
	// This value is only respected for post-receive hooks. For other hooks,
	// it is ignored. Use the HTTPResponse field to send details about the stop
	// to the client.
	StopUpload bool `protobuf:"varint,3,opt,name=stopUpload,proto3" json:"stopUpload,omitempty"`
}

// Reset clears the message to its zero value and, when the unsafe code path
// is enabled, re-attaches the cached message info for this type.
func (x *HookResponse) Reset() {
	*x = HookResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a textual representation of the message.
func (x *HookResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks HookResponse as a protobuf message.
func (*HookResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing the
// message info on first use when the unsafe code path is enabled.
func (x *HookResponse) ProtoReflect() protoreflect.Message {
	mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HookResponse.ProtoReflect.Descriptor instead.
func (*HookResponse) Descriptor() ([]byte, []int) {
	return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{5}
}

// GetHttpResponse returns the HttpResponse field, or nil for a nil receiver.
func (x *HookResponse) GetHttpResponse() *HTTPResponse {
	if x != nil {
		return x.HttpResponse
	}
	return nil
}

// GetRejectUpload returns the RejectUpload field, or false for a nil receiver.
func (x *HookResponse) GetRejectUpload() bool {
	if x != nil {
		return x.RejectUpload
	}
	return false
}

// GetChangeFileInfo returns the ChangeFileInfo field, or nil for a nil receiver.
func (x *HookResponse) GetChangeFileInfo() *FileInfoChanges {
	if x != nil {
		return x.ChangeFileInfo
	}
	return nil
}

// GetStopUpload returns the StopUpload field, or false for a nil receiver.
func (x *HookResponse) GetStopUpload() bool {
	if x != nil {
		return x.StopUpload
	}
	return false
}
|
||||||
|
|
||||||
|
// HTTPResponse contains basic details of an outgoing HTTP response.
type HTTPResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// StatusCode is status code, e.g. 200 or 400.
	StatusCode int64 `protobuf:"varint,1,opt,name=statusCode,proto3" json:"statusCode,omitempty"`
	// Headers contains additional HTTP headers for the response.
	Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Body is the response body.
	Body string `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
}

// Reset clears the message to its zero value and, when the unsafe code path
// is enabled, re-attaches the cached message info for this type.
func (x *HTTPResponse) Reset() {
	*x = HTTPResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a textual representation of the message.
func (x *HTTPResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks HTTPResponse as a protobuf message.
func (*HTTPResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing the
// message info on first use when the unsafe code path is enabled.
func (x *HTTPResponse) ProtoReflect() protoreflect.Message {
	mi := &file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HTTPResponse.ProtoReflect.Descriptor instead.
func (*HTTPResponse) Descriptor() ([]byte, []int) {
	return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP(), []int{6}
}

// GetStatusCode returns the StatusCode field, or 0 for a nil receiver.
func (x *HTTPResponse) GetStatusCode() int64 {
	if x != nil {
		return x.StatusCode
	}
	return 0
}

// GetHeaders returns the Headers map, or nil for a nil receiver.
func (x *HTTPResponse) GetHeaders() map[string]string {
	if x != nil {
		return x.Headers
	}
	return nil
}

// GetBody returns the Body field, or "" for a nil receiver.
func (x *HTTPResponse) GetBody() string {
	if x != nil {
		return x.Body
	}
	return ""
}
|
||||||
|
|
||||||
|
// File_cmd_tusd_cli_hooks_proto_v2_hook_proto is the compiled descriptor for
// cmd/tusd/cli/hooks/proto/v2/hook.proto, populated by the init below.
var File_cmd_tusd_cli_hooks_proto_v2_hook_proto protoreflect.FileDescriptor

// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDesc holds the serialized
// file descriptor (wire-format FileDescriptorProto) for hook.proto.
var file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDesc = []byte{
	0x0a, 0x26, 0x63, 0x6d, 0x64, 0x2f, 0x74, 0x75, 0x73, 0x64, 0x2f, 0x63, 0x6c, 0x69, 0x2f, 0x68,
	0x6f, 0x6f, 0x6b, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x6f,
	0x6f, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x32, 0x22, 0x42, 0x0a, 0x0b,
	0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74,
	0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
	0x1f, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09,
	0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74,
	0x22, 0x60, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x06, 0x75, 0x70, 0x6c,
	0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x32, 0x2e, 0x46,
	0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12,
	0x31, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02,
	0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x22, 0xb4, 0x03, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
	0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12,
	0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73,
	0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x69, 0x7a, 0x65, 0x49, 0x73, 0x44, 0x65, 0x66,
	0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x69, 0x7a,
	0x65, 0x49, 0x73, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f,
	0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66,
	0x73, 0x65, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x18,
	0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49,
	0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
	0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x69,
	0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
	0x69, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x73, 0x46,
	0x69, 0x6e, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x46, 0x69,
	0x6e, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70,
	0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x61, 0x72,
	0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x73,
	0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76,
	0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61,
	0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
	0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
	0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
	0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a,
	0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
	0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x02, 0x0a, 0x0f, 0x46, 0x69,
	0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x0e, 0x0a,
	0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3d, 0x0a,
	0x08, 0x6d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
	0x21, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x43, 0x68, 0x61,
	0x6e, 0x67, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
	0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07,
	0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e,
	0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x43, 0x68, 0x61, 0x6e, 0x67,
	0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
	0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
	0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
	0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
	0x01, 0x22, 0xc7, 0x01, 0x0a, 0x0b, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
	0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
	0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69,
	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x1e, 0x0a, 0x0a, 0x72,
	0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x33, 0x0a, 0x06, 0x68,
	0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x32,
	0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x65, 0x61,
	0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
	0x1a, 0x39, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x0c,
	0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x0c,
	0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70,
	0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x70, 0x6c, 0x6f,
	0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74,
	0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65,
	0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
	0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x43, 0x68, 0x61, 0x6e,
	0x67, 0x65, 0x73, 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49,
	0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x55, 0x70, 0x6c, 0x6f, 0x61,
	0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x55, 0x70, 0x6c,
	0x6f, 0x61, 0x64, 0x22, 0xb7, 0x01, 0x0a, 0x0c, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70,
	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f,
	0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
	0x43, 0x6f, 0x64, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52,
	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45,
	0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a,
	0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x62, 0x6f, 0x64,
	0x79, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
	0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
	0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0x40, 0x0a,
	0x0b, 0x48, 0x6f, 0x6f, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x0a,
	0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x12, 0x0f, 0x2e, 0x76, 0x32, 0x2e,
	0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x32,
	0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42,
	0x0a, 0x5a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x33,
}
|
||||||
|
|
||||||
|
var (
	// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescOnce guards the
	// one-time GZIP compression performed in rawDescGZIP below.
	file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescOnce sync.Once
	// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescData initially holds
	// the raw descriptor bytes and is replaced in place by their compressed
	// form on first use.
	file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescData = file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDesc
)

// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP compresses the raw
// file descriptor exactly once and returns the compressed bytes on every call.
func file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescGZIP() []byte {
	file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescOnce.Do(func() {
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescData = protoimpl.X.CompressGZIP(file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescData)
	})
	return file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDescData
}
|
||||||
|
|
||||||
|
// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes holds one MessageInfo
// slot per message in the file (7 top-level messages plus 6 map-entry types).
var file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes = make([]protoimpl.MessageInfo, 13)

// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_goTypes maps descriptor indices
// to their Go types; map-entry messages have no dedicated Go type (nil).
var file_cmd_tusd_cli_hooks_proto_v2_hook_proto_goTypes = []interface{}{
	(*HookRequest)(nil),     // 0: v2.HookRequest
	(*Event)(nil),           // 1: v2.Event
	(*FileInfo)(nil),        // 2: v2.FileInfo
	(*FileInfoChanges)(nil), // 3: v2.FileInfoChanges
	(*HTTPRequest)(nil),     // 4: v2.HTTPRequest
	(*HookResponse)(nil),    // 5: v2.HookResponse
	(*HTTPResponse)(nil),    // 6: v2.HTTPResponse
	nil,                     // 7: v2.FileInfo.MetaDataEntry
	nil,                     // 8: v2.FileInfo.StorageEntry
	nil,                     // 9: v2.FileInfoChanges.MetaDataEntry
	nil,                     // 10: v2.FileInfoChanges.StorageEntry
	nil,                     // 11: v2.HTTPRequest.HeaderEntry
	nil,                     // 12: v2.HTTPResponse.HeadersEntry
}

// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_depIdxs records, as indices into
// goTypes, the type dependencies of each field and service method.
var file_cmd_tusd_cli_hooks_proto_v2_hook_proto_depIdxs = []int32{
	1,  // 0: v2.HookRequest.event:type_name -> v2.Event
	2,  // 1: v2.Event.upload:type_name -> v2.FileInfo
	4,  // 2: v2.Event.httpRequest:type_name -> v2.HTTPRequest
	7,  // 3: v2.FileInfo.metaData:type_name -> v2.FileInfo.MetaDataEntry
	8,  // 4: v2.FileInfo.storage:type_name -> v2.FileInfo.StorageEntry
	9,  // 5: v2.FileInfoChanges.metaData:type_name -> v2.FileInfoChanges.MetaDataEntry
	10, // 6: v2.FileInfoChanges.storage:type_name -> v2.FileInfoChanges.StorageEntry
	11, // 7: v2.HTTPRequest.header:type_name -> v2.HTTPRequest.HeaderEntry
	6,  // 8: v2.HookResponse.httpResponse:type_name -> v2.HTTPResponse
	3,  // 9: v2.HookResponse.changeFileInfo:type_name -> v2.FileInfoChanges
	12, // 10: v2.HTTPResponse.headers:type_name -> v2.HTTPResponse.HeadersEntry
	0,  // 11: v2.HookHandler.InvokeHook:input_type -> v2.HookRequest
	5,  // 12: v2.HookHandler.InvokeHook:output_type -> v2.HookResponse
	12, // [12:13] is the sub-list for method output_type
	11, // [11:12] is the sub-list for method input_type
	11, // [11:11] is the sub-list for extension type_name
	11, // [11:11] is the sub-list for extension extendee
	0,  // [0:11] is the sub-list for field type_name
}
|
||||||
|
|
||||||
|
func init() { file_cmd_tusd_cli_hooks_proto_v2_hook_proto_init() }

// file_cmd_tusd_cli_hooks_proto_v2_hook_proto_init builds and registers this
// file's type information. It is idempotent: once the file descriptor has
// been built, subsequent calls return immediately.
func file_cmd_tusd_cli_hooks_proto_v2_hook_proto_init() {
	if File_cmd_tusd_cli_hooks_proto_v2_hook_proto != nil {
		return
	}
	// Without the unsafe implementation, install exporter functions that give
	// the reflection machinery access to each message's unexported
	// bookkeeping fields (state, sizeCache, unknownFields).
	if !protoimpl.UnsafeEnabled {
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*HookRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Event); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*FileInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*FileInfoChanges); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*HTTPRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*HookResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*HTTPResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   13,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_cmd_tusd_cli_hooks_proto_v2_hook_proto_goTypes,
		DependencyIndexes: file_cmd_tusd_cli_hooks_proto_v2_hook_proto_depIdxs,
		MessageInfos:      file_cmd_tusd_cli_hooks_proto_v2_hook_proto_msgTypes,
	}.Build()
	File_cmd_tusd_cli_hooks_proto_v2_hook_proto = out.File
	// Release the build-time inputs now that the descriptor is constructed.
	file_cmd_tusd_cli_hooks_proto_v2_hook_proto_rawDesc = nil
	file_cmd_tusd_cli_hooks_proto_v2_hook_proto_goTypes = nil
	file_cmd_tusd_cli_hooks_proto_v2_hook_proto_depIdxs = nil
}
|
|
@ -0,0 +1,115 @@
|
||||||
|
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// - protoc-gen-go-grpc v1.2.0
|
||||||
|
// - protoc v3.21.12
|
||||||
|
// source: cmd/tusd/cli/hooks/proto/v2/hook.proto
|
||||||
|
|
||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
grpc "google.golang.org/grpc"
|
||||||
|
codes "google.golang.org/grpc/codes"
|
||||||
|
status "google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// HookHandlerClient is the client API for HookHandler service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HookHandlerClient interface {
	// InvokeHook is invoked for every hook that is executed. HookRequest contains the
	// corresponding information about the hook type, the involved upload, and
	// causing HTTP request.
	// The return value HookResponse allows to stop or reject an upload, as well as modifying
	// the HTTP response. See the documentation for HookResponse for more details.
	InvokeHook(ctx context.Context, in *HookRequest, opts ...grpc.CallOption) (*HookResponse, error)
}

// hookHandlerClient is the concrete HookHandlerClient backed by a gRPC
// client connection.
type hookHandlerClient struct {
	cc grpc.ClientConnInterface
}

// NewHookHandlerClient returns a HookHandlerClient that issues its RPCs
// over the given connection.
func NewHookHandlerClient(cc grpc.ClientConnInterface) HookHandlerClient {
	return &hookHandlerClient{cc}
}

// InvokeHook performs the unary /v2.HookHandler/InvokeHook RPC.
func (c *hookHandlerClient) InvokeHook(ctx context.Context, in *HookRequest, opts ...grpc.CallOption) (*HookResponse, error) {
	out := new(HookResponse)
	err := c.cc.Invoke(ctx, "/v2.HookHandler/InvokeHook", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
|
||||||
|
|
||||||
|
// HookHandlerServer is the server API for HookHandler service.
// All implementations must embed UnimplementedHookHandlerServer
// for forward compatibility
type HookHandlerServer interface {
	// InvokeHook is invoked for every hook that is executed. HookRequest contains the
	// corresponding information about the hook type, the involved upload, and
	// causing HTTP request.
	// The return value HookResponse allows to stop or reject an upload, as well as modifying
	// the HTTP response. See the documentation for HookResponse for more details.
	InvokeHook(context.Context, *HookRequest) (*HookResponse, error)
	mustEmbedUnimplementedHookHandlerServer()
}

// UnimplementedHookHandlerServer must be embedded to have forward compatible implementations.
type UnimplementedHookHandlerServer struct {
}

// InvokeHook returns an Unimplemented gRPC status error; embedders are
// expected to override it with a real implementation.
func (UnimplementedHookHandlerServer) InvokeHook(context.Context, *HookRequest) (*HookResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method InvokeHook not implemented")
}
func (UnimplementedHookHandlerServer) mustEmbedUnimplementedHookHandlerServer() {}

// UnsafeHookHandlerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HookHandlerServer will
// result in compilation errors.
type UnsafeHookHandlerServer interface {
	mustEmbedUnimplementedHookHandlerServer()
}

// RegisterHookHandlerServer registers srv's HookHandler implementation with
// the given gRPC service registrar (typically a *grpc.Server).
func RegisterHookHandlerServer(s grpc.ServiceRegistrar, srv HookHandlerServer) {
	s.RegisterService(&HookHandler_ServiceDesc, srv)
}
|
||||||
|
|
||||||
|
// _HookHandler_InvokeHook_Handler decodes an incoming InvokeHook request and
// dispatches it to srv, routing the call through interceptor when one is
// installed on the server.
func _HookHandler_InvokeHook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(HookRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(HookHandlerServer).InvokeHook(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/v2.HookHandler/InvokeHook",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(HookHandlerServer).InvokeHook(ctx, req.(*HookRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// HookHandler_ServiceDesc is the grpc.ServiceDesc for HookHandler service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var HookHandler_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "v2.HookHandler",
	HandlerType: (*HookHandlerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			// InvokeHook is the service's only (unary) method.
			MethodName: "InvokeHook",
			Handler:    _HookHandler_InvokeHook_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "cmd/tusd/cli/hooks/proto/v2/hook.proto",
}
|
|
@ -0,0 +1,180 @@
|
||||||
|
package s3store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
"github.com/minio/minio-go/v7"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MinioS3API adapts a Minio core client to the S3API interface used by
// s3store, translating aws-sdk-go inputs and outputs into minio-go calls.
type MinioS3API struct {
	// client is the wrapped low-level Minio client.
	client *minio.Core
}

// NewMinioS3API wraps the given Minio core client in a MinioS3API.
func NewMinioS3API(client *minio.Core) S3API {
	return MinioS3API{
		client: client,
	}
}
|
||||||
|
|
||||||
|
func (s MinioS3API) PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error) {
|
||||||
|
var objectSize int64
|
||||||
|
if input.ContentLength != nil {
|
||||||
|
objectSize = *input.ContentLength
|
||||||
|
} else {
|
||||||
|
size, err := input.Body.Seek(0, io.SeekEnd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_, err = input.Body.Seek(0, io.SeekStart)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
objectSize = size
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Should we use the more low-level Core.PutObject here?
|
||||||
|
_, err := s.client.Client.PutObject(ctx, *input.Bucket, *input.Key, input.Body, objectSize, minio.PutObjectOptions{
|
||||||
|
DisableMultipart: true,
|
||||||
|
SendContentMd5: false, // TODO: Make configurable
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s3.PutObjectOutput{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opt ...request.Option) (*s3.ListPartsOutput, error) {
|
||||||
|
partNumberMarker := 0
|
||||||
|
if input.PartNumberMarker != nil {
|
||||||
|
partNumberMarker = int(*input.PartNumberMarker)
|
||||||
|
}
|
||||||
|
res, err := s.client.ListObjectParts(ctx, *input.Bucket, *input.Key, *input.UploadId, partNumberMarker, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
print(res.ObjectParts)
|
||||||
|
|
||||||
|
parts := make([]*s3.Part, len(res.ObjectParts))
|
||||||
|
for i, p := range res.ObjectParts {
|
||||||
|
partNumber := int64(p.PartNumber)
|
||||||
|
parts[i] = &s3.Part{
|
||||||
|
ETag: &p.ETag,
|
||||||
|
PartNumber: &partNumber,
|
||||||
|
Size: &p.Size,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nextPartNumberMarker := int64(res.NextPartNumberMarker)
|
||||||
|
return &s3.ListPartsOutput{
|
||||||
|
IsTruncated: &res.IsTruncated,
|
||||||
|
NextPartNumberMarker: &nextPartNumberMarker,
|
||||||
|
Parts: parts,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opt ...request.Option) (*s3.UploadPartOutput, error) {
|
||||||
|
var objectSize int64
|
||||||
|
if input.ContentLength != nil {
|
||||||
|
objectSize = *input.ContentLength
|
||||||
|
} else {
|
||||||
|
return nil, errors.New("missing ContentLength")
|
||||||
|
}
|
||||||
|
partNumber := int(*input.PartNumber)
|
||||||
|
|
||||||
|
part, err := s.client.PutObjectPart(ctx, *input.Bucket, *input.Key, *input.UploadId, partNumber, input.Body, objectSize, "", "", nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s3.UploadPartOutput{
|
||||||
|
ETag: &part.ETag,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error) {
|
||||||
|
body, info, _, err := s.client.GetObject(ctx, *input.Bucket, *input.Key, minio.GetObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s3.GetObjectOutput{
|
||||||
|
Body: body,
|
||||||
|
ContentLength: &info.Size,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) HeadObjectWithContext(ctx context.Context, input *s3.HeadObjectInput, opt ...request.Option) (*s3.HeadObjectOutput, error) {
|
||||||
|
info, err := s.client.StatObject(ctx, *input.Bucket, *input.Key, minio.StatObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
print(info.Size)
|
||||||
|
|
||||||
|
return &s3.HeadObjectOutput{
|
||||||
|
ContentLength: &info.Size,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
|
||||||
|
metadata := make(map[string]string, len(input.Metadata))
|
||||||
|
for key, value := range input.Metadata {
|
||||||
|
metadata[key] = *value
|
||||||
|
}
|
||||||
|
|
||||||
|
uploadId, err := s.client.NewMultipartUpload(ctx, *input.Bucket, *input.Key, minio.PutObjectOptions{
|
||||||
|
UserMetadata: metadata,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s3.CreateMultipartUploadOutput{
|
||||||
|
UploadId: &uploadId,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
|
||||||
|
return nil, fmt.Errorf("AbortMultipartUploadWithContext not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectOutput, error) {
|
||||||
|
err := s.client.RemoveObject(ctx, *input.Bucket, *input.Key, minio.RemoveObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s3.DeleteObjectOutput{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) DeleteObjectsWithContext(ctx context.Context, input *s3.DeleteObjectsInput, opt ...request.Option) (*s3.DeleteObjectsOutput, error) {
|
||||||
|
return nil, fmt.Errorf("DeleteObjectsWithContext not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) CompleteMultipartUploadWithContext(ctx context.Context, input *s3.CompleteMultipartUploadInput, opt ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
|
||||||
|
parts := make([]minio.CompletePart, len(input.MultipartUpload.Parts))
|
||||||
|
for i, p := range input.MultipartUpload.Parts {
|
||||||
|
parts[i] = minio.CompletePart{
|
||||||
|
PartNumber: int(*p.PartNumber),
|
||||||
|
ETag: *p.ETag,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := s.client.CompleteMultipartUpload(ctx, *input.Bucket, *input.Key, *input.UploadId, parts, minio.PutObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s3.CompleteMultipartUploadOutput{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MinioS3API) UploadPartCopyWithContext(ctx context.Context, input *s3.UploadPartCopyInput, opt ...request.Option) (*s3.UploadPartCopyOutput, error) {
|
||||||
|
return nil, fmt.Errorf("UploadPartCopyWithContext not implemented")
|
||||||
|
}
|
|
@ -1,22 +1,23 @@
|
||||||
// Package s3store provides a storage backend using AWS S3 or compatible servers.
|
// Package s3store provides a storage backend using AWS S3 or compatible servers.
|
||||||
//
|
//
|
||||||
// Configuration
|
// # Configuration
|
||||||
//
|
//
|
||||||
// In order to allow this backend to function properly, the user accessing the
|
// In order to allow this backend to function properly, the user accessing the
|
||||||
// bucket must have at least following AWS IAM policy permissions for the
|
// bucket must have at least following AWS IAM policy permissions for the
|
||||||
// bucket and all of its subresources:
|
// bucket and all of its subresources:
|
||||||
// s3:AbortMultipartUpload
|
//
|
||||||
// s3:DeleteObject
|
// s3:AbortMultipartUpload
|
||||||
// s3:GetObject
|
// s3:DeleteObject
|
||||||
// s3:ListMultipartUploadParts
|
// s3:GetObject
|
||||||
// s3:PutObject
|
// s3:ListMultipartUploadParts
|
||||||
|
// s3:PutObject
|
||||||
//
|
//
|
||||||
// While this package uses the official AWS SDK for Go, S3Store is able
|
// While this package uses the official AWS SDK for Go, S3Store is able
|
||||||
// to work with any S3-compatible service such as Riak CS. In order to change
|
// to work with any S3-compatible service such as Riak CS. In order to change
|
||||||
// the HTTP endpoint used for sending requests to, consult the AWS Go SDK
|
// the HTTP endpoint used for sending requests to, consult the AWS Go SDK
|
||||||
// (http://docs.aws.amazon.com/sdk-for-go/api/aws/Config.html#WithEndpoint-instance_method).
|
// (http://docs.aws.amazon.com/sdk-for-go/api/aws/Config.html#WithEndpoint-instance_method).
|
||||||
//
|
//
|
||||||
// Implementation
|
// # Implementation
|
||||||
//
|
//
|
||||||
// Once a new tus upload is initiated, multiple objects in S3 are created:
|
// Once a new tus upload is initiated, multiple objects in S3 are created:
|
||||||
//
|
//
|
||||||
|
@ -49,7 +50,7 @@
|
||||||
// info object is also deleted. If the upload has been finished already, the
|
// info object is also deleted. If the upload has been finished already, the
|
||||||
// finished object containing the entire upload is also removed.
|
// finished object containing the entire upload is also removed.
|
||||||
//
|
//
|
||||||
// Considerations
|
// # Considerations
|
||||||
//
|
//
|
||||||
// In order to support tus' principle of resumable upload, S3's Multipart-Uploads
|
// In order to support tus' principle of resumable upload, S3's Multipart-Uploads
|
||||||
// are internally used.
|
// are internally used.
|
||||||
|
@ -83,8 +84,11 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/tus/tusd/internal/uid"
|
"github.com/minio/minio-go/v7"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/tus/tusd/v2/internal/semaphore"
|
||||||
|
"github.com/tus/tusd/v2/internal/uid"
|
||||||
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
@ -156,13 +160,43 @@ type S3Store struct {
|
||||||
// CPU, so it might be desirable to disable them.
|
// CPU, so it might be desirable to disable them.
|
||||||
// Note that this property is experimental and might be removed in the future!
|
// Note that this property is experimental and might be removed in the future!
|
||||||
DisableContentHashes bool
|
DisableContentHashes bool
|
||||||
|
|
||||||
|
// uploadSemaphore limits the number of concurrent multipart part uploads to S3.
|
||||||
|
uploadSemaphore semaphore.Semaphore
|
||||||
|
|
||||||
|
// requestDurationMetric holds the prometheus instance for storing the request durations.
|
||||||
|
requestDurationMetric *prometheus.SummaryVec
|
||||||
|
|
||||||
|
// diskWriteDurationMetric holds the prometheus instance for storing the time it takes to write chunks to disk.
|
||||||
|
diskWriteDurationMetric prometheus.Summary
|
||||||
|
|
||||||
|
// uploadSemaphoreDemandMetric holds the prometheus instance for storing the demand on the upload semaphore
|
||||||
|
uploadSemaphoreDemandMetric prometheus.Gauge
|
||||||
|
|
||||||
|
// uploadSemaphoreLimitMetric holds the prometheus instance for storing the limit on the upload semaphore
|
||||||
|
uploadSemaphoreLimitMetric prometheus.Gauge
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The labels to use for observing and storing request duration. One label per operation.
|
||||||
|
const (
|
||||||
|
metricGetInfoObject = "get_info_object"
|
||||||
|
metricPutInfoObject = "put_info_object"
|
||||||
|
metricCreateMultipartUpload = "create_multipart_upload"
|
||||||
|
metricCompleteMultipartUpload = "complete_multipart_upload"
|
||||||
|
metricUploadPart = "upload_part"
|
||||||
|
metricListParts = "list_parts"
|
||||||
|
metricHeadPartObject = "head_part_object"
|
||||||
|
metricGetPartObject = "get_part_object"
|
||||||
|
metricPutPartObject = "put_part_object"
|
||||||
|
metricDeletePartObject = "delete_part_object"
|
||||||
|
)
|
||||||
|
|
||||||
type S3API interface {
|
type S3API interface {
|
||||||
PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error)
|
PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error)
|
||||||
ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opt ...request.Option) (*s3.ListPartsOutput, error)
|
ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opt ...request.Option) (*s3.ListPartsOutput, error)
|
||||||
UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opt ...request.Option) (*s3.UploadPartOutput, error)
|
UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opt ...request.Option) (*s3.UploadPartOutput, error)
|
||||||
GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error)
|
GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error)
|
||||||
|
HeadObjectWithContext(ctx context.Context, input *s3.HeadObjectInput, opt ...request.Option) (*s3.HeadObjectOutput, error)
|
||||||
CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...request.Option) (*s3.CreateMultipartUploadOutput, error)
|
CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...request.Option) (*s3.CreateMultipartUploadOutput, error)
|
||||||
AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...request.Option) (*s3.AbortMultipartUploadOutput, error)
|
AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...request.Option) (*s3.AbortMultipartUploadOutput, error)
|
||||||
DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectOutput, error)
|
DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectOutput, error)
|
||||||
|
@ -177,17 +211,52 @@ type s3APIForPresigning interface {
|
||||||
|
|
||||||
// New constructs a new storage using the supplied bucket and service object.
|
// New constructs a new storage using the supplied bucket and service object.
|
||||||
func New(bucket string, service S3API) S3Store {
|
func New(bucket string, service S3API) S3Store {
|
||||||
return S3Store{
|
requestDurationMetric := prometheus.NewSummaryVec(prometheus.SummaryOpts{
|
||||||
Bucket: bucket,
|
Name: "tusd_s3_request_duration_ms",
|
||||||
Service: service,
|
Help: "Duration of requests sent to S3 in milliseconds per operation",
|
||||||
MaxPartSize: 5 * 1024 * 1024 * 1024,
|
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||||
MinPartSize: 5 * 1024 * 1024,
|
}, []string{"operation"})
|
||||||
PreferredPartSize: 50 * 1024 * 1024,
|
|
||||||
MaxMultipartParts: 10000,
|
diskWriteDurationMetric := prometheus.NewSummary(prometheus.SummaryOpts{
|
||||||
MaxObjectSize: 5 * 1024 * 1024 * 1024 * 1024,
|
Name: "tusd_s3_disk_write_duration_ms",
|
||||||
MaxBufferedParts: 20,
|
Help: "Duration of chunk writes to disk in milliseconds",
|
||||||
TemporaryDirectory: "",
|
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||||
|
})
|
||||||
|
|
||||||
|
uploadSemaphoreDemandMetric := prometheus.NewGauge(prometheus.GaugeOpts{
|
||||||
|
Name: "tusd_s3_upload_semaphore_demand",
|
||||||
|
Help: "Number of goroutines wanting to acquire the upload lock or having it acquired",
|
||||||
|
})
|
||||||
|
|
||||||
|
uploadSemaphoreLimitMetric := prometheus.NewGauge(prometheus.GaugeOpts{
|
||||||
|
Name: "tusd_s3_upload_semaphore_limit",
|
||||||
|
Help: "Limit of concurrent acquisitions of upload semaphore",
|
||||||
|
})
|
||||||
|
|
||||||
|
store := S3Store{
|
||||||
|
Bucket: bucket,
|
||||||
|
Service: service,
|
||||||
|
MaxPartSize: 5 * 1024 * 1024 * 1024,
|
||||||
|
MinPartSize: 5 * 1024 * 1024,
|
||||||
|
PreferredPartSize: 50 * 1024 * 1024,
|
||||||
|
MaxMultipartParts: 10000,
|
||||||
|
MaxObjectSize: 5 * 1024 * 1024 * 1024 * 1024,
|
||||||
|
MaxBufferedParts: 20,
|
||||||
|
TemporaryDirectory: "",
|
||||||
|
requestDurationMetric: requestDurationMetric,
|
||||||
|
diskWriteDurationMetric: diskWriteDurationMetric,
|
||||||
|
uploadSemaphoreDemandMetric: uploadSemaphoreDemandMetric,
|
||||||
|
uploadSemaphoreLimitMetric: uploadSemaphoreLimitMetric,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
store.SetConcurrentPartUploads(10)
|
||||||
|
return store
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConcurrentPartUploads changes the limit on how many concurrent part uploads to S3 are allowed.
|
||||||
|
func (store *S3Store) SetConcurrentPartUploads(limit int) {
|
||||||
|
store.uploadSemaphore = semaphore.New(limit)
|
||||||
|
store.uploadSemaphoreLimitMetric.Set(float64(limit))
|
||||||
}
|
}
|
||||||
|
|
||||||
// UseIn sets this store as the core data store in the passed composer and adds
|
// UseIn sets this store as the core data store in the passed composer and adds
|
||||||
|
@ -199,6 +268,20 @@ func (store S3Store) UseIn(composer *handler.StoreComposer) {
|
||||||
composer.UseLengthDeferrer(store)
|
composer.UseLengthDeferrer(store)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (store S3Store) RegisterMetrics(registry prometheus.Registerer) {
|
||||||
|
registry.MustRegister(store.requestDurationMetric)
|
||||||
|
registry.MustRegister(store.diskWriteDurationMetric)
|
||||||
|
registry.MustRegister(store.uploadSemaphoreDemandMetric)
|
||||||
|
registry.MustRegister(store.uploadSemaphoreLimitMetric)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store S3Store) observeRequestDuration(start time.Time, label string) {
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
|
||||||
|
|
||||||
|
store.requestDurationMetric.WithLabelValues(label).Observe(ms)
|
||||||
|
}
|
||||||
|
|
||||||
type s3Upload struct {
|
type s3Upload struct {
|
||||||
id string
|
id string
|
||||||
store *S3Store
|
store *S3Store
|
||||||
|
@ -207,6 +290,18 @@ type s3Upload struct {
|
||||||
// been fetched yet from S3. Never read or write to it directly but instead use
|
// been fetched yet from S3. Never read or write to it directly but instead use
|
||||||
// the GetInfo and writeInfo functions.
|
// the GetInfo and writeInfo functions.
|
||||||
info *handler.FileInfo
|
info *handler.FileInfo
|
||||||
|
|
||||||
|
// parts collects all parts for this upload. It will be nil if info is nil as well.
|
||||||
|
parts []*s3Part
|
||||||
|
// incompletePartSize is the size of an incomplete part object, if one exists. It will be 0 if info is nil as well.
|
||||||
|
incompletePartSize int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// s3Part represents a single part of a S3 multipart upload.
|
||||||
|
type s3Part struct {
|
||||||
|
number int64
|
||||||
|
size int64
|
||||||
|
etag string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
||||||
|
@ -233,11 +328,13 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create the actual multipart upload
|
// Create the actual multipart upload
|
||||||
|
t := time.Now()
|
||||||
res, err := store.Service.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
|
res, err := store.Service.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId),
|
Key: store.keyWithPrefix(uploadId),
|
||||||
Metadata: metadata,
|
Metadata: metadata,
|
||||||
})
|
})
|
||||||
|
store.observeRequestDuration(t, metricCreateMultipartUpload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
|
return nil, fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
|
||||||
}
|
}
|
||||||
|
@ -251,7 +348,7 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
|
||||||
"Key": *store.keyWithPrefix(uploadId),
|
"Key": *store.keyWithPrefix(uploadId),
|
||||||
}
|
}
|
||||||
|
|
||||||
upload := &s3Upload{id, &store, nil}
|
upload := &s3Upload{id, &store, nil, []*s3Part{}, 0}
|
||||||
err = upload.writeInfo(ctx, info)
|
err = upload.writeInfo(ctx, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("s3store: unable to create info file:\n%s", err)
|
return nil, fmt.Errorf("s3store: unable to create info file:\n%s", err)
|
||||||
|
@ -261,7 +358,7 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
|
func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
|
||||||
return &s3Upload{id, &store, nil}, nil
|
return &s3Upload{id, &store, nil, []*s3Part{}, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
|
func (store S3Store) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
|
||||||
|
@ -290,24 +387,72 @@ func (upload *s3Upload) writeInfo(ctx context.Context, info handler.FileInfo) er
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create object on S3 containing information about the file
|
// Create object on S3 containing information about the file
|
||||||
|
t := time.Now()
|
||||||
_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
|
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
|
||||||
Body: bytes.NewReader(infoJson),
|
Body: bytes.NewReader(infoJson),
|
||||||
ContentLength: aws.Int64(int64(len(infoJson))),
|
ContentLength: aws.Int64(int64(len(infoJson))),
|
||||||
})
|
})
|
||||||
|
store.observeRequestDuration(t, metricPutInfoObject)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
func (upload *s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
||||||
|
id := upload.id
|
||||||
|
store := upload.store
|
||||||
|
|
||||||
|
uploadId, _ := splitIds(id)
|
||||||
|
|
||||||
|
// Get the total size of the current upload, number of parts to generate next number and whether
|
||||||
|
// an incomplete part exists
|
||||||
|
_, _, incompletePartSize, err := upload.getInternalInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if incompletePartSize > 0 {
|
||||||
|
incompletePartFile, err := store.downloadIncompletePartForUpload(ctx, uploadId)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if incompletePartFile == nil {
|
||||||
|
return 0, fmt.Errorf("s3store: Expected an incomplete part file but did not get any")
|
||||||
|
}
|
||||||
|
defer cleanUpTempFile(incompletePartFile)
|
||||||
|
|
||||||
|
if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepend an incomplete part, if necessary and adapt the offset
|
||||||
|
src = io.MultiReader(incompletePartFile, src)
|
||||||
|
offset = offset - incompletePartSize
|
||||||
|
}
|
||||||
|
|
||||||
|
bytesUploaded, err := upload.uploadParts(ctx, offset, src)
|
||||||
|
|
||||||
|
// The size of the incomplete part should not be counted, because the
|
||||||
|
// process of the incomplete part should be fully transparent to the user.
|
||||||
|
bytesUploaded = bytesUploaded - incompletePartSize
|
||||||
|
if bytesUploaded < 0 {
|
||||||
|
bytesUploaded = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
upload.info.Offset += bytesUploaded
|
||||||
|
|
||||||
|
return bytesUploaded, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *s3Upload) uploadParts(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
|
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
|
||||||
// Get the total size of the current upload
|
// Get the total size of the current upload and number of parts to generate next number
|
||||||
info, err := upload.GetInfo(ctx)
|
info, parts, _, err := upload.getInternalInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -319,83 +464,87 @@ func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Read
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get number of parts to generate next number
|
|
||||||
parts, err := store.listAllParts(ctx, id)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
numParts := len(parts)
|
numParts := len(parts)
|
||||||
nextPartNum := int64(numParts + 1)
|
nextPartNum := int64(numParts + 1)
|
||||||
|
|
||||||
incompletePartFile, incompletePartSize, err := store.downloadIncompletePartForUpload(ctx, uploadId)
|
partProducer, fileChan := newS3PartProducer(src, store.MaxBufferedParts, store.TemporaryDirectory, store.diskWriteDurationMetric)
|
||||||
if err != nil {
|
defer partProducer.stop()
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if incompletePartFile != nil {
|
|
||||||
defer cleanUpTempFile(incompletePartFile)
|
|
||||||
|
|
||||||
if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
src = io.MultiReader(incompletePartFile, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
fileChan := make(chan *os.File, store.MaxBufferedParts)
|
|
||||||
doneChan := make(chan struct{})
|
|
||||||
defer close(doneChan)
|
|
||||||
|
|
||||||
// If we panic or return while there are still files in the channel, then
|
|
||||||
// we may leak file descriptors. Let's ensure that those are cleaned up.
|
|
||||||
defer func() {
|
|
||||||
for file := range fileChan {
|
|
||||||
cleanUpTempFile(file)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
partProducer := s3PartProducer{
|
|
||||||
store: store,
|
|
||||||
done: doneChan,
|
|
||||||
files: fileChan,
|
|
||||||
r: src,
|
|
||||||
}
|
|
||||||
go partProducer.produce(optimalPartSize)
|
go partProducer.produce(optimalPartSize)
|
||||||
|
|
||||||
for file := range fileChan {
|
var wg sync.WaitGroup
|
||||||
stat, err := file.Stat()
|
var uploadErr error
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n := stat.Size()
|
|
||||||
|
|
||||||
isFinalChunk := !info.SizeIsDeferred && (size == (offset-incompletePartSize)+n)
|
for {
|
||||||
if n >= store.MinPartSize || isFinalChunk {
|
// We acquire the semaphore before starting the goroutine to avoid
|
||||||
uploadPartInput := &s3.UploadPartInput{
|
// starting many goroutines, most of which are just waiting for the lock.
|
||||||
Bucket: aws.String(store.Bucket),
|
// We also acquire the semaphore before reading from the channel to reduce
|
||||||
Key: store.keyWithPrefix(uploadId),
|
// the number of part files are laying around on disk without being used.
|
||||||
UploadId: aws.String(multipartId),
|
upload.store.acquireUploadSemaphore()
|
||||||
PartNumber: aws.Int64(nextPartNum),
|
fileChunk, more := <-fileChan
|
||||||
}
|
if !more {
|
||||||
if err := upload.putPartForUpload(ctx, uploadPartInput, file, n); err != nil {
|
upload.store.releaseUploadSemaphore()
|
||||||
return bytesUploaded, err
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
partfile := fileChunk.reader
|
||||||
|
partsize := fileChunk.size
|
||||||
|
closePart := fileChunk.closeReader
|
||||||
|
|
||||||
|
isFinalChunk := !info.SizeIsDeferred && (size == offset+bytesUploaded+partsize)
|
||||||
|
if partsize >= store.MinPartSize || isFinalChunk {
|
||||||
|
part := &s3Part{
|
||||||
|
etag: "",
|
||||||
|
size: partsize,
|
||||||
|
number: nextPartNum,
|
||||||
}
|
}
|
||||||
|
upload.parts = append(upload.parts, part)
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go func(file io.ReadSeeker, part *s3Part, closePart func()) {
|
||||||
|
defer upload.store.releaseUploadSemaphore()
|
||||||
|
defer wg.Done()
|
||||||
|
defer closePart()
|
||||||
|
|
||||||
|
t := time.Now()
|
||||||
|
uploadPartInput := &s3.UploadPartInput{
|
||||||
|
Bucket: aws.String(store.Bucket),
|
||||||
|
Key: store.keyWithPrefix(uploadId),
|
||||||
|
UploadId: aws.String(multipartId),
|
||||||
|
PartNumber: aws.Int64(part.number),
|
||||||
|
}
|
||||||
|
etag, err := upload.putPartForUpload(ctx, uploadPartInput, file, part.size)
|
||||||
|
store.observeRequestDuration(t, metricUploadPart)
|
||||||
|
if err != nil {
|
||||||
|
uploadErr = err
|
||||||
|
} else {
|
||||||
|
part.etag = etag
|
||||||
|
}
|
||||||
|
}(partfile, part, closePart)
|
||||||
} else {
|
} else {
|
||||||
if err := store.putIncompletePartForUpload(ctx, uploadId, file); err != nil {
|
wg.Add(1)
|
||||||
return bytesUploaded, err
|
go func(file io.ReadSeeker, closePart func()) {
|
||||||
}
|
defer upload.store.releaseUploadSemaphore()
|
||||||
|
defer wg.Done()
|
||||||
|
defer closePart()
|
||||||
|
|
||||||
bytesUploaded += n
|
if err := store.putIncompletePartForUpload(ctx, uploadId, file); err != nil {
|
||||||
|
uploadErr = err
|
||||||
return (bytesUploaded - incompletePartSize), nil
|
}
|
||||||
|
upload.incompletePartSize = partsize
|
||||||
|
}(partfile, closePart)
|
||||||
}
|
}
|
||||||
|
|
||||||
offset += n
|
bytesUploaded += partsize
|
||||||
bytesUploaded += n
|
|
||||||
nextPartNum += 1
|
nextPartNum += 1
|
||||||
}
|
}
|
||||||
|
|
||||||
return bytesUploaded - incompletePartSize, partProducer.err
|
wg.Wait()
|
||||||
|
|
||||||
|
if uploadErr != nil {
|
||||||
|
return 0, uploadErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return bytesUploaded, partProducer.err
|
||||||
}
|
}
|
||||||
|
|
||||||
func cleanUpTempFile(file *os.File) {
|
func cleanUpTempFile(file *os.File) {
|
||||||
|
@ -403,14 +552,16 @@ func cleanUpTempFile(file *os.File) {
|
||||||
os.Remove(file.Name())
|
os.Remove(file.Name())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s3.UploadPartInput, file *os.File, size int64) error {
|
func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s3.UploadPartInput, file io.ReadSeeker, size int64) (string, error) {
|
||||||
defer cleanUpTempFile(file)
|
|
||||||
|
|
||||||
if !upload.store.DisableContentHashes {
|
if !upload.store.DisableContentHashes {
|
||||||
// By default, use the traditional approach to upload data
|
// By default, use the traditional approach to upload data
|
||||||
uploadPartInput.Body = file
|
uploadPartInput.Body = file
|
||||||
_, err := upload.store.Service.UploadPartWithContext(ctx, uploadPartInput)
|
uploadPartInput.ContentLength = &size
|
||||||
return err
|
res, err := upload.store.Service.UploadPartWithContext(ctx, uploadPartInput)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return *res.ETag, nil
|
||||||
} else {
|
} else {
|
||||||
// Experimental feature to prevent the AWS SDK from calculating the SHA256 hash
|
// Experimental feature to prevent the AWS SDK from calculating the SHA256 hash
|
||||||
// for the parts we upload to S3.
|
// for the parts we upload to S3.
|
||||||
|
@ -418,19 +569,19 @@ func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s
|
||||||
// on our own. This way, the body is not included in the SHA256 calculation.
|
// on our own. This way, the body is not included in the SHA256 calculation.
|
||||||
s3api, ok := upload.store.Service.(s3APIForPresigning)
|
s3api, ok := upload.store.Service.(s3APIForPresigning)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("s3store: failed to cast S3 service for presigning")
|
return "", fmt.Errorf("s3store: failed to cast S3 service for presigning")
|
||||||
}
|
}
|
||||||
|
|
||||||
s3Req, _ := s3api.UploadPartRequest(uploadPartInput)
|
s3Req, _ := s3api.UploadPartRequest(uploadPartInput)
|
||||||
|
|
||||||
url, err := s3Req.Presign(15 * time.Minute)
|
url, err := s3Req.Presign(15 * time.Minute)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequest("PUT", url, file)
|
req, err := http.NewRequest("PUT", url, file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the Content-Length manually to prevent the usage of Transfer-Encoding: chunked,
|
// Set the Content-Length manually to prevent the usage of Transfer-Encoding: chunked,
|
||||||
|
@ -439,60 +590,100 @@ func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s
|
||||||
|
|
||||||
res, err := http.DefaultClient.Do(req)
|
res, err := http.DefaultClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", err
|
||||||
}
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
|
|
||||||
if res.StatusCode != 200 {
|
if res.StatusCode != 200 {
|
||||||
buf := new(strings.Builder)
|
buf := new(strings.Builder)
|
||||||
io.Copy(buf, res.Body)
|
io.Copy(buf, res.Body)
|
||||||
return fmt.Errorf("s3store: unexpected response code %d for presigned upload: %s", res.StatusCode, buf.String())
|
return "", fmt.Errorf("s3store: unexpected response code %d for presigned upload: %s", res.StatusCode, buf.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return res.Header.Get("ETag"), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload *s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
|
func (upload *s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
|
||||||
|
info, _, _, err = upload.getInternalInfo(ctx)
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *s3Upload) getInternalInfo(ctx context.Context) (info handler.FileInfo, parts []*s3Part, incompletePartSize int64, err error) {
|
||||||
if upload.info != nil {
|
if upload.info != nil {
|
||||||
return *upload.info, nil
|
return *upload.info, upload.parts, upload.incompletePartSize, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err = upload.fetchInfo(ctx)
|
info, parts, incompletePartSize, err = upload.fetchInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return info, err
|
return info, parts, incompletePartSize, err
|
||||||
}
|
}
|
||||||
|
|
||||||
upload.info = &info
|
upload.info = &info
|
||||||
return info, nil
|
upload.parts = parts
|
||||||
|
upload.incompletePartSize = incompletePartSize
|
||||||
|
return info, parts, incompletePartSize, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, err error) {
|
func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, parts []*s3Part, incompletePartSize int64, err error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, _ := splitIds(id)
|
uploadId, _ := splitIds(id)
|
||||||
|
|
||||||
// Get file info stored in separate object
|
var wg sync.WaitGroup
|
||||||
res, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
|
wg.Add(3)
|
||||||
Bucket: aws.String(store.Bucket),
|
|
||||||
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
|
// We store all errors in here and handle them all together once the wait
|
||||||
})
|
// group is done.
|
||||||
if err != nil {
|
var infoErr error
|
||||||
if isAwsError(err, "NoSuchKey") {
|
var partsErr error
|
||||||
return info, handler.ErrNotFound
|
var incompletePartSizeErr error
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
t := time.Now()
|
||||||
|
|
||||||
|
// Get file info stored in separate object
|
||||||
|
var res *s3.GetObjectOutput
|
||||||
|
res, infoErr = store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(store.Bucket),
|
||||||
|
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
|
||||||
|
})
|
||||||
|
store.observeRequestDuration(t, metricGetInfoObject)
|
||||||
|
if infoErr == nil {
|
||||||
|
infoErr = json.NewDecoder(res.Body).Decode(&info)
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
return info, err
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
// Get uploaded parts and their offset
|
||||||
|
parts, partsErr = store.listAllParts(ctx, id)
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
// Get size of optional incomplete part file.
|
||||||
|
incompletePartSize, incompletePartSizeErr = store.headIncompletePartForUpload(ctx, uploadId)
|
||||||
|
}()
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Finally, after all requests are complete, let's handle the errors
|
||||||
|
if infoErr != nil {
|
||||||
|
err = infoErr
|
||||||
|
// If the info file is not found, we consider the upload to be non-existant
|
||||||
|
if isAwsError(err, "NoSuchKey") {
|
||||||
|
err = handler.ErrNotFound
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&info); err != nil {
|
if partsErr != nil {
|
||||||
return info, err
|
err = partsErr
|
||||||
}
|
// Check if the error is caused by the multipart upload not being found. This happens
|
||||||
|
|
||||||
// Get uploaded parts and their offset
|
|
||||||
parts, err := store.listAllParts(ctx, id)
|
|
||||||
if err != nil {
|
|
||||||
// Check if the error is caused by the upload not being found. This happens
|
|
||||||
// when the multipart upload has already been completed or aborted. Since
|
// when the multipart upload has already been completed or aborted. Since
|
||||||
// we already found the info object, we know that the upload has been
|
// we already found the info object, we know that the upload has been
|
||||||
// completed and therefore can ensure the the offset is the size.
|
// completed and therefore can ensure the the offset is the size.
|
||||||
|
@ -500,33 +691,28 @@ func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, er
|
||||||
// Spaces, can also return NoSuchKey.
|
// Spaces, can also return NoSuchKey.
|
||||||
if isAwsError(err, "NoSuchUpload") || isAwsError(err, "NoSuchKey") {
|
if isAwsError(err, "NoSuchUpload") || isAwsError(err, "NoSuchKey") {
|
||||||
info.Offset = info.Size
|
info.Offset = info.Size
|
||||||
return info, nil
|
err = nil
|
||||||
} else {
|
|
||||||
return info, err
|
|
||||||
}
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
offset := int64(0)
|
if incompletePartSizeErr != nil {
|
||||||
|
err = incompletePartSizeErr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// The offset is the sum of all part sizes and the size of the incomplete part file.
|
||||||
|
offset := incompletePartSize
|
||||||
for _, part := range parts {
|
for _, part := range parts {
|
||||||
offset += *part.Size
|
offset += part.size
|
||||||
}
|
|
||||||
|
|
||||||
incompletePartObject, err := store.getIncompletePartForUpload(ctx, uploadId)
|
|
||||||
if err != nil {
|
|
||||||
return info, err
|
|
||||||
}
|
|
||||||
if incompletePartObject != nil {
|
|
||||||
defer incompletePartObject.Body.Close()
|
|
||||||
offset += *incompletePartObject.ContentLength
|
|
||||||
}
|
}
|
||||||
|
|
||||||
info.Offset = offset
|
info.Offset = offset
|
||||||
|
|
||||||
return
|
return info, parts, incompletePartSize, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
|
func (upload s3Upload) GetReader(ctx context.Context) (io.ReadCloser, error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
@ -558,7 +744,7 @@ func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// The multipart upload still exists, which means we cannot download it yet
|
// The multipart upload still exists, which means we cannot download it yet
|
||||||
return nil, handler.NewHTTPError(errors.New("cannot stream non-finished upload"), http.StatusBadRequest)
|
return nil, handler.NewError("ERR_INCOMPLETE_UPLOAD", "cannot stream non-finished upload", http.StatusBadRequest)
|
||||||
}
|
}
|
||||||
|
|
||||||
if isAwsError(err, "NoSuchUpload") {
|
if isAwsError(err, "NoSuchUpload") {
|
||||||
|
@ -640,7 +826,7 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
|
||||||
// Get uploaded parts
|
// Get uploaded parts
|
||||||
parts, err := store.listAllParts(ctx, id)
|
_, parts, _, err := upload.getInternalInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -660,10 +846,11 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
parts = []*s3.Part{
|
parts = []*s3Part{
|
||||||
&s3.Part{
|
{
|
||||||
ETag: res.ETag,
|
etag: *res.ETag,
|
||||||
PartNumber: aws.Int64(1),
|
number: 1,
|
||||||
|
size: 0,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -675,11 +862,12 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
|
||||||
|
|
||||||
for index, part := range parts {
|
for index, part := range parts {
|
||||||
completedParts[index] = &s3.CompletedPart{
|
completedParts[index] = &s3.CompletedPart{
|
||||||
ETag: part.ETag,
|
ETag: aws.String(part.etag),
|
||||||
PartNumber: part.PartNumber,
|
PartNumber: aws.Int64(part.number),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
t := time.Now()
|
||||||
_, err = store.Service.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
|
_, err = store.Service.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId),
|
Key: store.keyWithPrefix(uploadId),
|
||||||
|
@ -688,6 +876,7 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
|
||||||
Parts: completedParts,
|
Parts: completedParts,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
store.observeRequestDuration(t, metricCompleteMultipartUpload)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -790,10 +979,16 @@ func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads
|
||||||
partialS3Upload := partialUpload.(*s3Upload)
|
partialS3Upload := partialUpload.(*s3Upload)
|
||||||
partialId, _ := splitIds(partialS3Upload.id)
|
partialId, _ := splitIds(partialS3Upload.id)
|
||||||
|
|
||||||
|
upload.parts = append(upload.parts, &s3Part{
|
||||||
|
number: int64(i + 1),
|
||||||
|
size: -1,
|
||||||
|
etag: "",
|
||||||
|
})
|
||||||
|
|
||||||
go func(i int, partialId string) {
|
go func(i int, partialId string) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
_, err := store.Service.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
|
res, err := store.Service.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId),
|
Key: store.keyWithPrefix(uploadId),
|
||||||
UploadId: aws.String(multipartId),
|
UploadId: aws.String(multipartId),
|
||||||
|
@ -806,6 +1001,8 @@ func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads
|
||||||
errs = append(errs, err)
|
errs = append(errs, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
upload.parts[i].etag = *res.CopyPartResult.ETag
|
||||||
}(i, partialId)
|
}(i, partialId)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -829,11 +1026,13 @@ func (upload *s3Upload) DeclareLength(ctx context.Context, length int64) error {
|
||||||
return upload.writeInfo(ctx, info)
|
return upload.writeInfo(ctx, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.Part, err error) {
|
func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3Part, err error) {
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
|
||||||
partMarker := int64(0)
|
partMarker := int64(0)
|
||||||
for {
|
for {
|
||||||
|
t := time.Now()
|
||||||
|
|
||||||
// Get uploaded parts
|
// Get uploaded parts
|
||||||
listPtr, err := store.Service.ListPartsWithContext(ctx, &s3.ListPartsInput{
|
listPtr, err := store.Service.ListPartsWithContext(ctx, &s3.ListPartsInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
|
@ -841,11 +1040,19 @@ func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.P
|
||||||
UploadId: aws.String(multipartId),
|
UploadId: aws.String(multipartId),
|
||||||
PartNumberMarker: aws.Int64(partMarker),
|
PartNumberMarker: aws.Int64(partMarker),
|
||||||
})
|
})
|
||||||
|
store.observeRequestDuration(t, metricListParts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
parts = append(parts, (*listPtr).Parts...)
|
// TODO: Find more efficient way when appending many elements
|
||||||
|
for _, part := range (*listPtr).Parts {
|
||||||
|
parts = append(parts, &s3Part{
|
||||||
|
number: *part.PartNumber,
|
||||||
|
size: *part.Size,
|
||||||
|
etag: *part.ETag,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
if listPtr.IsTruncated != nil && *listPtr.IsTruncated {
|
if listPtr.IsTruncated != nil && *listPtr.IsTruncated {
|
||||||
partMarker = *listPtr.NextPartNumberMarker
|
partMarker = *listPtr.NextPartNumberMarker
|
||||||
|
@ -856,36 +1063,38 @@ func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.P
|
||||||
return parts, nil
|
return parts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, uploadId string) (*os.File, int64, error) {
|
func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, uploadId string) (*os.File, error) {
|
||||||
|
t := time.Now()
|
||||||
incompleteUploadObject, err := store.getIncompletePartForUpload(ctx, uploadId)
|
incompleteUploadObject, err := store.getIncompletePartForUpload(ctx, uploadId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if incompleteUploadObject == nil {
|
if incompleteUploadObject == nil {
|
||||||
// We did not find an incomplete upload
|
// We did not find an incomplete upload
|
||||||
return nil, 0, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
defer incompleteUploadObject.Body.Close()
|
defer incompleteUploadObject.Body.Close()
|
||||||
|
|
||||||
partFile, err := ioutil.TempFile(store.TemporaryDirectory, "tusd-s3-tmp-")
|
partFile, err := ioutil.TempFile(store.TemporaryDirectory, "tusd-s3-tmp-")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
n, err := io.Copy(partFile, incompleteUploadObject.Body)
|
n, err := io.Copy(partFile, incompleteUploadObject.Body)
|
||||||
|
store.observeRequestDuration(t, metricGetPartObject)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if n < *incompleteUploadObject.ContentLength {
|
if n < *incompleteUploadObject.ContentLength {
|
||||||
return nil, 0, errors.New("short read of incomplete upload")
|
return nil, errors.New("short read of incomplete upload")
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = partFile.Seek(0, 0)
|
_, err = partFile.Seek(0, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return partFile, n, nil
|
return partFile, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId string) (*s3.GetObjectOutput, error) {
|
func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId string) (*s3.GetObjectOutput, error) {
|
||||||
|
@ -901,22 +1110,42 @@ func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId st
|
||||||
return obj, err
|
return obj, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, file *os.File) error {
|
func (store S3Store) headIncompletePartForUpload(ctx context.Context, uploadId string) (int64, error) {
|
||||||
defer cleanUpTempFile(file)
|
t := time.Now()
|
||||||
|
obj, err := store.Service.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String(store.Bucket),
|
||||||
|
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
|
||||||
|
})
|
||||||
|
store.observeRequestDuration(t, metricHeadPartObject)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if isAwsError(err, s3.ErrCodeNoSuchKey) || isAwsError(err, "NotFound") || isAwsError(err, "AccessDenied") {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return *obj.ContentLength, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, file io.ReadSeeker) error {
|
||||||
|
t := time.Now()
|
||||||
_, err := store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
_, err := store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
|
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
|
||||||
Body: file,
|
Body: file,
|
||||||
})
|
})
|
||||||
|
store.observeRequestDuration(t, metricPutPartObject)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) deleteIncompletePartForUpload(ctx context.Context, uploadId string) error {
|
func (store S3Store) deleteIncompletePartForUpload(ctx context.Context, uploadId string) error {
|
||||||
|
t := time.Now()
|
||||||
_, err := store.Service.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
|
_, err := store.Service.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
|
Key: store.metadataKeyWithPrefix(uploadId + ".part"),
|
||||||
})
|
})
|
||||||
|
store.observeRequestDuration(t, metricPutPartObject)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -937,6 +1166,11 @@ func isAwsError(err error, code string) bool {
|
||||||
if err, ok := err.(awserr.Error); ok && err.Code() == code {
|
if err, ok := err.(awserr.Error); ok && err.Code() == code {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err, ok := err.(minio.ErrorResponse); ok && err.Code == code {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1004,3 +1238,13 @@ func (store S3Store) metadataKeyWithPrefix(key string) *string {
|
||||||
|
|
||||||
return aws.String(prefix + key)
|
return aws.String(prefix + key)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (store S3Store) acquireUploadSemaphore() {
|
||||||
|
store.uploadSemaphoreDemandMetric.Inc()
|
||||||
|
store.uploadSemaphore.Acquire()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store S3Store) releaseUploadSemaphore() {
|
||||||
|
store.uploadSemaphore.Release()
|
||||||
|
store.uploadSemaphoreDemandMetric.Dec()
|
||||||
|
}
|
||||||
|
|
|
@ -6,36 +6,37 @@ package s3store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
context "context"
|
context "context"
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
request "github.com/aws/aws-sdk-go/aws/request"
|
request "github.com/aws/aws-sdk-go/aws/request"
|
||||||
s3 "github.com/aws/aws-sdk-go/service/s3"
|
s3 "github.com/aws/aws-sdk-go/service/s3"
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "github.com/golang/mock/gomock"
|
||||||
reflect "reflect"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockS3API is a mock of S3API interface
|
// MockS3API is a mock of S3API interface.
|
||||||
type MockS3API struct {
|
type MockS3API struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockS3APIMockRecorder
|
recorder *MockS3APIMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockS3APIMockRecorder is the mock recorder for MockS3API
|
// MockS3APIMockRecorder is the mock recorder for MockS3API.
|
||||||
type MockS3APIMockRecorder struct {
|
type MockS3APIMockRecorder struct {
|
||||||
mock *MockS3API
|
mock *MockS3API
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMockS3API creates a new mock instance
|
// NewMockS3API creates a new mock instance.
|
||||||
func NewMockS3API(ctrl *gomock.Controller) *MockS3API {
|
func NewMockS3API(ctrl *gomock.Controller) *MockS3API {
|
||||||
mock := &MockS3API{ctrl: ctrl}
|
mock := &MockS3API{ctrl: ctrl}
|
||||||
mock.recorder = &MockS3APIMockRecorder{mock}
|
mock.recorder = &MockS3APIMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {
|
func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// AbortMultipartUploadWithContext mocks base method
|
// AbortMultipartUploadWithContext mocks base method.
|
||||||
func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
|
func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -48,14 +49,14 @@ func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext
|
// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CompleteMultipartUploadWithContext mocks base method
|
// CompleteMultipartUploadWithContext mocks base method.
|
||||||
func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
|
func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -68,14 +69,14 @@ func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext
|
// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateMultipartUploadWithContext mocks base method
|
// CreateMultipartUploadWithContext mocks base method.
|
||||||
func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
|
func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -88,14 +89,14 @@ func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext
|
// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectWithContext mocks base method
|
// DeleteObjectWithContext mocks base method.
|
||||||
func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) {
|
func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -108,14 +109,14 @@ func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.Delet
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext
|
// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectsWithContext mocks base method
|
// DeleteObjectsWithContext mocks base method.
|
||||||
func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) {
|
func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -128,14 +129,14 @@ func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.Dele
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext
|
// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectWithContext mocks base method
|
// GetObjectWithContext mocks base method.
|
||||||
func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) {
|
func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -148,14 +149,34 @@ func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjec
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectWithContext indicates an expected call of GetObjectWithContext
|
// GetObjectWithContext indicates an expected call of GetObjectWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPartsWithContext mocks base method
|
// HeadObjectWithContext mocks base method.
|
||||||
|
func (m *MockS3API) HeadObjectWithContext(arg0 context.Context, arg1 *s3.HeadObjectInput, arg2 ...request.Option) (*s3.HeadObjectOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "HeadObjectWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.HeadObjectOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadObjectWithContext indicates an expected call of HeadObjectWithContext.
|
||||||
|
func (mr *MockS3APIMockRecorder) HeadObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectWithContext", reflect.TypeOf((*MockS3API)(nil).HeadObjectWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListPartsWithContext mocks base method.
|
||||||
func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) {
|
func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -168,14 +189,14 @@ func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPart
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPartsWithContext indicates an expected call of ListPartsWithContext
|
// ListPartsWithContext indicates an expected call of ListPartsWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutObjectWithContext mocks base method
|
// PutObjectWithContext mocks base method.
|
||||||
func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) {
|
func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -188,14 +209,14 @@ func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjec
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutObjectWithContext indicates an expected call of PutObjectWithContext
|
// PutObjectWithContext indicates an expected call of PutObjectWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadPartCopyWithContext mocks base method
|
// UploadPartCopyWithContext mocks base method.
|
||||||
func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) {
|
func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -208,14 +229,14 @@ func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, arg1 *s3.Upl
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext
|
// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadPartWithContext mocks base method
|
// UploadPartWithContext mocks base method.
|
||||||
func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) {
|
func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
varargs := []interface{}{arg0, arg1}
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
@ -228,7 +249,7 @@ func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadP
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadPartWithContext indicates an expected call of UploadPartWithContext
|
// UploadPartWithContext indicates an expected call of UploadPartWithContext.
|
||||||
func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
|
|
@ -1,64 +1,155 @@
|
||||||
package s3store
|
package s3store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const TEMP_DIR_USE_MEMORY = "_memory"
|
||||||
|
|
||||||
// s3PartProducer converts a stream of bytes from the reader into a stream of files on disk
|
// s3PartProducer converts a stream of bytes from the reader into a stream of files on disk
|
||||||
type s3PartProducer struct {
|
type s3PartProducer struct {
|
||||||
store *S3Store
|
tmpDir string
|
||||||
files chan<- *os.File
|
files chan fileChunk
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
err error
|
err error
|
||||||
r io.Reader
|
r io.Reader
|
||||||
|
diskWriteDurationMetric prometheus.Summary
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileChunk struct {
|
||||||
|
reader io.ReadSeeker
|
||||||
|
closeReader func()
|
||||||
|
size int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func newS3PartProducer(source io.Reader, backlog int64, tmpDir string, diskWriteDurationMetric prometheus.Summary) (s3PartProducer, <-chan fileChunk) {
|
||||||
|
fileChan := make(chan fileChunk, backlog)
|
||||||
|
doneChan := make(chan struct{})
|
||||||
|
|
||||||
|
if os.Getenv("TUSD_S3STORE_TEMP_MEMORY") == "1" {
|
||||||
|
tmpDir = TEMP_DIR_USE_MEMORY
|
||||||
|
}
|
||||||
|
|
||||||
|
partProducer := s3PartProducer{
|
||||||
|
tmpDir: tmpDir,
|
||||||
|
done: doneChan,
|
||||||
|
files: fileChan,
|
||||||
|
r: source,
|
||||||
|
diskWriteDurationMetric: diskWriteDurationMetric,
|
||||||
|
}
|
||||||
|
|
||||||
|
return partProducer, fileChan
|
||||||
|
}
|
||||||
|
|
||||||
|
// stop should always be called by the consumer to ensure that the channels
|
||||||
|
// are properly closed and emptied.
|
||||||
|
func (spp *s3PartProducer) stop() {
|
||||||
|
close(spp.done)
|
||||||
|
|
||||||
|
// If we return while there are still files in the channel, then
|
||||||
|
// we may leak file descriptors. Let's ensure that those are cleaned up.
|
||||||
|
for fileChunk := range spp.files {
|
||||||
|
fileChunk.closeReader()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (spp *s3PartProducer) produce(partSize int64) {
|
func (spp *s3PartProducer) produce(partSize int64) {
|
||||||
|
outerloop:
|
||||||
for {
|
for {
|
||||||
file, err := spp.nextPart(partSize)
|
file, ok, err := spp.nextPart(partSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// An error occured. Stop producing.
|
||||||
spp.err = err
|
spp.err = err
|
||||||
close(spp.files)
|
break
|
||||||
return
|
|
||||||
}
|
}
|
||||||
if file == nil {
|
if !ok {
|
||||||
close(spp.files)
|
// The source was fully read. Stop producing.
|
||||||
return
|
break
|
||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
case spp.files <- file:
|
case spp.files <- file:
|
||||||
case <-spp.done:
|
case <-spp.done:
|
||||||
close(spp.files)
|
// We are told to stop producing. Stop producing.
|
||||||
return
|
break outerloop
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
close(spp.files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (spp *s3PartProducer) nextPart(size int64) (*os.File, error) {
|
func (spp *s3PartProducer) nextPart(size int64) (fileChunk, bool, error) {
|
||||||
// Create a temporary file to store the part
|
if spp.tmpDir != TEMP_DIR_USE_MEMORY {
|
||||||
file, err := ioutil.TempFile(spp.store.TemporaryDirectory, "tusd-s3-tmp-")
|
// Create a temporary file to store the part
|
||||||
if err != nil {
|
file, err := ioutil.TempFile(spp.tmpDir, "tusd-s3-tmp-")
|
||||||
return nil, err
|
if err != nil {
|
||||||
|
return fileChunk{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
limitedReader := io.LimitReader(spp.r, size)
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
n, err := io.Copy(file, limitedReader)
|
||||||
|
if err != nil {
|
||||||
|
return fileChunk{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the entire request body is read and no more data is available,
|
||||||
|
// io.Copy returns 0 since it is unable to read any bytes. In that
|
||||||
|
// case, we can close the s3PartProducer.
|
||||||
|
if n == 0 {
|
||||||
|
cleanUpTempFile(file)
|
||||||
|
return fileChunk{}, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
|
||||||
|
spp.diskWriteDurationMetric.Observe(ms)
|
||||||
|
|
||||||
|
// Seek to the beginning of the file
|
||||||
|
file.Seek(0, 0)
|
||||||
|
|
||||||
|
return fileChunk{
|
||||||
|
reader: file,
|
||||||
|
closeReader: func() {
|
||||||
|
file.Close()
|
||||||
|
os.Remove(file.Name())
|
||||||
|
},
|
||||||
|
size: n,
|
||||||
|
}, true, nil
|
||||||
|
} else {
|
||||||
|
// Create a temporary buffer to store the part
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
|
||||||
|
limitedReader := io.LimitReader(spp.r, size)
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
n, err := io.Copy(buf, limitedReader)
|
||||||
|
if err != nil {
|
||||||
|
return fileChunk{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the entire request body is read and no more data is available,
|
||||||
|
// io.Copy returns 0 since it is unable to read any bytes. In that
|
||||||
|
// case, we can close the s3PartProducer.
|
||||||
|
if n == 0 {
|
||||||
|
return fileChunk{}, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
|
||||||
|
spp.diskWriteDurationMetric.Observe(ms)
|
||||||
|
|
||||||
|
return fileChunk{
|
||||||
|
// buf does not get written to anymore, so we can turn it into a reader
|
||||||
|
reader: bytes.NewReader(buf.Bytes()),
|
||||||
|
closeReader: func() {},
|
||||||
|
size: n,
|
||||||
|
}, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
limitedReader := io.LimitReader(spp.r, size)
|
|
||||||
n, err := io.Copy(file, limitedReader)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the entire request body is read and no more data is available,
|
|
||||||
// io.Copy returns 0 since it is unable to read any bytes. In that
|
|
||||||
// case, we can close the s3PartProducer.
|
|
||||||
if n == 0 {
|
|
||||||
cleanUpTempFile(file)
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek to the beginning of the file
|
|
||||||
file.Seek(0, 0)
|
|
||||||
|
|
||||||
return file, nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,10 +2,11 @@ package s3store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
)
|
)
|
||||||
|
|
||||||
type InfiniteZeroReader struct{}
|
type InfiniteZeroReader struct{}
|
||||||
|
@ -21,33 +22,30 @@ func (ErrorReader) Read(b []byte) (int, error) {
|
||||||
return 0, errors.New("error from ErrorReader")
|
return 0, errors.New("error from ErrorReader")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var testSummary = prometheus.NewSummary(prometheus.SummaryOpts{})
|
||||||
|
|
||||||
func TestPartProducerConsumesEntireReaderWithoutError(t *testing.T) {
|
func TestPartProducerConsumesEntireReaderWithoutError(t *testing.T) {
|
||||||
fileChan := make(chan *os.File)
|
|
||||||
doneChan := make(chan struct{})
|
|
||||||
expectedStr := "test"
|
expectedStr := "test"
|
||||||
r := strings.NewReader(expectedStr)
|
r := strings.NewReader(expectedStr)
|
||||||
pp := s3PartProducer{
|
pp, fileChan := newS3PartProducer(r, 0, "", testSummary)
|
||||||
store: &S3Store{},
|
|
||||||
done: doneChan,
|
|
||||||
files: fileChan,
|
|
||||||
r: r,
|
|
||||||
}
|
|
||||||
go pp.produce(1)
|
go pp.produce(1)
|
||||||
|
|
||||||
actualStr := ""
|
actualStr := ""
|
||||||
b := make([]byte, 1)
|
b := make([]byte, 1)
|
||||||
for f := range fileChan {
|
for chunk := range fileChan {
|
||||||
n, err := f.Read(b)
|
n, err := chunk.reader.Read(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unexpected error: %s", err)
|
t.Fatalf("unexpected error: %s", err)
|
||||||
}
|
}
|
||||||
if n != 1 {
|
if n != 1 {
|
||||||
t.Fatalf("incorrect number of bytes read: wanted %d, got %d", 1, n)
|
t.Fatalf("incorrect number of bytes read: wanted %d, got %d", 1, n)
|
||||||
}
|
}
|
||||||
|
if chunk.size != 1 {
|
||||||
|
t.Fatalf("incorrect number of bytes in struct: wanted %d, got %d", 1, chunk.size)
|
||||||
|
}
|
||||||
actualStr += string(b)
|
actualStr += string(b)
|
||||||
|
|
||||||
os.Remove(f.Name())
|
chunk.closeReader()
|
||||||
f.Close()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if actualStr != expectedStr {
|
if actualStr != expectedStr {
|
||||||
|
@ -59,15 +57,8 @@ func TestPartProducerConsumesEntireReaderWithoutError(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPartProducerExitsWhenDoneChannelIsClosed(t *testing.T) {
|
func TestPartProducerExitsWhenProducerIsStopped(t *testing.T) {
|
||||||
fileChan := make(chan *os.File)
|
pp, fileChan := newS3PartProducer(InfiniteZeroReader{}, 0, "", testSummary)
|
||||||
doneChan := make(chan struct{})
|
|
||||||
pp := s3PartProducer{
|
|
||||||
store: &S3Store{},
|
|
||||||
done: doneChan,
|
|
||||||
files: fileChan,
|
|
||||||
r: InfiniteZeroReader{},
|
|
||||||
}
|
|
||||||
|
|
||||||
completedChan := make(chan struct{})
|
completedChan := make(chan struct{})
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -75,35 +66,7 @@ func TestPartProducerExitsWhenDoneChannelIsClosed(t *testing.T) {
|
||||||
completedChan <- struct{}{}
|
completedChan <- struct{}{}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
close(doneChan)
|
pp.stop()
|
||||||
|
|
||||||
select {
|
|
||||||
case <-completedChan:
|
|
||||||
// producer exited cleanly
|
|
||||||
case <-time.After(2 * time.Second):
|
|
||||||
t.Error("timed out waiting for producer to exit")
|
|
||||||
}
|
|
||||||
|
|
||||||
safelyDrainChannelOrFail(fileChan, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPartProducerExitsWhenDoneChannelIsClosedBeforeAnyPartIsSent(t *testing.T) {
|
|
||||||
fileChan := make(chan *os.File)
|
|
||||||
doneChan := make(chan struct{})
|
|
||||||
pp := s3PartProducer{
|
|
||||||
store: &S3Store{},
|
|
||||||
done: doneChan,
|
|
||||||
files: fileChan,
|
|
||||||
r: InfiniteZeroReader{},
|
|
||||||
}
|
|
||||||
|
|
||||||
close(doneChan)
|
|
||||||
|
|
||||||
completedChan := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
pp.produce(10)
|
|
||||||
completedChan <- struct{}{}
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-completedChan:
|
case <-completedChan:
|
||||||
|
@ -116,14 +79,7 @@ func TestPartProducerExitsWhenDoneChannelIsClosedBeforeAnyPartIsSent(t *testing.
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPartProducerExitsWhenUnableToReadFromFile(t *testing.T) {
|
func TestPartProducerExitsWhenUnableToReadFromFile(t *testing.T) {
|
||||||
fileChan := make(chan *os.File)
|
pp, fileChan := newS3PartProducer(ErrorReader{}, 0, "", testSummary)
|
||||||
doneChan := make(chan struct{})
|
|
||||||
pp := s3PartProducer{
|
|
||||||
store: &S3Store{},
|
|
||||||
done: doneChan,
|
|
||||||
files: fileChan,
|
|
||||||
r: ErrorReader{},
|
|
||||||
}
|
|
||||||
|
|
||||||
completedChan := make(chan struct{})
|
completedChan := make(chan struct{})
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -145,12 +101,12 @@ func TestPartProducerExitsWhenUnableToReadFromFile(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func safelyDrainChannelOrFail(c chan *os.File, t *testing.T) {
|
func safelyDrainChannelOrFail(c <-chan fileChunk, t *testing.T) {
|
||||||
// At this point, we've signaled that the producer should exit, but it may write a few files
|
// At this point, we've signaled that the producer should exit, but it may write a few files
|
||||||
// into the channel before closing it and exiting. Make sure that we get a nil value
|
// into the channel before closing it and exiting. Make sure that we get a nil value
|
||||||
// eventually.
|
// eventually.
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
if f := <-c; f == nil {
|
if _, more := <-c; !more {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,7 +16,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/v2/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate mockgen -destination=./s3store_mock_test.go -package=s3store github.com/tus/tusd/pkg/s3store S3API
|
//go:generate mockgen -destination=./s3store_mock_test.go -package=s3store github.com/tus/tusd/pkg/s3store S3API
|
||||||
|
@ -171,6 +171,9 @@ func TestNewUploadWithMetadataObjectPrefix(t *testing.T) {
|
||||||
assert.NotNil(upload)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// This test ensures that an newly created upload without any chunks can be
|
||||||
|
// directly finished. There are no calls to ListPart or HeadObject because
|
||||||
|
// the upload is not fetched from S3 first.
|
||||||
func TestEmptyUpload(t *testing.T) {
|
func TestEmptyUpload(t *testing.T) {
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
defer mockCtrl.Finish()
|
||||||
|
@ -193,14 +196,6 @@ func TestEmptyUpload(t *testing.T) {
|
||||||
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
||||||
ContentLength: aws.Int64(int64(208)),
|
ContentLength: aws.Int64(int64(208)),
|
||||||
}),
|
}),
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
PartNumberMarker: aws.Int64(0),
|
|
||||||
}).Return(&s3.ListPartsOutput{
|
|
||||||
Parts: []*s3.Part{},
|
|
||||||
}, nil),
|
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
|
@ -272,6 +267,17 @@ func TestGetInfoNotFound(t *testing.T) {
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
||||||
|
|
||||||
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
PartNumberMarker: aws.Int64(0),
|
||||||
|
}).Return(nil, awserr.New("NoSuchUpload", "Not found", nil))
|
||||||
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.part"),
|
||||||
|
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
|
@ -287,47 +293,52 @@ func TestGetInfo(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.info"),
|
||||||
Key: aws.String("uploadId.info"),
|
}).Return(&s3.GetObjectOutput{
|
||||||
}).Return(&s3.GetObjectOutput{
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
|
}, nil)
|
||||||
}, nil),
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumberMarker: aws.Int64(0),
|
||||||
PartNumberMarker: aws.Int64(0),
|
}).Return(&s3.ListPartsOutput{
|
||||||
}).Return(&s3.ListPartsOutput{
|
Parts: []*s3.Part{
|
||||||
Parts: []*s3.Part{
|
{
|
||||||
{
|
PartNumber: aws.Int64(1),
|
||||||
Size: aws.Int64(100),
|
Size: aws.Int64(100),
|
||||||
},
|
ETag: aws.String("etag-1"),
|
||||||
{
|
|
||||||
Size: aws.Int64(200),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
NextPartNumberMarker: aws.Int64(2),
|
{
|
||||||
IsTruncated: aws.Bool(true),
|
PartNumber: aws.Int64(2),
|
||||||
}, nil),
|
Size: aws.Int64(200),
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
ETag: aws.String("etag-2"),
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
PartNumberMarker: aws.Int64(2),
|
|
||||||
}).Return(&s3.ListPartsOutput{
|
|
||||||
Parts: []*s3.Part{
|
|
||||||
{
|
|
||||||
Size: aws.Int64(100),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}, nil),
|
},
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
NextPartNumberMarker: aws.Int64(2),
|
||||||
Bucket: aws.String("bucket"),
|
// Simulate a truncated response, so s3store should send a second request
|
||||||
Key: aws.String("uploadId.part"),
|
IsTruncated: aws.Bool(true),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
}, nil)
|
||||||
)
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
PartNumberMarker: aws.Int64(2),
|
||||||
|
}).Return(&s3.ListPartsOutput{
|
||||||
|
Parts: []*s3.Part{
|
||||||
|
{
|
||||||
|
PartNumber: aws.Int64(3),
|
||||||
|
Size: aws.Int64(100),
|
||||||
|
ETag: aws.String("etag-3"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.part"),
|
||||||
|
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -353,47 +364,52 @@ func TestGetInfoWithMetadataObjectPrefix(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
store.MetadataObjectPrefix = "my/metadata"
|
store.MetadataObjectPrefix = "my/metadata"
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("my/metadata/uploadId.info"),
|
||||||
Key: aws.String("my/metadata/uploadId.info"),
|
}).Return(&s3.GetObjectOutput{
|
||||||
}).Return(&s3.GetObjectOutput{
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
|
}, nil)
|
||||||
}, nil),
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumberMarker: aws.Int64(0),
|
||||||
PartNumberMarker: aws.Int64(0),
|
}).Return(&s3.ListPartsOutput{
|
||||||
}).Return(&s3.ListPartsOutput{
|
Parts: []*s3.Part{
|
||||||
Parts: []*s3.Part{
|
{
|
||||||
{
|
PartNumber: aws.Int64(1),
|
||||||
Size: aws.Int64(100),
|
Size: aws.Int64(100),
|
||||||
},
|
ETag: aws.String("etag-1"),
|
||||||
{
|
|
||||||
Size: aws.Int64(200),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
NextPartNumberMarker: aws.Int64(2),
|
{
|
||||||
IsTruncated: aws.Bool(true),
|
PartNumber: aws.Int64(2),
|
||||||
}, nil),
|
Size: aws.Int64(200),
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
ETag: aws.String("etag-2"),
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
PartNumberMarker: aws.Int64(2),
|
|
||||||
}).Return(&s3.ListPartsOutput{
|
|
||||||
Parts: []*s3.Part{
|
|
||||||
{
|
|
||||||
Size: aws.Int64(100),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}, nil),
|
},
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
NextPartNumberMarker: aws.Int64(2),
|
||||||
Bucket: aws.String("bucket"),
|
// Simulate a truncated response, so s3store should send a second request
|
||||||
Key: aws.String("my/metadata/uploadId.part"),
|
IsTruncated: aws.Bool(true),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
}, nil)
|
||||||
)
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
PartNumberMarker: aws.Int64(2),
|
||||||
|
}).Return(&s3.ListPartsOutput{
|
||||||
|
Parts: []*s3.Part{
|
||||||
|
{
|
||||||
|
PartNumber: aws.Int64(3),
|
||||||
|
Size: aws.Int64(100),
|
||||||
|
ETag: aws.String("etag-3"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("my/metadata/uploadId.part"),
|
||||||
|
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -418,27 +434,24 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.info"),
|
||||||
Key: aws.String("uploadId.info"),
|
}).Return(&s3.GetObjectOutput{
|
||||||
}).Return(&s3.GetObjectOutput{
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
}, nil)
|
||||||
}, nil),
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumberMarker: aws.Int64(0),
|
||||||
PartNumberMarker: aws.Int64(0),
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil)
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.part"),
|
||||||
Key: aws.String("uploadId.part"),
|
}).Return(&s3.HeadObjectOutput{
|
||||||
}).Return(&s3.GetObjectOutput{
|
ContentLength: aws.Int64(10),
|
||||||
ContentLength: aws.Int64(10),
|
}, nil)
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("0123456789"))),
|
|
||||||
}, nil),
|
|
||||||
)
|
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -457,20 +470,22 @@ func TestGetInfoFinished(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.info"),
|
||||||
Key: aws.String("uploadId.info"),
|
}).Return(&s3.GetObjectOutput{
|
||||||
}).Return(&s3.GetObjectOutput{
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
}, nil)
|
||||||
}, nil),
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumberMarker: aws.Int64(0),
|
||||||
PartNumberMarker: aws.Int64(0),
|
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil))
|
||||||
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
)
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.part"),
|
||||||
|
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -561,7 +576,7 @@ func TestGetReaderNotFinished(t *testing.T) {
|
||||||
|
|
||||||
content, err := upload.GetReader(context.Background())
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(content)
|
assert.Nil(content)
|
||||||
assert.Equal("cannot stream non-finished upload", err.Error())
|
assert.Equal("ERR_INCOMPLETE_UPLOAD: cannot stream non-finished upload", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeclareLength(t *testing.T) {
|
func TestDeclareLength(t *testing.T) {
|
||||||
|
@ -572,32 +587,30 @@ func TestDeclareLength(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.info"),
|
||||||
Key: aws.String("uploadId.info"),
|
}).Return(&s3.GetObjectOutput{
|
||||||
}).Return(&s3.GetObjectOutput{
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":true,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`))),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":true,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`))),
|
}, nil)
|
||||||
}, nil),
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumberMarker: aws.Int64(0),
|
||||||
PartNumberMarker: aws.Int64(0),
|
}).Return(&s3.ListPartsOutput{
|
||||||
}).Return(&s3.ListPartsOutput{
|
Parts: []*s3.Part{},
|
||||||
Parts: []*s3.Part{},
|
}, nil)
|
||||||
}, nil),
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.part"),
|
||||||
Key: aws.String("uploadId.part"),
|
}).Return(nil, awserr.New("NotFound", "Not Found", nil))
|
||||||
}).Return(nil, awserr.New("NotFound", "Not Found", nil)),
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
|
||||||
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.info"),
|
||||||
Key: aws.String("uploadId.info"),
|
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
||||||
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
ContentLength: aws.Int64(int64(208)),
|
||||||
ContentLength: aws.Int64(int64(208)),
|
})
|
||||||
}),
|
|
||||||
)
|
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -617,64 +630,72 @@ func TestFinishUpload(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.info"),
|
||||||
Key: aws.String("uploadId"),
|
}).Return(&s3.GetObjectOutput{
|
||||||
UploadId: aws.String("multipartId"),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":400,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
PartNumberMarker: aws.Int64(0),
|
}, nil)
|
||||||
}).Return(&s3.ListPartsOutput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Parts: []*s3.Part{
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
PartNumberMarker: aws.Int64(0),
|
||||||
|
}).Return(&s3.ListPartsOutput{
|
||||||
|
Parts: []*s3.Part{
|
||||||
|
{
|
||||||
|
Size: aws.Int64(100),
|
||||||
|
ETag: aws.String("etag-1"),
|
||||||
|
PartNumber: aws.Int64(1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Size: aws.Int64(200),
|
||||||
|
ETag: aws.String("etag-2"),
|
||||||
|
PartNumber: aws.Int64(2),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
NextPartNumberMarker: aws.Int64(2),
|
||||||
|
IsTruncated: aws.Bool(true),
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
PartNumberMarker: aws.Int64(2),
|
||||||
|
}).Return(&s3.ListPartsOutput{
|
||||||
|
Parts: []*s3.Part{
|
||||||
|
{
|
||||||
|
Size: aws.Int64(100),
|
||||||
|
ETag: aws.String("etag-3"),
|
||||||
|
PartNumber: aws.Int64(3),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.part"),
|
||||||
|
}).Return(nil, awserr.New("NotFound", "Not Found", nil))
|
||||||
|
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
MultipartUpload: &s3.CompletedMultipartUpload{
|
||||||
|
Parts: []*s3.CompletedPart{
|
||||||
{
|
{
|
||||||
Size: aws.Int64(100),
|
ETag: aws.String("etag-1"),
|
||||||
ETag: aws.String("foo"),
|
|
||||||
PartNumber: aws.Int64(1),
|
PartNumber: aws.Int64(1),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Size: aws.Int64(200),
|
ETag: aws.String("etag-2"),
|
||||||
ETag: aws.String("bar"),
|
|
||||||
PartNumber: aws.Int64(2),
|
PartNumber: aws.Int64(2),
|
||||||
},
|
},
|
||||||
},
|
|
||||||
NextPartNumberMarker: aws.Int64(2),
|
|
||||||
IsTruncated: aws.Bool(true),
|
|
||||||
}, nil),
|
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
PartNumberMarker: aws.Int64(2),
|
|
||||||
}).Return(&s3.ListPartsOutput{
|
|
||||||
Parts: []*s3.Part{
|
|
||||||
{
|
{
|
||||||
Size: aws.Int64(100),
|
ETag: aws.String("etag-3"),
|
||||||
ETag: aws.String("foobar"),
|
|
||||||
PartNumber: aws.Int64(3),
|
PartNumber: aws.Int64(3),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
},
|
||||||
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
|
}).Return(nil, nil)
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
MultipartUpload: &s3.CompletedMultipartUpload{
|
|
||||||
Parts: []*s3.CompletedPart{
|
|
||||||
{
|
|
||||||
ETag: aws.String("foo"),
|
|
||||||
PartNumber: aws.Int64(1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ETag: aws.String("bar"),
|
|
||||||
PartNumber: aws.Int64(2),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ETag: aws.String("foobar"),
|
|
||||||
PartNumber: aws.Int64(3),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}).Return(nil, nil),
|
|
||||||
)
|
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -696,6 +717,7 @@ func TestWriteChunk(t *testing.T) {
|
||||||
store.MaxMultipartParts = 10000
|
store.MaxMultipartParts = 10000
|
||||||
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
||||||
|
|
||||||
|
// From GetInfo
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
|
@ -710,50 +732,58 @@ func TestWriteChunk(t *testing.T) {
|
||||||
}).Return(&s3.ListPartsOutput{
|
}).Return(&s3.ListPartsOutput{
|
||||||
Parts: []*s3.Part{
|
Parts: []*s3.Part{
|
||||||
{
|
{
|
||||||
Size: aws.Int64(100),
|
Size: aws.Int64(100),
|
||||||
|
ETag: aws.String("etag-1"),
|
||||||
|
PartNumber: aws.Int64(1),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Size: aws.Int64(200),
|
Size: aws.Int64(200),
|
||||||
|
ETag: aws.String("etag-2"),
|
||||||
|
PartNumber: aws.Int64(2),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil).Times(2)
|
}, nil)
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil))
|
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId.part"),
|
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
|
||||||
|
|
||||||
gomock.InOrder(
|
// From WriteChunk
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(3),
|
PartNumber: aws.Int64(3),
|
||||||
Body: bytes.NewReader([]byte("1234")),
|
Body: bytes.NewReader([]byte("1234")),
|
||||||
})).Return(nil, nil),
|
ContentLength: aws.Int64(4),
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
})).Return(&s3.UploadPartOutput{
|
||||||
Bucket: aws.String("bucket"),
|
ETag: aws.String("etag-3"),
|
||||||
Key: aws.String("uploadId"),
|
}, nil)
|
||||||
UploadId: aws.String("multipartId"),
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
PartNumber: aws.Int64(4),
|
Bucket: aws.String("bucket"),
|
||||||
Body: bytes.NewReader([]byte("5678")),
|
Key: aws.String("uploadId"),
|
||||||
})).Return(nil, nil),
|
UploadId: aws.String("multipartId"),
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
PartNumber: aws.Int64(4),
|
||||||
Bucket: aws.String("bucket"),
|
Body: bytes.NewReader([]byte("5678")),
|
||||||
Key: aws.String("uploadId"),
|
ContentLength: aws.Int64(4),
|
||||||
UploadId: aws.String("multipartId"),
|
})).Return(&s3.UploadPartOutput{
|
||||||
PartNumber: aws.Int64(5),
|
ETag: aws.String("etag-4"),
|
||||||
Body: bytes.NewReader([]byte("90AB")),
|
}, nil)
|
||||||
})).Return(nil, nil),
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId.part"),
|
UploadId: aws.String("multipartId"),
|
||||||
Body: bytes.NewReader([]byte("CD")),
|
PartNumber: aws.Int64(5),
|
||||||
})).Return(nil, nil),
|
Body: bytes.NewReader([]byte("90AB")),
|
||||||
)
|
ContentLength: aws.Int64(4),
|
||||||
|
})).Return(&s3.UploadPartOutput{
|
||||||
|
ETag: aws.String("etag-5"),
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.part"),
|
||||||
|
Body: bytes.NewReader([]byte("CD")),
|
||||||
|
})).Return(nil, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -785,29 +815,27 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
|
||||||
}).Return(&s3.ListPartsOutput{
|
}).Return(&s3.ListPartsOutput{
|
||||||
Parts: []*s3.Part{
|
Parts: []*s3.Part{
|
||||||
{
|
{
|
||||||
Size: aws.Int64(100),
|
Size: aws.Int64(100),
|
||||||
|
ETag: aws.String("etag-1"),
|
||||||
|
PartNumber: aws.Int64(1),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Size: aws.Int64(200),
|
Size: aws.Int64(200),
|
||||||
|
ETag: aws.String("etag-2"),
|
||||||
|
PartNumber: aws.Int64(2),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil).Times(2)
|
}, nil)
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil))
|
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist", nil))
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId.part"),
|
||||||
Key: aws.String("uploadId.part"),
|
Body: bytes.NewReader([]byte("1234567890")),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
})).Return(nil, nil)
|
||||||
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId.part"),
|
|
||||||
Body: bytes.NewReader([]byte("1234567890")),
|
|
||||||
})).Return(nil, nil),
|
|
||||||
)
|
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -836,12 +864,19 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil)
|
}, nil)
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
PartNumberMarker: aws.Int64(0),
|
||||||
|
}).Return(&s3.ListPartsOutput{
|
||||||
|
Parts: []*s3.Part{},
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.HeadObjectOutput{
|
||||||
ContentLength: aws.Int64(3),
|
ContentLength: aws.Int64(3),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
|
||||||
}, nil)
|
}, nil)
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
|
@ -854,29 +889,27 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.DeleteObjectOutput{}, nil)
|
}).Return(&s3.DeleteObjectOutput{}, nil)
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
PartNumberMarker: aws.Int64(0),
|
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil).Times(2)
|
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumber: aws.Int64(1),
|
||||||
PartNumber: aws.Int64(1),
|
Body: bytes.NewReader([]byte("1234")),
|
||||||
Body: bytes.NewReader([]byte("1234")),
|
ContentLength: aws.Int64(4),
|
||||||
})).Return(nil, nil),
|
})).Return(&s3.UploadPartOutput{
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
ETag: aws.String("etag-1"),
|
||||||
Bucket: aws.String("bucket"),
|
}, nil)
|
||||||
Key: aws.String("uploadId"),
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
UploadId: aws.String("multipartId"),
|
Bucket: aws.String("bucket"),
|
||||||
PartNumber: aws.Int64(2),
|
Key: aws.String("uploadId"),
|
||||||
Body: bytes.NewReader([]byte("5")),
|
UploadId: aws.String("multipartId"),
|
||||||
})).Return(nil, nil),
|
PartNumber: aws.Int64(2),
|
||||||
)
|
Body: bytes.NewReader([]byte("5")),
|
||||||
|
ContentLength: aws.Int64(1),
|
||||||
|
})).Return(&s3.UploadPartOutput{
|
||||||
|
ETag: aws.String("etag-2"),
|
||||||
|
}, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -910,33 +943,40 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil).Times(2)
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil)
|
||||||
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.part"),
|
||||||
|
}).Return(&s3.HeadObjectOutput{
|
||||||
|
ContentLength: aws.Int64(3),
|
||||||
|
}, nil)
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
ContentLength: aws.Int64(3),
|
ContentLength: aws.Int64(3),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
||||||
}, nil).Times(2)
|
}, nil)
|
||||||
s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
|
s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.DeleteObjectOutput{}, nil)
|
}).Return(&s3.DeleteObjectOutput{}, nil)
|
||||||
|
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
PartNumber: aws.Int64(1),
|
||||||
PartNumber: aws.Int64(1),
|
Body: bytes.NewReader([]byte("1234")),
|
||||||
Body: bytes.NewReader([]byte("1234")),
|
ContentLength: aws.Int64(4),
|
||||||
})).Return(nil, nil),
|
})).Return(&s3.UploadPartOutput{
|
||||||
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
ETag: aws.String("etag-1"),
|
||||||
Bucket: aws.String("bucket"),
|
}, nil)
|
||||||
Key: aws.String("uploadId.part"),
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
Body: bytes.NewReader([]byte("5")),
|
Bucket: aws.String("bucket"),
|
||||||
})).Return(nil, nil),
|
Key: aws.String("uploadId.part"),
|
||||||
)
|
Body: bytes.NewReader([]byte("5")),
|
||||||
|
})).Return(nil, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -969,28 +1009,31 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
|
||||||
}).Return(&s3.ListPartsOutput{
|
}).Return(&s3.ListPartsOutput{
|
||||||
Parts: []*s3.Part{
|
Parts: []*s3.Part{
|
||||||
{
|
{
|
||||||
Size: aws.Int64(400),
|
PartNumber: aws.Int64(1),
|
||||||
|
Size: aws.Int64(400),
|
||||||
|
ETag: aws.String("etag-1"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Size: aws.Int64(90),
|
PartNumber: aws.Int64(2),
|
||||||
|
Size: aws.Int64(90),
|
||||||
|
ETag: aws.String("etag-2"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil).Times(2)
|
}, nil)
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil))
|
}).Return(nil, awserr.New("AccessDenied", "Access Denied.", nil))
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId.part"),
|
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
|
||||||
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(3),
|
PartNumber: aws.Int64(3),
|
||||||
Body: bytes.NewReader([]byte("1234567890")),
|
Body: bytes.NewReader([]byte("1234567890")),
|
||||||
})).Return(nil, nil)
|
ContentLength: aws.Int64(10),
|
||||||
|
})).Return(&s3.UploadPartOutput{
|
||||||
|
ETag: aws.String("etag-3"),
|
||||||
|
}, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
@ -1101,13 +1144,33 @@ func TestConcatUploadsUsingMultipart(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
store.MinPartSize = 100
|
store.MinPartSize = 100
|
||||||
|
|
||||||
|
// Calls from NewUpload
|
||||||
|
s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId"),
|
||||||
|
Metadata: map[string]*string{},
|
||||||
|
}).Return(&s3.CreateMultipartUploadOutput{
|
||||||
|
UploadId: aws.String("multipartId"),
|
||||||
|
}, nil)
|
||||||
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
|
||||||
|
Bucket: aws.String("bucket"),
|
||||||
|
Key: aws.String("uploadId.info"),
|
||||||
|
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":true,"PartialUploads":["aaa+AAA","bbb+BBB","ccc+CCC"],"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
||||||
|
ContentLength: aws.Int64(int64(234)),
|
||||||
|
})
|
||||||
|
|
||||||
|
// Calls from ConcatUploads
|
||||||
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
CopySource: aws.String("bucket/aaa"),
|
CopySource: aws.String("bucket/aaa"),
|
||||||
PartNumber: aws.Int64(1),
|
PartNumber: aws.Int64(1),
|
||||||
}).Return(nil, nil)
|
}).Return(&s3.UploadPartCopyOutput{
|
||||||
|
CopyPartResult: &s3.CopyPartResult{
|
||||||
|
ETag: aws.String("etag-1"),
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
|
||||||
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
|
@ -1115,7 +1178,11 @@ func TestConcatUploadsUsingMultipart(t *testing.T) {
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
CopySource: aws.String("bucket/bbb"),
|
CopySource: aws.String("bucket/bbb"),
|
||||||
PartNumber: aws.Int64(2),
|
PartNumber: aws.Int64(2),
|
||||||
}).Return(nil, nil)
|
}).Return(&s3.UploadPartCopyOutput{
|
||||||
|
CopyPartResult: &s3.CopyPartResult{
|
||||||
|
ETag: aws.String("etag-2"),
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
|
||||||
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
|
@ -1123,55 +1190,45 @@ func TestConcatUploadsUsingMultipart(t *testing.T) {
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
CopySource: aws.String("bucket/ccc"),
|
CopySource: aws.String("bucket/ccc"),
|
||||||
PartNumber: aws.Int64(3),
|
PartNumber: aws.Int64(3),
|
||||||
}).Return(nil, nil)
|
}).Return(&s3.UploadPartCopyOutput{
|
||||||
|
CopyPartResult: &s3.CopyPartResult{
|
||||||
|
ETag: aws.String("etag-3"),
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
|
||||||
// Output from s3Store.FinishUpload
|
// Calls from FinishUpload
|
||||||
gomock.InOrder(
|
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
Bucket: aws.String("bucket"),
|
||||||
Bucket: aws.String("bucket"),
|
Key: aws.String("uploadId"),
|
||||||
Key: aws.String("uploadId"),
|
UploadId: aws.String("multipartId"),
|
||||||
UploadId: aws.String("multipartId"),
|
MultipartUpload: &s3.CompletedMultipartUpload{
|
||||||
PartNumberMarker: aws.Int64(0),
|
Parts: []*s3.CompletedPart{
|
||||||
}).Return(&s3.ListPartsOutput{
|
|
||||||
Parts: []*s3.Part{
|
|
||||||
{
|
{
|
||||||
ETag: aws.String("foo"),
|
ETag: aws.String("etag-1"),
|
||||||
PartNumber: aws.Int64(1),
|
PartNumber: aws.Int64(1),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
ETag: aws.String("bar"),
|
ETag: aws.String("etag-2"),
|
||||||
PartNumber: aws.Int64(2),
|
PartNumber: aws.Int64(2),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
ETag: aws.String("baz"),
|
ETag: aws.String("etag-3"),
|
||||||
PartNumber: aws.Int64(3),
|
PartNumber: aws.Int64(3),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
},
|
||||||
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
|
}).Return(nil, nil)
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId"),
|
|
||||||
UploadId: aws.String("multipartId"),
|
|
||||||
MultipartUpload: &s3.CompletedMultipartUpload{
|
|
||||||
Parts: []*s3.CompletedPart{
|
|
||||||
{
|
|
||||||
ETag: aws.String("foo"),
|
|
||||||
PartNumber: aws.Int64(1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ETag: aws.String("bar"),
|
|
||||||
PartNumber: aws.Int64(2),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ETag: aws.String("baz"),
|
|
||||||
PartNumber: aws.Int64(3),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}).Return(nil, nil),
|
|
||||||
)
|
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
info := handler.FileInfo{
|
||||||
|
ID: "uploadId",
|
||||||
|
IsFinal: true,
|
||||||
|
PartialUploads: []string{
|
||||||
|
"aaa+AAA",
|
||||||
|
"bbb+BBB",
|
||||||
|
"ccc+CCC",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
uploadA, err := store.GetUpload(context.Background(), "aaa+AAA")
|
uploadA, err := store.GetUpload(context.Background(), "aaa+AAA")
|
||||||
|
@ -1269,12 +1326,13 @@ type s3APIWithTempFileAssertion struct {
|
||||||
func (s s3APIWithTempFileAssertion) UploadPartWithContext(context.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) {
|
func (s s3APIWithTempFileAssertion) UploadPartWithContext(context.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) {
|
||||||
assert := s.assert
|
assert := s.assert
|
||||||
|
|
||||||
// Make sure that only the two temporary files from tusd are in here.
|
// Make sure that there are temporary files from tusd in here.
|
||||||
files, err := ioutil.ReadDir(s.tempDir)
|
files, err := ioutil.ReadDir(s.tempDir)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
assert.True(strings.HasPrefix(file.Name(), "tusd-s3-tmp-"))
|
assert.True(strings.HasPrefix(file.Name(), "tusd-s3-tmp-"))
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.GreaterOrEqual(len(files), 1)
|
assert.GreaterOrEqual(len(files), 1)
|
||||||
assert.LessOrEqual(len(files), 3)
|
assert.LessOrEqual(len(files), 3)
|
||||||
|
|
||||||
|
@ -1317,7 +1375,7 @@ func TestWriteChunkCleansUpTempFiles(t *testing.T) {
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":14,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil)
|
}, nil)
|
||||||
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
|
@ -1325,30 +1383,19 @@ func TestWriteChunkCleansUpTempFiles(t *testing.T) {
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{
|
}).Return(&s3.ListPartsOutput{
|
||||||
Parts: []*s3.Part{
|
Parts: []*s3.Part{},
|
||||||
{
|
}, nil)
|
||||||
Size: aws.Int64(100),
|
s3obj.EXPECT().HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
|
||||||
},
|
|
||||||
{
|
|
||||||
Size: aws.Int64(200),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, nil).Times(2)
|
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil))
|
}).Return(nil, awserr.New("NoSuchKey", "Not found", nil))
|
||||||
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
|
||||||
Bucket: aws.String("bucket"),
|
|
||||||
Key: aws.String("uploadId.part"),
|
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
|
||||||
|
|
||||||
// No calls to s3obj.EXPECT().UploadPartWithContext since that is handled by s3APIWithTempFileAssertion
|
// No calls to s3obj.EXPECT().UploadPartWithContext since that is handled by s3APIWithTempFileAssertion
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890ABCD")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 0, bytes.NewReader([]byte("1234567890ABCD")))
|
||||||
assert.NotNil(err)
|
assert.NotNil(err)
|
||||||
assert.Equal(err.Error(), "not now")
|
assert.Equal(err.Error(), "not now")
|
||||||
assert.Equal(int64(0), bytesRead)
|
assert.Equal(int64(0), bytesRead)
|
||||||
|
|
|
@ -15,7 +15,7 @@ function compile {
|
||||||
local dir="tusd_${os}_${arch}"
|
local dir="tusd_${os}_${arch}"
|
||||||
rm -rf "$dir"
|
rm -rf "$dir"
|
||||||
mkdir -p "$dir"
|
mkdir -p "$dir"
|
||||||
GOOS=$os GOARCH=$arch go build \
|
GOOS=$os GOARCH=$arch CGO_ENABLED=0 go build \
|
||||||
-trimpath \
|
-trimpath \
|
||||||
-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
|
-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
|
||||||
-o "$dir/tusd$ext" ./cmd/tusd/main.go
|
-o "$dir/tusd$ext" ./cmd/tusd/main.go
|
||||||
|
|
Loading…
Reference in New Issue