commit 7fd41f1bfc

(deleted file)
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-# Find all packages containing Go source code inside the current directory
-packages=$(find ./ -maxdepth 2 -name '*.go' -printf '%h\n' | sort | uniq)
-
-# The consul package only supports Go1.10+ and therefore we will only run the
-# corresponding tests on these versions.
-goversion=$(go version)
-if [[ "$goversion" == *"go1.5"* ]] ||
-   [[ "$goversion" == *"go1.6"* ]] ||
-   [[ "$goversion" == *"go1.7"* ]] ||
-   [[ "$goversion" == *"go1.8"* ]] ||
-   [[ "$goversion" == *"go1.9"* ]]; then
-
-  echo "Skipping tests requiring Consul which is not supported on $goversion"
-
-  # Exclude consullocker since this may not be run on all Go versions.
-  packages=$(echo "$packages" | sed '/consul/d')
-
-  echo "Skipping tests requiring GCSStore, which is not supported on $goversion"
-  packages=$(echo "$packages" | sed '/gcsstore/d')
-
-  echo "Skipping tests requiring Prometheus, which is not supported on $goversion"
-  packages=$(echo "$packages" | sed '/prometheuscollector/d')
-else
-  # Install the Consul and Prometheus client packages which are not vendored.
-  go get -u github.com/hashicorp/consul/...
-  go get -u github.com/prometheus/client_golang/prometheus
-fi
-
-install_etcd_pkgs() {
-  ETCD_VERSION="3.3.10"
-  go get -u go.etcd.io/etcd/clientv3
-  go get -u github.com/chen-anders/go-etcd-harness
-  wget -q -O /tmp/etcd.tar.gz "https://github.com/etcd-io/etcd/releases/download/v$ETCD_VERSION/etcd-v$ETCD_VERSION-linux-amd64.tar.gz"
-  tar xvzf /tmp/etcd.tar.gz -C /tmp
-  export PATH="$PATH:/tmp/etcd-v$ETCD_VERSION-linux-amd64"
-}
-
-# The etcd 3.3.x package only supports Go1.11+ and therefore
-# we will only run the corresponding tests on these versions.
-if [[ "$goversion" == *"go1.5"* ]] ||
-   [[ "$goversion" == *"go1.6"* ]] ||
-   [[ "$goversion" == *"go1.7"* ]] ||
-   [[ "$goversion" == *"go1.8"* ]] ||
-   [[ "$goversion" == *"go1.9"* ]] ||
-   [[ "$goversion" == *"go1.10"* ]]; then
-  echo "Skipping tests requiring etcd3locker, which is not supported on $goversion"
-  packages=$(echo "$packages" | sed '/etcd3locker/d')
-else
-  # Install the etcd packages which are not vendored.
-  install_etcd_pkgs
-fi
-
-# Install the AWS SDK which is explicitly not vendored
-go get -u github.com/aws/aws-sdk-go/service/s3
-go get -u github.com/aws/aws-sdk-go/aws/...
-
-# Test all packages which are allowed on all Go versions
-go test $packages
-
-go vet $packages
.travis.yml (28 changed lines)
@@ -1,13 +1,12 @@
 language: go
 go:
-- 1.5
-- 1.6
-- 1.7
-- 1.8
-- 1.9
-- "1.10"
-- 1.11
 - 1.12
+- 1.13
+env:
+- GO111MODULE=on
+os:
+- linux
+- windows
 sudo: required
 addons:
   apt:
@@ -18,15 +17,12 @@ cache:
   directories:
   - $HOME/.gimme
   - "$HOME/google-cloud-sdk/"
-env:
-  global:
-  - GO15VENDOREXPERIMENT=1
 install:
 - true
 script:
-- ./.scripts/test_all.sh
+- ./scripts/test_all.sh
 before_deploy:
-- if [[ "$TRAVIS_TAG" != "" ]]; then ./.scripts/build_all.sh; fi
+- if [[ "$TRAVIS_TAG" != "" ]]; then ./scripts/build_all.sh; fi
 deploy:
 - provider: releases
   api_key:
@@ -36,11 +32,13 @@ deploy:
   skip_cleanup: true
   on:
     tags: true
-    go: 1.12
+    go: 1.13
     repo: tus/tusd
+    os: linux
 - provider: script
-  script: .scripts/deploy_kube.sh
+  script: scripts/deploy_kube.sh
   on:
     branch: master
-    go: 1.12
+    go: 1.13
     repo: tus/tusd
+    os: linux
README.md (34 changed lines)
@@ -21,6 +21,9 @@ nearly any other cloud provider could easily be added to tusd.

 **Protocol version:** 1.0.0

+This branch contains tusd v1. If you are looking for the previous major release, before the
+breaking changes introduced in v1, please look at the [0.14.0 tag](https://github.com/tus/tusd/tree/0.14.0).
+
 ## Getting started

 ### Download pre-built binaries (recommended)
@@ -31,7 +34,9 @@ Windows in various formats of the

 ### Compile from source

-The only requirement for building tusd is [Go](http://golang.org/doc/install) 1.5 or newer.
+The only requirement for building tusd is [Go](http://golang.org/doc/install).
+Currently, only Go 1.12 and 1.13 are tested and supported; in the future, only the two latest
+major releases will be supported.
 If you meet these criteria, you can clone the git repository, install the remaining
 dependencies and build the binary:

@@ -52,7 +57,7 @@ snippet demonstrates how to start a tusd process which accepts tus uploads at
 `http://localhost:1080/files/` (notice the trailing slash) and stores them locally in the `./data` directory:

 ```
-$ tusd -dir=./data
+$ tusd -upload-dir=./data
 [tusd] Using './data' as directory storage.
 [tusd] Using 0.00MB as maximum size.
 [tusd] Using 0.0.0.0:1080 as address to listen.
@@ -200,20 +205,20 @@ func main() {

 ```

-Please consult the [online documentation](https://godoc.org/github.com/tus/tusd)
+Please consult the [online documentation](https://godoc.org/github.com/tus/tusd/pkg)
 for more details about tusd's APIs and its sub-packages.

 ## Implementing own storages

 The tusd server is built to be as flexible as possible and to allow the use
 of different upload storage mechanisms. By default the tusd binary includes
-[`filestore`](https://godoc.org/github.com/tus/tusd/filestore) which will save every upload
+[`filestore`](https://godoc.org/github.com/tus/tusd/pkg/filestore) which will save every upload
 to a specific directory on disk.

 If you have different requirements, you can build your own storage backend
 which will save the files to S3, a remote FTP server or similar. Doing so
-is as simple as implementing the [`tusd.DataStore`](https://godoc.org/github.com/tus/tusd/#DataStore)
-interface and using the new struct in the [configuration object](https://godoc.org/github.com/tus/tusd/#Config).
+is as simple as implementing the [`tusd.DataStore`](https://godoc.org/github.com/tus/tusd/pkg/#DataStore)
+interface and using the new struct in the [configuration object](https://godoc.org/github.com/tus/tusd/pkg/#Config).
 Please consult the documentation for detailed information about the
 required methods.

@@ -222,19 +227,18 @@ required methods.
 This repository does not only contain the HTTP server's code but also other
 useful tools:

-* [**s3store**](https://godoc.org/github.com/tus/tusd/s3store): A storage backend using AWS S3
-* [**filestore**](https://godoc.org/github.com/tus/tusd/filestore): A storage backend using the local file system
-* [**gcsstore**](https://godoc.org/github.com/tus/tusd/gcsstore): A storage backend using Google cloud storage
-* [**memorylocker**](https://godoc.org/github.com/tus/tusd/memorylocker): An in-memory locker for handling concurrent uploads
-* [**consullocker**](https://godoc.org/github.com/tus/tusd/consullocker): A locker using the distributed Consul service
-* [**etcd3locker**](https://godoc.org/github.com/tus/tusd/etcd3locker): A locker using the distributed KV etcd3 store
-* [**limitedstore**](https://godoc.org/github.com/tus/tusd/limitedstore): A storage wrapper limiting the total used space for uploads
+* [**s3store**](https://godoc.org/github.com/tus/tusd/pkg/s3store): A storage backend using AWS S3
+* [**filestore**](https://godoc.org/github.com/tus/tusd/pkg/filestore): A storage backend using the local file system
+* [**gcsstore**](https://godoc.org/github.com/tus/tusd/pkg/gcsstore): A storage backend using Google cloud storage
+* [**memorylocker**](https://godoc.org/github.com/tus/tusd/pkg/memorylocker): An in-memory locker for handling concurrent uploads
+* [**filelocker**](https://godoc.org/github.com/tus/tusd/pkg/filelocker): A disk-based locker for handling concurrent uploads

 ### 3rd-Party tusd Packages

 The following packages are supported by 3rd-party maintainers outside of this repository. Please file issues in the packages' respective repositories.

 * [**tusd-dynamo-locker**](https://github.com/chen-anders/tusd-dynamo-locker): A locker using AWS DynamoDB store
+* [**tusd-etcd3-locker**](https://github.com/tus/tusd-etcd3-locker): A locker using the distributed KV etcd3 store

 ## Running the testsuite

@@ -249,7 +253,7 @@ go test -v ./...

 ### How can I access tusd using HTTPS?

-The tusd binary, once executed, listens on the provided port for only non-encrypted HTTP requests and *does not accept* HTTPS connections. This decision has been made to limit the functionality inside this repository which has to be developed, tested and maintained. If you want to send requests to tusd in a secure fashion - which we absolutely encourage - we recommend you to utilize a reverse proxy in front of tusd which accepts incoming HTTPS connections and forwards them to tusd using plain HTTP. More information about this topic, including sample configurations for Nginx and Apache, can be found in [issue #86](https://github.com/tus/tusd/issues/86#issuecomment-269569077) and in the [Apache example configuration](/docs/apache2.conf).
+The tusd binary, once executed, listens on the provided port for only non-encrypted HTTP requests and *does not accept* HTTPS connections. This decision has been made to limit the functionality inside this repository which has to be developed, tested and maintained. If you want to send requests to tusd in a secure fashion - which we absolutely encourage - we recommend you to utilize a reverse proxy in front of tusd which accepts incoming HTTPS connections and forwards them to tusd using plain HTTP. More information about this topic, including sample configurations for Nginx and Apache, can be found in [issue #86](https://github.com/tus/tusd/issues/86#issuecomment-269569077) and in the [Apache example configuration](/examples/apache2.conf).

 ### Can I run tusd behind a reverse proxy?

@@ -261,7 +265,7 @@ Yes, it is absolutely possible to do so. Firstly, you should execute the tusd bi

 - *Forward hostname and scheme.* If the proxy rewrites the request URL, the tusd server does not know the original URL which was used to reach the proxy. This behavior can lead to situations, where tusd returns a redirect to a URL which can not be reached by the client. To avoid this confusion, you can explicitly tell tusd which hostname and scheme to use by supplying the `X-Forwarded-Host` and `X-Forwarded-Proto` headers.

-Explicit examples for the above points can be found in the [Nginx configuration](/docs/nginx.conf) which is used to power the [master.tus.io](https://master.tus.io) instance.
+Explicit examples for the above points can be found in the [Nginx configuration](/examples/nginx.conf) which is used to power the [master.tus.io](https://master.tus.io) instance.

 ### Can I run custom verification/authentication checks before an upload begins?

appveyor.yml (18 changed lines)
@@ -2,25 +2,13 @@ clone_folder: c:\projects\go\src\github.com\tus\tusd

 environment:
   GOPATH: c:\projects\go
-  GO15VENDOREXPERIMENT: 1
+  GO111MODULE: on

-install:
-  - git submodule update --init --recursive
-
 build_script:
   - set PATH=%GOPATH%\bin;%PATH%
   - go env
   - go version
-  - go get ./s3store
-  - go get ./consullocker
-  - go get ./prometheuscollector
-  - go get github.com/hashicorp/consul

 test_script:
-  - go test .
-  - go test ./filestore
-  - go test ./limitedstore
-  - go test ./memorylocker
-  - go test ./s3store
-  - go vet ./prometheuscollector
-  - go test ./gcsstore
+  - go test ./pkg/...
+  - go vet ./pkg/...
@@ -2,25 +2,27 @@ package cli

 import (
     "os"
+    "path/filepath"
+    "strings"

-    "github.com/tus/tusd"
-    "github.com/tus/tusd/filestore"
-    "github.com/tus/tusd/gcsstore"
-    "github.com/tus/tusd/limitedstore"
-    "github.com/tus/tusd/memorylocker"
-    "github.com/tus/tusd/s3store"
+    "github.com/tus/tusd/pkg/filelocker"
+    "github.com/tus/tusd/pkg/filestore"
+    "github.com/tus/tusd/pkg/gcsstore"
+    "github.com/tus/tusd/pkg/handler"
+    "github.com/tus/tusd/pkg/memorylocker"
+    "github.com/tus/tusd/pkg/s3store"

     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/aws/session"
     "github.com/aws/aws-sdk-go/service/s3"
 )

-var Composer *tusd.StoreComposer
+var Composer *handler.StoreComposer

 func CreateComposer() {
     // Attempt to use S3 as a backend if the -s3-bucket option has been supplied.
     // If not, we default to storing them locally on disk.
-    Composer = tusd.NewStoreComposer()
+    Composer = handler.NewStoreComposer()
     if Flags.S3Bucket != "" {
         s3Config := aws.NewConfig()

@@ -41,6 +43,11 @@ func CreateComposer() {
         locker := memorylocker.New()
         locker.UseIn(Composer)
     } else if Flags.GCSBucket != "" {
+        if Flags.GCSObjectPrefix != "" && strings.Contains(Flags.GCSObjectPrefix, "_") {
+            stderr.Fatalf("gcs-object-prefix value (%s) can't contain underscore. "+
+                "Please remove underscore from the value", Flags.GCSObjectPrefix)
+        }
+
         // Derive credentials from service account file path passed in
         // GCS_SERVICE_ACCOUNT_FILE environment variable.
         gcsSAF := os.Getenv("GCS_SERVICE_ACCOUNT_FILE")

@@ -62,7 +69,10 @@ func CreateComposer() {
         locker := memorylocker.New()
         locker.UseIn(Composer)
     } else {
-        dir := Flags.UploadDir
+        dir, err := filepath.Abs(Flags.UploadDir)
+        if err != nil {
+            stderr.Fatalf("Unable to make absolute path: %s", err)
+        }

         stdout.Printf("Using '%s' as directory storage.\n", dir)
         if err := os.MkdirAll(dir, os.FileMode(0774)); err != nil {

@@ -71,19 +81,9 @@ func CreateComposer() {

         store := filestore.New(dir)
         store.UseIn(Composer)
-    }

-    storeSize := Flags.StoreSize
-    maxSize := Flags.MaxSize
-
-    if storeSize > 0 {
-        limitedstore.New(storeSize, Composer.Core, Composer.Terminater).UseIn(Composer)
-        stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
-
-        // We need to ensure that a single upload can fit into the storage size
-        if maxSize > storeSize || maxSize == 0 {
-            Flags.MaxSize = storeSize
-        }
+        locker := filelocker.New(dir)
+        locker.UseIn(Composer)
     }

     stdout.Printf("Using %.2fMB as maximum size.\n", float64(Flags.MaxSize)/1024/1024)
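
For local disk storage, the commit swaps the in-memory locker for the new disk-based `filelocker`. A minimal sketch of the same composition in an embedding program, assuming the v1 `pkg/` import paths shown in this diff:

```go
package main

import (
	"github.com/tus/tusd/pkg/filelocker"
	"github.com/tus/tusd/pkg/filestore"
	"github.com/tus/tusd/pkg/handler"
)

func main() {
	composer := handler.NewStoreComposer()

	// Store uploads on disk and use file-based locks next to them,
	// mirroring the CreateComposer changes above.
	store := filestore.New("./data")
	store.UseIn(composer)

	locker := filelocker.New("./data")
	locker.UseIn(composer)

	_ = composer // pass this to handler.Config{StoreComposer: composer}
}
```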
@@ -14,7 +14,6 @@ var Flags struct {
     HttpSock  string
     MaxSize   int64
     UploadDir string
-    StoreSize int64
     Basepath  string
     Timeout   int64
     S3Bucket  string

@@ -35,9 +34,6 @@ var Flags struct {
     MetricsPath   string
     BehindProxy   bool
     VerboseOutput bool
-
-    FileHooksInstalled bool
-    HttpHooksInstalled bool
 }

 func ParseFlags() {

@@ -45,8 +41,7 @@ func ParseFlags() {
     flag.StringVar(&Flags.HttpPort, "port", "1080", "Port to bind HTTP server to")
     flag.StringVar(&Flags.HttpSock, "unix-sock", "", "If set, will listen to a UNIX socket at this location instead of a TCP socket")
     flag.Int64Var(&Flags.MaxSize, "max-size", 0, "Maximum size of a single upload in bytes")
-    flag.StringVar(&Flags.UploadDir, "dir", "./data", "Directory to store uploads in")
-    flag.Int64Var(&Flags.StoreSize, "store-size", 0, "Size of space allowed for storage")
+    flag.StringVar(&Flags.UploadDir, "upload-dir", "./data", "Directory to store uploads in")
    flag.StringVar(&Flags.Basepath, "base-path", "/files/", "Basepath of the HTTP server")
     flag.Int64Var(&Flags.Timeout, "timeout", 30*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
     flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")

@@ -72,27 +67,6 @@ func ParseFlags() {

     if Flags.FileHooksDir != "" {
         Flags.FileHooksDir, _ = filepath.Abs(Flags.FileHooksDir)
-        Flags.FileHooksInstalled = true
-
-        stdout.Printf("Using '%s' for hooks", Flags.FileHooksDir)
-    }
-
-    if Flags.HttpHooksEndpoint != "" {
-        Flags.HttpHooksInstalled = true
-
-        stdout.Printf("Using '%s' as the endpoint for hooks", Flags.HttpHooksEndpoint)
-    }
-
-    if Flags.UploadDir == "" && Flags.S3Bucket == "" {
-        stderr.Fatalf("Either an upload directory (using -dir) or an AWS S3 Bucket " +
-            "(using -s3-bucket) must be specified to start tusd but " +
-            "neither flag was provided. Please consult `tusd -help` for " +
-            "more information on these options.")
-    }
-
-    if Flags.GCSObjectPrefix != "" && strings.Contains(Flags.GCSObjectPrefix, "_") {
-        stderr.Fatalf("gcs-object-prefix value (%s) can't contain underscore. "+
-            "Please remove underscore from the value", Flags.GCSObjectPrefix)
     }
 }

@@ -114,11 +88,4 @@ func SetEnabledHooks() {
     if len(Flags.EnabledHooks) == 0 {
         Flags.EnabledHooks = hooks.AvailableHooks
     }
-
-    var enabledHooksString []string
-    for _, h := range Flags.EnabledHooks {
-        enabledHooksString = append(enabledHooksString, string(h))
-    }
-
-    stdout.Printf("Enabled hook events: %s", strings.Join(enabledHooksString, ", "))
 }
@@ -3,9 +3,10 @@ package cli

 import (
     "fmt"
     "strconv"
+    "strings"

-    "github.com/tus/tusd"
     "github.com/tus/tusd/cmd/tusd/cli/hooks"
+    "github.com/tus/tusd/pkg/handler"
 )

 var hookHandler hooks.HookHandler = nil

@@ -19,22 +20,19 @@ func hookTypeInSlice(a hooks.HookType, list []hooks.HookType) bool {
     return false
 }

-type hookDataStore struct {
-    tusd.DataStore
-}
-
-func (store hookDataStore) NewUpload(info tusd.FileInfo) (id string, err error) {
+func preCreateCallback(info handler.HookEvent) error {
     if output, err := invokeHookSync(hooks.HookPreCreate, info, true); err != nil {
         if hookErr, ok := err.(hooks.HookError); ok {
-            return "", hooks.NewHookError(
+            return hooks.NewHookError(
                 fmt.Errorf("pre-create hook failed: %s", err),
                 hookErr.StatusCode(),
                 hookErr.Body(),
             )
         }
-        return "", fmt.Errorf("pre-create hook failed: %s\n%s", err, string(output))
+        return fmt.Errorf("pre-create hook failed: %s\n%s", err, string(output))
     }
-    return store.DataStore.NewUpload(info)
+
+    return nil
 }

 func SetupHookMetrics() {

@@ -45,18 +43,24 @@ func SetupHookMetrics() {
     MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
 }

-func SetupPreHooks(composer *tusd.StoreComposer) error {
+func SetupPreHooks(config *handler.Config) error {
     if Flags.FileHooksDir != "" {
+        stdout.Printf("Using '%s' for hooks", Flags.FileHooksDir)
+
         hookHandler = &hooks.FileHook{
             Directory: Flags.FileHooksDir,
         }
     } else if Flags.HttpHooksEndpoint != "" {
+        stdout.Printf("Using '%s' as the endpoint for hooks", Flags.HttpHooksEndpoint)
+
         hookHandler = &hooks.HttpHook{
             Endpoint:   Flags.HttpHooksEndpoint,
             MaxRetries: Flags.HttpHooksRetry,
             Backoff:    Flags.HttpHooksBackoff,
         }
     } else if Flags.PluginHookPath != "" {
+        stdout.Printf("Using '%s' to load plugin for hooks", Flags.PluginHookPath)
+
         hookHandler = &hooks.PluginHook{
             Path: Flags.PluginHookPath,
         }

@@ -64,17 +68,23 @@ func SetupPreHooks(composer *tusd.StoreComposer) error {
         return nil
     }

+    var enabledHooksString []string
+    for _, h := range Flags.EnabledHooks {
+        enabledHooksString = append(enabledHooksString, string(h))
+    }
+
+    stdout.Printf("Enabled hook events: %s", strings.Join(enabledHooksString, ", "))
+
     if err := hookHandler.Setup(); err != nil {
         return err
     }

-    composer.UseCore(hookDataStore{
-        DataStore: composer.Core,
-    })
+    config.PreUploadCreateCallback = preCreateCallback

     return nil
 }

-func SetupPostHooks(handler *tusd.Handler) {
+func SetupPostHooks(handler *handler.Handler) {
     go func() {
         for {
             select {

@@ -91,23 +101,26 @@ func SetupPostHooks(handler *tusd.Handler) {
     }()
 }

-func invokeHookAsync(typ hooks.HookType, info tusd.FileInfo) {
+func invokeHookAsync(typ hooks.HookType, info handler.HookEvent) {
     go func() {
         // Error handling is taken care by the function.
         _, _ = invokeHookSync(typ, info, false)
     }()
 }

-func invokeHookSync(typ hooks.HookType, info tusd.FileInfo, captureOutput bool) ([]byte, error) {
+func invokeHookSync(typ hooks.HookType, info handler.HookEvent, captureOutput bool) ([]byte, error) {
     if !hookTypeInSlice(typ, Flags.EnabledHooks) {
         return nil, nil
     }

+    id := info.Upload.ID
+    size := info.Upload.Size
+
     switch typ {
     case hooks.HookPostFinish:
-        logEv(stdout, "UploadFinished", "id", info.ID, "size", strconv.FormatInt(info.Size, 10))
+        logEv(stdout, "UploadFinished", "id", id, "size", strconv.FormatInt(size, 10))
     case hooks.HookPostTerminate:
-        logEv(stdout, "UploadTerminated", "id", info.ID)
+        logEv(stdout, "UploadTerminated", "id", id)
     }

     if hookHandler == nil {

@@ -116,22 +129,22 @@ func invokeHookSync(typ hooks.HookType, info tusd.FileInfo, captureOutput bool)

     name := string(typ)
     if Flags.VerboseOutput {
-        logEv(stdout, "HookInvocationStart", "type", name, "id", info.ID)
+        logEv(stdout, "HookInvocationStart", "type", name, "id", id)
     }

     output, returnCode, err := hookHandler.InvokeHook(typ, info, captureOutput)

     if err != nil {
-        logEv(stderr, "HookInvocationError", "type", string(typ), "id", info.ID, "error", err.Error())
+        logEv(stderr, "HookInvocationError", "type", string(typ), "id", id, "error", err.Error())
         MetricsHookErrorsTotal.WithLabelValues(string(typ)).Add(1)
     } else if Flags.VerboseOutput {
-        logEv(stdout, "HookInvocationFinish", "type", string(typ), "id", info.ID)
+        logEv(stdout, "HookInvocationFinish", "type", string(typ), "id", id)
     }

     if typ == hooks.HookPostReceive && Flags.HooksStopUploadCode != 0 && Flags.HooksStopUploadCode == returnCode {
-        logEv(stdout, "HookStopUpload", "id", info.ID)
+        logEv(stdout, "HookStopUpload", "id", id)

-        info.StopUpload()
+        info.Upload.StopUpload()
     }

     return output, err
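
With this change, the pre-create hook no longer wraps the data store; it becomes a callback on the handler configuration. A minimal sketch of supplying such a callback from an embedding program, assuming `handler.HookEvent` exposes the upload's details (including its `MetaData` map) under the `Upload` field, as the diff above suggests; the filename check itself is a hypothetical example:

```go
package main

import (
	"fmt"

	"github.com/tus/tusd/pkg/handler"
)

func newConfig() handler.Config {
	return handler.Config{
		PreUploadCreateCallback: func(event handler.HookEvent) error {
			// Returning a non-nil error rejects the creation request.
			if _, ok := event.Upload.MetaData["filename"]; !ok {
				return fmt.Errorf("filename metadata is required")
			}
			return nil
		},
	}
}
```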
@@ -7,7 +7,7 @@ import (
     "os/exec"
     "strconv"

-    "github.com/tus/tusd"
+    "github.com/tus/tusd/pkg/handler"
 )

 type FileHook struct {

@@ -18,13 +18,13 @@ func (_ FileHook) Setup() error {
     return nil
 }

-func (h FileHook) InvokeHook(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, int, error) {
+func (h FileHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
     hookPath := h.Directory + string(os.PathSeparator) + string(typ)
     cmd := exec.Command(hookPath)
     env := os.Environ()
-    env = append(env, "TUS_ID="+info.ID)
-    env = append(env, "TUS_SIZE="+strconv.FormatInt(info.Size, 10))
-    env = append(env, "TUS_OFFSET="+strconv.FormatInt(info.Offset, 10))
+    env = append(env, "TUS_ID="+info.Upload.ID)
+    env = append(env, "TUS_SIZE="+strconv.FormatInt(info.Upload.Size, 10))
+    env = append(env, "TUS_OFFSET="+strconv.FormatInt(info.Upload.Offset, 10))

     jsonInfo, err := json.Marshal(info)
     if err != nil {
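
`FileHook` hands upload details to the hook executable through the environment variables set above (and as JSON on stdin). A hook can be written in any language; a hypothetical Go hook reading those variables might look like:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// TUS_ID, TUS_SIZE and TUS_OFFSET are set by FileHook.InvokeHook above.
	id := os.Getenv("TUS_ID")
	size, _ := strconv.ParseInt(os.Getenv("TUS_SIZE"), 10, 64)
	offset, _ := strconv.ParseInt(os.Getenv("TUS_OFFSET"), 10, 64)

	fmt.Printf("upload %s: %d of %d bytes received\n", id, offset, size)

	// Exit code 0 reports success back to tusd.
	os.Exit(0)
}
```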
@@ -1,12 +1,12 @@
 package hooks

 import (
-    "github.com/tus/tusd"
+    "github.com/tus/tusd/pkg/handler"
 )

 type HookHandler interface {
     Setup() error
-    InvokeHook(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, int, error)
+    InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error)
 }

 type HookType string

@@ -22,7 +22,7 @@ const (
 var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish}

 type hookDataStore struct {
-    tusd.DataStore
+    handler.DataStore
 }

 type HookError struct {
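
Any type satisfying the updated `HookHandler` interface can serve as a hook backend. A minimal sketch, written as if it lived alongside the others in the `hooks` package; the logging body is a hypothetical example:

```go
package hooks

import (
	"log"

	"github.com/tus/tusd/pkg/handler"
)

// logHook is a hypothetical HookHandler that only logs each event.
type logHook struct{}

func (logHook) Setup() error { return nil }

func (logHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
	log.Printf("hook %s fired for upload %s", typ, info.Upload.ID)
	// No captured output, a zero return code and no error.
	return nil, 0, nil
}
```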
@@ -8,7 +8,7 @@ import (
     "net/http"
     "time"

-    "github.com/tus/tusd"
+    "github.com/tus/tusd/pkg/handler"

     "github.com/sethgrid/pester"
 )

@@ -23,7 +23,7 @@ func (_ HttpHook) Setup() error {
     return nil
 }

-func (h HttpHook) InvokeHook(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, int, error) {
+func (h HttpHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
     jsonInfo, err := json.Marshal(info)
     if err != nil {
         return nil, 0, err
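
`HttpHook` serializes the `HookEvent` to JSON and POSTs it to the configured endpoint (with pester handling retries and backoff). A hypothetical receiver sketch, decoding only the upload fields visible in this commit; the route is an assumption:

```go
package main

import (
	"encoding/json"
	"net/http"
)

func main() {
	http.HandleFunc("/tusd-hooks", func(w http.ResponseWriter, r *http.Request) {
		var event struct {
			Upload struct {
				ID     string
				Size   int64
				Offset int64
			}
		}
		if err := json.NewDecoder(r.Body).Decode(&event); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	http.ListenAndServe(":8080", nil)
}
```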
@@ -4,15 +4,15 @@ import (
     "fmt"
     "plugin"

-    "github.com/tus/tusd"
+    "github.com/tus/tusd/pkg/handler"
 )

 type PluginHookHandler interface {
-    PreCreate(info tusd.FileInfo) error
-    PostCreate(info tusd.FileInfo) error
-    PostReceive(info tusd.FileInfo) error
-    PostFinish(info tusd.FileInfo) error
-    PostTerminate(info tusd.FileInfo) error
+    PreCreate(info handler.HookEvent) error
+    PostCreate(info handler.HookEvent) error
+    PostReceive(info handler.HookEvent) error
+    PostFinish(info handler.HookEvent) error
+    PostTerminate(info handler.HookEvent) error
 }

 type PluginHook struct {

@@ -41,7 +41,7 @@ func (h *PluginHook) Setup() error {
     return nil
 }

-func (h PluginHook) InvokeHook(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, int, error) {
+func (h PluginHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
     var err error
     switch typ {
     case HookPostFinish:
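
A Go plugin must now provide a value implementing the updated `PluginHookHandler` interface. A skeletal plugin-side implementation (the symbol name tusd's loader looks up is not shown in this excerpt, so none is assumed here):

```go
package main

import "github.com/tus/tusd/pkg/handler"

// myHookHandler is a hypothetical PluginHookHandler that accepts everything.
type myHookHandler struct{}

func (myHookHandler) PreCreate(info handler.HookEvent) error     { return nil }
func (myHookHandler) PostCreate(info handler.HookEvent) error    { return nil }
func (myHookHandler) PostReceive(info handler.HookEvent) error   { return nil }
func (myHookHandler) PostFinish(info handler.HookEvent) error    { return nil }
func (myHookHandler) PostTerminate(info handler.HookEvent) error { return nil }
```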
@@ -4,12 +4,12 @@ import (
     "log"
     "os"

-    "github.com/tus/tusd"
+    "github.com/tus/tusd/pkg/handler"
 )

 var stdout = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Ltime)
 var stderr = log.New(os.Stderr, "[tusd] ", log.Ldate|log.Ltime)

 func logEv(logOutput *log.Logger, eventName string, details ...string) {
-    tusd.LogEvent(logOutput, eventName, details...)
+    handler.LogEvent(logOutput, eventName, details...)
 }
@@ -3,8 +3,8 @@ package cli
 import (
     "net/http"

-    "github.com/tus/tusd"
-    "github.com/tus/tusd/prometheuscollector"
+    "github.com/tus/tusd/pkg/handler"
+    "github.com/tus/tusd/pkg/prometheuscollector"

     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"

@@ -23,7 +23,7 @@ var MetricsHookErrorsTotal = prometheus.NewCounterVec(
     []string{"hooktype"},
 )

-func SetupMetrics(handler *tusd.Handler) {
+func SetupMetrics(handler *handler.Handler) {
     prometheus.MustRegister(MetricsOpenConnections)
     prometheus.MustRegister(MetricsHookErrorsTotal)
     prometheus.MustRegister(prometheuscollector.New(handler.Metrics))
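
Embedders can expose the same collector on their own metrics endpoint. A sketch using the packages imported above; the `/metrics` route and the surrounding function are assumptions:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/prometheuscollector"
)

// exposeMetrics registers tusd's collector with Prometheus and serves it.
// tusdHandler is assumed to be a previously created *handler.Handler.
func exposeMetrics(tusdHandler *handler.Handler) {
	prometheus.MustRegister(prometheuscollector.New(tusdHandler.Metrics))
	http.Handle("/metrics", promhttp.Handler())
}
```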
@@ -3,9 +3,10 @@ package cli
 import (
     "net"
     "net/http"
+    "strings"
     "time"

-    "github.com/tus/tusd"
+    "github.com/tus/tusd/pkg/handler"
 )

 // Setups the different components, starts a Listener and give it to

@@ -15,11 +16,7 @@
 // specified, in which case a different socket creation and binding mechanism
 // is put in place.
 func Serve() {
-    if err := SetupPreHooks(Composer); err != nil {
-        stderr.Fatalf("Unable to setup hooks for handler: %s", err)
-    }
-
-    handler, err := tusd.NewHandler(tusd.Config{
+    config := handler.Config{
         MaxSize:                 Flags.MaxSize,
         BasePath:                Flags.Basepath,
         RespectForwardedHeaders: Flags.BehindProxy,

@@ -28,7 +25,13 @@
         NotifyTerminatedUploads: true,
         NotifyUploadProgress:    true,
         NotifyCreatedUploads:    true,
-    })
+    }
+
+    if err := SetupPreHooks(&config); err != nil {
+        stderr.Fatalf("Unable to setup hooks for handler: %s", err)
+    }
+
+    handler, err := handler.NewHandler(config)
     if err != nil {
         stderr.Fatalf("Unable to create handler: %s", err)
     }

@@ -53,7 +56,7 @@ func Serve() {
         SetupHookMetrics()
     }

-    stdout.Printf(Composer.Capabilities())
+    stdout.Printf("Supported tus extensions: %s\n", handler.SupportedExtensions())

     // Do not display the greeting if the tusd handler will be mounted at the root
     // path. Else this would cause a "multiple registrations for /" panic.

@@ -63,6 +66,13 @@

     http.Handle(basepath, http.StripPrefix(basepath, handler))

+    // Also register a route without the trailing slash, so we can handle uploads
+    // for /files/ and /files, for example.
+    if strings.HasSuffix(basepath, "/") {
+        basepathWithoutSlash := strings.TrimSuffix(basepath, "/")
+        http.Handle(basepathWithoutSlash, http.StripPrefix(basepathWithoutSlash, handler))
+    }
+
     var listener net.Listener
     timeoutDuration := time.Duration(Flags.Timeout) * time.Millisecond
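
The dual route registration above makes uploads work whether or not clients include the trailing slash. The same pattern, extracted into a standalone sketch that mirrors the `Serve` changes:

```go
package main

import (
	"net/http"
	"strings"

	"github.com/tus/tusd/pkg/handler"
)

// mount registers a tusd handler under basepath both with and without the
// trailing slash, so /files/ and /files are served identically.
func mount(basepath string, h *handler.Handler) {
	http.Handle(basepath, http.StripPrefix(basepath, h))

	if strings.HasSuffix(basepath, "/") {
		noSlash := strings.TrimSuffix(basepath, "/")
		http.Handle(noSlash, http.StripPrefix(noSlash, h))
	}
}
```

Note the reordering in `Serve`: the config is now built first and passed to `SetupPreHooks`, which installs `PreUploadCreateCallback` before `handler.NewHandler(config)` is called.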
(deleted file)
@@ -1,27 +0,0 @@
-package tusd_test
-
-import (
-    "github.com/tus/tusd"
-    "github.com/tus/tusd/filestore"
-    "github.com/tus/tusd/limitedstore"
-    "github.com/tus/tusd/memorylocker"
-)
-
-func ExampleNewStoreComposer() {
-    composer := tusd.NewStoreComposer()
-
-    fs := filestore.New("./data")
-    fs.UseIn(composer)
-
-    ml := memorylocker.New()
-    ml.UseIn(composer)
-
-    ls := limitedstore.New(1024*1024*1024, composer.Core, composer.Terminater)
-    ls.UseIn(composer)
-
-    config := tusd.Config{
-        StoreComposer: composer,
-    }
-
-    _, _ = tusd.NewHandler(config)
-}
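
The removed example relied on `limitedstore`, which this commit drops entirely. A sketch of what the equivalent setup might look like under the v1 layout, without the storage limit and using only import paths that appear elsewhere in this commit:

```go
package main

import (
	"github.com/tus/tusd/pkg/filestore"
	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/memorylocker"
)

func main() {
	composer := handler.NewStoreComposer()

	fs := filestore.New("./data")
	fs.UseIn(composer)

	ml := memorylocker.New()
	ml.UseIn(composer)

	config := handler.Config{
		StoreComposer: composer,
	}

	_, _ = handler.NewHandler(config)
}
```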
concat_test.go (252 deleted lines)
@@ -1,252 +0,0 @@
-package tusd_test
-
-import (
-    "net/http"
-    "strings"
-    "testing"
-
-    "github.com/golang/mock/gomock"
-    "github.com/stretchr/testify/assert"
-
-    . "github.com/tus/tusd"
-)
-
-func TestConcat(t *testing.T) {
-    SubTest(t, "ExtensionDiscovery", func(t *testing.T, store *MockFullDataStore) {
-        composer := NewStoreComposer()
-        composer.UseCore(store)
-        composer.UseConcater(store)
-
-        handler, _ := NewHandler(Config{
-            StoreComposer: composer,
-        })
-
-        (&httpTest{
-            Method: "OPTIONS",
-            Code:   http.StatusOK,
-            ResHeader: map[string]string{
-                "Tus-Extension": "creation,creation-with-upload,concatenation",
-            },
-        }).Run(handler, t)
-    })
-
-    SubTest(t, "Partial", func(t *testing.T, store *MockFullDataStore) {
-        SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore) {
-            store.EXPECT().NewUpload(FileInfo{
-                Size:           300,
-                IsPartial:      true,
-                IsFinal:        false,
-                PartialUploads: nil,
-                MetaData:       make(map[string]string),
-            }).Return("foo", nil)
-
-            handler, _ := NewHandler(Config{
-                BasePath:  "files",
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "POST",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                    "Upload-Length": "300",
-                    "Upload-Concat": "partial",
-                },
-                Code: http.StatusCreated,
-            }).Run(handler, t)
-        })
-
-        SubTest(t, "Status", func(t *testing.T, store *MockFullDataStore) {
-            store.EXPECT().GetInfo("foo").Return(FileInfo{
-                IsPartial: true,
-            }, nil)
-
-            handler, _ := NewHandler(Config{
-                BasePath:  "files",
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "HEAD",
-                URL:    "foo",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                },
-                Code: http.StatusOK,
-                ResHeader: map[string]string{
-                    "Upload-Concat": "partial",
-                },
-            }).Run(handler, t)
-        })
-    })
-
-    SubTest(t, "Final", func(t *testing.T, store *MockFullDataStore) {
-        SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore) {
-            a := assert.New(t)
-
-            gomock.InOrder(
-                store.EXPECT().GetInfo("a").Return(FileInfo{
-                    IsPartial: true,
-                    Size:      5,
-                    Offset:    5,
-                }, nil),
-                store.EXPECT().GetInfo("b").Return(FileInfo{
-                    IsPartial: true,
-                    Size:      5,
-                    Offset:    5,
-                }, nil),
-                store.EXPECT().NewUpload(FileInfo{
-                    Size:           10,
-                    IsPartial:      false,
-                    IsFinal:        true,
-                    PartialUploads: []string{"a", "b"},
-                    MetaData:       make(map[string]string),
-                }).Return("foo", nil),
-                store.EXPECT().ConcatUploads("foo", []string{"a", "b"}).Return(nil),
-            )
-
-            handler, _ := NewHandler(Config{
-                BasePath:              "files",
-                DataStore:             store,
-                NotifyCompleteUploads: true,
-            })
-
-            c := make(chan FileInfo, 1)
-            handler.CompleteUploads = c
-
-            (&httpTest{
-                Method: "POST",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                    // A space between `final;` and the first URL should be allowed due to
-                    // compatibility reasons, even if the specification does not define
-                    // it. Therefore this character is included in this test case.
-                    "Upload-Concat": "final; http://tus.io/files/a /files/b/",
-                },
-                Code: http.StatusCreated,
-            }).Run(handler, t)
-
-            info := <-c
-            a.Equal("foo", info.ID)
-            a.EqualValues(10, info.Size)
-            a.EqualValues(10, info.Offset)
-            a.False(info.IsPartial)
-            a.True(info.IsFinal)
-            a.Equal([]string{"a", "b"}, info.PartialUploads)
-        })
-
-        SubTest(t, "Status", func(t *testing.T, store *MockFullDataStore) {
-            store.EXPECT().GetInfo("foo").Return(FileInfo{
-                IsFinal:        true,
-                PartialUploads: []string{"a", "b"},
-                Size:           10,
-                Offset:         10,
-            }, nil)
-
-            handler, _ := NewHandler(Config{
-                BasePath:  "files",
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "HEAD",
-                URL:    "foo",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                },
-                Code: http.StatusOK,
-                ResHeader: map[string]string{
-                    "Upload-Concat": "final;http://tus.io/files/a http://tus.io/files/b",
-                    "Upload-Length": "10",
-                    "Upload-Offset": "10",
-                },
-            }).Run(handler, t)
-        })
-
-        SubTest(t, "CreateWithUnfinishedFail", func(t *testing.T, store *MockFullDataStore) {
-            // This upload is still unfinished (mismatching offset and size) and
-            // will therefore cause the POST request to fail.
-            store.EXPECT().GetInfo("c").Return(FileInfo{
-                IsPartial: true,
-                Size:      5,
-                Offset:    3,
-            }, nil)
-
-            handler, _ := NewHandler(Config{
-                BasePath:  "files",
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "POST",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                    "Upload-Concat": "final;http://tus.io/files/c",
-                },
-                Code: http.StatusBadRequest,
-            }).Run(handler, t)
-        })
-
-        SubTest(t, "CreateExceedingMaxSizeFail", func(t *testing.T, store *MockFullDataStore) {
-            store.EXPECT().GetInfo("huge").Return(FileInfo{
-                Size:   1000,
-                Offset: 1000,
-            }, nil)
-
-            handler, _ := NewHandler(Config{
-                MaxSize:   100,
-                BasePath:  "files",
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "POST",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                    "Upload-Concat": "final;/files/huge",
-                },
-                Code: http.StatusRequestEntityTooLarge,
-            }).Run(handler, t)
-        })
-
-        SubTest(t, "UploadToFinalFail", func(t *testing.T, store *MockFullDataStore) {
-            store.EXPECT().GetInfo("foo").Return(FileInfo{
-                Size:    10,
-                Offset:  0,
-                IsFinal: true,
-            }, nil)
-
-            handler, _ := NewHandler(Config{
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "PATCH",
-                URL:    "foo",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                    "Content-Type":  "application/offset+octet-stream",
-                    "Upload-Offset": "5",
-                },
-                ReqBody: strings.NewReader("hello"),
-                Code:    http.StatusForbidden,
-            }).Run(handler, t)
-        })
-
-        SubTest(t, "InvalidConcatHeaderFail", func(t *testing.T, store *MockFullDataStore) {
-            handler, _ := NewHandler(Config{
-                DataStore: store,
-            })
-
-            (&httpTest{
-                Method: "POST",
-                URL:    "",
-                ReqHeader: map[string]string{
-                    "Tus-Resumable": "1.0.0",
-                    "Upload-Concat": "final;",
-                },
-                Code: http.StatusBadRequest,
-            }).Run(handler, t)
-        })
-    })
-}
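
These removed tests document the concatenation flow: partial uploads are created with `Upload-Concat: partial` and later stitched together by a POST carrying a `final;` header that lists their URLs. A hypothetical client-side sketch of that final request, using only headers that appear in the tests:

```go
package main

import "net/http"

// concatUploads stitches two finished partial uploads into a final one.
// base is the creation endpoint, e.g. "http://localhost:1080/files/".
func concatUploads(base, partialA, partialB string) (*http.Response, error) {
	req, err := http.NewRequest("POST", base, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Tus-Resumable", "1.0.0")
	// The header lists the partial uploads' URLs, space-separated.
	req.Header.Set("Upload-Concat", "final;"+partialA+" "+partialB)
	return http.DefaultClient.Do(req)
}
```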
(deleted file)
@@ -1,122 +0,0 @@
-// Package consullocker provides a locking mechanism using a Consul server.
-//
-// Consul's (https://www.consul.io) key/value storage system can also be used
-// for building a distributed exclusive locking mechanism, often referred to
-// as leader election (https://www.consul.io/docs/guides/leader-election.html).
-//
-// Due to Consul being an external server, connection issues can occur between
-// tusd and Consul. In this situation, tusd cannot always ensure that it still
-// holds a lock and may panic in an unrecoverable way. This may seems like an
-// inconvenient decision but is probably the best solution since we are not
-// able to interrupt other goroutines which may be involved in moving the
-// uploaded data to a backend.
-package consullocker
-
-import (
-    "sync"
-    "time"
-
-    consul "github.com/hashicorp/consul/api"
-    "github.com/tus/tusd"
-)
-
-type ConsulLocker struct {
-    // Client used to connect to the Consul server
-    Client *consul.Client
-
-    // ConnectionName is an optional field which may contain a human-readable
-    // description for the connection. It is only used for composing error
-    // messages and can be used to match them to a specific Consul instance.
-    ConnectionName string
-
-    // locks is used for storing consul.Lock structs before they are unlocked.
-    // If you want to release a lock, you need the same consul.Lock instance
-    // and therefore we need to save them temporarily.
-    locks map[string]*consul.Lock
-    mutex *sync.RWMutex
-}
-
-// New constructs a new locker using the provided client.
-func New(client *consul.Client) *ConsulLocker {
-    return &ConsulLocker{
-        Client: client,
-        locks:  make(map[string]*consul.Lock),
-        mutex:  new(sync.RWMutex),
-    }
-}
-
-// UseIn adds this locker to the passed composer.
-func (locker *ConsulLocker) UseIn(composer *tusd.StoreComposer) {
-    composer.UseLocker(locker)
-}
-
-// LockUpload tries to obtain the exclusive lock.
-func (locker *ConsulLocker) LockUpload(id string) error {
-    lock, err := locker.Client.LockOpts(&consul.LockOptions{
-        Key:          id + "/" + consul.DefaultSemaphoreKey,
-        LockTryOnce:  true,
-        LockWaitTime: time.Second,
-    })
-    if err != nil {
-        return err
-    }
-
-    ch, err := lock.Lock(nil)
-    if ch == nil {
-        if err == nil || err == consul.ErrLockHeld {
-            return tusd.ErrFileLocked
-        } else {
-            return err
-        }
-    }
-
-    locker.mutex.Lock()
-    defer locker.mutex.Unlock()
-    // Only add the lock to our list if the acquire was successful and no error appeared.
-    locker.locks[id] = lock
-
-    go func() {
-        // This channel will be closed once we lost the lock. This can either happen
-        // wanted (using the Unlock method) or by accident, e.g. if the connection
-        // to the Consul server is lost.
-        <-ch
-
-        locker.mutex.RLock()
-        defer locker.mutex.RUnlock()
-        // Only proceed if the lock has been lost by accident. If we cannot find it
-        // in the map, it has already been gracefully removed (see UnlockUpload).
-        if _, ok := locker.locks[id]; !ok {
-            return
-        }
-
-        msg := "consullocker: lock for upload '" + id + "' has been lost."
-        if locker.ConnectionName != "" {
-            msg += " Please ensure that the connection to '" + locker.ConnectionName + "' is stable."
-        } else {
-            msg += " Please ensure that the connection to Consul is stable (use ConnectionName to provide a printable name)."
-        }
-
-        // This will cause the program to crash since a panic can only be recovered
-        // from the causing goroutine.
-        panic(msg)
-    }()
-
-    return nil
-}
-
-// UnlockUpload releases a lock.
-func (locker *ConsulLocker) UnlockUpload(id string) error {
-    locker.mutex.Lock()
-    defer locker.mutex.Unlock()
-
-    // Complain if no lock has been found. This can only happen if LockUpload
-    // has not been invoked before or UnlockUpload multiple times.
-    lock, ok := locker.locks[id]
-    if !ok {
-        return consul.ErrLockNotHeld
-    }
-
-    defer delete(locker.locks, id)
-
-    return lock.Unlock()
-}
(deleted file)
@@ -1,61 +0,0 @@
-package consullocker
-
-import (
-    "testing"
-    "time"
-
-    consul "github.com/hashicorp/consul/api"
-    consultestutil "github.com/hashicorp/consul/sdk/testutil"
-    "github.com/stretchr/testify/assert"
-
-    "github.com/tus/tusd"
-)
-
-func TestConsulLocker(t *testing.T) {
-    a := assert.New(t)
-
-    server, err := consultestutil.NewTestServer()
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer server.Stop()
-
-    conf := consul.DefaultConfig()
-    conf.Address = server.HTTPAddr
-    client, err := consul.NewClient(conf)
-    a.NoError(err)
-
-    locker := New(client)
-
-    a.NoError(locker.LockUpload("one"))
-    a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))
-    a.NoError(locker.UnlockUpload("one"))
-    a.Equal(consul.ErrLockNotHeld, locker.UnlockUpload("one"))
-}
-
-func TestLockLost(t *testing.T) {
-    // This test will panic because the connection to Consul will be cut, which
-    // is intended.
-    // TODO: find a way to test this
-    t.SkipNow()
-
-    a := assert.New(t)
-
-    server, err := consultestutil.NewTestServer()
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    client, err := consul.NewClient(&consul.Config{
-        Address: server.HTTPAddr,
-    })
-    a.NoError(err)
-
-    locker := New(client)
-    locker.ConnectionName = server.HTTPAddr
-
-    a.NoError(locker.LockUpload("two"))
-
-    server.Stop()
-    time.Sleep(time.Hour)
-}
docs/hooks.md
@ -61,43 +61,66 @@ $ tusd --hooks-dir ./path/to/hooks/
...
```

If an event occurs, the tusd binary will look for a file named exactly like the event, which will then be executed, as long as such a file exists. In the example above, the binary `./path/to/hooks/pre-create` will be invoked before an upload is created, which can be used to e.g. validate certain metadata. Please note that in UNIX environments the hook file *must not* have an extension, such as `.sh` or `.py`, or else tusd will not recognize it and will ignore it. On Windows, however, the hook file *must* have an extension, such as `.bat` or `.exe`.

### The Hook's Environment

The hook process is provided with information about the event and the upload using two methods:
* The `TUS_ID` and `TUS_SIZE` environment variables will contain the ID and the size in bytes of the upload which triggered the event. Please be aware that in the `pre-create` hook the upload ID will be an empty string, as the entity has not been created yet and therefore this piece of information is not available.
* On `stdin` a JSON-encoded object can be read which contains more details about the corresponding event in the following format (see the sketch after this example for one way to consume it):

```js
{
  // The upload object contains the upload's details
  "Upload": {
    // The upload's ID. Will be empty during the pre-create event
    "ID": "14b1c4c77771671a8479bc0444bbc5ce",
    // The upload's total size in bytes.
    "Size": 46205,
    // The upload's current offset in bytes.
    "Offset": 1592,
    // These properties will be set to true, if the upload is a final or partial
    // one. See the Concatenation extension for details:
    // http://tus.io/protocols/resumable-upload.html#concatenation
    "IsFinal": false,
    "IsPartial": false,
    // If the upload is a final one, this value will be an array of upload IDs
    // which are concatenated to produce the upload.
    "PartialUploads": null,
    // The upload's meta data which can be supplied by the client as it wishes.
    // All keys and values in this object will be strings.
    // Be aware that it may contain maliciously crafted values and you must not
    // trust it without escaping it first!
    "MetaData": {
      "filename": "transloadit.png"
    },
    // Details about where the data store saved the uploaded file. The
    // available keys vary depending on the used data store.
    "Storage": {
      // For example, the filestore supplies the absolute file path:
      "Type": "filestore",
      "Path": "/my/upload/directory/14b1c4c77771671a8479bc0444bbc5ce",

      // The S3Store and GCSStore supply the bucket name and object key:
      "Type": "s3store",
      "Bucket": "my-upload-bucket",
      "Key": "my-prefix/14b1c4c77771671a8479bc0444bbc5ce"
    }
  },
  // Details about the HTTP request which caused this hook to be fired.
  // It can be used to record the client's IP address or inspect the headers.
  "HTTPRequest": {
    "Method": "PATCH",
    "URI": "/files/14b1c4c77771671a8479bc0444bbc5ce",
    "RemoteAddr": "1.2.3.4:47689",
    "Header": {
      "Host": "myuploads.net",
      "Cookies": "..."
    }
  }
}
```
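
As a rough illustration of how a file hook can consume this information, here is a minimal sketch of a hook executable written in Go. The payload fields match the example above; the `filename` requirement is purely hypothetical, and treating a non-zero exit code as a rejection applies to the blocking `pre-create` event.

```go
// Hypothetical pre-create hook: compile this and place the binary at
// ./path/to/hooks/pre-create (no extension on UNIX, see above).
package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// Only the fields this sketch inspects; the full payload contains more.
type hookPayload struct {
    Upload struct {
        ID       string
        Size     int64
        MetaData map[string]string
    }
}

func main() {
    // Method one: environment variables (the ID is empty during pre-create).
    fmt.Fprintln(os.Stderr, "hook invoked for upload:", os.Getenv("TUS_ID"))

    // Method two: the JSON-encoded object on stdin.
    var payload hookPayload
    if err := json.NewDecoder(os.Stdin).Decode(&payload); err != nil {
        fmt.Fprintln(os.Stderr, "invalid hook payload:", err)
        os.Exit(1)
    }

    // Hypothetical validation: reject uploads without a filename. Exiting
    // with a non-zero code makes tusd reject the upload during pre-create.
    if payload.Upload.MetaData["filename"] == "" {
        fmt.Fprintln(os.Stderr, "filename metadata is required")
        os.Exit(1)
    }
}
```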

## HTTP Hooks

HTTP Hooks are the second type of hooks supported by tusd. Like the file hooks, they are disabled by default. To enable them, pass the `--hooks-http` option to the tusd binary. The flag's value will be an HTTP URL endpoint, which the tusd binary will issue POST requests to:
@ -121,26 +144,52 @@ Tusd will issue a `POST` request to the specified URL endpoint, specifying the h

```js
{
  // The upload object contains the upload's details
  "Upload": {
    // The upload's ID. Will be empty during the pre-create event
    "ID": "14b1c4c77771671a8479bc0444bbc5ce",
    // The upload's total size in bytes.
    "Size": 46205,
    // The upload's current offset in bytes.
    "Offset": 1592,
    // These properties will be set to true, if the upload is a final or partial
    // one. See the Concatenation extension for details:
    // http://tus.io/protocols/resumable-upload.html#concatenation
    "IsFinal": false,
    "IsPartial": false,
    // If the upload is a final one, this value will be an array of upload IDs
    // which are concatenated to produce the upload.
    "PartialUploads": null,
    // The upload's meta data which can be supplied by the client as it wishes.
    // All keys and values in this object will be strings.
    // Be aware that it may contain maliciously crafted values and you must not
    // trust it without escaping it first!
    "MetaData": {
      "filename": "transloadit.png"
    },
    // Details about where the data store saved the uploaded file. The
    // available keys vary depending on the used data store.
    "Storage": {
      // For example, the filestore supplies the absolute file path:
      "Type": "filestore",
      "Path": "/my/upload/directory/14b1c4c77771671a8479bc0444bbc5ce",

      // The S3Store and GCSStore supply the bucket name and object key:
      "Type": "s3store",
      "Bucket": "my-upload-bucket",
      "Key": "my-prefix/14b1c4c77771671a8479bc0444bbc5ce"
    }
  },
  // Details about the HTTP request which caused this hook to be fired.
  // It can be used to record the client's IP address or inspect the headers.
  "HTTPRequest": {
    "Method": "PATCH",
    "URI": "/files/14b1c4c77771671a8479bc0444bbc5ce",
    "RemoteAddr": "1.2.3.4:47689",
    "Header": {
      "Host": "myuploads.net",
      "Cookies": "..."
    }
  }
}
```
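
To make the flow concrete, here is a minimal sketch of an HTTP hook receiver in Go. The payload fields follow the example above; reading the event name from the `Hook-Name` header follows tusd's hook documentation, while the port and path are assumptions for illustration.

```go
// Minimal sketch of an HTTP hook receiver; start tusd with
// --hooks-http http://localhost:8081/hooks to point at it.
package main

import (
    "encoding/json"
    "log"
    "net/http"
)

// Only the fields this sketch logs; the full payload contains more.
type hookPayload struct {
    Upload struct {
        ID     string
        Size   int64
        Offset int64
    }
    HTTPRequest struct {
        RemoteAddr string
    }
}

func main() {
    http.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) {
        // tusd transmits the event name in a header and the details as JSON.
        event := r.Header.Get("Hook-Name")

        var payload hookPayload
        if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }

        log.Printf("event=%s upload=%s offset=%d client=%s",
            event, payload.Upload.ID, payload.Upload.Offset, payload.HTTPRequest.RemoteAddr)

        w.WriteHeader(http.StatusOK)
    })

    log.Fatal(http.ListenAndServe(":8081", nil))
}
```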
@ -1,51 +0,0 @@
// Package etcd3locker provides a locking mechanism using an etcd3 cluster.
// Tested on etcd 3.1/3.2/3.3
package etcd3locker

import (
	"context"
	"time"

	"github.com/tus/tusd"
	"go.etcd.io/etcd/clientv3/concurrency"
)

type etcd3Lock struct {
	Id      string
	Mutex   *concurrency.Mutex
	Session *concurrency.Session
}

func newEtcd3Lock(session *concurrency.Session, id string) *etcd3Lock {
	return &etcd3Lock{
		Mutex:   concurrency.NewMutex(session, id),
		Session: session,
	}
}

// Acquire acquires a lock from etcd3.
func (lock *etcd3Lock) Acquire() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// This is a blocking call; if we receive DeadlineExceeded,
	// the lock is most likely already taken.
	if err := lock.Mutex.Lock(ctx); err != nil {
		if err == context.DeadlineExceeded {
			return tusd.ErrFileLocked
		}
		return err
	}
	return nil
}

// Release releases a lock from etcd3.
func (lock *etcd3Lock) Release() error {
	return lock.Mutex.Unlock(context.Background())
}

// CloseSession closes the etcd3 session.
func (lock *etcd3Lock) CloseSession() error {
	return lock.Session.Close()
}
@ -1,145 +0,0 @@
// Package etcd3locker provides a locking mechanism using an etcd3 cluster.
//
// To initialize a locker, a pre-existing connected etcd3 client must be present:
//
//     client, err := clientv3.New(clientv3.Config{
//         Endpoints:   []string{harness.Endpoint},
//         DialTimeout: 5 * time.Second,
//     })
//
// For the most basic locker (e.g. a non-shared etcd3 cluster / default TTLs),
// a locker can be instantiated like the following:
//
//     locker, err := etcd3locker.New(client)
//     if err != nil {
//         return nil, fmt.Errorf("Failed to create etcd locker: %v", err.Error())
//     }
//
// The locker will need to be included in the composer that is used by tusd:
//
//     composer := tusd.NewStoreComposer()
//     locker.UseIn(composer)
//
// For a shared etcd3 cluster, you may want to modify the prefix that etcd3locker uses:
//
//     locker, err := etcd3locker.NewWithPrefix(client, "my-prefix")
//     if err != nil {
//         return nil, fmt.Errorf("Failed to create etcd locker: %v", err.Error())
//     }
//
// For full control over all options, a LockerOptions may be passed into
// etcd3locker.NewWithLockerOptions like the following example:
//
//     ttl := 15 // seconds
//     options := etcd3locker.NewLockerOptions(ttl, "my-prefix")
//     locker, err := etcd3locker.NewWithLockerOptions(client, options)
//     if err != nil {
//         return nil, fmt.Errorf("Failed to create etcd locker: %v", err.Error())
//     }
//
// Tested on etcd 3.1/3.2/3.3
package etcd3locker

import (
	"errors"
	"sync"
	"time"

	"github.com/tus/tusd"
	etcd3 "go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

var (
	ErrLockNotHeld = errors.New("Lock not held")
	GrantTimeout   = 1500 * time.Millisecond
)

type Etcd3Locker struct {
	// Client is the etcd3 client whose sessions are used for locking.
	Client *etcd3.Client

	// locks is used for storing etcd3Locks before they are
	// unlocked. If you want to release a lock, you need the same locker
	// instance and therefore we need to save them temporarily.
	locks      map[string]*etcd3Lock
	mutex      sync.Mutex
	prefix     string
	sessionTtl int
}

// New constructs a new locker using the provided client.
func New(client *etcd3.Client) (*Etcd3Locker, error) {
	return NewWithLockerOptions(client, DefaultLockerOptions())
}

// NewWithPrefix may be used if a different prefix is required for multi-tenant etcd clusters.
func NewWithPrefix(client *etcd3.Client, prefix string) (*Etcd3Locker, error) {
	lockerOptions := DefaultLockerOptions()
	lockerOptions.SetPrefix(prefix)
	return NewWithLockerOptions(client, lockerOptions)
}

// NewWithLockerOptions may be used if we want control over both the prefix and
// the session TTL. This is used for testing in particular.
func NewWithLockerOptions(client *etcd3.Client, opts LockerOptions) (*Etcd3Locker, error) {
	locksMap := map[string]*etcd3Lock{}
	return &Etcd3Locker{Client: client, prefix: opts.Prefix(), sessionTtl: opts.Ttl(), locks: locksMap, mutex: sync.Mutex{}}, nil
}

// UseIn adds this locker to the passed composer.
func (locker *Etcd3Locker) UseIn(composer *tusd.StoreComposer) {
	composer.UseLocker(locker)
}

// LockUpload tries to obtain the exclusive lock.
func (locker *Etcd3Locker) LockUpload(id string) error {
	session, err := locker.createSession()
	if err != nil {
		return err
	}

	lock := newEtcd3Lock(session, locker.getId(id))

	err = lock.Acquire()
	if err != nil {
		return err
	}

	locker.mutex.Lock()
	defer locker.mutex.Unlock()
	// Only add the lock to our list if the acquire was successful and no error appeared.
	locker.locks[locker.getId(id)] = lock

	return nil
}

// UnlockUpload releases a lock.
func (locker *Etcd3Locker) UnlockUpload(id string) error {
	locker.mutex.Lock()
	defer locker.mutex.Unlock()

	// Complain if no lock has been found. This can only happen if LockUpload
	// has not been invoked before or if UnlockUpload has been invoked multiple times.
	lock, ok := locker.locks[locker.getId(id)]
	if !ok {
		return ErrLockNotHeld
	}

	err := lock.Release()
	if err != nil {
		return err
	}

	defer delete(locker.locks, locker.getId(id))
	return lock.CloseSession()
}

func (locker *Etcd3Locker) createSession() (*concurrency.Session, error) {
	return concurrency.NewSession(locker.Client, concurrency.WithTTL(locker.sessionTtl))
}

func (locker *Etcd3Locker) getId(id string) string {
	return locker.prefix + id
}
@ -1,66 +0,0 @@
package etcd3locker

import (
	"strings"
)

var (
	DefaultTtl    = 60
	DefaultPrefix = "/tusd"
)

type LockerOptions struct {
	ttl    int
	prefix string
}

// DefaultLockerOptions instantiates an instance of LockerOptions
// with a default 60 second time to live and an etcd3 prefix of "/tusd".
func DefaultLockerOptions() LockerOptions {
	return LockerOptions{
		ttl:    DefaultTtl,
		prefix: DefaultPrefix,
	}
}

// NewLockerOptions instantiates an instance of LockerOptions with a
// provided TTL (time to live) and string prefix for keys to be stored in etcd3.
func NewLockerOptions(ttl int, prefix string) LockerOptions {
	return LockerOptions{
		ttl:    ttl,
		prefix: prefix,
	}
}

// Ttl returns the TTL (time to live) of sessions in etcd3.
func (l *LockerOptions) Ttl() int {
	if l.ttl == 0 {
		return DefaultTtl
	}
	return l.ttl
}

// Prefix returns the string prefix used to store keys in etcd3.
func (l *LockerOptions) Prefix() string {
	// Check for the empty prefix before prepending the slash; otherwise an
	// unset prefix would be turned into "/" and the default would never apply.
	if l.prefix == "" {
		return DefaultPrefix
	}

	prefix := l.prefix
	if !strings.HasPrefix(prefix, "/") {
		prefix = "/" + prefix
	}
	return prefix
}

// SetTtl sets the etcd3 session TTL (time to live).
func (l *LockerOptions) SetTtl(ttl int) {
	l.ttl = ttl
}

// SetPrefix sets the string prefix to be used in keys stored into etcd3 by the locker.
func (l *LockerOptions) SetPrefix(prefix string) {
	l.prefix = prefix
}
@ -1,59 +0,0 @@
package etcd3locker

import (
	"os"
	"testing"
	"time"

	etcd_harness "github.com/chen-anders/go-etcd-harness"
	"go.etcd.io/etcd/clientv3"

	"github.com/stretchr/testify/assert"
	"github.com/tus/tusd"
)

func TestEtcd3Locker(t *testing.T) {
	a := assert.New(t)

	harness, err := etcd_harness.New(os.Stderr)
	if err != nil {
		t.Fatalf("failed starting etcd harness: %v", err)
	}
	t.Logf("will use etcd harness endpoint: %v", harness.Endpoint)
	defer func() {
		harness.Stop()
		t.Logf("cleaned up etcd harness")
	}()

	client, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{harness.Endpoint},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		t.Fatalf("Unable to connect to etcd3: %v", err)
	}
	defer client.Close()

	shortTTL := 3
	testPrefix := "/test-tusd"

	lockerOptions := NewLockerOptions(shortTTL, testPrefix)
	locker, err := NewWithLockerOptions(client, lockerOptions)
	a.NoError(err)
	a.NoError(locker.LockUpload("one"))
	a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))
	time.Sleep(5 * time.Second)
	// test that we can't take over the upload via a different etcd3 session
	// while an upload is already taking place; testing etcd3 session KeepAlive
	a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))
	a.NoError(locker.UnlockUpload("one"))
	a.Equal(ErrLockNotHeld, locker.UnlockUpload("one"))

	testPrefix = "/test-tusd2"
	locker2, err := NewWithPrefix(client, testPrefix)
	a.NoError(err)
	a.NoError(locker2.LockUpload("one"))
	a.Equal(tusd.ErrFileLocked, locker2.LockUpload("one"))
	a.Equal(tusd.ErrFileLocked, locker2.LockUpload("one"))
	a.NoError(locker2.UnlockUpload("one"))
	a.Equal(ErrLockNotHeld, locker2.UnlockUpload("one"))
}
@ -1,227 +0,0 @@
// Package filestore provides a storage backend based on the local file system.
//
// FileStore is a storage backend used as a tusd.DataStore in tusd.NewHandler.
// It stores the uploads in a specified directory using two different files:
// the `[id].info` files are used to store the file info in JSON format, while
// the `[id].bin` files contain the raw binary data uploaded.
// No cleanup is performed, so you may want to run a cronjob to ensure your disk
// is not filled up with old and finished uploads.
//
// In addition, it provides an exclusive upload locking mechanism using lock files
// which are stored on disk. Each of them stores the PID of the process which
// acquired the lock. This allows locks to be automatically freed when a process
// is unable to release it on its own because the process is not alive anymore.
// For more information, consult the documentation for the tusd.LockerDataStore
// interface, which is implemented by FileStore.
package filestore

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/tus/tusd"
	"github.com/tus/tusd/uid"

	"gopkg.in/Acconut/lockfile.v1"
)

var defaultFilePerm = os.FileMode(0664)

// See the tusd.DataStore interface for documentation about the different
// methods.
type FileStore struct {
	// Relative or absolute path to store files in. FileStore does not check
	// whether the path exists; use os.MkdirAll in this case on your own.
	Path string
}

// New creates a new file-based storage backend. The directory specified will
// be used as the only storage entry. This method does not check
// whether the path exists; use os.MkdirAll to ensure it does.
// In addition, a locking mechanism is provided.
func New(path string) FileStore {
	return FileStore{path}
}

// UseIn sets this store as the core data store in the passed composer and adds
// all possible extensions to it.
func (store FileStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseGetReader(store)
	composer.UseTerminater(store)
	composer.UseLocker(store)
	composer.UseConcater(store)
	composer.UseLengthDeferrer(store)
}

func (store FileStore) NewUpload(info tusd.FileInfo) (id string, err error) {
	id = uid.Uid()
	info.ID = id

	// Create .bin file with no content
	file, err := os.OpenFile(store.binPath(id), os.O_CREATE|os.O_WRONLY, defaultFilePerm)
	if err != nil {
		if os.IsNotExist(err) {
			err = fmt.Errorf("upload directory does not exist: %s", store.Path)
		}
		return "", err
	}
	defer file.Close()

	// writeInfo creates the file by itself if necessary
	err = store.writeInfo(id, info)
	return
}

func (store FileStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
	file, err := os.OpenFile(store.binPath(id), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	if err != nil {
		return 0, err
	}
	defer file.Close()

	n, err := io.Copy(file, src)

	// If the HTTP PATCH request gets interrupted in the middle (e.g. because
	// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
	// However, for FileStore it's not important whether the stream has ended
	// on purpose or accidentally.
	if err == io.ErrUnexpectedEOF {
		err = nil
	}

	return n, err
}

func (store FileStore) GetInfo(id string) (tusd.FileInfo, error) {
	info := tusd.FileInfo{}
	data, err := ioutil.ReadFile(store.infoPath(id))
	if err != nil {
		return info, err
	}
	if err := json.Unmarshal(data, &info); err != nil {
		return info, err
	}

	stat, err := os.Stat(store.binPath(id))
	if err != nil {
		return info, err
	}

	info.Offset = stat.Size()

	return info, nil
}

func (store FileStore) GetReader(id string) (io.Reader, error) {
	return os.Open(store.binPath(id))
}

func (store FileStore) Terminate(id string) error {
	if err := os.Remove(store.infoPath(id)); err != nil {
		return err
	}
	if err := os.Remove(store.binPath(id)); err != nil {
		return err
	}
	return nil
}

func (store FileStore) ConcatUploads(dest string, uploads []string) (err error) {
	file, err := os.OpenFile(store.binPath(dest), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	if err != nil {
		return err
	}
	defer file.Close()

	for _, id := range uploads {
		src, err := store.GetReader(id)
		if err != nil {
			return err
		}

		if _, err := io.Copy(file, src); err != nil {
			return err
		}
	}

	return
}

func (store FileStore) DeclareLength(id string, length int64) error {
	info, err := store.GetInfo(id)
	if err != nil {
		return err
	}
	info.Size = length
	info.SizeIsDeferred = false
	return store.writeInfo(id, info)
}

func (store FileStore) LockUpload(id string) error {
	lock, err := store.newLock(id)
	if err != nil {
		return err
	}

	err = lock.TryLock()
	if err == lockfile.ErrBusy {
		return tusd.ErrFileLocked
	}

	return err
}

func (store FileStore) UnlockUpload(id string) error {
	lock, err := store.newLock(id)
	if err != nil {
		return err
	}

	err = lock.Unlock()

	// A "no such file or directory" error will be returned if no lockfile was found.
	// Since this means that the file has never been locked, we drop the error
	// and continue as if nothing happened.
	if os.IsNotExist(err) {
		err = nil
	}

	return err
}

// newLock constructs a new Lockfile instance.
func (store FileStore) newLock(id string) (lockfile.Lockfile, error) {
	path, err := filepath.Abs(filepath.Join(store.Path, id+".lock"))
	if err != nil {
		return lockfile.Lockfile(""), err
	}

	// We use Lockfile directly instead of lockfile.New to bypass the unnecessary
	// check whether the provided path is absolute since we just resolved it
	// on our own.
	return lockfile.Lockfile(path), nil
}

// binPath returns the path to the .bin file storing the binary data.
func (store FileStore) binPath(id string) string {
	return filepath.Join(store.Path, id+".bin")
}

// infoPath returns the path to the .info file storing the file's info.
func (store FileStore) infoPath(id string) string {
	return filepath.Join(store.Path, id+".info")
}

// writeInfo updates the entire information. Everything will be overwritten.
func (store FileStore) writeInfo(id string, info tusd.FileInfo) error {
	data, err := json.Marshal(info)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(store.infoPath(id), data, defaultFilePerm)
}
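
Since the package comment describes FileStore as a tusd.DataStore for tusd.NewHandler, here is a minimal sketch of that wiring; the directory, port, and base path are assumptions for illustration.

```go
// Minimal sketch, assuming the upload directory can be created; FileStore
// itself does not create it (see New above).
package main

import (
    "log"
    "net/http"
    "os"

    "github.com/tus/tusd"
    "github.com/tus/tusd/filestore"
)

func main() {
    if err := os.MkdirAll("./uploads", 0755); err != nil {
        log.Fatal(err)
    }

    store := filestore.New("./uploads")

    composer := tusd.NewStoreComposer()
    store.UseIn(composer)

    handler, err := tusd.NewHandler(tusd.Config{
        BasePath:      "/files/",
        StoreComposer: composer,
    })
    if err != nil {
        log.Fatal(err)
    }

    http.Handle("/files/", http.StripPrefix("/files/", handler))
    log.Fatal(http.ListenAndServe(":8080", nil))
}
```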
@ -1,174 +0,0 @@
package filestore

import (
	"io"
	"io/ioutil"
	"os"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tus/tusd"
)

// Test interface implementation of FileStore
var _ tusd.DataStore = FileStore{}
var _ tusd.GetReaderDataStore = FileStore{}
var _ tusd.TerminaterDataStore = FileStore{}
var _ tusd.LockerDataStore = FileStore{}
var _ tusd.ConcaterDataStore = FileStore{}
var _ tusd.LengthDeferrerDataStore = FileStore{}

func TestFilestore(t *testing.T) {
	a := assert.New(t)

	tmp, err := ioutil.TempDir("", "tusd-filestore-")
	a.NoError(err)

	store := FileStore{tmp}

	// Create new upload
	id, err := store.NewUpload(tusd.FileInfo{
		Size: 42,
		MetaData: map[string]string{
			"hello": "world",
		},
	})
	a.NoError(err)
	a.NotEqual("", id)

	// Check info without writing
	info, err := store.GetInfo(id)
	a.NoError(err)
	a.EqualValues(42, info.Size)
	a.EqualValues(0, info.Offset)
	a.Equal(tusd.MetaData{"hello": "world"}, info.MetaData)

	// Write data to upload
	bytesWritten, err := store.WriteChunk(id, 0, strings.NewReader("hello world"))
	a.NoError(err)
	a.EqualValues(len("hello world"), bytesWritten)

	// Check new offset
	info, err = store.GetInfo(id)
	a.NoError(err)
	a.EqualValues(42, info.Size)
	a.EqualValues(11, info.Offset)

	// Read content
	reader, err := store.GetReader(id)
	a.NoError(err)

	content, err := ioutil.ReadAll(reader)
	a.NoError(err)
	a.Equal("hello world", string(content))
	reader.(io.Closer).Close()

	// Terminate upload
	a.NoError(store.Terminate(id))

	// Test if upload is deleted
	_, err = store.GetInfo(id)
	a.True(os.IsNotExist(err))
}

func TestMissingPath(t *testing.T) {
	a := assert.New(t)

	store := FileStore{"./path-that-does-not-exist"}

	id, err := store.NewUpload(tusd.FileInfo{})
	a.Error(err)
	a.Equal(err.Error(), "upload directory does not exist: ./path-that-does-not-exist")
	a.Equal(id, "")
}

func TestFileLocker(t *testing.T) {
	a := assert.New(t)

	dir, err := ioutil.TempDir("", "tusd-file-locker")
	a.NoError(err)

	var locker tusd.LockerDataStore
	locker = FileStore{dir}

	a.NoError(locker.LockUpload("one"))
	a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))
	a.NoError(locker.UnlockUpload("one"))
}

func TestConcatUploads(t *testing.T) {
	a := assert.New(t)

	tmp, err := ioutil.TempDir("", "tusd-filestore-concat-")
	a.NoError(err)

	store := FileStore{tmp}

	// Create a new upload to hold the concatenated upload
	finId, err := store.NewUpload(tusd.FileInfo{Size: 9})
	a.NoError(err)
	a.NotEqual("", finId)

	// Create three uploads for concatenating
	ids := make([]string, 3)
	contents := []string{
		"abc",
		"def",
		"ghi",
	}
	for i := 0; i < 3; i++ {
		id, err := store.NewUpload(tusd.FileInfo{Size: 3})
		a.NoError(err)

		n, err := store.WriteChunk(id, 0, strings.NewReader(contents[i]))
		a.NoError(err)
		a.EqualValues(3, n)

		ids[i] = id
	}

	err = store.ConcatUploads(finId, ids)
	a.NoError(err)

	// Check offset
	info, err := store.GetInfo(finId)
	a.NoError(err)
	a.EqualValues(9, info.Size)
	a.EqualValues(9, info.Offset)

	// Read content
	reader, err := store.GetReader(finId)
	a.NoError(err)

	content, err := ioutil.ReadAll(reader)
	a.NoError(err)
	a.Equal("abcdefghi", string(content))
	reader.(io.Closer).Close()
}

func TestDeclareLength(t *testing.T) {
	a := assert.New(t)

	tmp, err := ioutil.TempDir("", "tusd-filestore-declare-length-")
	a.NoError(err)

	store := FileStore{tmp}

	originalInfo := tusd.FileInfo{Size: 0, SizeIsDeferred: true}
	id, err := store.NewUpload(originalInfo)
	a.NoError(err)

	info, err := store.GetInfo(id)
	a.NoError(err)
	a.Equal(info.Size, originalInfo.Size)
	a.Equal(info.SizeIsDeferred, originalInfo.SizeIsDeferred)

	size := int64(100)
	err = store.DeclareLength(id, size)
	a.NoError(err)

	updatedInfo, err := store.GetInfo(id)
	a.NoError(err)
	a.Equal(updatedInfo.Size, size)
	a.False(updatedInfo.SizeIsDeferred)
}
@ -0,0 +1,17 @@
module github.com/tus/tusd

go 1.12

require (
	cloud.google.com/go v0.40.0
	github.com/aws/aws-sdk-go v1.20.1
	github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
	github.com/golang/mock v1.3.1
	github.com/prometheus/client_golang v1.0.0
	github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
	github.com/stretchr/testify v1.3.0
	github.com/vimeo/go-util v1.2.0
	google.golang.org/api v0.6.0
	gopkg.in/Acconut/lockfile.v1 v1.1.0
	gopkg.in/h2non/gock.v1 v1.0.14
)
@ -0,0 +1,170 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk=
cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/aws/aws-sdk-go v1.20.1 h1:p9ETyEP9iBPTLul2PHJblv5Iw0PKP10YK6DC5nMTzYM=
github.com/aws/aws-sdk-go v1.20.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0 h1:X9XMOYjxEfAYSy3xK1DzO5dMkkWhs9E9UCcS1IERx2k=
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0/go.mod h1:Ad7IjTpvzZO8Fl0vh9AzQ+j/jYZfyp2diGwI8m5q+ns=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/vimeo/go-util v1.2.0 h1:YHzwOnM+V2tc6r67K9fXpYqUiRwXp0TgFKuyj+A5bsg=
github.com/vimeo/go-util v1.2.0/go.mod h1:s13SMDTSO7AjH1nbgp707mfN5JFIWUFDU5MDDuRRtKs=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g=
google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
gopkg.in/Acconut/lockfile.v1 v1.1.0 h1:c5AMZOxgM1y+Zl8eSbaCENzVYp/LCaWosbQSXzb3FVI=
gopkg.in/Acconut/lockfile.v1 v1.1.0/go.mod h1:6UCz3wJ8tSFUsPR6uP/j8uegEtDuEEqFxlpi0JI4Umw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
@ -1,156 +0,0 @@
// Automatically generated by MockGen. DO NOT EDIT!
// Source: utils_test.go

package tusd_test

import (
	gomock "github.com/golang/mock/gomock"
	tusd "github.com/tus/tusd"
	io "io"
)

// Mock of FullDataStore interface
type MockFullDataStore struct {
	ctrl     *gomock.Controller
	recorder *_MockFullDataStoreRecorder
}

// Recorder for MockFullDataStore (not exported)
type _MockFullDataStoreRecorder struct {
	mock *MockFullDataStore
}

func NewMockFullDataStore(ctrl *gomock.Controller) *MockFullDataStore {
	mock := &MockFullDataStore{ctrl: ctrl}
	mock.recorder = &_MockFullDataStoreRecorder{mock}
	return mock
}

func (_m *MockFullDataStore) EXPECT() *_MockFullDataStoreRecorder {
	return _m.recorder
}

func (_m *MockFullDataStore) NewUpload(info tusd.FileInfo) (string, error) {
	ret := _m.ctrl.Call(_m, "NewUpload", info)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

func (_mr *_MockFullDataStoreRecorder) NewUpload(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "NewUpload", arg0)
}

func (_m *MockFullDataStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
	ret := _m.ctrl.Call(_m, "WriteChunk", id, offset, src)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

func (_mr *_MockFullDataStoreRecorder) WriteChunk(arg0, arg1, arg2 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "WriteChunk", arg0, arg1, arg2)
}

func (_m *MockFullDataStore) GetInfo(id string) (tusd.FileInfo, error) {
	ret := _m.ctrl.Call(_m, "GetInfo", id)
	ret0, _ := ret[0].(tusd.FileInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

func (_mr *_MockFullDataStoreRecorder) GetInfo(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "GetInfo", arg0)
}

func (_m *MockFullDataStore) Terminate(id string) error {
	ret := _m.ctrl.Call(_m, "Terminate", id)
	ret0, _ := ret[0].(error)
	return ret0
}

func (_mr *_MockFullDataStoreRecorder) Terminate(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "Terminate", arg0)
}

func (_m *MockFullDataStore) ConcatUploads(destination string, partialUploads []string) error {
	ret := _m.ctrl.Call(_m, "ConcatUploads", destination, partialUploads)
	ret0, _ := ret[0].(error)
	return ret0
}

func (_mr *_MockFullDataStoreRecorder) ConcatUploads(arg0, arg1 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "ConcatUploads", arg0, arg1)
}

func (_m *MockFullDataStore) GetReader(id string) (io.Reader, error) {
	ret := _m.ctrl.Call(_m, "GetReader", id)
	ret0, _ := ret[0].(io.Reader)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

func (_mr *_MockFullDataStoreRecorder) GetReader(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "GetReader", arg0)
}

func (_m *MockFullDataStore) FinishUpload(id string) error {
	ret := _m.ctrl.Call(_m, "FinishUpload", id)
	ret0, _ := ret[0].(error)
	return ret0
}

func (_mr *_MockFullDataStoreRecorder) FinishUpload(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "FinishUpload", arg0)
}

func (_m *MockFullDataStore) DeclareLength(id string, length int64) error {
	ret := _m.ctrl.Call(_m, "DeclareLength", id, length)
	ret0, _ := ret[0].(error)
	return ret0
}

func (_mr *_MockFullDataStoreRecorder) DeclareLength(arg0, arg1 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "DeclareLength", arg0, arg1)
}

// Mock of Locker interface
type MockLocker struct {
	ctrl     *gomock.Controller
	recorder *_MockLockerRecorder
}

// Recorder for MockLocker (not exported)
type _MockLockerRecorder struct {
	mock *MockLocker
}

func NewMockLocker(ctrl *gomock.Controller) *MockLocker {
	mock := &MockLocker{ctrl: ctrl}
	mock.recorder = &_MockLockerRecorder{mock}
	return mock
}

func (_m *MockLocker) EXPECT() *_MockLockerRecorder {
	return _m.recorder
}

func (_m *MockLocker) LockUpload(id string) error {
	ret := _m.ctrl.Call(_m, "LockUpload", id)
	ret0, _ := ret[0].(error)
	return ret0
}

func (_mr *_MockLockerRecorder) LockUpload(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "LockUpload", arg0)
}

func (_m *MockLocker) UnlockUpload(id string) error {
	ret := _m.ctrl.Call(_m, "UnlockUpload", id)
	ret0, _ := ret[0].(error)
	return ret0
}

func (_mr *_MockLockerRecorder) UnlockUpload(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "UnlockUpload", arg0)
}
@ -1,140 +0,0 @@
// Package limitedstore provides a storage with a limited space.
//
// This goal is achieved by using a simple wrapper around existing
// datastores (tusd.DataStore) while limiting the used storage size.
// It will start terminating existing uploads if not enough space is left in
// order to create a new upload.
// The order in which the uploads will be terminated is defined by their size,
// whereas the biggest ones are deleted first.
// This package's functionality is very limited and naive. It will terminate
// uploads whether they are finished yet or not. Only one datastore is allowed to
// access the underlying storage else the limited store will not function
// properly. Two tusd.FileStore instances using the same directory, for example.
// In addition the limited store will keep a list of the uploads' IDs in memory
// which may create a growing memory leak.
package limitedstore

import (
	"os"
	"sort"
	"sync"

	"github.com/tus/tusd"
)

type LimitedStore struct {
	tusd.DataStore
	terminater tusd.TerminaterDataStore

	StoreSize int64

	uploads  map[string]int64
	usedSize int64

	mutex *sync.Mutex
}

// pair structure to perform map-sorting
type pair struct {
	key   string
	value int64
}

type pairlist []pair

func (p pairlist) Len() int           { return len(p) }
func (p pairlist) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p pairlist) Less(i, j int) bool { return p[i].value < p[j].value }

// New creates a new limited store with the given size as the maximum storage
// size. The wrapped data store needs to implement the TerminaterDataStore
// interface, in order to provide the required Terminate method.
func New(storeSize int64, dataStore tusd.DataStore, terminater tusd.TerminaterDataStore) *LimitedStore {
	return &LimitedStore{
		StoreSize:  storeSize,
		DataStore:  dataStore,
		terminater: terminater,
		uploads:    make(map[string]int64),
		mutex:      new(sync.Mutex),
	}
}

func (store *LimitedStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseTerminater(store)
}

func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
	store.mutex.Lock()
	defer store.mutex.Unlock()

	if err := store.ensureSpace(info.Size); err != nil {
		return "", err
	}

	id, err := store.DataStore.NewUpload(info)
	if err != nil {
		return "", err
	}

	store.usedSize += info.Size
	store.uploads[id] = info.Size

	return id, nil
}

func (store *LimitedStore) Terminate(id string) error {
	store.mutex.Lock()
	defer store.mutex.Unlock()

	return store.terminate(id)
}

func (store *LimitedStore) terminate(id string) error {
	err := store.terminater.Terminate(id)
	// Ignore the error if the upload could not be found. In this case, the upload
	// has likely already been removed by another service (e.g. a cron job) and we
	// just remove the upload from our internal list and claim the used space back.
	if err != nil && err != tusd.ErrNotFound && !os.IsNotExist(err) {
		return err
	}

	size := store.uploads[id]
	delete(store.uploads, id)
	store.usedSize -= size

	return nil
}

// Ensure enough space is available to store an upload of the specified size.
// It will terminate uploads until enough space is freed.
func (store *LimitedStore) ensureSpace(size int64) error {
	if (store.usedSize + size) <= store.StoreSize {
		// Enough space is available to store the new upload
		return nil
	}

	sortedUploads := make(pairlist, len(store.uploads))
	i := 0
	for u, h := range store.uploads {
		sortedUploads[i] = pair{u, h}
		i++
	}
	sort.Sort(sort.Reverse(sortedUploads))

	// Forward traversal through the uploads in terms of size, biggest upload first
	for _, k := range sortedUploads {
		id := k.key

		if err := store.terminate(id); err != nil {
			return err
		}

		if (store.usedSize + size) <= store.StoreSize {
			// Enough space has been freed to store the new upload
			return nil
		}
	}

	return nil
}
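For context, a minimal sketch of how the removed LimitedStore was typically composed against the pre-1.0 tusd API it targets. The 1 GiB cap, the ./data directory, and the surrounding main function are illustrative assumptions, not part of this commit:

package main

import (
	"github.com/tus/tusd"
	"github.com/tus/tusd/filestore"
	"github.com/tus/tusd/limitedstore"
)

func main() {
	// filestore.FileStore satisfies both tusd.DataStore and
	// tusd.TerminaterDataStore, so the same value can serve as the
	// wrapped store and as the terminater.
	fs := filestore.New("./data")

	// Cap the combined size of all uploads at 1 GiB (illustrative value).
	// When a new upload would exceed the cap, ensureSpace terminates the
	// largest existing uploads first until enough space is free.
	store := limitedstore.New(1024*1024*1024, fs, fs)

	composer := tusd.NewStoreComposer()
	store.UseIn(composer)

	_, _ = tusd.NewHandler(tusd.Config{StoreComposer: composer})
}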
@ -1,87 +0,0 @@
package limitedstore

import (
	"io"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tus/tusd"
)

var _ tusd.DataStore = &LimitedStore{}
var _ tusd.TerminaterDataStore = &LimitedStore{}

type dataStore struct {
	t                    *assert.Assertions
	numCreatedUploads    int
	numTerminatedUploads int
}

func (store *dataStore) NewUpload(info tusd.FileInfo) (string, error) {
	uploadId := store.numCreatedUploads

	// We expect the uploads to be created in a specific order.
	// These sizes correlate to this order.
	expectedSize := []int64{30, 60, 80}[uploadId]

	store.t.Equal(expectedSize, info.Size)

	store.numCreatedUploads += 1

	return strconv.Itoa(uploadId), nil
}

func (store *dataStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
	return 0, nil
}

func (store *dataStore) GetInfo(id string) (tusd.FileInfo, error) {
	return tusd.FileInfo{}, nil
}

func (store *dataStore) Terminate(id string) error {
	// We expect the uploads to be terminated in a specific order (the bigger
	// come first)
	expectedUploadId := []string{"1", "0"}[store.numTerminatedUploads]

	store.t.Equal(expectedUploadId, id)

	store.numTerminatedUploads += 1

	return nil
}

func TestLimitedStore(t *testing.T) {
	a := assert.New(t)
	dataStore := &dataStore{
		t: a,
	}
	store := New(100, dataStore, dataStore)

	// Create new upload (30 bytes)
	id, err := store.NewUpload(tusd.FileInfo{
		Size: 30,
	})
	a.NoError(err)
	a.Equal("0", id)

	// Create new upload (60 bytes)
	id, err = store.NewUpload(tusd.FileInfo{
		Size: 60,
	})
	a.NoError(err)
	a.Equal("1", id)

	// Create new upload (80 bytes)
	id, err = store.NewUpload(tusd.FileInfo{
		Size: 80,
	})
	a.NoError(err)
	a.Equal("2", id)

	if dataStore.numTerminatedUploads != 2 {
		t.Error("expected two uploads to be terminated")
	}
}
@ -1,21 +0,0 @@
package memorylocker

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tus/tusd"
)

func TestMemoryLocker(t *testing.T) {
	a := assert.New(t)

	var locker tusd.LockerDataStore
	locker = New()

	a.NoError(locker.LockUpload("one"))
	a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))
	a.NoError(locker.UnlockUpload("one"))
	a.NoError(locker.UnlockUpload("one"))
}
@ -0,0 +1,81 @@
// Package filelocker provides an upload locker based on the local file system.
//
// It provides an exclusive upload locking mechanism using lock files
// which are stored on disk. Each of them stores the PID of the process which
// acquired the lock. This allows locks to be automatically freed when a process
// is unable to release it on its own because the process is not alive anymore.
// For more information, consult the documentation for the handler.Locker
// interface, which is implemented by FileLocker.
package filelocker

import (
	"os"
	"path/filepath"

	"github.com/tus/tusd/pkg/handler"

	"gopkg.in/Acconut/lockfile.v1"
)

var defaultFilePerm = os.FileMode(0664)

// See the handler.Locker interface for documentation about the different
// methods.
type FileLocker struct {
	// Relative or absolute path to store lock files in. FileLocker does not
	// check whether the path exists; use os.MkdirAll in this case on your own.
	Path string
}

// New creates a new file-based locker. The directory specified will be used
// to store the lock files. This method does not check whether the path
// exists; use os.MkdirAll to ensure it does.
func New(path string) FileLocker {
	return FileLocker{path}
}

// UseIn adds this locker to the passed composer.
func (locker FileLocker) UseIn(composer *handler.StoreComposer) {
	composer.UseLocker(locker)
}

func (locker FileLocker) NewLock(id string) (handler.Lock, error) {
	path, err := filepath.Abs(filepath.Join(locker.Path, id+".lock"))
	if err != nil {
		return nil, err
	}

	// We use Lockfile directly instead of lockfile.New to bypass the unnecessary
	// check whether the provided path is absolute since we just resolved it
	// on our own.
	return &fileUploadLock{
		file: lockfile.Lockfile(path),
	}, nil
}

type fileUploadLock struct {
	file lockfile.Lockfile
}

func (lock fileUploadLock) Lock() error {
	err := lock.file.TryLock()
	if err == lockfile.ErrBusy {
		return handler.ErrFileLocked
	}

	return err
}

func (lock fileUploadLock) Unlock() error {
	err := lock.file.Unlock()

	// A "no such file or directory" error will be returned if no lockfile was
	// found. Since this means that the file has never been locked, we drop the
	// error and continue as if nothing happened.
	if os.IsNotExist(err) {
		err = nil
	}

	return err
}
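A minimal sketch of wiring the new FileLocker into a handler through the composer, assuming the pkg/filestore and pkg/handler APIs introduced elsewhere in this commit; the ./uploads directory is a placeholder:

package main

import (
	"os"

	"github.com/tus/tusd/pkg/filelocker"
	"github.com/tus/tusd/pkg/filestore"
	"github.com/tus/tusd/pkg/handler"
)

func main() {
	// Neither FileStore nor FileLocker creates its directory, so ensure
	// it exists before handing it to them.
	if err := os.MkdirAll("./uploads", 0755); err != nil {
		panic(err)
	}

	composer := handler.NewStoreComposer()

	store := filestore.New("./uploads")
	store.UseIn(composer)

	// The [id].lock files then live next to the [id] and [id].info files,
	// keeping everything for one upload in a single directory.
	locker := filelocker.New("./uploads")
	locker.UseIn(composer)

	_, _ = handler.NewHandler(handler.Config{StoreComposer: composer})
}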
@ -0,0 +1,32 @@
package filelocker

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/tus/tusd/pkg/handler"
)

var _ handler.Locker = &FileLocker{}

func TestFileLocker(t *testing.T) {
	a := assert.New(t)

	dir, err := ioutil.TempDir("", "tusd-file-locker")
	a.NoError(err)

	locker := FileLocker{dir}

	lock1, err := locker.NewLock("one")
	a.NoError(err)

	a.NoError(lock1.Lock())
	a.Equal(handler.ErrFileLocked, lock1.Lock())

	lock2, err := locker.NewLock("one")
	a.NoError(err)
	a.Equal(handler.ErrFileLocked, lock2.Lock())

	a.NoError(lock1.Unlock())
}
@ -0,0 +1,222 @@
// Package filestore provides a storage backend based on the local file system.
//
// FileStore is a storage backend used as a handler.DataStore in handler.NewHandler.
// It stores each upload in the directory specified by Path, using two files: the
// `[id].info` files are used to store the fileinfo in JSON format, and the
// `[id]` files without an extension contain the raw binary data uploaded.
// No cleanup is performed so you may want to run a cronjob to ensure your disk
// is not filled up with old and finished uploads.
package filestore

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/tus/tusd/internal/uid"
	"github.com/tus/tusd/pkg/handler"
)

var defaultFilePerm = os.FileMode(0664)

// See the handler.DataStore interface for documentation about the different
// methods.
type FileStore struct {
	// Relative or absolute path to store files in. FileStore does not check
	// whether the path exists; use os.MkdirAll in this case on your own.
	Path string
}

// New creates a new file-based storage backend. The directory specified will
// be used as the only storage entry. This method does not check
// whether the path exists; use os.MkdirAll to ensure it does.
func New(path string) FileStore {
	return FileStore{path}
}

// UseIn sets this store as the core data store in the passed composer and adds
// all possible extensions to it.
func (store FileStore) UseIn(composer *handler.StoreComposer) {
	composer.UseCore(store)
	composer.UseTerminater(store)
	composer.UseConcater(store)
	composer.UseLengthDeferrer(store)
}

func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
	id := uid.Uid()
	binPath := store.binPath(id)
	info.ID = id
	info.Storage = map[string]string{
		"Type": "filestore",
		"Path": binPath,
	}

	// Create binary file with no content
	file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm)
	if err != nil {
		if os.IsNotExist(err) {
			err = fmt.Errorf("upload directory does not exist: %s", store.Path)
		}
		return nil, err
	}
	defer file.Close()

	upload := &fileUpload{
		info:     info,
		infoPath: store.infoPath(id),
		binPath:  store.binPath(id),
	}

	// writeInfo creates the file by itself if necessary
	err = upload.writeInfo()
	if err != nil {
		return nil, err
	}

	return upload, nil
}

func (store FileStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
	info := handler.FileInfo{}
	data, err := ioutil.ReadFile(store.infoPath(id))
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(data, &info); err != nil {
		return nil, err
	}

	binPath := store.binPath(id)
	infoPath := store.infoPath(id)
	stat, err := os.Stat(binPath)
	if err != nil {
		return nil, err
	}

	info.Offset = stat.Size()

	return &fileUpload{
		info:     info,
		binPath:  binPath,
		infoPath: infoPath,
	}, nil
}

func (store FileStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
	return upload.(*fileUpload)
}

func (store FileStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
	return upload.(*fileUpload)
}

func (store FileStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
	return upload.(*fileUpload)
}

// binPath returns the path to the file storing the binary data.
func (store FileStore) binPath(id string) string {
	return filepath.Join(store.Path, id)
}

// infoPath returns the path to the .info file storing the file's info.
func (store FileStore) infoPath(id string) string {
	return filepath.Join(store.Path, id+".info")
}

type fileUpload struct {
	// info stores the current information about the upload
	info handler.FileInfo
	// infoPath is the path to the .info file
	infoPath string
	// binPath is the path to the binary file (which has no extension)
	binPath string
}

func (upload *fileUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
	return upload.info, nil
}

func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
	file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	if err != nil {
		return 0, err
	}
	defer file.Close()

	n, err := io.Copy(file, src)

	// If the HTTP PATCH request gets interrupted in the middle (e.g. because
	// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
	// However, for FileStore it's not important whether the stream has ended
	// on purpose or accidentally.
	if err == io.ErrUnexpectedEOF {
		err = nil
	}

	upload.info.Offset += n

	return n, err
}

func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) {
	return os.Open(upload.binPath)
}

func (upload *fileUpload) Terminate(ctx context.Context) error {
	if err := os.Remove(upload.infoPath); err != nil {
		return err
	}
	if err := os.Remove(upload.binPath); err != nil {
		return err
	}
	return nil
}

func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []handler.Upload) (err error) {
	file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	if err != nil {
		return err
	}
	defer file.Close()

	for _, partialUpload := range uploads {
		fileUpload := partialUpload.(*fileUpload)

		src, err := os.Open(fileUpload.binPath)
		if err != nil {
			return err
		}

		if _, err := io.Copy(file, src); err != nil {
			return err
		}
	}

	return
}

func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error {
	upload.info.Size = length
	upload.info.SizeIsDeferred = false
	return upload.writeInfo()
}

// writeInfo updates the entire information. Everything will be overwritten.
func (upload *fileUpload) writeInfo() error {
	data, err := json.Marshal(upload.info)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm)
}

func (upload *fileUpload) FinishUpload(ctx context.Context) error {
	return nil
}
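Since the package comment above describes the on-disk layout ([id] for the raw bytes, [id].info for JSON metadata), here is a small sketch of inspecting that metadata from outside tusd. The ./uploads directory and the abc123 upload ID are placeholders; the FileInfo field set is taken from how this file uses it:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	"github.com/tus/tusd/pkg/handler"
)

func main() {
	// Read the JSON sidecar that FileStore writes next to the binary file.
	data, err := ioutil.ReadFile("./uploads/abc123.info")
	if err != nil {
		panic(err)
	}

	var info handler.FileInfo
	if err := json.Unmarshal(data, &info); err != nil {
		panic(err)
	}

	// Note that GetUpload derives Offset from the binary file's size, so
	// the Offset value stored in the .info file itself may be stale.
	fmt.Printf("upload %s: %d of %d bytes\n", info.ID, info.Offset, info.Size)
}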
@ -0,0 +1,178 @@
package filestore

import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/tus/tusd/pkg/handler"
)

// Test interface implementation of Filestore
var _ handler.DataStore = FileStore{}
var _ handler.TerminaterDataStore = FileStore{}
var _ handler.ConcaterDataStore = FileStore{}
var _ handler.LengthDeferrerDataStore = FileStore{}

func TestFilestore(t *testing.T) {
	a := assert.New(t)

	tmp, err := ioutil.TempDir("", "tusd-filestore-")
	a.NoError(err)

	store := FileStore{tmp}
	ctx := context.Background()

	// Create new upload
	upload, err := store.NewUpload(ctx, handler.FileInfo{
		Size: 42,
		MetaData: map[string]string{
			"hello": "world",
		},
	})
	a.NoError(err)
	a.NotEqual(nil, upload)

	// Check info without writing
	info, err := upload.GetInfo(ctx)
	a.NoError(err)
	a.EqualValues(42, info.Size)
	a.EqualValues(0, info.Offset)
	a.Equal(handler.MetaData{"hello": "world"}, info.MetaData)
	a.Equal(2, len(info.Storage))
	a.Equal("filestore", info.Storage["Type"])
	a.Equal(filepath.Join(tmp, info.ID), info.Storage["Path"])

	// Write data to upload
	bytesWritten, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world"))
	a.NoError(err)
	a.EqualValues(len("hello world"), bytesWritten)

	// Check new offset
	info, err = upload.GetInfo(ctx)
	a.NoError(err)
	a.EqualValues(42, info.Size)
	a.EqualValues(11, info.Offset)

	// Read content
	reader, err := upload.GetReader(ctx)
	a.NoError(err)

	content, err := ioutil.ReadAll(reader)
	a.NoError(err)
	a.Equal("hello world", string(content))
	reader.(io.Closer).Close()

	// Terminate upload
	a.NoError(store.AsTerminatableUpload(upload).Terminate(ctx))

	// Test if upload is deleted
	upload, err = store.GetUpload(ctx, info.ID)
	a.Equal(nil, upload)
	a.True(os.IsNotExist(err))
}

func TestMissingPath(t *testing.T) {
	a := assert.New(t)

	store := FileStore{"./path-that-does-not-exist"}
	ctx := context.Background()

	upload, err := store.NewUpload(ctx, handler.FileInfo{})
	a.Error(err)
	a.Equal("upload directory does not exist: ./path-that-does-not-exist", err.Error())
	a.Equal(nil, upload)
}

func TestConcatUploads(t *testing.T) {
	a := assert.New(t)

	tmp, err := ioutil.TempDir("", "tusd-filestore-concat-")
	a.NoError(err)

	store := FileStore{tmp}
	ctx := context.Background()

	// Create new upload to hold concatenated upload
	finUpload, err := store.NewUpload(ctx, handler.FileInfo{Size: 9})
	a.NoError(err)
	a.NotEqual(nil, finUpload)

	finInfo, err := finUpload.GetInfo(ctx)
	a.NoError(err)
	finId := finInfo.ID

	// Create three uploads for concatenating
	partialUploads := make([]handler.Upload, 3)
	contents := []string{
		"abc",
		"def",
		"ghi",
	}
	for i := 0; i < 3; i++ {
		upload, err := store.NewUpload(ctx, handler.FileInfo{Size: 3})
		a.NoError(err)

		n, err := upload.WriteChunk(ctx, 0, strings.NewReader(contents[i]))
		a.NoError(err)
		a.EqualValues(3, n)

		partialUploads[i] = upload
	}

	err = store.AsConcatableUpload(finUpload).ConcatUploads(ctx, partialUploads)
	a.NoError(err)

	// Check offset
	finUpload, err = store.GetUpload(ctx, finId)
	a.NoError(err)

	info, err := finUpload.GetInfo(ctx)
	a.NoError(err)
	a.EqualValues(9, info.Size)
	a.EqualValues(9, info.Offset)

	// Read content
	reader, err := finUpload.GetReader(ctx)
	a.NoError(err)

	content, err := ioutil.ReadAll(reader)
	a.NoError(err)
	a.Equal("abcdefghi", string(content))
	reader.(io.Closer).Close()
}

func TestDeclareLength(t *testing.T) {
	a := assert.New(t)

	tmp, err := ioutil.TempDir("", "tusd-filestore-declare-length-")
	a.NoError(err)

	store := FileStore{tmp}
	ctx := context.Background()

	upload, err := store.NewUpload(ctx, handler.FileInfo{
		Size:           0,
		SizeIsDeferred: true,
	})
	a.NoError(err)
	a.NotEqual(nil, upload)

	info, err := upload.GetInfo(ctx)
	a.NoError(err)
	a.EqualValues(0, info.Size)
	a.Equal(true, info.SizeIsDeferred)

	err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, 100)
	a.NoError(err)

	updatedInfo, err := upload.GetInfo(ctx)
	a.NoError(err)
	a.EqualValues(100, updatedInfo.Size)
	a.Equal(false, updatedInfo.SizeIsDeferred)
}
@ -1,21 +1,20 @@
 package gcsstore
 
 import (
+	"context"
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
 	"math"
 	"strconv"
 	"strings"
 
 	"cloud.google.com/go/storage"
-	"golang.org/x/net/context"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 
-	"hash/crc32"
-
 	"github.com/vimeo/go-util/crc32combine"
 )
@ -2,13 +2,13 @@ package gcsstore_test
 
 import (
 	"bytes"
-	"golang.org/x/net/context"
+	"context"
 	"testing"
 
 	"gopkg.in/h2non/gock.v1"
 
 	"cloud.google.com/go/storage"
-	. "github.com/tus/tusd/gcsstore"
+	. "github.com/tus/tusd/pkg/gcsstore"
 	"google.golang.org/api/option"
 )
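The import churn in these hunks is the same mechanical migration applied throughout this commit: golang.org/x/net/context was the home of the context package before it entered the standard library in Go 1.7, and since Go 1.9 the external package merely aliases the standard library types, so swapping the import path is behavior-preserving:

import (
	// Before: "golang.org/x/net/context" (the pre-Go-1.7 location).
	// After: the standard library package; x/net/context declares
	// type Context = context.Context on Go 1.9+, so both names refer
	// to the same type.
	"context"
)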
@ -12,6 +12,7 @@ package gcsstore
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@ -20,14 +21,12 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"golang.org/x/net/context"
-
 	"cloud.google.com/go/storage"
-	"github.com/tus/tusd"
-	"github.com/tus/tusd/uid"
+	"github.com/tus/tusd/internal/uid"
+	"github.com/tus/tusd/pkg/handler"
 )
 
-// See the tusd.DataStore interface for documentation about the different
+// See the handler.DataStore interface for documentation about the different
 // methods.
 type GCSStore struct {
 	// Specifies the GCS bucket that uploads will be stored in
@ -52,35 +51,53 @@ func New(bucket string, service GCSAPI) GCSStore {
 	}
 }
 
-func (store GCSStore) UseIn(composer *tusd.StoreComposer) {
+func (store GCSStore) UseIn(composer *handler.StoreComposer) {
 	composer.UseCore(store)
 	composer.UseTerminater(store)
-	composer.UseFinisher(store)
-	composer.UseGetReader(store)
 }
 
-func (store GCSStore) NewUpload(info tusd.FileInfo) (id string, err error) {
+func (store GCSStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
 	if info.ID == "" {
 		info.ID = uid.Uid()
 	}
 
-	ctx := context.Background()
-	err = store.writeInfo(ctx, store.keyWithPrefix(info.ID), info)
-	if err != nil {
-		return info.ID, err
+	info.Storage = map[string]string{
+		"Type":   "gcsstore",
+		"Bucket": store.Bucket,
+		"Key":    store.keyWithPrefix(info.ID),
 	}
 
-	return info.ID, nil
+	err := store.writeInfo(ctx, store.keyWithPrefix(info.ID), info)
+	if err != nil {
+		return &gcsUpload{info.ID, &store}, err
+	}
+
+	return &gcsUpload{info.ID, &store}, nil
+}
+
+type gcsUpload struct {
+	id    string
+	store *GCSStore
+}
+
+func (store GCSStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
+	return &gcsUpload{id, &store}, nil
+}
+
+func (store GCSStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
+	return upload.(*gcsUpload)
 }
 
-func (store GCSStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
+func (upload gcsUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
+	id := upload.id
+	store := upload.store
+
 	prefix := fmt.Sprintf("%s_", store.keyWithPrefix(id))
 	filterParams := GCSFilterParams{
 		Bucket: store.Bucket,
 		Prefix: prefix,
 	}
 
-	ctx := context.Background()
 	names, err := store.Service.FilterObjects(ctx, filterParams)
 	if err != nil {
 		return 0, err
@ -116,8 +133,11 @@ func (store GCSStore) WriteChunk(id string, offset int64, src io.Reader) (int64,
 
 const CONCURRENT_SIZE_REQUESTS = 32
 
-func (store GCSStore) GetInfo(id string) (tusd.FileInfo, error) {
-	info := tusd.FileInfo{}
+func (upload gcsUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
+	id := upload.id
+	store := upload.store
+
+	info := handler.FileInfo{}
 	i := fmt.Sprintf("%s.info", store.keyWithPrefix(id))
 
 	params := GCSObjectParams{
@ -125,11 +145,10 @@ func (store GCSStore) GetInfo(id string) (tusd.FileInfo, error) {
 		ID:     i,
 	}
 
-	ctx := context.Background()
 	r, err := store.Service.ReadObject(ctx, params)
 	if err != nil {
 		if err == storage.ErrObjectNotExist {
-			return info, tusd.ErrNotFound
+			return info, handler.ErrNotFound
 		}
 		return info, err
 	}
@ -214,7 +233,7 @@ func (store GCSStore) GetInfo(id string) (tusd.FileInfo, error) {
 	return info, nil
 }
 
-func (store GCSStore) writeInfo(ctx context.Context, id string, info tusd.FileInfo) error {
+func (store GCSStore) writeInfo(ctx context.Context, id string, info handler.FileInfo) error {
 	data, err := json.Marshal(info)
 	if err != nil {
 		return err
@ -236,14 +255,16 @@ func (store GCSStore) writeInfo(ctx context.Context, id string, info tusd.FileIn
 	return nil
 }
 
-func (store GCSStore) FinishUpload(id string) error {
+func (upload gcsUpload) FinishUpload(ctx context.Context) error {
+	id := upload.id
+	store := upload.store
+
 	prefix := fmt.Sprintf("%s_", store.keyWithPrefix(id))
 	filterParams := GCSFilterParams{
 		Bucket: store.Bucket,
 		Prefix: prefix,
 	}
 
-	ctx := context.Background()
 	names, err := store.Service.FilterObjects(ctx, filterParams)
 	if err != nil {
 		return err
@ -265,7 +286,7 @@ func (store GCSStore) FinishUpload(id string) error {
 		return err
 	}
 
-	info, err := store.GetInfo(id)
+	info, err := upload.GetInfo(ctx)
 	if err != nil {
 		return err
 	}
@ -283,13 +304,15 @@ func (store GCSStore) FinishUpload(id string) error {
 	return nil
 }
 
-func (store GCSStore) Terminate(id string) error {
+func (upload gcsUpload) Terminate(ctx context.Context) error {
+	id := upload.id
+	store := upload.store
+
 	filterParams := GCSFilterParams{
 		Bucket: store.Bucket,
 		Prefix: store.keyWithPrefix(id),
 	}
 
-	ctx := context.Background()
 	err := store.Service.DeleteObjectsWithFilter(ctx, filterParams)
 	if err != nil {
 		return err
@ -298,13 +321,15 @@ func (store GCSStore) Terminate(id string) error {
 	return nil
 }
 
-func (store GCSStore) GetReader(id string) (io.Reader, error) {
+func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
+	id := upload.id
+	store := upload.store
+
 	params := GCSObjectParams{
 		Bucket: store.Bucket,
 		ID:     store.keyWithPrefix(id),
 	}
 
-	ctx := context.Background()
 	r, err := store.Service.ReadObject(ctx, params)
 	if err != nil {
 		return nil, err
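Taken together, the gcsstore hunks above implement the commit-wide API shift from ID-based store methods to an upload object: callers resolve an upload once via GetUpload and then invoke methods on it, passing a caller-supplied context instead of each method creating its own context.Background(). A sketch of the new calling convention, assuming handler.Upload exposes the GetInfo and WriteChunk methods implemented by gcsUpload above; the package and function names are illustrative:

package gcsexample

import (
	"context"
	"io"

	"github.com/tus/tusd/pkg/gcsstore"
)

// appendChunk resolves the upload once and then operates on it. Under the
// old API the equivalent calls were store.GetInfo(id) followed by
// store.WriteChunk(id, offset, src).
func appendChunk(ctx context.Context, store gcsstore.GCSStore, id string, src io.Reader) error {
	upload, err := store.GetUpload(ctx, id)
	if err != nil {
		return err
	}

	info, err := upload.GetInfo(ctx)
	if err != nil {
		return err
	}

	_, err = upload.WriteChunk(ctx, info.Offset, src)
	return err
}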
@ -1,12 +1,12 @@
 // Automatically generated by MockGen. DO NOT EDIT!
-// Source: github.com/tus/tusd/gcsstore (interfaces: GCSReader,GCSAPI)
+// Source: github.com/tus/tusd/pkg/gcsstore (interfaces: GCSReader,GCSAPI)
 
 package gcsstore_test
 
 import (
 	context "context"
 	gomock "github.com/golang/mock/gomock"
-	gcsstore "github.com/tus/tusd/gcsstore"
+	gcsstore "github.com/tus/tusd/pkg/gcsstore"
 	io "io"
 )
@ -2,34 +2,38 @@ package gcsstore_test
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"testing"
 
-	"golang.org/x/net/context"
-
 	"cloud.google.com/go/storage"
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/tus/tusd"
-	"github.com/tus/tusd/gcsstore"
+	"github.com/tus/tusd/pkg/gcsstore"
+	"github.com/tus/tusd/pkg/handler"
 )
 
-// go:generate mockgen -destination=./gcsstore_mock_test.go -package=gcsstore_test github.com/tus/tusd/gcsstore GCSReader,GCSAPI
+// go:generate mockgen -destination=./gcsstore_mock_test.go -package=gcsstore_test github.com/tus/tusd/pkg/gcsstore GCSReader,GCSAPI
 
 const mockID = "123456789abcdefghijklmnopqrstuvwxyz"
 const mockBucket = "bucket"
 const mockSize = 1337
 const mockReaderData = "helloworld"
 
-var mockTusdInfoJson = fmt.Sprintf(`{"ID":"%s","Size":%d,"MetaData":{"foo":"bar"}}`, mockID, mockSize)
-var mockTusdInfo = tusd.FileInfo{
+var mockTusdInfoJson = fmt.Sprintf(`{"ID":"%s","Size":%d,"MetaData":{"foo":"bar"},"Storage":{"Bucket":"bucket","Key":"%s","Type":"gcsstore"}}`, mockID, mockSize, mockID)
+var mockTusdInfo = handler.FileInfo{
 	ID:   mockID,
 	Size: mockSize,
 	MetaData: map[string]string{
 		"foo": "bar",
 	},
+	Storage: map[string]string{
+		"Type":   "gcsstore",
+		"Bucket": mockBucket,
+		"Key":    mockID,
+	},
 }
 
 var mockPartial0 = fmt.Sprintf("%s_0", mockID)
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
|
service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
|
||||||
|
|
||||||
id, err := store.NewUpload(mockTusdInfo)
|
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(id, mockID)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewUploadWithPrefix(t *testing.T) {
|
func TestNewUploadWithPrefix(t *testing.T) {
|
||||||
|
@ -76,7 +80,13 @@ func TestNewUploadWithPrefix(t *testing.T) {
|
||||||
|
|
||||||
assert.Equal(store.Bucket, mockBucket)
|
assert.Equal(store.Bucket, mockBucket)
|
||||||
|
|
||||||
data, err := json.Marshal(mockTusdInfo)
|
info := mockTusdInfo
|
||||||
|
info.Storage = map[string]string{
|
||||||
|
"Type": "gcsstore",
|
||||||
|
"Bucket": mockBucket,
|
||||||
|
"Key": "/path/to/file/" + mockID,
|
||||||
|
}
|
||||||
|
data, err := json.Marshal(info)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
r := bytes.NewReader(data)
|
r := bytes.NewReader(data)
|
||||||
|
@ -89,9 +99,9 @@ func TestNewUploadWithPrefix(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
|
service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
|
||||||
|
|
||||||
id, err := store.NewUpload(mockTusdInfo)
|
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(id, mockID)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
type MockGetInfoReader struct{}
|
type MockGetInfoReader struct{}
|
||||||
|
@ -168,16 +178,22 @@ func TestGetInfo(t *testing.T) {
|
||||||
service.EXPECT().FilterObjects(ctx, filterParams).Return(mockPartials, nil),
|
service.EXPECT().FilterObjects(ctx, filterParams).Return(mockPartials, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
ctxCancel, _ := context.WithCancel(ctx)
|
ctxCancel, cancel := context.WithCancel(ctx)
|
||||||
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams0).Return(size, nil)
|
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams0).Return(size, nil)
|
||||||
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams1).Return(size, nil)
|
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams1).Return(size, nil)
|
||||||
lastGetObjectSize := service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams2).Return(size, nil)
|
lastGetObjectSize := service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams2).Return(size, nil)
|
||||||
|
|
||||||
service.EXPECT().WriteObject(ctx, params, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
|
service.EXPECT().WriteObject(ctx, params, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
|
||||||
|
|
||||||
info, err := store.GetInfo(mockID)
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(mockTusdInfo, info)
|
assert.Equal(mockTusdInfo, info)
|
||||||
|
|
||||||
|
// Cancel the context to avoid getting an error from `go vet`
|
||||||
|
cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetInfoNotFound(t *testing.T) {
|
func TestGetInfoNotFound(t *testing.T) {
|
||||||
|
@ -198,8 +214,11 @@ func TestGetInfoNotFound(t *testing.T) {
|
||||||
service.EXPECT().ReadObject(ctx, params).Return(nil, storage.ErrObjectNotExist),
|
service.EXPECT().ReadObject(ctx, params).Return(nil, storage.ErrObjectNotExist),
|
||||||
)
|
)
|
||||||
|
|
||||||
_, err := store.GetInfo(mockID)
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
assert.Equal(tusd.ErrNotFound, err)
|
assert.Nil(err)
|
||||||
|
|
||||||
|
_, err = upload.GetInfo(context.Background())
|
||||||
|
assert.Equal(handler.ErrNotFound, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
type MockGetReader struct{}
|
type MockGetReader struct{}
|
||||||
|
@ -244,7 +263,11 @@ func TestGetReader(t *testing.T) {
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
service.EXPECT().ReadObject(ctx, params).Return(r, nil)
|
service.EXPECT().ReadObject(ctx, params).Return(r, nil)
|
||||||
reader, err := store.GetReader(mockID)
|
|
||||||
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
reader, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
buf := make([]byte, len(mockReaderData))
|
buf := make([]byte, len(mockReaderData))
|
||||||
|
@ -272,7 +295,10 @@ func TestTerminate(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
service.EXPECT().DeleteObjectsWithFilter(ctx, filterParams).Return(nil)
|
service.EXPECT().DeleteObjectsWithFilter(ctx, filterParams).Return(nil)
|
||||||
|
|
||||||
err := store.Terminate(mockID)
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -350,7 +376,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
service.EXPECT().FilterObjects(ctx, filterParams2).Return(mockPartials, nil),
|
service.EXPECT().FilterObjects(ctx, filterParams2).Return(mockPartials, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
ctxCancel, _ := context.WithCancel(ctx)
|
ctxCancel, cancel := context.WithCancel(ctx)
|
||||||
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams0).Return(size, nil)
|
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams0).Return(size, nil)
|
||||||
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams1).Return(size, nil)
|
service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams1).Return(size, nil)
|
||||||
lastGetObjectSize := service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams2).Return(size, nil)
|
lastGetObjectSize := service.EXPECT().GetObjectSize(ctxCancel, mockObjectParams2).Return(size, nil)
|
||||||
|
@ -358,12 +384,18 @@ func TestFinishUpload(t *testing.T) {
|
||||||
writeObject := service.EXPECT().WriteObject(ctx, infoParams, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
|
writeObject := service.EXPECT().WriteObject(ctx, infoParams, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
|
||||||
service.EXPECT().SetObjectMetadata(ctx, objectParams, metadata).Return(nil).After(writeObject)
|
service.EXPECT().SetObjectMetadata(ctx, objectParams, metadata).Return(nil).After(writeObject)
|
||||||
|
|
||||||
err = store.FinishUpload(mockID)
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = upload.FinishUpload(context.Background())
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
// Cancel the context to avoid getting an error from `go vet`
|
||||||
|
cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
var mockTusdChunk0InfoJson = fmt.Sprintf(`{"ID":"%s","Size":%d,"Offset":%d,"MetaData":{"foo":"bar"}}`, mockID, mockSize, mockSize/3)
|
var mockTusdChunk0InfoJson = fmt.Sprintf(`{"ID":"%s","Size":%d,"Offset":%d,"MetaData":{"foo":"bar"}}`, mockID, mockSize, mockSize/3)
|
||||||
var mockTusdChunk1Info = tusd.FileInfo{
|
var mockTusdChunk1Info = handler.FileInfo{
|
||||||
ID: mockID,
|
ID: mockID,
|
||||||
Size: mockSize,
|
Size: mockSize,
|
||||||
Offset: 455,
|
Offset: 455,
|
||||||
|
@ -430,7 +462,11 @@ func TestWriteChunk(t *testing.T) {
|
||||||
reader := bytes.NewReader([]byte(mockReaderData))
|
reader := bytes.NewReader([]byte(mockReaderData))
|
||||||
var offset int64
|
var offset int64
|
||||||
offset = mockSize / 3
|
offset = mockSize / 3
|
||||||
_, err := store.WriteChunk(mockID, offset, reader)
|
|
||||||
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
_, err = upload.WriteChunk(context.Background(), offset, reader)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
-package tusd
+package handler
 
 // StoreComposer represents a composable data store. It consists of the core
 // data store and optional extensions. Please consult the package's overview
|
||||||
|
|
||||||
UsesTerminater bool
|
UsesTerminater bool
|
||||||
Terminater TerminaterDataStore
|
Terminater TerminaterDataStore
|
||||||
UsesFinisher bool
|
|
||||||
Finisher FinisherDataStore
|
|
||||||
UsesLocker bool
|
UsesLocker bool
|
||||||
Locker LockerDataStore
|
Locker Locker
|
||||||
UsesGetReader bool
|
|
||||||
GetReader GetReaderDataStore
|
|
||||||
UsesConcater bool
|
UsesConcater bool
|
||||||
Concater ConcaterDataStore
|
Concater ConcaterDataStore
|
||||||
UsesLengthDeferrer bool
|
UsesLengthDeferrer bool
|
||||||
|
@ -25,35 +21,6 @@ func NewStoreComposer() *StoreComposer {
|
||||||
return &StoreComposer{}
|
return &StoreComposer{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// newStoreComposerFromDataStore creates a new store composer and attempts to
|
|
||||||
// extract the extensions for the provided store. This is intended to be used
|
|
||||||
// for transitioning from data stores to composers.
|
|
||||||
func newStoreComposerFromDataStore(store DataStore) *StoreComposer {
|
|
||||||
composer := NewStoreComposer()
|
|
||||||
composer.UseCore(store)
|
|
||||||
|
|
||||||
if mod, ok := store.(TerminaterDataStore); ok {
|
|
||||||
composer.UseTerminater(mod)
|
|
||||||
}
|
|
||||||
if mod, ok := store.(FinisherDataStore); ok {
|
|
||||||
composer.UseFinisher(mod)
|
|
||||||
}
|
|
||||||
if mod, ok := store.(LockerDataStore); ok {
|
|
||||||
composer.UseLocker(mod)
|
|
||||||
}
|
|
||||||
if mod, ok := store.(GetReaderDataStore); ok {
|
|
||||||
composer.UseGetReader(mod)
|
|
||||||
}
|
|
||||||
if mod, ok := store.(ConcaterDataStore); ok {
|
|
||||||
composer.UseConcater(mod)
|
|
||||||
}
|
|
||||||
if mod, ok := store.(LengthDeferrerDataStore); ok {
|
|
||||||
composer.UseLengthDeferrer(mod)
|
|
||||||
}
|
|
||||||
|
|
||||||
return composer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Capabilities returns a string representing the provided extensions in a
|
// Capabilities returns a string representing the provided extensions in a
|
||||||
// human-readable format meant for debugging.
|
// human-readable format meant for debugging.
|
||||||
func (store *StoreComposer) Capabilities() string {
|
func (store *StoreComposer) Capabilities() string {
|
||||||
|
@ -71,24 +38,12 @@ func (store *StoreComposer) Capabilities() string {
|
||||||
} else {
|
} else {
|
||||||
str += "✗"
|
str += "✗"
|
||||||
}
|
}
|
||||||
str += ` Finisher: `
|
|
||||||
if store.UsesFinisher {
|
|
||||||
str += "✓"
|
|
||||||
} else {
|
|
||||||
str += "✗"
|
|
||||||
}
|
|
||||||
str += ` Locker: `
|
str += ` Locker: `
|
||||||
if store.UsesLocker {
|
if store.UsesLocker {
|
||||||
str += "✓"
|
str += "✓"
|
||||||
} else {
|
} else {
|
||||||
str += "✗"
|
str += "✗"
|
||||||
}
|
}
|
||||||
str += ` GetReader: `
|
|
||||||
if store.UsesGetReader {
|
|
||||||
str += "✓"
|
|
||||||
} else {
|
|
||||||
str += "✗"
|
|
||||||
}
|
|
||||||
str += ` Concater: `
|
str += ` Concater: `
|
||||||
if store.UsesConcater {
|
if store.UsesConcater {
|
||||||
str += "✓"
|
str += "✓"
|
||||||
|
@ -115,18 +70,12 @@ func (store *StoreComposer) UseTerminater(ext TerminaterDataStore) {
|
||||||
store.UsesTerminater = ext != nil
|
store.UsesTerminater = ext != nil
|
||||||
store.Terminater = ext
|
store.Terminater = ext
|
||||||
}
|
}
|
||||||
func (store *StoreComposer) UseFinisher(ext FinisherDataStore) {
|
|
||||||
store.UsesFinisher = ext != nil
|
func (store *StoreComposer) UseLocker(ext Locker) {
|
||||||
store.Finisher = ext
|
|
||||||
}
|
|
||||||
func (store *StoreComposer) UseLocker(ext LockerDataStore) {
|
|
||||||
store.UsesLocker = ext != nil
|
store.UsesLocker = ext != nil
|
||||||
store.Locker = ext
|
store.Locker = ext
|
||||||
}
|
}
|
||||||
func (store *StoreComposer) UseGetReader(ext GetReaderDataStore) {
|
|
||||||
store.UsesGetReader = ext != nil
|
|
||||||
store.GetReader = ext
|
|
||||||
}
|
|
||||||
func (store *StoreComposer) UseConcater(ext ConcaterDataStore) {
|
func (store *StoreComposer) UseConcater(ext ConcaterDataStore) {
|
||||||
store.UsesConcater = ext != nil
|
store.UsesConcater = ext != nil
|
||||||
store.Concater = ext
|
store.Concater = ext
|
|
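With newStoreComposerFromDataStore gone, extension detection no longer happens inside the composer; each store is now expected to register itself. Below is a minimal sketch of that pattern, assuming a hypothetical myStore type that only implements the core DataStore interface (the UseIn convention matches the filestore and memorylocker calls in the example test further down).

package mystore

import (
    "context"

    "github.com/tus/tusd/pkg/handler"
)

// myStore is a hypothetical core-only data store.
type myStore struct{}

func (s myStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
    return nil, nil // stubbed for brevity
}

func (s myStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
    return nil, nil // stubbed for brevity
}

// UseIn registers the store with the composer. A store supporting more
// extensions would also call UseConcater, UseTerminater, UseLengthDeferrer
// and so on from here.
func (s myStore) UseIn(composer *handler.StoreComposer) {
    composer.UseCore(s)
}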
@@ -1,4 +1,4 @@
-package tusd
+package handler
 
 #define USE_FUNC(TYPE) \
 func (store *StoreComposer) Use ## TYPE(ext TYPE ## DataStore) { \
@@ -39,23 +39,6 @@ func NewStoreComposer() *StoreComposer {
 	return &StoreComposer{}
 }
 
-// newStoreComposerFromDataStore creates a new store composer and attempts to
-// extract the extensions for the provided store. This is intended to be used
-// for transitioning from data stores to composers.
-func newStoreComposerFromDataStore(store DataStore) *StoreComposer {
-	composer := NewStoreComposer()
-	composer.UseCore(store)
-
-	USE_FROM(Terminater)
-	USE_FROM(Finisher)
-	USE_FROM(Locker)
-	USE_FROM(GetReader)
-	USE_FROM(Concater)
-	USE_FROM(LengthDeferrer)
-
-	return composer
-}
-
 // Capabilities returns a string representing the provided extensions in a
 // human-readable format meant for debugging.
 func (store *StoreComposer) Capabilities() string {
@@ -0,0 +1,23 @@
+package handler_test
+
+import (
+	"github.com/tus/tusd/pkg/filestore"
+	"github.com/tus/tusd/pkg/handler"
+	"github.com/tus/tusd/pkg/memorylocker"
+)
+
+func ExampleNewStoreComposer() {
+	composer := handler.NewStoreComposer()
+
+	fs := filestore.New("./data")
+	fs.UseIn(composer)
+
+	ml := memorylocker.New()
+	ml.UseIn(composer)
+
+	config := handler.Config{
+		StoreComposer: composer,
+	}
+
+	_, _ = handler.NewHandler(config)
+}
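To complete the picture, here is a sketch of how such a composed handler could be mounted and served. The port, BasePath and data directory are illustrative and mirror the mounting shown in doc.go further below.

package main

import (
    "net/http"

    "github.com/tus/tusd/pkg/filestore"
    "github.com/tus/tusd/pkg/handler"
)

func main() {
    composer := handler.NewStoreComposer()
    filestore.New("./data").UseIn(composer)

    h, err := handler.NewHandler(handler.Config{
        BasePath:      "/files/",
        StoreComposer: composer,
    })
    if err != nil {
        panic(err)
    }

    // Mount the tus handler under /files/, as doc.go below also shows.
    http.Handle("/files/", http.StripPrefix("/files/", h))
    _ = http.ListenAndServe(":8080", nil)
}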
@@ -0,0 +1,331 @@
+package handler_test
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+
+	. "github.com/tus/tusd/pkg/handler"
+)
+
+func TestConcat(t *testing.T) {
+	SubTest(t, "ExtensionDiscovery", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		composer = NewStoreComposer()
+		composer.UseCore(store)
+		composer.UseConcater(store)
+
+		handler, _ := NewHandler(Config{
+			StoreComposer: composer,
+		})
+
+		(&httpTest{
+			Method: "OPTIONS",
+			Code:   http.StatusOK,
+			ResHeader: map[string]string{
+				"Tus-Extension": "creation,creation-with-upload,concatenation",
+			},
+		}).Run(handler, t)
+	})
+
+	SubTest(t, "Partial", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:           300,
+					IsPartial:      true,
+					IsFinal:        false,
+					PartialUploads: nil,
+					MetaData:       make(map[string]string),
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:             "foo",
+					Size:           300,
+					IsPartial:      true,
+					IsFinal:        false,
+					PartialUploads: nil,
+					MetaData:       make(map[string]string),
+				}, nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				BasePath:      "files",
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "POST",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+					"Upload-Length": "300",
+					"Upload-Concat": "partial",
+				},
+				Code: http.StatusCreated,
+			}).Run(handler, t)
+		})
+
+		SubTest(t, "Status", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:        "foo",
+					IsPartial: true,
+				}, nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				BasePath:      "files",
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "HEAD",
+				URL:    "foo",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+				},
+				Code: http.StatusOK,
+				ResHeader: map[string]string{
+					"Upload-Concat": "partial",
+				},
+			}).Run(handler, t)
+		})
+	})
+
+	SubTest(t, "Final", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			a := assert.New(t)
+
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			uploadA := NewMockFullUpload(ctrl)
+			uploadB := NewMockFullUpload(ctrl)
+			uploadC := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().GetUpload(context.Background(), "a").Return(uploadA, nil),
+				uploadA.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					IsPartial: true,
+					Size:      5,
+					Offset:    5,
+				}, nil),
+				store.EXPECT().GetUpload(context.Background(), "b").Return(uploadB, nil),
+				uploadB.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					IsPartial: true,
+					Size:      5,
+					Offset:    5,
+				}, nil),
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:           10,
+					IsPartial:      false,
+					IsFinal:        true,
+					PartialUploads: []string{"a", "b"},
+					MetaData:       make(map[string]string),
+				}).Return(uploadC, nil),
+				uploadC.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:             "foo",
+					Size:           10,
+					IsPartial:      false,
+					IsFinal:        true,
+					PartialUploads: []string{"a", "b"},
+					MetaData:       make(map[string]string),
+				}, nil),
+				store.EXPECT().AsConcatableUpload(uploadC).Return(uploadC),
+				uploadC.EXPECT().ConcatUploads(context.Background(), []Upload{uploadA, uploadB}).Return(nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				BasePath:              "files",
+				StoreComposer:         composer,
+				NotifyCompleteUploads: true,
+			})
+
+			c := make(chan HookEvent, 1)
+			handler.CompleteUploads = c
+
+			(&httpTest{
+				Method: "POST",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+					// A space between `final;` and the first URL should be allowed due to
+					// compatibility reasons, even if the specification does not define
+					// it. Therefore this character is included in this test case.
+					"Upload-Concat":   "final; http://tus.io/files/a /files/b/",
+					"X-Custom-Header": "tada",
+				},
+				Code: http.StatusCreated,
+			}).Run(handler, t)
+
+			event := <-c
+			info := event.Upload
+			a.Equal("foo", info.ID)
+			a.EqualValues(10, info.Size)
+			a.EqualValues(10, info.Offset)
+			a.False(info.IsPartial)
+			a.True(info.IsFinal)
+			a.Equal([]string{"a", "b"}, info.PartialUploads)
+
+			req := event.HTTPRequest
+			a.Equal("POST", req.Method)
+			a.Equal("", req.URI)
+			a.Equal("tada", req.Header.Get("X-Custom-Header"))
+		})
+
+		SubTest(t, "Status", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:             "foo",
+					IsFinal:        true,
+					PartialUploads: []string{"a", "b"},
+					Size:           10,
+					Offset:         10,
+				}, nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				BasePath:      "files",
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "HEAD",
+				URL:    "foo",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+				},
+				Code: http.StatusOK,
+				ResHeader: map[string]string{
+					"Upload-Concat": "final;http://tus.io/files/a http://tus.io/files/b",
+					"Upload-Length": "10",
+					"Upload-Offset": "10",
+				},
+			}).Run(handler, t)
+		})
+
+		SubTest(t, "CreateWithUnfinishedFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			// This upload is still unfinished (mismatching offset and size) and
+			// will therefore cause the POST request to fail.
+			gomock.InOrder(
+				store.EXPECT().GetUpload(context.Background(), "c").Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:        "c",
+					IsPartial: true,
+					Size:      5,
+					Offset:    3,
+				}, nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				BasePath:      "files",
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "POST",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+					"Upload-Concat": "final;http://tus.io/files/c",
+				},
+				Code: http.StatusBadRequest,
+			}).Run(handler, t)
+		})
+
+		SubTest(t, "CreateExceedingMaxSizeFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().GetUpload(context.Background(), "huge").Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:     "huge",
+					Size:   1000,
+					Offset: 1000,
+				}, nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				MaxSize:       100,
+				BasePath:      "files",
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "POST",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+					"Upload-Concat": "final;/files/huge",
+				},
+				Code: http.StatusRequestEntityTooLarge,
+			}).Run(handler, t)
+		})
+
+		SubTest(t, "UploadToFinalFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:      "foo",
+					Size:    10,
+					Offset:  0,
+					IsFinal: true,
+				}, nil),
+			)
+
+			handler, _ := NewHandler(Config{
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "PATCH",
+				URL:    "foo",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+					"Content-Type":  "application/offset+octet-stream",
+					"Upload-Offset": "5",
+				},
+				ReqBody: strings.NewReader("hello"),
+				Code:    http.StatusForbidden,
+			}).Run(handler, t)
+		})
+
+		SubTest(t, "InvalidConcatHeaderFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			handler, _ := NewHandler(Config{
+				StoreComposer: composer,
+			})
+
+			(&httpTest{
+				Method: "POST",
+				URL:    "",
+				ReqHeader: map[string]string{
+					"Tus-Resumable": "1.0.0",
+					"Upload-Concat": "final;",
+				},
+				Code: http.StatusBadRequest,
+			}).Run(handler, t)
+		})
+	})
+}
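For orientation, the Upload-Concat values exercised above are the two shapes defined by the tus concatenation extension: `partial` marks an upload as a fragment, while `final;<url> <url>` names the fragments to stitch together. A client-side sketch follows; the server URL is illustrative and buildConcatRequests is a hypothetical helper.

package main

import "net/http"

// buildConcatRequests shows the two request shapes from the tests above.
func buildConcatRequests() (*http.Request, *http.Request) {
    // Step 1: create a partial upload holding one fragment of the file.
    partial, _ := http.NewRequest("POST", "http://localhost:8080/files/", nil)
    partial.Header.Set("Tus-Resumable", "1.0.0")
    partial.Header.Set("Upload-Length", "300")
    partial.Header.Set("Upload-Concat", "partial")

    // Step 2: once all fragments are uploaded, create the final upload that
    // stitches the partial uploads "a" and "b" together in order.
    final, _ := http.NewRequest("POST", "http://localhost:8080/files/", nil)
    final.Header.Set("Tus-Resumable", "1.0.0")
    final.Header.Set("Upload-Concat", "final;http://localhost:8080/files/a http://localhost:8080/files/b")
    return partial, final
}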
@@ -1,4 +1,4 @@
-package tusd
+package handler
 
 import (
 	"errors"
@@ -9,13 +9,10 @@ import (
 
 // Config provides a way to configure the Handler depending on your needs.
 type Config struct {
-	// DataStore implementation used to store and retrieve the single uploads.
-	// The usage of this field is deprecated and should be avoided in favor of
-	// StoreComposer.
-	DataStore DataStore
 	// StoreComposer points to the store composer from which the core data store
 	// and optional dependencies should be taken. May only be nil if DataStore is
 	// set.
+	// TODO: Remove pointer?
 	StoreComposer *StoreComposer
 	// MaxSize defines how many bytes may be stored in one single upload. If its
 	// value is is 0 or smaller no limit will be enforced.
@@ -43,6 +40,11 @@ type Config struct {
 	// potentially set by proxies when generating an absolute URL in the
 	// response to POST requests.
 	RespectForwardedHeaders bool
+	// PreUploadCreateCallback will be invoked before a new upload is created, if the
+	// property is supplied. If the callback returns nil, the upload will be created.
+	// Otherwise the HTTP request will be aborted. This can be used to implement
+	// validation of upload metadata etc.
+	PreUploadCreateCallback func(hook HookEvent) error
 }
 
 func (config *Config) validate() error {
@@ -69,10 +71,7 @@ func (config *Config) validate() error {
 	config.isAbs = uri.IsAbs()
 
 	if config.StoreComposer == nil {
-		config.StoreComposer = newStoreComposerFromDataStore(config.DataStore)
-		config.DataStore = nil
-	} else if config.DataStore != nil {
-		return errors.New("tusd: either StoreComposer or DataStore may be set in Config, but not both")
+		return errors.New("tusd: StoreComposer must no be nil")
 	}
 
 	if config.StoreComposer.Core == nil {
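The new PreUploadCreateCallback hook is the natural place for the metadata validation its doc comment mentions. A sketch, assuming a hypothetical configWithValidation helper; any non-nil error aborts the POST request before the store's NewUpload is called.

package main

import (
    "errors"

    "github.com/tus/tusd/pkg/handler"
)

// configWithValidation rejects uploads whose metadata lacks a filename.
// The error message is illustrative.
func configWithValidation(composer *handler.StoreComposer) handler.Config {
    return handler.Config{
        StoreComposer: composer,
        PreUploadCreateCallback: func(hook handler.HookEvent) error {
            if _, ok := hook.Upload.MetaData["filename"]; !ok {
                // A non-nil error aborts the POST request.
                return errors.New("upload must include a filename in its metadata")
            }
            return nil
        },
    }
}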
@@ -1,7 +1,7 @@
-package tusd
+package handler
 
 import (
-	"io"
+	"context"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -9,23 +9,22 @@ import (
 
 type zeroStore struct{}
 
-func (store zeroStore) NewUpload(info FileInfo) (string, error) {
-	return "", nil
+func (store zeroStore) NewUpload(ctx context.Context, info FileInfo) (Upload, error) {
+	return nil, nil
 }
-func (store zeroStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
-	return 0, nil
-}
-
-func (store zeroStore) GetInfo(id string) (FileInfo, error) {
-	return FileInfo{}, nil
+func (store zeroStore) GetUpload(ctx context.Context, id string) (Upload, error) {
+	return nil, nil
 }
 
 func TestConfig(t *testing.T) {
 	a := assert.New(t)
 
+	composer := NewStoreComposer()
+	composer.UseCore(zeroStore{})
+
 	config := Config{
-		DataStore: zeroStore{},
+		StoreComposer: composer,
 		BasePath: "files",
 	}
 
 	a.Nil(config.validate())
@@ -43,16 +42,3 @@ func TestConfigEmptyCore(t *testing.T) {
 
 	a.Error(config.validate())
 }
-
-func TestConfigStoreAndComposer(t *testing.T) {
-	a := assert.New(t)
-	composer := NewStoreComposer()
-	composer.UseCore(zeroStore{})
-
-	config := Config{
-		StoreComposer: composer,
-		DataStore: zeroStore{},
-	}
-
-	a.Error(config.validate())
-}
@@ -1,17 +1,17 @@
-package tusd_test
+package handler_test
 
 import (
 	"net/http"
 	"net/http/httptest"
 	"testing"
 
-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"
 )
 
 func TestCORS(t *testing.T) {
-	SubTest(t, "Preflight", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "Preflight", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})
 
 		(&httpTest{
@@ -29,9 +29,9 @@ func TestCORS(t *testing.T) {
 		}).Run(handler, t)
 	})
 
-	SubTest(t, "Request", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "Request", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})
 
 		(&httpTest{
@@ -48,9 +48,9 @@ func TestCORS(t *testing.T) {
 		}).Run(handler, t)
 	})
 
-	SubTest(t, "AppendHeaders", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "AppendHeaders", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})
 
 		req, _ := http.NewRequest("OPTIONS", "", nil)
@@ -1,9 +1,8 @@
-package tusd
+package handler
 
 import (
+	"context"
 	"io"
-
-	"golang.org/x/net/context"
 )
 
 type MetaData map[string]string
@@ -27,6 +26,10 @@ type FileInfo struct {
 	// ordered slice containing the ids of the uploads of which the final upload
 	// will consist after concatenation.
 	PartialUploads []string
+	// Storage contains information about where the data storage saves the upload,
+	// for example a file path. The available values vary depending on what data
+	// store is used. This map may also be nil.
+	Storage map[string]string
 
 	// stopUpload is the cancel function for the upload's context.Context. When
 	// invoked it will interrupt the writes to DataStore#WriteChunk.
@@ -44,12 +47,7 @@ func (f FileInfo) StopUpload() {
 	}
 }
 
-type DataStore interface {
-	// Create a new upload using the size as the file's length. The method must
-	// return an unique id which is used to identify the upload. If no backend
-	// (e.g. Riak) specifes the id you may want to use the uid package to
-	// generate one. The properties Size and MetaData will be filled.
-	NewUpload(info FileInfo) (id string, err error)
+type Upload interface {
 	// Write the chunk read from src into the file specified by the id at the
 	// given offset. The handler will take care of validating the offset and
 	// limiting the size of the src to not overflow the file's size. It may
@@ -57,58 +55,11 @@ type DataStore interface {
 	// It will also lock resources while they are written to ensure only one
 	// write happens per time.
 	// The function call must return the number of bytes written.
-	WriteChunk(id string, offset int64, src io.Reader) (int64, error)
+	WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error)
 	// Read the fileinformation used to validate the offset and respond to HEAD
 	// requests. It may return an os.ErrNotExist which will be interpreted as a
 	// 404 Not Found.
-	GetInfo(id string) (FileInfo, error)
-}
-
-// TerminaterDataStore is the interface which must be implemented by DataStores
-// if they want to receive DELETE requests using the Handler. If this interface
-// is not implemented, no request handler for this method is attached.
-type TerminaterDataStore interface {
-	// Terminate an upload so any further requests to the resource, both reading
-	// and writing, must return os.ErrNotExist or similar.
-	Terminate(id string) error
-}
-
-// FinisherDataStore is the interface which can be implemented by DataStores
-// which need to do additional operations once an entire upload has been
-// completed. These tasks may include but are not limited to freeing unused
-// resources or notifying other services. For example, S3Store uses this
-// interface for removing a temporary object.
-type FinisherDataStore interface {
-	// FinishUpload executes additional operations for the finished upload which
-	// is specified by its ID.
-	FinishUpload(id string) error
-}
-
-// LockerDataStore is the interface required for custom lock persisting mechanisms.
-// Common ways to store this information is in memory, on disk or using an
-// external service, such as ZooKeeper.
-// When multiple processes are attempting to access an upload, whether it be
-// by reading or writing, a synchronization mechanism is required to prevent
-// data corruption, especially to ensure correct offset values and the proper
-// order of chunks inside a single upload.
-type LockerDataStore interface {
-	// LockUpload attempts to obtain an exclusive lock for the upload specified
-	// by its id.
-	// If this operation fails because the resource is already locked, the
-	// tusd.ErrFileLocked must be returned. If no error is returned, the attempt
-	// is consider to be successful and the upload to be locked until UnlockUpload
-	// is invoked for the same upload.
-	LockUpload(id string) error
-	// UnlockUpload releases an existing lock for the given upload.
-	UnlockUpload(id string) error
-}
-
-// GetReaderDataStore is the interface which must be implemented if handler should
-// expose and support the GET route. It will allow clients to download the
-// content of an upload regardless whether it's finished or not.
-// Please, be aware that this feature is not part of the official tus
-// specification. Instead it's a custom mechanism by tusd.
-type GetReaderDataStore interface {
 	// GetReader returns a reader which allows iterating of the content of an
 	// upload specified by its ID. It should attempt to provide a reader even if
 	// the upload has not been finished yet but it's not required.
@@ -116,20 +67,55 @@ type GetReaderDataStore interface {
 	// Close() method will be invoked once everything has been read.
 	// If the given upload could not be found, the error tusd.ErrNotFound should
 	// be returned.
-	GetReader(id string) (io.Reader, error)
+	GetReader(ctx context.Context) (io.Reader, error)
+	// FinisherDataStore is the interface which can be implemented by DataStores
+	// which need to do additional operations once an entire upload has been
+	// completed. These tasks may include but are not limited to freeing unused
+	// resources or notifying other services. For example, S3Store uses this
+	// interface for removing a temporary object.
+	// FinishUpload executes additional operations for the finished upload which
+	// is specified by its ID.
+	FinishUpload(ctx context.Context) error
+}
+
+type DataStore interface {
+	// Create a new upload using the size as the file's length. The method must
+	// return an unique id which is used to identify the upload. If no backend
+	// (e.g. Riak) specifes the id you may want to use the uid package to
+	// generate one. The properties Size and MetaData will be filled.
+	NewUpload(ctx context.Context, info FileInfo) (upload Upload, err error)
+
+	GetUpload(ctx context.Context, id string) (upload Upload, err error)
+}
+
+type TerminatableUpload interface {
+	// Terminate an upload so any further requests to the resource, both reading
+	// and writing, must return os.ErrNotExist or similar.
+	Terminate(ctx context.Context) error
+}
+
+// TerminaterDataStore is the interface which must be implemented by DataStores
+// if they want to receive DELETE requests using the Handler. If this interface
+// is not implemented, no request handler for this method is attached.
+type TerminaterDataStore interface {
+	AsTerminatableUpload(upload Upload) TerminatableUpload
 }
 
 // ConcaterDataStore is the interface required to be implemented if the
 // Concatenation extension should be enabled. Only in this case, the handler
 // will parse and respect the Upload-Concat header.
 type ConcaterDataStore interface {
-	// ConcatUploads concatenations the content from the provided partial uploads
-	// and write the result in the destination upload which is specified by its
-	// ID. The caller (usually the handler) must and will ensure that this
+	AsConcatableUpload(upload Upload) ConcatableUpload
+}
+
+type ConcatableUpload interface {
+	// ConcatUploads concatenates the content from the provided partial uploads
+	// and writes the result in the destination upload.
+	// The caller (usually the handler) must and will ensure that this
 	// destination upload has been created before with enough space to hold all
 	// partial uploads. The order, in which the partial uploads are supplied,
 	// must be respected during concatenation.
-	ConcatUploads(destination string, partialUploads []string) error
+	ConcatUploads(ctx context.Context, partialUploads []Upload) error
 }
 
 // LengthDeferrerDataStore is the interface that must be implemented if the
@@ -137,5 +123,34 @@ type ConcaterDataStore interface {
 // client to upload files when their total size is not yet known. Instead, the
 // client must send the total size as soon as it becomes known.
 type LengthDeferrerDataStore interface {
-	DeclareLength(id string, length int64) error
+	AsLengthDeclarableUpload(upload Upload) LengthDeclarableUpload
+}
+
+type LengthDeclarableUpload interface {
+	DeclareLength(ctx context.Context, length int64) error
+}
+
+// Locker is the interface required for custom lock persisting mechanisms.
+// Common ways to store this information is in memory, on disk or using an
+// external service, such as Redis.
+// When multiple processes are attempting to access an upload, whether it be
+// by reading or writing, a synchronization mechanism is required to prevent
+// data corruption, especially to ensure correct offset values and the proper
+// order of chunks inside a single upload.
+type Locker interface {
+	// NewLock creates a new unlocked lock object for the given upload ID.
+	NewLock(id string) (Lock, error)
+}
+
+// Lock is the interface for a lock as returned from a Locker.
+type Lock interface {
+	// Lock attempts to obtain an exclusive lock for the upload specified
+	// by its id.
+	// If this operation fails because the resource is already locked, the
+	// tusd.ErrFileLocked must be returned. If no error is returned, the attempt
+	// is consider to be successful and the upload to be locked until UnlockUpload
+	// is invoked for the same upload.
+	Lock() error
+	// Unlock releases an existing lock for the given upload.
+	Unlock() error
 }
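A much-simplified sketch of the new Locker and Lock contract follows, handing out one sync.Mutex per upload ID. Note that this toy version blocks in Lock() instead of returning ErrFileLocked as the interface documentation asks for; the shipped memorylocker package is the proper reference implementation.

package main

import (
    "sync"

    "github.com/tus/tusd/pkg/handler"
)

// inMemoryLocker is a hypothetical single-process Locker.
type inMemoryLocker struct {
    mu    sync.Mutex
    locks map[string]*sync.Mutex
}

func newInMemoryLocker() *inMemoryLocker {
    return &inMemoryLocker{locks: make(map[string]*sync.Mutex)}
}

// NewLock returns an unlocked lock object for the given upload ID.
func (l *inMemoryLocker) NewLock(id string) (handler.Lock, error) {
    l.mu.Lock()
    defer l.mu.Unlock()
    if _, ok := l.locks[id]; !ok {
        l.locks[id] = &sync.Mutex{}
    }
    return memoryLock{l.locks[id]}, nil
}

type memoryLock struct{ m *sync.Mutex }

// Lock blocks until the upload's mutex is acquired (a production locker
// should instead fail fast with ErrFileLocked).
func (lock memoryLock) Lock() error   { lock.m.Lock(); return nil }
func (lock memoryLock) Unlock() error { lock.m.Unlock(); return nil }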
@@ -1,5 +1,5 @@
 /*
-Package tusd provides ways to accept tus 1.0 calls using HTTP.
+Package handler provides ways to accept tus 1.0 calls using HTTP.
 
 tus is a protocol based on HTTP for resumable file uploads. Resumable means that
 an upload can be interrupted at any moment and can be resumed without
@@ -66,4 +66,4 @@ This handler can then be mounted to a specific path, e.g. /files:
 
 	http.Handle("/files/", http.StripPrefix("/files/", handler))
 */
-package tusd
+package handler
@@ -1,12 +1,13 @@
-package tusd_test
+package handler_test
 
 import (
+	"context"
 	"net/http"
 	"strings"
 	"testing"
 
 	"github.com/golang/mock/gomock"
-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"
 )
 
 type closingStringReader struct {
@@ -20,18 +21,22 @@ func (reader *closingStringReader) Close() error {
 }
 
 func TestGet(t *testing.T) {
-	SubTest(t, "Download", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "Download", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		reader := &closingStringReader{
 			Reader: strings.NewReader("hello"),
 		}
 
 		ctrl := gomock.NewController(t)
 		defer ctrl.Finish()
-		locker := NewMockLocker(ctrl)
+		locker := NewMockFullLocker(ctrl)
+		lock := NewMockFullLock(ctrl)
+		upload := NewMockFullUpload(ctrl)
 
 		gomock.InOrder(
-			locker.EXPECT().LockUpload("yes"),
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			locker.EXPECT().NewLock("yes").Return(lock, nil),
+			lock.EXPECT().Lock().Return(nil),
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				Offset: 5,
 				Size:   20,
 				MetaData: map[string]string{
@@ -39,13 +44,12 @@ func TestGet(t *testing.T) {
 					"filetype": "image/jpeg",
 				},
 			}, nil),
-			store.EXPECT().GetReader("yes").Return(reader, nil),
-			locker.EXPECT().UnlockUpload("yes"),
+			upload.EXPECT().GetReader(context.Background()).Return(reader, nil),
+			lock.EXPECT().Unlock().Return(nil),
 		)
 
-		composer := NewStoreComposer()
+		composer = NewStoreComposer()
 		composer.UseCore(store)
-		composer.UseGetReader(store)
 		composer.UseLocker(locker)
 
 		handler, _ := NewHandler(Config{
@@ -69,13 +73,20 @@ func TestGet(t *testing.T) {
 		}
 	})
 
-	SubTest(t, "EmptyDownload", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			Offset: 0,
-		}, nil)
+	SubTest(t, "EmptyDownload", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				Offset: 0,
+			}, nil),
+		)
 
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})
 
 		(&httpTest{
@@ -90,31 +101,23 @@ func TestGet(t *testing.T) {
 		}).Run(handler, t)
 	})
 
-	SubTest(t, "NotProvided", func(t *testing.T, store *MockFullDataStore) {
-		composer := NewStoreComposer()
-		composer.UseCore(store)
-
-		handler, _ := NewUnroutedHandler(Config{
-			StoreComposer: composer,
-		})
-
-		(&httpTest{
-			Method: "GET",
-			URL:    "foo",
-			Code:   http.StatusNotImplemented,
-		}).Run(http.HandlerFunc(handler.GetFile), t)
-	})
-
-	SubTest(t, "InvalidFileType", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			Offset: 0,
-			MetaData: map[string]string{
-				"filetype": "non-a-valid-mime-type",
-			},
-		}, nil)
+	SubTest(t, "InvalidFileType", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				Offset: 0,
+				MetaData: map[string]string{
+					"filetype": "non-a-valid-mime-type",
+				},
+			}, nil),
+		)
 
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})
 
 		(&httpTest{
@@ -130,17 +133,24 @@ func TestGet(t *testing.T) {
 		}).Run(handler, t)
 	})
 
-	SubTest(t, "NotWhitelistedFileType", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			Offset: 0,
-			MetaData: map[string]string{
-				"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-				"filename": "invoice.docx",
-			},
-		}, nil)
+	SubTest(t, "NotWhitelistedFileType", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				Offset: 0,
+				MetaData: map[string]string{
+					"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+					"filename": "invoice.docx",
+				},
+			}, nil),
+		)
 
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})
 
 		(&httpTest{
@@ -1,4 +1,4 @@
-package tusd
+package handler
 
 import (
 	"net/http"
@@ -40,16 +40,12 @@ func NewHandler(config Config) (*Handler, error) {
 	mux.Post("", http.HandlerFunc(handler.PostFile))
 	mux.Head(":id", http.HandlerFunc(handler.HeadFile))
 	mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))
+	mux.Get(":id", http.HandlerFunc(handler.GetFile))
 
 	// Only attach the DELETE handler if the Terminate() method is provided
 	if config.StoreComposer.UsesTerminater {
 		mux.Del(":id", http.HandlerFunc(handler.DelFile))
 	}
 
-	// GET handler requires the GetReader() method
-	if config.StoreComposer.UsesGetReader {
-		mux.Get(":id", http.HandlerFunc(handler.GetFile))
-	}
-
 	return routedHandler, nil
 }
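The route gating above relies on the new two-step store pattern: the handler first fetches an Upload and then converts it through the As...Upload methods. A sketch of what the DELETE path boils down to, with error handling reduced to the essentials; the terminate helper name and error message are illustrative.

package main

import (
    "context"
    "errors"

    "github.com/tus/tusd/pkg/handler"
)

// terminate fetches the upload, converts it via AsTerminatableUpload and
// terminates it. The real handler also acquires a lock and maps errors to
// HTTP status codes.
func terminate(ctx context.Context, store handler.DataStore, id string) error {
    upload, err := store.GetUpload(ctx, id)
    if err != nil {
        return err
    }
    terminater, ok := store.(handler.TerminaterDataStore)
    if !ok {
        return errors.New("store does not support termination")
    }
    return terminater.AsTerminatableUpload(upload).Terminate(ctx)
}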
@@ -0,0 +1,321 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: utils_test.go
+
+// Package handler_test is a generated GoMock package.
+package handler_test
+
+import (
+	context "context"
+	gomock "github.com/golang/mock/gomock"
+	handler "github.com/tus/tusd/pkg/handler"
+	io "io"
+	reflect "reflect"
+)
+
+// MockFullDataStore is a mock of FullDataStore interface
+type MockFullDataStore struct {
+	ctrl     *gomock.Controller
+	recorder *MockFullDataStoreMockRecorder
+}
+
+// MockFullDataStoreMockRecorder is the mock recorder for MockFullDataStore
+type MockFullDataStoreMockRecorder struct {
+	mock *MockFullDataStore
+}
+
+// NewMockFullDataStore creates a new mock instance
+func NewMockFullDataStore(ctrl *gomock.Controller) *MockFullDataStore {
+	mock := &MockFullDataStore{ctrl: ctrl}
+	mock.recorder = &MockFullDataStoreMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockFullDataStore) EXPECT() *MockFullDataStoreMockRecorder {
+	return m.recorder
+}
+
+// NewUpload mocks base method
+func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NewUpload", ctx, info)
+	ret0, _ := ret[0].(handler.Upload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NewUpload indicates an expected call of NewUpload
+func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
+}
+
+// GetUpload mocks base method
+func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetUpload", ctx, id)
+	ret0, _ := ret[0].(handler.Upload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetUpload indicates an expected call of GetUpload
+func (mr *MockFullDataStoreMockRecorder) GetUpload(ctx, id interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), ctx, id)
+}
+
+// AsTerminatableUpload mocks base method
+func (m *MockFullDataStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AsTerminatableUpload", upload)
+	ret0, _ := ret[0].(handler.TerminatableUpload)
+	return ret0
+}
+
+// AsTerminatableUpload indicates an expected call of AsTerminatableUpload
+func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsTerminatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsTerminatableUpload), upload)
+}
+
+// AsConcatableUpload mocks base method
+func (m *MockFullDataStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AsConcatableUpload", upload)
+	ret0, _ := ret[0].(handler.ConcatableUpload)
+	return ret0
+}
+
+// AsConcatableUpload indicates an expected call of AsConcatableUpload
+func (mr *MockFullDataStoreMockRecorder) AsConcatableUpload(upload interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsConcatableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsConcatableUpload), upload)
+}
+
+// AsLengthDeclarableUpload mocks base method
+func (m *MockFullDataStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AsLengthDeclarableUpload", upload)
+	ret0, _ := ret[0].(handler.LengthDeclarableUpload)
+	return ret0
+}
+
+// AsLengthDeclarableUpload indicates an expected call of AsLengthDeclarableUpload
+func (mr *MockFullDataStoreMockRecorder) AsLengthDeclarableUpload(upload interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AsLengthDeclarableUpload", reflect.TypeOf((*MockFullDataStore)(nil).AsLengthDeclarableUpload), upload)
+}
+
+// MockFullUpload is a mock of FullUpload interface
+type MockFullUpload struct {
+	ctrl     *gomock.Controller
+	recorder *MockFullUploadMockRecorder
+}
+
+// MockFullUploadMockRecorder is the mock recorder for MockFullUpload
+type MockFullUploadMockRecorder struct {
+	mock *MockFullUpload
+}
+
+// NewMockFullUpload creates a new mock instance
+func NewMockFullUpload(ctrl *gomock.Controller) *MockFullUpload {
+	mock := &MockFullUpload{ctrl: ctrl}
+	mock.recorder = &MockFullUploadMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockFullUpload) EXPECT() *MockFullUploadMockRecorder {
+	return m.recorder
+}
+
+// WriteChunk mocks base method
+func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
+	ret0, _ := ret[0].(int64)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// WriteChunk indicates an expected call of WriteChunk
+func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
+}
+
+// GetInfo mocks base method
+func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetInfo", ctx)
+	ret0, _ := ret[0].(handler.FileInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetInfo indicates an expected call of GetInfo
+func (mr *MockFullUploadMockRecorder) GetInfo(ctx interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo), ctx)
+}
+
+// GetReader mocks base method
+func (m *MockFullUpload) GetReader(ctx context.Context) (io.Reader, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetReader", ctx)
+	ret0, _ := ret[0].(io.Reader)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetReader indicates an expected call of GetReader
+func (mr *MockFullUploadMockRecorder) GetReader(ctx interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader), ctx)
+}
+
+// FinishUpload mocks base method
+func (m *MockFullUpload) FinishUpload(ctx context.Context) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "FinishUpload", ctx)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// FinishUpload indicates an expected call of FinishUpload
+func (mr *MockFullUploadMockRecorder) FinishUpload(ctx interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload), ctx)
+}
+
+// Terminate mocks base method
+func (m *MockFullUpload) Terminate(ctx context.Context) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Terminate", ctx)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Terminate indicates an expected call of Terminate
+func (mr *MockFullUploadMockRecorder) Terminate(ctx interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate), ctx)
+}
+
+// DeclareLength mocks base method
+func (m *MockFullUpload) DeclareLength(ctx context.Context, length int64) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeclareLength", ctx, length)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// DeclareLength indicates an expected call of DeclareLength
+func (mr *MockFullUploadMockRecorder) DeclareLength(ctx, length interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), ctx, length)
+}
+
+// ConcatUploads mocks base method
+func (m *MockFullUpload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ConcatUploads", ctx, partialUploads)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// ConcatUploads indicates an expected call of ConcatUploads
+func (mr *MockFullUploadMockRecorder) ConcatUploads(ctx, partialUploads interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullUpload)(nil).ConcatUploads), ctx, partialUploads)
+}
+
+// MockFullLocker is a mock of FullLocker interface
+type MockFullLocker struct {
+	ctrl     *gomock.Controller
+	recorder *MockFullLockerMockRecorder
+}
+
+// MockFullLockerMockRecorder is the mock recorder for MockFullLocker
+type MockFullLockerMockRecorder struct {
+	mock *MockFullLocker
+}
+
+// NewMockFullLocker creates a new mock instance
+func NewMockFullLocker(ctrl *gomock.Controller) *MockFullLocker {
+	mock := &MockFullLocker{ctrl: ctrl}
+	mock.recorder = &MockFullLockerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockFullLocker) EXPECT() *MockFullLockerMockRecorder {
+	return m.recorder
+}
+
+// NewLock mocks base method
+func (m *MockFullLocker) NewLock(id string) (handler.Lock, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NewLock", id)
+	ret0, _ := ret[0].(handler.Lock)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NewLock indicates an expected call of NewLock
+func (mr *MockFullLockerMockRecorder) NewLock(id interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLock", reflect.TypeOf((*MockFullLocker)(nil).NewLock), id)
+}
+
+// MockFullLock is a mock of FullLock interface
+type MockFullLock struct {
+	ctrl     *gomock.Controller
+	recorder *MockFullLockMockRecorder
+}
+
+// MockFullLockMockRecorder is the mock recorder for MockFullLock
+type MockFullLockMockRecorder struct {
+	mock *MockFullLock
+}
+
+// NewMockFullLock creates a new mock instance
+func NewMockFullLock(ctrl *gomock.Controller) *MockFullLock {
+	mock := &MockFullLock{ctrl: ctrl}
+	mock.recorder = &MockFullLockMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockFullLock) EXPECT() *MockFullLockMockRecorder {
+	return m.recorder
+}
+
+// Lock mocks base method
+func (m *MockFullLock) Lock() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Lock")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Lock indicates an expected call of Lock
+func (mr *MockFullLockMockRecorder) Lock() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockFullLock)(nil).Lock))
+}
+
+// Unlock mocks base method
+func (m *MockFullLock) Unlock() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Unlock")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Unlock indicates an expected call of Unlock
+func (mr *MockFullLockMockRecorder) Unlock() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockFullLock)(nil).Unlock))
+}
@@ -1,23 +1,28 @@
-package tusd_test
+package handler_test

 import (
+	"context"
 	"net/http"
 	"os"
 	"testing"

 	"github.com/golang/mock/gomock"
-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"
 )

 func TestHead(t *testing.T) {
-	SubTest(t, "Status", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "Status", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		ctrl := gomock.NewController(t)
 		defer ctrl.Finish()
-		locker := NewMockLocker(ctrl)
+		locker := NewMockFullLocker(ctrl)
+		lock := NewMockFullLock(ctrl)
+		upload := NewMockFullUpload(ctrl)

 		gomock.InOrder(
-			locker.EXPECT().LockUpload("yes"),
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			locker.EXPECT().NewLock("yes").Return(lock, nil),
+			lock.EXPECT().Lock().Return(nil),
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				Offset: 11,
 				Size:   44,
 				MetaData: map[string]string{
@@ -25,10 +30,10 @@ func TestHead(t *testing.T) {
 					"type": "image/png",
 				},
 			}, nil),
-			locker.EXPECT().UnlockUpload("yes"),
+			lock.EXPECT().Unlock().Return(nil),
 		)

-		composer := NewStoreComposer()
+		composer = NewStoreComposer()
 		composer.UseCore(store)
 		composer.UseLocker(locker)

@@ -58,11 +63,11 @@ func TestHead(t *testing.T) {
 		}
 	})

-	SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("no").Return(FileInfo{}, os.ErrNotExist)
+	SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		res := (&httpTest{
@@ -82,14 +87,21 @@ func TestHead(t *testing.T) {
 		}
 	})

-	SubTest(t, "DeferLengthHeader", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			SizeIsDeferred: true,
-			Size:           0,
-		}, nil)
+	SubTest(t, "DeferLengthHeader", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				SizeIsDeferred: true,
+				Size:           0,
+			}, nil),
+		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -105,16 +117,21 @@ func TestHead(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "NoDeferLengthHeader", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "NoDeferLengthHeader", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				SizeIsDeferred: false,
 				Size:           10,
 			}, nil),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
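The Status subtest above spells out the shape of the reworked locking API: the handler first asks the Locker for a lock handle via NewLock(id) and only then calls Lock, with Unlock running at the end of the request. A minimal in-memory implementation of that contract could look like the sketch below; the package name and the error message are illustrative assumptions, not part of this commit:

package memorylocker

import (
	"errors"
	"sync"

	"github.com/tus/tusd/pkg/handler"
)

// MemoryLocker tracks one flag per upload ID.
type MemoryLocker struct {
	mu    sync.Mutex
	locks map[string]struct{}
}

func New() *MemoryLocker {
	return &MemoryLocker{locks: make(map[string]struct{})}
}

// NewLock only creates the handle; nothing is acquired yet.
func (ml *MemoryLocker) NewLock(id string) (handler.Lock, error) {
	return &memoryLock{locker: ml, id: id}, nil
}

type memoryLock struct {
	locker *MemoryLocker
	id     string
}

// Lock acquires the flag or fails if another request holds it.
func (l *memoryLock) Lock() error {
	l.locker.mu.Lock()
	defer l.locker.mu.Unlock()
	if _, held := l.locker.locks[l.id]; held {
		return errors.New("upload is currently locked")
	}
	l.locker.locks[l.id] = struct{}{}
	return nil
}

// Unlock releases the flag again.
func (l *memoryLock) Unlock() error {
	l.locker.mu.Lock()
	defer l.locker.mu.Unlock()
	delete(l.locker.locks, l.id)
	return nil
}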
@@ -1,4 +1,4 @@
-package tusd
+package handler

 import (
 	"log"
@@ -1,4 +1,4 @@
-package tusd
+package handler

 import (
 	"errors"
@@ -1,15 +1,15 @@
-package tusd_test
+package handler_test

 import (
 	"net/http"
 	"testing"

-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"
 )

 func TestOptions(t *testing.T) {
-	SubTest(t, "Discovery", func(t *testing.T, store *MockFullDataStore) {
-		composer := NewStoreComposer()
+	SubTest(t, "Discovery", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		composer = NewStoreComposer()
 		composer.UseCore(store)

 		handler, _ := NewHandler(Config{
@@ -29,9 +29,9 @@ func TestOptions(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "InvalidVersion", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "InvalidVersion", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
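The Discovery subtest drives behavior that can also be checked end to end with plain net/http machinery: an OPTIONS request should advertise the enabled extensions through the Tus-Extension header defined by the tus protocol. A rough standalone sketch; checkDiscovery is an illustrative helper, not part of this commit:

package handler_test

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func checkDiscovery(t *testing.T, h http.Handler) {
	req := httptest.NewRequest("OPTIONS", "/files/", nil)
	w := httptest.NewRecorder()
	h.ServeHTTP(w, req)

	// An empty value would mean no extensions were registered on the composer.
	if got := w.Header().Get("Tus-Extension"); got == "" {
		t.Errorf("expected Tus-Extension to list the enabled extensions")
	}
}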
@@ -1,6 +1,7 @@
-package tusd_test
+package handler_test

 import (
+	"context"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -12,27 +13,32 @@ import (
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"

-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"
 )

 func TestPatch(t *testing.T) {
-	SubTest(t, "UploadChunk", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "UploadChunk", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:     "yes",
 				Offset: 5,
 				Size:   10,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
-			store.EXPECT().FinishUpload("yes"),
+			upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
+			upload.EXPECT().FinishUpload(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore:             store,
+			StoreComposer:         composer,
 			NotifyCompleteUploads: true,
 		})

-		c := make(chan FileInfo, 1)
+		c := make(chan HookEvent, 1)
 		handler.CompleteUploads = c

 		(&httpTest{
@@ -51,24 +57,36 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)

 		a := assert.New(t)
-		info := <-c
+		event := <-c
+		info := event.Upload
 		a.Equal("yes", info.ID)
 		a.EqualValues(int64(10), info.Size)
 		a.Equal(int64(10), info.Offset)
+
+		req := event.HTTPRequest
+		a.Equal("PATCH", req.Method)
+		a.Equal("yes", req.URI)
+		a.Equal("5", req.Header.Get("Upload-Offset"))
 	})

-	SubTest(t, "MethodOverriding", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "MethodOverriding", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:     "yes",
 				Offset: 5,
-				Size:   20,
+				Size:   10,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
+			upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
+			upload.EXPECT().FinishUpload(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -88,14 +106,22 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "UploadChunkToFinished", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			Offset: 20,
-			Size:   20,
-		}, nil)
+	SubTest(t, "UploadChunkToFinished", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID:     "yes",
+				Offset: 20,
+				Size:   20,
+			}, nil),
+		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -114,11 +140,11 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("no").Return(FileInfo{}, os.ErrNotExist)
+	SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -133,13 +159,21 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "MissmatchingOffsetFail", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			Offset: 5,
-		}, nil)
+	SubTest(t, "MissmatchingOffsetFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID:     "yes",
+				Offset: 5,
+			}, nil),
+		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -154,14 +188,22 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "ExceedingMaxSizeFail", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().GetInfo("yes").Return(FileInfo{
-			Offset: 5,
-			Size:   10,
-		}, nil)
+	SubTest(t, "ExceedingMaxSizeFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID:     "yes",
+				Offset: 5,
+				Size:   10,
+			}, nil),
+		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -177,9 +219,9 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "InvalidContentTypeFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "InvalidContentTypeFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -195,9 +237,9 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "InvalidOffsetFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "InvalidOffsetFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -213,7 +255,7 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "OverflowWithoutLength", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "OverflowWithoutLength", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		// In this test we attempt to upload more than 15 bytes to an upload
 		// which has only space for 15 bytes (offset of 5 and size of 20).
 		// The request does not contain the Content-Length header and the handler
@@ -221,18 +263,23 @@ func TestPatch(t *testing.T) {
 		// is that even if the uploader supplies more than 15 bytes, we only
 		// pass 15 bytes to the data store and ignore the rest.

+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:     "yes",
 				Offset: 5,
 				Size:   20,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
-			store.EXPECT().FinishUpload("yes"),
+			upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
+			upload.EXPECT().FinishUpload(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		// Wrap the string.Reader in a NopCloser to hide its type. else
@@ -257,22 +304,28 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "DeclareLengthOnFinalChunk", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "DeclareLengthOnFinalChunk", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:             "yes",
 				Offset:         5,
 				Size:           0,
 				SizeIsDeferred: true,
 			}, nil),
-			store.EXPECT().DeclareLength("yes", int64(20)),
-			store.EXPECT().WriteChunk("yes", int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
-			store.EXPECT().FinishUpload("yes"),
+			store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
+			upload.EXPECT().DeclareLength(context.Background(), int64(20)),
+			upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
+			upload.EXPECT().FinishUpload(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 			MaxSize:       20,
 		})

 		body := strings.NewReader("hellothisismore")
@@ -294,21 +347,27 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "DeclareLengthAfterFinalChunk", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "DeclareLengthAfterFinalChunk", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:             "yes",
 				Offset:         20,
 				Size:           0,
 				SizeIsDeferred: true,
 			}, nil),
-			store.EXPECT().DeclareLength("yes", int64(20)),
-			store.EXPECT().FinishUpload("yes"),
+			store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
+			upload.EXPECT().DeclareLength(context.Background(), int64(20)),
+			upload.EXPECT().FinishUpload(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 			MaxSize:       20,
 		})

 		(&httpTest{
@@ -326,29 +385,38 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "DeclareLengthOnNonFinalChunk", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "DeclareLengthOnNonFinalChunk", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload1 := NewMockFullUpload(ctrl)
+		upload2 := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload1, nil),
+			upload1.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:             "yes",
 				Offset:         5,
 				Size:           0,
 				SizeIsDeferred: true,
 			}, nil),
-			store.EXPECT().DeclareLength("yes", int64(20)),
-			store.EXPECT().WriteChunk("yes", int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
+			upload1.EXPECT().DeclareLength(context.Background(), int64(20)),
+			upload1.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
+
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload2, nil),
+			upload2.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:             "yes",
 				Offset:         10,
 				Size:           20,
 				SizeIsDeferred: false,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
-			store.EXPECT().FinishUpload("yes"),
+			upload2.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
+			upload2.EXPECT().FinishUpload(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 			MaxSize:       20,
 		})

 		(&httpTest{
@@ -383,22 +451,27 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "Locker", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "Locker", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		ctrl := gomock.NewController(t)
 		defer ctrl.Finish()
-		locker := NewMockLocker(ctrl)
+		locker := NewMockFullLocker(ctrl)
+		lock := NewMockFullLock(ctrl)
+		upload := NewMockFullUpload(ctrl)

 		gomock.InOrder(
-			locker.EXPECT().LockUpload("yes").Return(nil),
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			locker.EXPECT().NewLock("yes").Return(lock, nil),
+			lock.EXPECT().Lock().Return(nil),
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID:     "yes",
 				Offset: 0,
 				Size:   20,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
-			locker.EXPECT().UnlockUpload("yes").Return(nil),
+			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
+			lock.EXPECT().Unlock().Return(nil),
 		)

-		composer := NewStoreComposer()
+		composer = NewStoreComposer()
 		composer.UseCore(store)
 		composer.UseLocker(locker)

@@ -419,22 +492,27 @@ func TestPatch(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "NotifyUploadProgress", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "NotifyUploadProgress", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:     "yes",
 				Offset: 0,
 				Size:   100,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
+			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore:            store,
+			StoreComposer:        composer,
 			NotifyUploadProgress: true,
 		})

-		c := make(chan FileInfo)
+		c := make(chan HookEvent)
 		handler.UploadProgress = c

 		reader, writer := io.Pipe()
@@ -442,8 +520,9 @@ func TestPatch(t *testing.T) {

 		go func() {
 			writer.Write([]byte("first "))
+			event := <-c

-			info := <-c
+			info := event.Upload
 			a.Equal("yes", info.ID)
 			a.Equal(int64(100), info.Size)
 			a.Equal(int64(6), info.Offset)
@@ -451,7 +530,8 @@ func TestPatch(t *testing.T) {
 			writer.Write([]byte("second "))
 			writer.Write([]byte("third"))

-			info = <-c
+			event = <-c
+			info = event.Upload
 			a.Equal("yes", info.ID)
 			a.Equal(int64(100), info.Size)
 			a.Equal(int64(18), info.Offset)
@@ -486,23 +566,29 @@ func TestPatch(t *testing.T) {
 		a.False(more)
 	})

-	SubTest(t, "StopUpload", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "StopUpload", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
 		gomock.InOrder(
-			store.EXPECT().GetInfo("yes").Return(FileInfo{
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:     "yes",
 				Offset: 0,
 				Size:   100,
 			}, nil),
-			store.EXPECT().WriteChunk("yes", int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
-			store.EXPECT().Terminate("yes").Return(nil),
+			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
+			store.EXPECT().AsTerminatableUpload(upload).Return(upload),
+			upload.EXPECT().Terminate(context.Background()),
 		)

 		handler, _ := NewHandler(Config{
-			DataStore:            store,
+			StoreComposer:        composer,
 			NotifyUploadProgress: true,
 		})

-		c := make(chan FileInfo)
+		c := make(chan HookEvent)
 		handler.UploadProgress = c

 		reader, writer := io.Pipe()
@@ -511,7 +597,8 @@ func TestPatch(t *testing.T) {
 		go func() {
 			writer.Write([]byte("first "))

-			info := <-c
+			event := <-c
+			info := event.Upload
 			info.StopUpload()

 			// Wait a short time to ensure that the goroutine in the PATCH
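The NotifyUploadProgress and StopUpload subtests above exercise the producer side of the new HookEvent channels; an application consumes them from its own goroutine. A sketch of such a consumer, where watchProgress and the byte limit are made-up illustrations rather than tusd API:

package main

import (
	"log"

	"github.com/tus/tusd/pkg/handler"
)

// watchProgress logs progress events and aborts any upload that grows past
// limit bytes, mirroring what the StopUpload subtest simulates.
func watchProgress(h *handler.UnroutedHandler, limit int64) {
	for event := range h.UploadProgress {
		info := event.Upload
		log.Printf("upload %s: %d of %d bytes received", info.ID, info.Offset, info.Size)
		if info.Offset > limit {
			// StopUpload interrupts the running PATCH request; the handler
			// then terminates the upload, as the expectations above show.
			info.StopUpload()
		}
	}
}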
@@ -1,7 +1,8 @@
-package tusd_test
+package handler_test

 import (
 	"bytes"
+	"context"
 	"net/http"
 	"strings"
 	"testing"
@@ -9,26 +10,40 @@ import (
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"

-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"
 )

 func TestPost(t *testing.T) {
-	SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().NewUpload(FileInfo{
-			Size: 300,
-			MetaData: map[string]string{
-				"foo": "hello",
-				"bar": "world",
-			},
-		}).Return("foo", nil)
+	SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().NewUpload(context.Background(), FileInfo{
+				Size: 300,
+				MetaData: map[string]string{
+					"foo": "hello",
+					"bar": "world",
+				},
+			}).Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID:   "foo",
+				Size: 300,
+				MetaData: map[string]string{
+					"foo": "hello",
+					"bar": "world",
+				},
+			}, nil),
+		)

 		handler, _ := NewHandler(Config{
-			DataStore:            store,
+			StoreComposer:        composer,
 			BasePath:             "https://buy.art/files/",
 			NotifyCreatedUploads: true,
 		})

-		c := make(chan FileInfo, 1)
+		c := make(chan HookEvent, 1)
 		handler.CreatedUploads = c

 		(&httpTest{
@@ -45,28 +60,39 @@ func TestPost(t *testing.T) {
 			},
 		}).Run(handler, t)

-		info := <-c
+		event := <-c
+		info := event.Upload

 		a := assert.New(t)
 		a.Equal("foo", info.ID)
 		a.Equal(int64(300), info.Size)
 	})

-	SubTest(t, "CreateEmptyUpload", func(t *testing.T, store *MockFullDataStore) {
-		store.EXPECT().NewUpload(FileInfo{
-			Size:     0,
-			MetaData: map[string]string{},
-		}).Return("foo", nil)
-
-		store.EXPECT().FinishUpload("foo").Return(nil)
+	SubTest(t, "CreateEmptyUpload", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().NewUpload(context.Background(), FileInfo{
+				Size:     0,
+				MetaData: map[string]string{},
+			}).Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID:       "foo",
+				Size:     0,
+				MetaData: map[string]string{},
+			}, nil),
+			upload.EXPECT().FinishUpload(context.Background()).Return(nil),
+		)

 		handler, _ := NewHandler(Config{
-			DataStore:             store,
+			StoreComposer:         composer,
 			BasePath:              "https://buy.art/files/",
 			NotifyCompleteUploads: true,
 		})

-		handler.CompleteUploads = make(chan FileInfo, 1)
+		handler.CompleteUploads = make(chan HookEvent, 1)

 		(&httpTest{
 			Method: "POST",
@@ -80,19 +106,24 @@ func TestPost(t *testing.T) {
 			},
 		}).Run(handler, t)

-		info := <-handler.CompleteUploads
+		event := <-handler.CompleteUploads
+		info := event.Upload

 		a := assert.New(t)
 		a.Equal("foo", info.ID)
 		a.Equal(int64(0), info.Size)
 		a.Equal(int64(0), info.Offset)
+
+		req := event.HTTPRequest
+		a.Equal("POST", req.Method)
+		a.Equal("", req.URI)
 	})

-	SubTest(t, "CreateExceedingMaxSizeFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "CreateExceedingMaxSizeFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
 			MaxSize:       400,
-			DataStore:     store,
+			StoreComposer: composer,
 			BasePath:      "/files/",
 		})

 		(&httpTest{
@@ -107,9 +138,9 @@ func TestPost(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "InvalidUploadLengthFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "InvalidUploadLengthFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -123,9 +154,9 @@ func TestPost(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "UploadLengthAndUploadDeferLengthFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "UploadLengthAndUploadDeferLengthFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -140,9 +171,9 @@ func TestPost(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "NeitherUploadLengthNorUploadDeferLengthFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "NeitherUploadLengthNorUploadDeferLengthFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -155,9 +186,9 @@ func TestPost(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "InvalidUploadDeferLengthFail", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "InvalidUploadDeferLengthFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		handler, _ := NewHandler(Config{
-			DataStore: store,
+			StoreComposer: composer,
 		})

 		(&httpTest{
@@ -171,16 +202,27 @@ func TestPost(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "ForwardHeaders", func(t *testing.T, store *MockFullDataStore) {
-		SubTest(t, "IgnoreXForwarded", func(t *testing.T, store *MockFullDataStore) {
-			store.EXPECT().NewUpload(FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return("foo", nil)
+	SubTest(t, "ForwardHeaders", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		SubTest(t, "IgnoreXForwarded", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:     300,
+					MetaData: map[string]string{},
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:       "foo",
+					Size:     300,
+					MetaData: map[string]string{},
+				}, nil),
+			)

 			handler, _ := NewHandler(Config{
-				DataStore: store,
+				StoreComposer: composer,
 				BasePath:  "/files/",
 			})

 			(&httpTest{
@@ -198,14 +240,25 @@ func TestPost(t *testing.T) {
 			}).Run(handler, t)
 		})

-		SubTest(t, "RespectXForwarded", func(t *testing.T, store *MockFullDataStore) {
-			store.EXPECT().NewUpload(FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return("foo", nil)
+		SubTest(t, "RespectXForwarded", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:     300,
+					MetaData: map[string]string{},
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:       "foo",
+					Size:     300,
+					MetaData: map[string]string{},
+				}, nil),
+			)

 			handler, _ := NewHandler(Config{
-				DataStore:               store,
+				StoreComposer:           composer,
 				BasePath:                "/files/",
 				RespectForwardedHeaders: true,
 			})
@@ -225,14 +278,25 @@ func TestPost(t *testing.T) {
 			}).Run(handler, t)
 		})

-		SubTest(t, "RespectForwarded", func(t *testing.T, store *MockFullDataStore) {
-			store.EXPECT().NewUpload(FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return("foo", nil)
+		SubTest(t, "RespectForwarded", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:     300,
+					MetaData: map[string]string{},
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:       "foo",
+					Size:     300,
+					MetaData: map[string]string{},
+				}, nil),
+			)

 			handler, _ := NewHandler(Config{
-				DataStore:               store,
+				StoreComposer:           composer,
 				BasePath:                "/files/",
 				RespectForwardedHeaders: true,
 			})
@@ -253,14 +317,25 @@ func TestPost(t *testing.T) {
 			}).Run(handler, t)
 		})

-		SubTest(t, "FilterForwardedProtocol", func(t *testing.T, store *MockFullDataStore) {
-			store.EXPECT().NewUpload(FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return("foo", nil)
+		SubTest(t, "FilterForwardedProtocol", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:     300,
+					MetaData: map[string]string{},
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:       "foo",
+					Size:     300,
+					MetaData: map[string]string{},
+				}, nil),
+			)

 			handler, _ := NewHandler(Config{
-				DataStore:               store,
+				StoreComposer:           composer,
 				BasePath:                "/files/",
 				RespectForwardedHeaders: true,
 			})
@@ -281,26 +356,37 @@ func TestPost(t *testing.T) {
 		})
 	})

-	SubTest(t, "WithUpload", func(t *testing.T, store *MockFullDataStore) {
-		SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "WithUpload", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		SubTest(t, "Create", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 			ctrl := gomock.NewController(t)
 			defer ctrl.Finish()
-			locker := NewMockLocker(ctrl)
+			locker := NewMockFullLocker(ctrl)
+			lock := NewMockFullLock(ctrl)
+			upload := NewMockFullUpload(ctrl)

 			gomock.InOrder(
-				store.EXPECT().NewUpload(FileInfo{
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
 					Size: 300,
 					MetaData: map[string]string{
 						"foo": "hello",
 						"bar": "world",
 					},
-				}).Return("foo", nil),
-				locker.EXPECT().LockUpload("foo"),
-				store.EXPECT().WriteChunk("foo", int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
-				locker.EXPECT().UnlockUpload("foo"),
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:   "foo",
+					Size: 300,
+					MetaData: map[string]string{
+						"foo": "hello",
+						"bar": "world",
+					},
+				}, nil),
+				locker.EXPECT().NewLock("foo").Return(lock, nil),
+				lock.EXPECT().Lock().Return(nil),
+				upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
+				lock.EXPECT().Unlock().Return(nil),
 			)

-			composer := NewStoreComposer()
+			composer = NewStoreComposer()
 			composer.UseCore(store)
 			composer.UseLocker(locker)

@@ -326,15 +412,26 @@ func TestPost(t *testing.T) {
 			}).Run(handler, t)
 		})

-		SubTest(t, "CreateExceedingUploadSize", func(t *testing.T, store *MockFullDataStore) {
-			store.EXPECT().NewUpload(FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return("foo", nil)
+		SubTest(t, "CreateExceedingUploadSize", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:     300,
+					MetaData: map[string]string{},
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:       "foo",
+					Size:     300,
+					MetaData: map[string]string{},
+				}, nil),
+			)

 			handler, _ := NewHandler(Config{
-				DataStore: store,
+				StoreComposer: composer,
 				BasePath:  "/files/",
 			})

 			(&httpTest{
@@ -349,15 +446,26 @@ func TestPost(t *testing.T) {
 			}).Run(handler, t)
 		})

-		SubTest(t, "IncorrectContentType", func(t *testing.T, store *MockFullDataStore) {
-			store.EXPECT().NewUpload(FileInfo{
-				Size:     300,
-				MetaData: map[string]string{},
-			}).Return("foo", nil)
+		SubTest(t, "IncorrectContentType", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+			upload := NewMockFullUpload(ctrl)
+
+			gomock.InOrder(
+				store.EXPECT().NewUpload(context.Background(), FileInfo{
+					Size:     300,
+					MetaData: map[string]string{},
+				}).Return(upload, nil),
+				upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+					ID:       "foo",
+					Size:     300,
+					MetaData: map[string]string{},
+				}, nil),
+			)

 			handler, _ := NewHandler(Config{
-				DataStore: store,
+				StoreComposer: composer,
 				BasePath:  "/files/",
 			})

 			(&httpTest{
@@ -377,10 +485,10 @@ func TestPost(t *testing.T) {
 			}).Run(handler, t)
 		})

-		SubTest(t, "UploadToFinalUpload", func(t *testing.T, store *MockFullDataStore) {
+		SubTest(t, "UploadToFinalUpload", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 			handler, _ := NewHandler(Config{
-				DataStore: store,
+				StoreComposer: composer,
 				BasePath:  "/files/",
 			})

 			(&httpTest{
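All the creation tests above encode the same handshake: NewUpload now returns an Upload handle instead of an ID string, and the handler immediately calls GetInfo on that handle to learn the final FileInfo, including the store-assigned ID. A store-side sketch of that flow; myStore and myUpload are hypothetical, the ID literal is a stand-in, and the method set mirrors the calls these tests make plus GetReader, which is assumed from the released Upload interface:

package mystore

import (
	"context"
	"errors"
	"io"

	"github.com/tus/tusd/pkg/handler"
)

type myStore struct{}

type myUpload struct {
	info handler.FileInfo
}

// NewUpload assigns an ID and hands back an Upload handle; the handler then
// calls GetInfo on the handle to build the Location header and hook events.
func (s myStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
	info.ID = "generated-id" // a real store derives a unique ID here
	return &myUpload{info: info}, nil
}

func (u *myUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
	return u.info, nil
}

func (u *myUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
	return 0, errors.New("not implemented in this sketch")
}

func (u *myUpload) GetReader(ctx context.Context) (io.Reader, error) {
	return nil, errors.New("not implemented in this sketch")
}

func (u *myUpload) FinishUpload(ctx context.Context) error {
	return nil
}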
@@ -0,0 +1,27 @@
+package handler_test
+
+import (
+	"testing"
+
+	"github.com/tus/tusd/pkg/handler"
+
+	"github.com/golang/mock/gomock"
+)
+
+func SubTest(t *testing.T, name string, runTest func(*testing.T, *MockFullDataStore, *handler.StoreComposer)) {
+	t.Run(name, func(subT *testing.T) {
+		//subT.Parallel()
+
+		ctrl := gomock.NewController(subT)
+		defer ctrl.Finish()
+
+		store := NewMockFullDataStore(ctrl)
+		composer := handler.NewStoreComposer()
+		composer.UseCore(store)
+		composer.UseTerminater(store)
+		composer.UseConcater(store)
+		composer.UseLengthDeferrer(store)
+
+		runTest(subT, store, composer)
+	})
+}
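Every suite in this commit runs through the helper above, which is where the new composer parameter comes from: SubTest wires the mock store into a composer with the terminate, concat, and length-defer extensions enabled, and a subtest reassigns composer when it needs its own setup, as the Locker subtests do. A typical call site, with TestExample being an invented name:

package handler_test

import (
	"testing"

	"github.com/tus/tusd/pkg/handler"
)

func TestExample(t *testing.T) {
	SubTest(t, "Example", func(t *testing.T, store *MockFullDataStore, composer *handler.StoreComposer) {
		// Set expectations on store, then build a handler from the
		// pre-wired composer and drive requests against it.
		h, _ := handler.NewHandler(handler.Config{StoreComposer: composer})
		_ = h
	})
}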
@@ -1,18 +1,19 @@
-package tusd_test
+package handler_test

 import (
+	"context"
 	"net/http"
 	"testing"

 	"github.com/golang/mock/gomock"
-	. "github.com/tus/tusd"
+	. "github.com/tus/tusd/pkg/handler"

 	"github.com/stretchr/testify/assert"
 )

 func TestTerminate(t *testing.T) {
-	SubTest(t, "ExtensionDiscovery", func(t *testing.T, store *MockFullDataStore) {
-		composer := NewStoreComposer()
+	SubTest(t, "ExtensionDiscovery", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		composer = NewStoreComposer()
 		composer.UseCore(store)
 		composer.UseTerminater(store)

@@ -29,22 +30,27 @@ func TestTerminate(t *testing.T) {
 		}).Run(handler, t)
 	})

-	SubTest(t, "Termination", func(t *testing.T, store *MockFullDataStore) {
+	SubTest(t, "Termination", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
 		ctrl := gomock.NewController(t)
 		defer ctrl.Finish()
-		locker := NewMockLocker(ctrl)
+		locker := NewMockFullLocker(ctrl)
+		lock := NewMockFullLock(ctrl)
+		upload := NewMockFullUpload(ctrl)

 		gomock.InOrder(
-			locker.EXPECT().LockUpload("foo"),
-			store.EXPECT().GetInfo("foo").Return(FileInfo{
+			locker.EXPECT().NewLock("foo").Return(lock, nil),
+			lock.EXPECT().Lock().Return(nil),
+			store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID:   "foo",
 				Size: 10,
 			}, nil),
-			store.EXPECT().Terminate("foo").Return(nil),
-			locker.EXPECT().UnlockUpload("foo"),
+			store.EXPECT().AsTerminatableUpload(upload).Return(upload),
+			upload.EXPECT().Terminate(context.Background()).Return(nil),
+			lock.EXPECT().Unlock().Return(nil),
 		)

-		composer := NewStoreComposer()
+		composer = NewStoreComposer()
 		composer.UseCore(store)
 		composer.UseTerminater(store)
 		composer.UseLocker(locker)
@@ -54,7 +60,7 @@ func TestTerminate(t *testing.T) {
 			NotifyTerminatedUploads: true,
 		})

-		c := make(chan FileInfo, 1)
+		c := make(chan HookEvent, 1)
 		handler.TerminatedUploads = c

 		(&httpTest{
@@ -66,15 +72,20 @@ func TestTerminate(t *testing.T) {
 			Code: http.StatusNoContent,
 		}).Run(handler, t)

-		info := <-c
+		event := <-c
+		info := event.Upload

 		a := assert.New(t)
 		a.Equal("foo", info.ID)
 		a.Equal(int64(10), info.Size)
+
+		req := event.HTTPRequest
+		a.Equal("DELETE", req.Method)
+		a.Equal("foo", req.URI)
 	})

-	SubTest(t, "NotProvided", func(t *testing.T, store *MockFullDataStore) {
-		composer := NewStoreComposer()
+	SubTest(t, "NotProvided", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		composer = NewStoreComposer()
 		composer.UseCore(store)

 		handler, _ := NewUnroutedHandler(Config{
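The Termination subtest shows the second conversion pattern introduced in this commit: instead of calling Terminate on the store, the handler first asks the store to view a generic Upload as a TerminatableUpload. Continuing the hypothetical myStore/myUpload sketch from earlier, the store side can be a plain type assertion when all of its uploads support termination:

// AsTerminatableUpload converts the generic handle; the handler only calls
// this when the composer has a terminater registered via UseTerminater.
func (s myStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
	return upload.(*myUpload)
}

// Terminate removes all resources associated with the upload.
func (u *myUpload) Terminate(ctx context.Context) error {
	// delete the data stored under u.info.ID here
	return nil
}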
@@ -1,6 +1,7 @@
-package tusd
+package handler

 import (
+	"context"
 	"encoding/base64"
 	"errors"
 	"io"
@@ -14,8 +15,6 @@ import (
 	"strings"
 	"sync/atomic"
 	"time"
-
-	"golang.org/x/net/context"
 )

 const UploadLengthDeferred = "1"
@@ -75,6 +74,40 @@ var (
 	ErrUploadStoppedByServer = NewHTTPError(errors.New("upload has been stopped by server"), http.StatusBadRequest)
 )

+// HTTPRequest contains basic details of an incoming HTTP request.
+type HTTPRequest struct {
+	// Method is the HTTP method, e.g. POST or PATCH
+	Method string
+	// URI is the full HTTP request URI, e.g. /files/fooo
+	URI string
+	// RemoteAddr contains the network address that sent the request
+	RemoteAddr string
+	// Header contains all HTTP headers as present in the HTTP request.
+	Header http.Header
+}
+
+// HookEvent represents an event from tusd which can be handled by the application.
+type HookEvent struct {
+	// Upload contains information about the upload that caused this hook
+	// to be fired.
+	Upload FileInfo
+	// HTTPRequest contains details about the HTTP request that reached
+	// tusd.
+	HTTPRequest HTTPRequest
+}
+
+func newHookEvent(info FileInfo, r *http.Request) HookEvent {
+	return HookEvent{
+		Upload: info,
+		HTTPRequest: HTTPRequest{
+			Method:     r.Method,
+			URI:        r.RequestURI,
+			RemoteAddr: r.RemoteAddr,
+			Header:     r.Header,
+		},
+	}
+}
+
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
|
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
|
||||||
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
|
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
|
||||||
// is provided which is, however, not part of the specification.
|
// is provided which is, however, not part of the specification.
|
||||||
|
@ -87,33 +120,33 @@ type UnroutedHandler struct {
|
||||||
extensions string
|
extensions string
|
||||||
|
|
||||||
// CompleteUploads is used to send notifications whenever an upload is
|
// CompleteUploads is used to send notifications whenever an upload is
|
||||||
// completed by a user. The FileInfo will contain information about this
|
// completed by a user. The HookEvent will contain information about this
|
||||||
// upload after it is completed. Sending to this channel will only
|
// upload after it is completed. Sending to this channel will only
|
||||||
// happen if the NotifyCompleteUploads field is set to true in the Config
|
// happen if the NotifyCompleteUploads field is set to true in the Config
|
||||||
// structure. Notifications will also be sent for completions using the
|
// structure. Notifications will also be sent for completions using the
|
||||||
// Concatenation extension.
|
// Concatenation extension.
|
||||||
CompleteUploads chan FileInfo
|
CompleteUploads chan HookEvent
|
||||||
// TerminatedUploads is used to send notifications whenever an upload is
|
// TerminatedUploads is used to send notifications whenever an upload is
|
||||||
// terminated by a user. The FileInfo will contain information about this
|
// terminated by a user. The HookEvent will contain information about this
|
||||||
// upload gathered before the termination. Sending to this channel will only
|
// upload gathered before the termination. Sending to this channel will only
|
||||||
// happen if the NotifyTerminatedUploads field is set to true in the Config
|
// happen if the NotifyTerminatedUploads field is set to true in the Config
|
||||||
// structure.
|
// structure.
|
||||||
TerminatedUploads chan FileInfo
|
TerminatedUploads chan HookEvent
|
||||||
// UploadProgress is used to send notifications about the progress of the
|
// UploadProgress is used to send notifications about the progress of the
|
||||||
// currently running uploads. For each open PATCH request, every second
|
// currently running uploads. For each open PATCH request, every second
|
||||||
// a FileInfo instance will be send over this channel with the Offset field
|
// a HookEvent instance will be send over this channel with the Offset field
|
||||||
// being set to the number of bytes which have been transfered to the server.
|
// being set to the number of bytes which have been transfered to the server.
|
||||||
// Please be aware that this number may be higher than the number of bytes
|
// Please be aware that this number may be higher than the number of bytes
|
||||||
// which have been stored by the data store! Sending to this channel will only
|
// which have been stored by the data store! Sending to this channel will only
|
||||||
// happen if the NotifyUploadProgress field is set to true in the Config
|
// happen if the NotifyUploadProgress field is set to true in the Config
|
||||||
// structure.
|
// structure.
|
||||||
UploadProgress chan FileInfo
|
UploadProgress chan HookEvent
|
||||||
// CreatedUploads is used to send notifications about the uploads having been
|
// CreatedUploads is used to send notifications about the uploads having been
|
||||||
// created. It triggers post creation and therefore has all the FileInfo incl.
|
// created. It triggers post creation and therefore has all the HookEvent incl.
|
||||||
// the ID available already. It facilitates the post-create hook. Sending to
|
// the ID available already. It facilitates the post-create hook. Sending to
|
||||||
// this channel will only happen if the NotifyCreatedUploads field is set to
|
// this channel will only happen if the NotifyCreatedUploads field is set to
|
||||||
// true in the Config structure.
|
// true in the Config structure.
|
||||||
CreatedUploads chan FileInfo
|
CreatedUploads chan HookEvent
|
||||||
// Metrics provides numbers of the usage for this handler.
|
// Metrics provides numbers of the usage for this handler.
|
||||||
Metrics Metrics
|
Metrics Metrics
|
||||||
}
|
}
|
||||||
|
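All four notification channels now carry HookEvent values instead of bare FileInfo structs, so consumers gain one level of nesting: the upload's FileInfo moves under the event's Upload field (visible in the sendProgressMessages changes further down). A minimal sketch of a notification loop against the new types; the uploadHandler variable and the log output are illustrative, only the channel and field names come from this diff:

// startNotificationLoop drains the handler's notification channels.
// uploadHandler is assumed to be a *handler.UnroutedHandler created with
// NotifyCompleteUploads and NotifyTerminatedUploads enabled.
func startNotificationLoop(uploadHandler *handler.UnroutedHandler) {
	go func() {
		for {
			select {
			case event := <-uploadHandler.CompleteUploads:
				// event.Upload is the FileInfo that was previously sent directly.
				log.Printf("upload %s finished (%d bytes)", event.Upload.ID, event.Upload.Size)
			case event := <-uploadHandler.TerminatedUploads:
				log.Printf("upload %s terminated", event.Upload.ID)
			}
		}
	}()
}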
@@ -144,10 +177,10 @@ func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
 		composer:          config.StoreComposer,
 		basePath:          config.BasePath,
 		isBasePathAbs:     config.isAbs,
-		CompleteUploads:   make(chan FileInfo),
-		TerminatedUploads: make(chan FileInfo),
-		UploadProgress:    make(chan FileInfo),
-		CreatedUploads:    make(chan FileInfo),
+		CompleteUploads:   make(chan HookEvent),
+		TerminatedUploads: make(chan HookEvent),
+		UploadProgress:    make(chan HookEvent),
+		CreatedUploads:    make(chan HookEvent),
 		logger:            config.Logger,
 		extensions:        extensions,
 		Metrics:           newMetrics(),
@@ -156,6 +189,13 @@ func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
 	return handler, nil
 }
 
+// SupportedExtensions returns a comma-separated list of the supported tus extensions.
+// The availability of an extension usually depends on whether the provided data store
+// implements some additional interfaces.
+func (handler *UnroutedHandler) SupportedExtensions() string {
+	return handler.extensions
+}
+
 // Middleware checks various aspects of the request and ensures that it
 // conforms with the spec. Also handles method overriding for clients which
 // cannot make PATCH AND DELETE requests. If you are using the tusd handlers
@@ -234,6 +274,8 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 // PostFile creates a new file upload using the datastore after validating the
 // length and parsing the metadata.
 func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
 	// Check for presence of application/offset+octet-stream. If another content
 	// type is defined, it will be ignored and treated as none was set because
 	// some HTTP clients may enforce a default value for this header.
@@ -247,7 +289,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 	}
 
 	// Parse Upload-Concat header
-	isPartial, isFinal, partialUploads, err := parseConcat(concatHeader)
+	isPartial, isFinal, partialUploadIDs, err := parseConcat(concatHeader)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -258,6 +300,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 	// Upload-Length header)
 	var size int64
 	var sizeIsDeferred bool
+	var partialUploads []Upload
 	if isFinal {
 		// A final upload must not contain a chunk within the creation request
 		if containsChunk {
@@ -265,7 +308,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 			return
 		}
 
-		size, err = handler.sizeOfUploads(partialUploads)
+		partialUploads, size, err = handler.sizeOfUploads(ctx, partialUploadIDs)
 		if err != nil {
 			handler.sendError(w, r, err)
 			return
@@ -295,16 +338,29 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 		MetaData:       meta,
 		IsPartial:      isPartial,
 		IsFinal:        isFinal,
-		PartialUploads: partialUploads,
+		PartialUploads: partialUploadIDs,
 	}
 
-	id, err := handler.composer.Core.NewUpload(info)
+	if handler.config.PreUploadCreateCallback != nil {
+		if err := handler.config.PreUploadCreateCallback(newHookEvent(info, r)); err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
+	}
+
+	upload, err := handler.composer.Core.NewUpload(ctx, info)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
 	}
 
-	info.ID = id
+	info, err = upload.GetInfo(ctx)
+	if err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	id := info.ID
 
 	// Add the Location header directly after creating the new resource to even
 	// include it in cases of failure when an error is returned
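The new PreUploadCreateCallback gives the application a veto right before NewUpload is invoked, which is why it runs on the not-yet-persisted FileInfo. A hedged configuration sketch; the composer variable, the metadata key, and the use of NewHTTPError are illustrative, only the Config field and the HookEvent signature come from this diff:

config := handler.Config{
	BasePath:      "/files/",
	StoreComposer: composer,
	PreUploadCreateCallback: func(hook handler.HookEvent) error {
		// Returning a non-nil error aborts the POST request before NewUpload runs.
		if _, ok := hook.Upload.MetaData["filename"]; !ok {
			return handler.NewHTTPError(errors.New("filename metadata is required"), http.StatusBadRequest)
		}
		return nil
	},
}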
@@ -315,33 +371,34 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 	handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)
 
 	if handler.config.NotifyCreatedUploads {
-		handler.CreatedUploads <- info
+		handler.CreatedUploads <- newHookEvent(info, r)
 	}
 
 	if isFinal {
-		if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
+		concatableUpload := handler.composer.Concater.AsConcatableUpload(upload)
+		if err := concatableUpload.ConcatUploads(ctx, partialUploads); err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
 		info.Offset = size
 
 		if handler.config.NotifyCompleteUploads {
-			handler.CompleteUploads <- info
+			handler.CompleteUploads <- newHookEvent(info, r)
 		}
 	}
 
 	if containsChunk {
 		if handler.composer.UsesLocker {
-			locker := handler.composer.Locker
-			if err := locker.LockUpload(id); err != nil {
+			lock, err := handler.lockUpload(id)
+			if err != nil {
 				handler.sendError(w, r, err)
 				return
 			}
 
-			defer locker.UnlockUpload(id)
+			defer lock.Unlock()
 		}
 
-		if err := handler.writeChunk(id, info, w, r); err != nil {
+		if err := handler.writeChunk(upload, info, w, r); err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
@@ -349,7 +406,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 		// Directly finish the upload if the upload is empty (i.e. has a size of 0).
 		// This statement is in an else-if block to avoid causing duplicate calls
 		// to finishUploadIfComplete if an upload is empty and contains a chunk.
-		handler.finishUploadIfComplete(info)
+		handler.finishUploadIfComplete(ctx, upload, info, r)
 	}
 
 	handler.sendResp(w, r, http.StatusCreated)
@@ -357,6 +414,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 
 // HeadFile returns the length and offset for the HEAD request
 func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 
 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {
@@ -365,16 +423,22 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 	}
 
 	if handler.composer.UsesLocker {
-		locker := handler.composer.Locker
-		if err := locker.LockUpload(id); err != nil {
+		lock, err := handler.lockUpload(id)
+		if err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
 
-		defer locker.UnlockUpload(id)
+		defer lock.Unlock()
 	}
 
-	info, err := handler.composer.Core.GetInfo(id)
+	upload, err := handler.composer.Core.GetUpload(ctx, id)
+	if err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	info, err := upload.GetInfo(ctx)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -414,6 +478,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 // PatchFile adds a chunk to an upload. This operation is only allowed
 // if enough space in the upload is left.
 func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 
 	// Check for presence of application/offset+octet-stream
 	if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
@@ -435,16 +500,22 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
 	}
 
 	if handler.composer.UsesLocker {
-		locker := handler.composer.Locker
-		if err := locker.LockUpload(id); err != nil {
+		lock, err := handler.lockUpload(id)
+		if err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
 
-		defer locker.UnlockUpload(id)
+		defer lock.Unlock()
 	}
 
-	info, err := handler.composer.Core.GetInfo(id)
+	upload, err := handler.composer.Core.GetUpload(ctx, id)
+	if err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	info, err := upload.GetInfo(ctx)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -482,7 +553,9 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
 			handler.sendError(w, r, ErrInvalidUploadLength)
 			return
 		}
-		if err := handler.composer.LengthDeferrer.DeclareLength(id, uploadLength); err != nil {
+
+		lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
+		if err := lengthDeclarableUpload.DeclareLength(ctx, uploadLength); err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
@@ -491,7 +564,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
 		info.SizeIsDeferred = false
 	}
 
-	if err := handler.writeChunk(id, info, w, r); err != nil {
+	if err := handler.writeChunk(upload, info, w, r); err != nil {
 		handler.sendError(w, r, err)
 		return
 	}
@@ -502,10 +575,13 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
 // writeChunk reads the body from the requests r and appends it to the upload
 // with the corresponding id. Afterwards, it will set the necessary response
 // headers but will not send the response.
-func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.ResponseWriter, r *http.Request) error {
+func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.ResponseWriter, r *http.Request) error {
+	ctx := r.Context()
+
 	// Get Content-Length if possible
 	length := r.ContentLength
 	offset := info.Offset
+	id := info.ID
 
 	// Test if this upload fits into the file's size
 	if !info.SizeIsDeferred && offset+length > info.Size {
@@ -557,14 +633,14 @@ func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.Resp
 
 	if handler.config.NotifyUploadProgress {
 		var stopProgressEvents chan<- struct{}
-		reader, stopProgressEvents = handler.sendProgressMessages(info, reader)
+		reader, stopProgressEvents = handler.sendProgressMessages(newHookEvent(info, r), reader)
 		defer close(stopProgressEvents)
 	}
 
 	var err error
-	bytesWritten, err = handler.composer.Core.WriteChunk(id, offset, reader)
+	bytesWritten, err = upload.WriteChunk(ctx, offset, reader)
 	if terminateUpload && handler.composer.UsesTerminater {
-		if terminateErr := handler.terminateUpload(id, info); terminateErr != nil {
+		if terminateErr := handler.terminateUpload(ctx, upload, info, r); terminateErr != nil {
 			// We only log this error and not show it to the user since this
 			// termination error is not relevant to the uploading client
 			handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
@@ -591,25 +667,23 @@ func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.Resp
 	handler.Metrics.incBytesReceived(uint64(bytesWritten))
 	info.Offset = newOffset
 
-	return handler.finishUploadIfComplete(info)
+	return handler.finishUploadIfComplete(ctx, upload, info, r)
 }
 
 // finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
 // matches upload size) and if so, it will call the data store's FinishUpload
 // function and send the necessary message on the CompleteUpload channel.
-func (handler *UnroutedHandler) finishUploadIfComplete(info FileInfo) error {
+func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
 	// If the upload is completed, ...
 	if !info.SizeIsDeferred && info.Offset == info.Size {
 		// ... allow custom mechanism to finish and cleanup the upload
-		if handler.composer.UsesFinisher {
-			if err := handler.composer.Finisher.FinishUpload(info.ID); err != nil {
-				return err
-			}
+		if err := upload.FinishUpload(ctx); err != nil {
+			return err
 		}
 
 		// ... send the info out to the channel
 		if handler.config.NotifyCompleteUploads {
-			handler.CompleteUploads <- info
+			handler.CompleteUploads <- newHookEvent(info, r)
 		}
 
 		handler.Metrics.incUploadsFinished()
@@ -621,10 +695,7 @@ func (handler *UnroutedHandler) finishUploadIfComplete(info FileInfo) error {
 // GetFile handles requests to download a file using a GET request. This is not
 // part of the specification.
 func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
-	if !handler.composer.UsesGetReader {
-		handler.sendError(w, r, ErrNotImplemented)
-		return
-	}
+	ctx := r.Context()
 
 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {
@@ -633,16 +704,22 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
 	}
 
 	if handler.composer.UsesLocker {
-		locker := handler.composer.Locker
-		if err := locker.LockUpload(id); err != nil {
+		lock, err := handler.lockUpload(id)
+		if err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
 
-		defer locker.UnlockUpload(id)
+		defer lock.Unlock()
 	}
 
-	info, err := handler.composer.Core.GetInfo(id)
+	upload, err := handler.composer.Core.GetUpload(ctx, id)
+	if err != nil {
+		handler.sendError(w, r, err)
+		return
+	}
+
+	info, err := upload.GetInfo(ctx)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -661,7 +738,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	src, err := handler.composer.GetReader.GetReader(id)
+	src, err := upload.GetReader(ctx)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -739,6 +816,8 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st
 
 // DelFile terminates an upload permanently.
 func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
 	// Abort the request handling if the required interface is not implemented
 	if !handler.composer.UsesTerminater {
 		handler.sendError(w, r, ErrNotImplemented)
@@ -752,25 +831,31 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
 	}
 
 	if handler.composer.UsesLocker {
-		locker := handler.composer.Locker
-		if err := locker.LockUpload(id); err != nil {
+		lock, err := handler.lockUpload(id)
+		if err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
 
-		defer locker.UnlockUpload(id)
+		defer lock.Unlock()
+	}
+
+	upload, err := handler.composer.Core.GetUpload(ctx, id)
+	if err != nil {
+		handler.sendError(w, r, err)
+		return
 	}
 
 	var info FileInfo
 	if handler.config.NotifyTerminatedUploads {
-		info, err = handler.composer.Core.GetInfo(id)
+		info, err = upload.GetInfo(ctx)
 		if err != nil {
 			handler.sendError(w, r, err)
 			return
 		}
 	}
 
-	err = handler.terminateUpload(id, info)
+	err = handler.terminateUpload(ctx, upload, info, r)
 	if err != nil {
 		handler.sendError(w, r, err)
 		return
@@ -784,14 +869,16 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
 // and updates the statistics.
 // Note the the info argument is only needed if the terminated uploads
 // notifications are enabled.
-func (handler *UnroutedHandler) terminateUpload(id string, info FileInfo) error {
-	err := handler.composer.Terminater.Terminate(id)
+func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
+	terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
+
+	err := terminatableUpload.Terminate(ctx)
 	if err != nil {
 		return err
 	}
 
 	if handler.config.NotifyTerminatedUploads {
-		handler.TerminatedUploads <- info
+		handler.TerminatedUploads <- newHookEvent(info, r)
 	}
 
 	handler.Metrics.incUploadsTerminated()
@@ -877,10 +964,10 @@ func (w *progressWriter) Write(b []byte) (int, error) {
 // every second, indicating how much data has been transfered to the server.
 // It will stop sending these instances once the returned channel has been
 // closed. The returned reader should be used to read the request body.
-func (handler *UnroutedHandler) sendProgressMessages(info FileInfo, reader io.Reader) (io.Reader, chan<- struct{}) {
+func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader io.Reader) (io.Reader, chan<- struct{}) {
 	previousOffset := int64(0)
 	progress := &progressWriter{
-		Offset: info.Offset,
+		Offset: hook.Upload.Offset,
 	}
 	stop := make(chan struct{}, 1)
 	reader = io.TeeReader(reader, progress)
@@ -889,17 +976,17 @@ func (handler *UnroutedHandler) sendProgressMessages(info FileInfo, reader io.Re
 		for {
 			select {
 			case <-stop:
-				info.Offset = atomic.LoadInt64(&progress.Offset)
-				if info.Offset != previousOffset {
-					handler.UploadProgress <- info
-					previousOffset = info.Offset
+				hook.Upload.Offset = atomic.LoadInt64(&progress.Offset)
+				if hook.Upload.Offset != previousOffset {
+					handler.UploadProgress <- hook
+					previousOffset = hook.Upload.Offset
 				}
 				return
 			case <-time.After(1 * time.Second):
-				info.Offset = atomic.LoadInt64(&progress.Offset)
-				if info.Offset != previousOffset {
-					handler.UploadProgress <- info
-					previousOffset = info.Offset
+				hook.Upload.Offset = atomic.LoadInt64(&progress.Offset)
+				if hook.Upload.Offset != previousOffset {
+					handler.UploadProgress <- hook
+					previousOffset = hook.Upload.Offset
 				}
 			}
 		}
@@ -949,19 +1036,27 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto strin
 // The get sum of all sizes for a list of upload ids while checking whether
 // all of these uploads are finished yet. This is used to calculate the size
 // of a final resource.
-func (handler *UnroutedHandler) sizeOfUploads(ids []string) (size int64, err error) {
-	for _, id := range ids {
-		info, err := handler.composer.Core.GetInfo(id)
+func (handler *UnroutedHandler) sizeOfUploads(ctx context.Context, ids []string) (partialUploads []Upload, size int64, err error) {
+	partialUploads = make([]Upload, len(ids))
+
+	for i, id := range ids {
+		upload, err := handler.composer.Core.GetUpload(ctx, id)
 		if err != nil {
-			return size, err
+			return nil, 0, err
+		}
+
+		info, err := upload.GetInfo(ctx)
+		if err != nil {
+			return nil, 0, err
 		}
 
 		if info.SizeIsDeferred || info.Offset != info.Size {
 			err = ErrUploadNotFinished
-			return size, err
+			return nil, 0, err
		}
 
 		size += info.Size
+		partialUploads[i] = upload
 	}
 
 	return
@@ -992,6 +1087,21 @@ func (handler *UnroutedHandler) validateNewUploadLengthHeaders(uploadLengthHeade
 	return
 }
 
+// lockUpload creates a new lock for the given upload ID and attempts to lock it.
+// The created lock is returned if it was aquired successfully.
+func (handler *UnroutedHandler) lockUpload(id string) (Lock, error) {
+	lock, err := handler.composer.Locker.NewLock(id)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := lock.Lock(); err != nil {
+		return nil, err
+	}
+
+	return lock, nil
+}
+
 // ParseMetadataHeader parses the Upload-Metadata header as defined in the
 // File Creation extension.
 // e.g. Upload-Metadata: name bHVucmpzLnBuZw==,type aW1hZ2UvcG5n
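The lockUpload helper added above codifies the new two-step locking protocol: a Locker manufactures a Lock for an upload ID, and acquisition happens in a separate Lock() call. Any custom locker only needs to satisfy these two small interfaces; a minimal no-op sketch, with the interface shapes taken from the NewLock, Lock and Unlock calls in this diff:

type noopLocker struct{}

func (noopLocker) NewLock(id string) (handler.Lock, error) {
	return noopLock{}, nil
}

type noopLock struct{}

// A real implementation would return handler.ErrFileLocked when the
// upload is already in use; this sketch always succeeds.
func (noopLock) Lock() error   { return nil }
func (noopLock) Unlock() error { return nil }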
@@ -1,4 +1,4 @@
-package tusd_test
+package handler_test
 
 import (
 	"fmt"
@@ -10,10 +10,10 @@ import (
 	"testing"
 
 	"github.com/golang/mock/gomock"
-	"github.com/tus/tusd"
+	"github.com/tus/tusd/pkg/handler"
 )
 
-//go:generate mockgen -package tusd_test -source utils_test.go -aux_files tusd=datastore.go -destination=handler_mock_test.go
+//go:generate mockgen -package handler_test -source utils_test.go -aux_files handler=datastore.go -destination=handler_mock_test.go
 
 // FullDataStore is an interface combining most interfaces for data stores.
 // This is used by mockgen(1) to generate a mocked data store used for testing
@@ -22,16 +22,25 @@ import (
 // locking in every single test which would result in more verbose code.
 // Therefore it has been moved into its own type definition, the Locker.
 type FullDataStore interface {
-	tusd.DataStore
-	tusd.TerminaterDataStore
-	tusd.ConcaterDataStore
-	tusd.GetReaderDataStore
-	tusd.FinisherDataStore
-	tusd.LengthDeferrerDataStore
+	handler.DataStore
+	handler.TerminaterDataStore
+	handler.ConcaterDataStore
+	handler.LengthDeferrerDataStore
 }
 
-type Locker interface {
-	tusd.LockerDataStore
+type FullUpload interface {
+	handler.Upload
+	handler.TerminatableUpload
+	handler.LengthDeclarableUpload
+	handler.ConcatableUpload
+}
+
+type FullLocker interface {
+	handler.Locker
+}
+
+type FullLock interface {
+	handler.Lock
 }
 
 type httpTest struct {
@@ -50,6 +59,7 @@ type httpTest struct {
 
 func (test *httpTest) Run(handler http.Handler, t *testing.T) *httptest.ResponseRecorder {
 	req, _ := http.NewRequest(test.Method, test.URL, test.ReqBody)
+	req.RequestURI = test.URL
 
 	// Add headers
 	for key, value := range test.ReqHeader {
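With the store split into FullDataStore and FullUpload, tests now mock two objects instead of one, and expectations are chained from GetUpload to the upload's own methods. A hedged sketch of the resulting pattern; the NewMockFullDataStore and NewMockFullUpload constructors follow mockgen's standard naming and the values are arbitrary:

ctrl := gomock.NewController(t)
defer ctrl.Finish()

store := NewMockFullDataStore(ctrl)
upload := NewMockFullUpload(ctrl)

// The handler first resolves the upload object, then asks it for its info.
store.EXPECT().GetUpload(gomock.Any(), "yes").Return(upload, nil)
upload.EXPECT().GetInfo(gomock.Any()).Return(handler.FileInfo{
	ID:     "yes",
	Size:   11,
	Offset: 0,
}, nil)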
@@ -13,7 +13,7 @@ package memorylocker
 import (
 	"sync"
 
-	"github.com/tus/tusd"
+	"github.com/tus/tusd/pkg/handler"
 )
 
 // MemoryLocker persists locks using memory and therefore allowing a simple and
@@ -24,13 +24,6 @@ type MemoryLocker struct {
 	mutex sync.Mutex
 }
 
-// NewMemoryLocker creates a new in-memory locker. The DataStore parameter
-// is only presented for back-wards compatibility and is ignored. Please
-// use the New() function instead.
-func NewMemoryLocker(_ tusd.DataStore) *MemoryLocker {
-	return New()
-}
-
 // New creates a new in-memory locker.
 func New() *MemoryLocker {
 	return &MemoryLocker{
@@ -39,33 +32,42 @@ func New() *MemoryLocker {
 }
 
 // UseIn adds this locker to the passed composer.
-func (locker *MemoryLocker) UseIn(composer *tusd.StoreComposer) {
+func (locker *MemoryLocker) UseIn(composer *handler.StoreComposer) {
 	composer.UseLocker(locker)
 }
 
+func (locker *MemoryLocker) NewLock(id string) (handler.Lock, error) {
+	return memoryLock{locker, id}, nil
+}
+
+type memoryLock struct {
+	locker *MemoryLocker
+	id     string
+}
+
 // LockUpload tries to obtain the exclusive lock.
-func (locker *MemoryLocker) LockUpload(id string) error {
-	locker.mutex.Lock()
-	defer locker.mutex.Unlock()
+func (lock memoryLock) Lock() error {
+	lock.locker.mutex.Lock()
+	defer lock.locker.mutex.Unlock()
 
 	// Ensure file is not locked
-	if _, ok := locker.locks[id]; ok {
-		return tusd.ErrFileLocked
+	if _, ok := lock.locker.locks[lock.id]; ok {
+		return handler.ErrFileLocked
 	}
 
-	locker.locks[id] = struct{}{}
+	lock.locker.locks[lock.id] = struct{}{}
 
 	return nil
 }
 
 // UnlockUpload releases a lock. If no such lock exists, no error will be returned.
-func (locker *MemoryLocker) UnlockUpload(id string) error {
-	locker.mutex.Lock()
+func (lock memoryLock) Unlock() error {
	lock.locker.mutex.Lock()
 
 	// Deleting a non-existing key does not end in unexpected errors or panic
 	// since this operation results in a no-op
-	delete(locker.locks, id)
+	delete(lock.locker.locks, lock.id)
 
-	locker.mutex.Unlock()
+	lock.locker.mutex.Unlock()
 	return nil
 }
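Registered through UseIn, the locker slots into a store composer next to any data store; the test file that follows exercises the lock semantics themselves. A wiring sketch, where someDataStore is a placeholder and the NewStoreComposer constructor is assumed from the handler package:

composer := handler.NewStoreComposer()
someDataStore.UseIn(composer)       // any data store, e.g. the S3 store further down
memorylocker.New().UseIn(composer)

uploadHandler, err := handler.NewHandler(handler.Config{
	BasePath:      "/files/",
	StoreComposer: composer,
})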
@@ -0,0 +1,30 @@
+package memorylocker
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/tus/tusd/pkg/handler"
+)
+
+var _ handler.Locker = &MemoryLocker{}
+
+func TestMemoryLocker(t *testing.T) {
+	a := assert.New(t)
+
+	locker := New()
+
+	lock1, err := locker.NewLock("one")
+	a.NoError(err)
+
+	a.NoError(lock1.Lock())
+	a.Equal(handler.ErrFileLocked, lock1.Lock())
+
+	lock2, err := locker.NewLock("one")
+	a.NoError(err)
+	a.Equal(handler.ErrFileLocked, lock2.Lock())
+
+	a.NoError(lock1.Unlock())
+	a.NoError(lock1.Unlock())
+}
@@ -3,7 +3,7 @@
 // Using the provided collector, you can easily expose metrics for tusd in the
 // Prometheus exposition format (https://prometheus.io/docs/instrumenting/exposition_formats/):
 //
-// handler, err := tusd.NewHandler(…)
+// handler, err := handler.NewHandler(…)
 // collector := prometheuscollector.New(handler.Metrics)
 // prometheus.MustRegister(collector)
 package prometheuscollector
@@ -12,7 +12,7 @@ import (
 	"strconv"
 	"sync/atomic"
 
-	"github.com/tus/tusd"
+	"github.com/tus/tusd/pkg/handler"
 
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -45,11 +45,11 @@ var (
 )
 
 type Collector struct {
-	metrics tusd.Metrics
+	metrics handler.Metrics
 }
 
 // New creates a new collector which read froms the provided Metrics struct.
-func New(metrics tusd.Metrics) Collector {
+func New(metrics handler.Metrics) Collector {
 	return Collector{
 		metrics: metrics,
 	}
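Updated for the new import path, registering the collector stays the two-liner from the doc comment; serving the metrics is the usual promhttp step. A sketch, assuming the collector package follows the handler into a pkg/ directory (promhttp is the standard client_golang helper):

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/prometheuscollector"
)

func exposeMetrics(metrics handler.Metrics) {
	prometheus.MustRegister(prometheuscollector.New(metrics))
	http.Handle("/metrics", promhttp.Handler())
}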
@@ -65,11 +65,12 @@
 // consistency (https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel).
 // Therefore, it is required to build additional measurements in order to
 // prevent concurrent access to the same upload resources which may result in
-// data corruption. See tusd.LockerDataStore for more information.
+// data corruption. See handler.LockerDataStore for more information.
 package s3store
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -80,11 +81,12 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/tus/tusd"
-	"github.com/tus/tusd/uid"
+	"github.com/tus/tusd/internal/uid"
+	"github.com/tus/tusd/pkg/handler"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/s3"
 )
 
@@ -94,7 +96,7 @@ import (
 // for HTTP headers.
 var nonASCIIRegexp = regexp.MustCompile(`([^\x00-\x7F]|[\r\n])`)
 
-// See the tusd.DataStore interface for documentation about the different
+// See the handler.DataStore interface for documentation about the different
 // methods.
 type S3Store struct {
 	// Bucket used to store the data in, e.g. "tusdstore.example.com"
@@ -133,16 +135,16 @@ type S3Store struct {
 }
 
 type S3API interface {
-	PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
-	ListParts(input *s3.ListPartsInput) (*s3.ListPartsOutput, error)
-	UploadPart(input *s3.UploadPartInput) (*s3.UploadPartOutput, error)
-	GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
-	CreateMultipartUpload(input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
-	AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
-	DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
-	DeleteObjects(input *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
-	CompleteMultipartUpload(input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
-	UploadPartCopy(input *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
+	PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error)
+	ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opt ...request.Option) (*s3.ListPartsOutput, error)
+	UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opt ...request.Option) (*s3.UploadPartOutput, error)
+	GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error)
+	CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...request.Option) (*s3.CreateMultipartUploadOutput, error)
+	AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...request.Option) (*s3.AbortMultipartUploadOutput, error)
+	DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectOutput, error)
+	DeleteObjectsWithContext(ctx context.Context, input *s3.DeleteObjectsInput, opt ...request.Option) (*s3.DeleteObjectsOutput, error)
+	CompleteMultipartUploadWithContext(ctx context.Context, input *s3.CompleteMultipartUploadInput, opt ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
+	UploadPartCopyWithContext(ctx context.Context, input *s3.UploadPartCopyInput, opt ...request.Option) (*s3.UploadPartCopyOutput, error)
 }
 
 // New constructs a new storage using the supplied bucket and service object.
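Because every S3 call now goes through the SDK's *WithContext variants, cancelling the request context also aborts in-flight S3 traffic, which is the point of threading ctx through the whole store. Construction itself is unchanged; a hedged setup sketch in which the region, bucket name, and session boilerplate are placeholders, and the pkg/s3store import path is assumed from the restructuring:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/s3store"
)

func newComposer() *handler.StoreComposer {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	}))

	// The concrete s3.S3 client satisfies the S3API interface above.
	store := s3store.New("example-bucket", s3.New(sess))

	composer := handler.NewStoreComposer()
	store.UseIn(composer)
	return composer
}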
@@ -159,19 +161,27 @@ func New(bucket string, service S3API) S3Store {
 
 // UseIn sets this store as the core data store in the passed composer and adds
 // all possible extension to it.
-func (store S3Store) UseIn(composer *tusd.StoreComposer) {
+func (store S3Store) UseIn(composer *handler.StoreComposer) {
 	composer.UseCore(store)
 	composer.UseTerminater(store)
-	composer.UseFinisher(store)
-	composer.UseGetReader(store)
 	composer.UseConcater(store)
 	composer.UseLengthDeferrer(store)
 }
 
-func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
+type s3Upload struct {
+	id    string
+	store *S3Store
+
+	// info stores the upload's current FileInfo struct. It may be nil if it hasn't
+	// been fetched yet from S3. Never read or write to it directly but instead use
+	// the GetInfo and writeInfo functions.
+	info *handler.FileInfo
+}
+
+func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
 	// an upload larger than MaxObjectSize must throw an error
 	if info.Size > store.MaxObjectSize {
-		return "", fmt.Errorf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize)
+		return nil, fmt.Errorf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize)
 	}
 
 	var uploadId string
@@ -192,34 +202,64 @@ func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
 	}
 
 	// Create the actual multipart upload
-	res, err := store.Service.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+	res, err := store.Service.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
 		Bucket:   aws.String(store.Bucket),
 		Key:      store.keyWithPrefix(uploadId),
 		Metadata: metadata,
 	})
 	if err != nil {
-		return "", fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
+		return nil, fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
 	}
 
-	id = uploadId + "+" + *res.UploadId
+	id := uploadId + "+" + *res.UploadId
 	info.ID = id
 
-	err = store.writeInfo(uploadId, info)
-	if err != nil {
-		return "", fmt.Errorf("s3store: unable to create info file:\n%s", err)
+	info.Storage = map[string]string{
+		"Type":   "s3store",
+		"Bucket": store.Bucket,
+		"Key":    *store.keyWithPrefix(uploadId),
 	}
 
-	return id, nil
+	upload := &s3Upload{id, &store, nil}
+	err = upload.writeInfo(ctx, info)
+	if err != nil {
+		return nil, fmt.Errorf("s3store: unable to create info file:\n%s", err)
+	}
+
+	return upload, nil
 }
 
-func (store S3Store) writeInfo(uploadId string, info tusd.FileInfo) error {
+func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
+	return &s3Upload{id, &store, nil}, nil
+}
+
+func (store S3Store) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
+	return upload.(*s3Upload)
+}
+
+func (store S3Store) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
+	return upload.(*s3Upload)
+}
+
+func (store S3Store) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
+	return upload.(*s3Upload)
+}
+
+func (upload *s3Upload) writeInfo(ctx context.Context, info handler.FileInfo) error {
+	id := upload.id
+	store := upload.store
+
+	uploadId, _ := splitIds(id)
+
+	upload.info = &info
+
 	infoJson, err := json.Marshal(info)
 	if err != nil {
 		return err
 	}
 
 	// Create object on S3 containing information about the file
-	_, err = store.Service.PutObject(&s3.PutObjectInput{
+	_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
 		Bucket: aws.String(store.Bucket),
 		Key:    store.keyWithPrefix(uploadId + ".info"),
 		Body:   bytes.NewReader(infoJson),
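This is the heart of the refactor: extension capabilities move from the store onto per-upload objects, and the As*Upload conversions can be declared infallible because the composer only exposes them when the store opted in through UseIn. A sketch of the calling pattern the handler relies on, mirroring the code in this diff (the wrapper function itself is illustrative):

func terminateIfSupported(ctx context.Context, composer *handler.StoreComposer, id string) error {
	upload, err := composer.Core.GetUpload(ctx, id)
	if err != nil {
		return err
	}

	if !composer.UsesTerminater {
		return handler.ErrNotImplemented
	}

	// UsesTerminater guarantees the store implements TerminaterDataStore,
	// so this conversion (a plain type assertion in s3store) cannot fail.
	return composer.Terminater.AsTerminatableUpload(upload).Terminate(ctx)
}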
@@ -229,11 +269,14 @@ func (store S3Store) writeInfo(uploadId string, info tusd.FileInfo) error {
 	return err
 }
 
-func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
+func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
+	id := upload.id
+	store := upload.store
+
 	uploadId, multipartId := splitIds(id)
 
 	// Get the total size of the current upload
-	info, err := store.GetInfo(id)
+	info, err := upload.GetInfo(ctx)
 	if err != nil {
 		return 0, err
 	}
@@ -246,7 +289,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 	}
 
 	// Get number of parts to generate next number
-	parts, err := store.listAllParts(id)
+	parts, err := store.listAllParts(ctx, id)
 	if err != nil {
 		return 0, err
 	}
@@ -254,7 +297,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 	numParts := len(parts)
 	nextPartNum := int64(numParts + 1)
 
-	incompletePartFile, incompletePartSize, err := store.downloadIncompletePartForUpload(uploadId)
+	incompletePartFile, incompletePartSize, err := store.downloadIncompletePartForUpload(ctx, uploadId)
 	if err != nil {
 		return 0, err
 	}
@@ -262,7 +305,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 		defer os.Remove(incompletePartFile.Name())
 		defer incompletePartFile.Close()
 
-		if err := store.deleteIncompletePartForUpload(uploadId); err != nil {
+		if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
 			return 0, err
 		}
 
@@ -304,7 +347,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 
 		isFinalChunk := !info.SizeIsDeferred && (size == (offset-incompletePartSize)+n)
 		if n >= store.MinPartSize || isFinalChunk {
-			_, err = store.Service.UploadPart(&s3.UploadPartInput{
+			_, err = store.Service.UploadPartWithContext(ctx, &s3.UploadPartInput{
 				Bucket:   aws.String(store.Bucket),
 				Key:      store.keyWithPrefix(uploadId),
 				UploadId: aws.String(multipartId),
@@ -315,7 +358,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 				return bytesUploaded, err
 			}
 		} else {
-			if err := store.putIncompletePartForUpload(uploadId, file); err != nil {
+			if err := store.putIncompletePartForUpload(ctx, uploadId, file); err != nil {
 				return bytesUploaded, err
 			}
 
@@ -330,17 +373,33 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 	}
 }
 
-func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
+func (upload *s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
+	if upload.info != nil {
+		return *upload.info, nil
+	}
+
+	info, err = upload.fetchInfo(ctx)
+	if err != nil {
+		return info, err
+	}
+
+	upload.info = &info
+	return info, nil
+}
+
+func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, err error) {
+	id := upload.id
+	store := upload.store
 	uploadId, _ := splitIds(id)
 
 	// Get file info stored in separate object
-	res, err := store.Service.GetObject(&s3.GetObjectInput{
+	res, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
 		Bucket: aws.String(store.Bucket),
 		Key:    store.keyWithPrefix(uploadId + ".info"),
 	})
 	if err != nil {
 		if isAwsError(err, "NoSuchKey") {
-			return info, tusd.ErrNotFound
+			return info, handler.ErrNotFound
 		}
 
 		return info, err
@@ -351,7 +410,7 @@ func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
 	}
 
 	// Get uploaded parts and their offset
-	parts, err := store.listAllParts(id)
+	parts, err := store.listAllParts(ctx, id)
 	if err != nil {
 		// Check if the error is caused by the upload not being found. This happens
 		// when the multipart upload has already been completed or aborted. Since
@@ -371,7 +430,7 @@ func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
 		offset += *part.Size
 	}
 
-	incompletePartObject, err := store.getIncompletePartForUpload(uploadId)
+	incompletePartObject, err := store.getIncompletePartForUpload(ctx, uploadId)
 	if err != nil {
 		return info, err
 	}
@@ -385,11 +444,13 @@ func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
 	return
 }
 
-func (store S3Store) GetReader(id string) (io.Reader, error) {
+func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
+	id := upload.id
+	store := upload.store
 	uploadId, multipartId := splitIds(id)
 
 	// Attempt to get upload content
-	res, err := store.Service.GetObject(&s3.GetObjectInput{
+	res, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
 		Bucket: aws.String(store.Bucket),
 		Key:    store.keyWithPrefix(uploadId),
 	})
@@ -407,7 +468,7 @@ func (store S3Store) GetReader(id string) (io.Reader, error) {
 
 	// Test whether the multipart upload exists to find out if the upload
 	// never existsted or just has not been finished yet
-	_, err = store.Service.ListParts(&s3.ListPartsInput{
+	_, err = store.Service.ListPartsWithContext(ctx, &s3.ListPartsInput{
 		Bucket:   aws.String(store.Bucket),
 		Key:      store.keyWithPrefix(uploadId),
 		UploadId: aws.String(multipartId),
@@ -420,13 +481,15 @@ func (store S3Store) GetReader(id string) (io.Reader, error) {
 
 	if isAwsError(err, "NoSuchUpload") {
 		// Neither the object nor the multipart upload exists, so we return a 404
-		return nil, tusd.ErrNotFound
+		return nil, handler.ErrNotFound
 	}
 
 	return nil, err
 }
 
-func (store S3Store) Terminate(id string) error {
+func (upload s3Upload) Terminate(ctx context.Context) error {
+	id := upload.id
+	store := upload.store
 	uploadId, multipartId := splitIds(id)
 	var wg sync.WaitGroup
 	wg.Add(2)
@@ -436,7 +499,7 @@ func (store S3Store) Terminate(id string) error {
 		defer wg.Done()
 
 		// Abort the multipart upload
-		_, err := store.Service.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+		_, err := store.Service.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
 			Bucket:   aws.String(store.Bucket),
 			Key:      store.keyWithPrefix(uploadId),
 			UploadId: aws.String(multipartId),
@@ -450,7 +513,7 @@ func (store S3Store) Terminate(id string) error {
 		defer wg.Done()
 
 		// Delete the info and content files
-		res, err := store.Service.DeleteObjects(&s3.DeleteObjectsInput{
+		res, err := store.Service.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
 			Bucket: aws.String(store.Bucket),
 			Delete: &s3.Delete{
 				Objects: []*s3.ObjectIdentifier{
@@ -489,11 +552,13 @@ func (store S3Store) Terminate(id string) error {
 	return nil
 }
 
-func (store S3Store) FinishUpload(id string) error {
+func (upload s3Upload) FinishUpload(ctx context.Context) error {
+	id := upload.id
+	store := upload.store
 	uploadId, multipartId := splitIds(id)
 
 	// Get uploaded parts
-	parts, err := store.listAllParts(id)
+	parts, err := store.listAllParts(ctx, id)
 	if err != nil {
 		return err
 	}
@@ -509,7 +574,7 @@ func (store S3Store) FinishUpload(id string) error {
 		}
 	}
 
-	_, err = store.Service.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+	_, err = store.Service.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
 		Bucket:   aws.String(store.Bucket),
 		Key:      store.keyWithPrefix(uploadId),
 		UploadId: aws.String(multipartId),
@@ -521,8 +586,10 @@ func (store S3Store) FinishUpload(id string) error {
 	return err
 }
 
-func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
-	uploadId, multipartId := splitIds(dest)
+func (upload *s3Upload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
+	id := upload.id
+	store := upload.store
+	uploadId, multipartId := splitIds(id)
 
 	numPartialUploads := len(partialUploads)
 	errs := make([]error, 0, numPartialUploads)
@@ -530,20 +597,21 @@ func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
 	// Copy partial uploads concurrently
 	var wg sync.WaitGroup
 	wg.Add(numPartialUploads)
-	for i, partialId := range partialUploads {
|
for i, partialUpload := range partialUploads {
|
||||||
|
partialS3Upload := partialUpload.(*s3Upload)
|
||||||
|
partialId, _ := splitIds(partialS3Upload.id)
|
||||||
|
|
||||||
go func(i int, partialId string) {
|
go func(i int, partialId string) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
partialUploadId, _ := splitIds(partialId)
|
_, err := store.Service.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
|
||||||
|
|
||||||
_, err := store.Service.UploadPartCopy(&s3.UploadPartCopyInput{
|
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId),
|
Key: store.keyWithPrefix(uploadId),
|
||||||
UploadId: aws.String(multipartId),
|
UploadId: aws.String(multipartId),
|
||||||
// Part numbers must be in the range of 1 to 10000, inclusive. Since
|
// Part numbers must be in the range of 1 to 10000, inclusive. Since
|
||||||
// slice indexes start at 0, we add 1 to ensure that i >= 1.
|
// slice indexes start at 0, we add 1 to ensure that i >= 1.
|
||||||
PartNumber: aws.Int64(int64(i + 1)),
|
PartNumber: aws.Int64(int64(i + 1)),
|
||||||
CopySource: aws.String(store.Bucket + "/" + partialUploadId),
|
CopySource: aws.String(store.Bucket + "/" + partialId),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errs = append(errs, err)
|
errs = append(errs, err)
|
||||||
|
@ -558,28 +626,27 @@ func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
|
||||||
return newMultiError(errs)
|
return newMultiError(errs)
|
||||||
}
|
}
|
||||||
|
|
||||||
return store.FinishUpload(dest)
|
return upload.FinishUpload(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) DeclareLength(id string, length int64) error {
|
func (upload s3Upload) DeclareLength(ctx context.Context, length int64) error {
|
||||||
uploadId, _ := splitIds(id)
|
info, err := upload.GetInfo(ctx)
|
||||||
info, err := store.GetInfo(id)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
info.Size = length
|
info.Size = length
|
||||||
info.SizeIsDeferred = false
|
info.SizeIsDeferred = false
|
||||||
|
|
||||||
return store.writeInfo(uploadId, info)
|
return upload.writeInfo(ctx, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) listAllParts(id string) (parts []*s3.Part, err error) {
|
func (store S3Store) listAllParts(ctx context.Context, id string) (parts []*s3.Part, err error) {
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
|
||||||
partMarker := int64(0)
|
partMarker := int64(0)
|
||||||
for {
|
for {
|
||||||
// Get uploaded parts
|
// Get uploaded parts
|
||||||
listPtr, err := store.Service.ListParts(&s3.ListPartsInput{
|
listPtr, err := store.Service.ListPartsWithContext(ctx, &s3.ListPartsInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId),
|
Key: store.keyWithPrefix(uploadId),
|
||||||
UploadId: aws.String(multipartId),
|
UploadId: aws.String(multipartId),
|
||||||
|
@ -600,8 +667,8 @@ func (store S3Store) listAllParts(id string) (parts []*s3.Part, err error) {
|
||||||
return parts, nil
|
return parts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) downloadIncompletePartForUpload(uploadId string) (*os.File, int64, error) {
|
func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, uploadId string) (*os.File, int64, error) {
|
||||||
incompleteUploadObject, err := store.getIncompletePartForUpload(uploadId)
|
incompleteUploadObject, err := store.getIncompletePartForUpload(ctx, uploadId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
@ -632,8 +699,8 @@ func (store S3Store) downloadIncompletePartForUpload(uploadId string) (*os.File,
|
||||||
return partFile, n, nil
|
return partFile, n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) getIncompletePartForUpload(uploadId string) (*s3.GetObjectOutput, error) {
|
func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId string) (*s3.GetObjectOutput, error) {
|
||||||
obj, err := store.Service.GetObject(&s3.GetObjectInput{
|
obj, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId + ".part"),
|
Key: store.keyWithPrefix(uploadId + ".part"),
|
||||||
})
|
})
|
||||||
|
@ -645,8 +712,8 @@ func (store S3Store) getIncompletePartForUpload(uploadId string) (*s3.GetObjectO
|
||||||
return obj, err
|
return obj, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) putIncompletePartForUpload(uploadId string, r io.ReadSeeker) error {
|
func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, r io.ReadSeeker) error {
|
||||||
_, err := store.Service.PutObject(&s3.PutObjectInput{
|
_, err := store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId + ".part"),
|
Key: store.keyWithPrefix(uploadId + ".part"),
|
||||||
Body: r,
|
Body: r,
|
||||||
|
@ -654,8 +721,8 @@ func (store S3Store) putIncompletePartForUpload(uploadId string, r io.ReadSeeker
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) deleteIncompletePartForUpload(uploadId string) error {
|
func (store S3Store) deleteIncompletePartForUpload(ctx context.Context, uploadId string) error {
|
||||||
_, err := store.Service.DeleteObject(&s3.DeleteObjectInput{
|
_, err := store.Service.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId + ".part"),
|
Key: store.keyWithPrefix(uploadId + ".part"),
|
||||||
})
|
})
|
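With this refactor, per-upload operations move off S3Store onto the s3Upload type and every S3 call goes through the SDK's *WithContext variants, so a caller's context.Context flows all the way down to the wire. The following is a minimal sketch of driving the new API, assuming the package layout this diff belongs to (github.com/tus/tusd/pkg/s3store); the bucket name, upload ID, and timeout are placeholders, not values from the diff:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/tus/tusd/pkg/s3store"
)

func main() {
	// Wire the store to a real S3 client; anything satisfying the
	// package's S3API interface works here, as the tests demonstrate
	// with a generated mock.
	store := s3store.New("example-bucket", s3.New(session.Must(session.NewSession())))

	// Every S3 request issued below inherits this deadline through the
	// ctx parameter that the refactor threads into *WithContext calls.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// The store now only resolves IDs; per-upload operations live on
	// the returned upload value.
	upload, err := store.GetUpload(ctx, "uploadId+multipartId")
	if err != nil {
		log.Fatal(err)
	}

	info, err := upload.GetInfo(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("offset %d of %d bytes\n", info.Offset, info.Size)
}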
|
@ -0,0 +1,236 @@
|
||||||
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
|
// Source: github.com/tus/tusd/pkg/s3store (interfaces: S3API)
|
||||||
|
|
||||||
|
// Package s3store is a generated GoMock package.
|
||||||
|
package s3store
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
request "github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
s3 "github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
reflect "reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MockS3API is a mock of S3API interface
|
||||||
|
type MockS3API struct {
|
||||||
|
ctrl *gomock.Controller
|
||||||
|
recorder *MockS3APIMockRecorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockS3APIMockRecorder is the mock recorder for MockS3API
|
||||||
|
type MockS3APIMockRecorder struct {
|
||||||
|
mock *MockS3API
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMockS3API creates a new mock instance
|
||||||
|
func NewMockS3API(ctrl *gomock.Controller) *MockS3API {
|
||||||
|
mock := &MockS3API{ctrl: ctrl}
|
||||||
|
mock.recorder = &MockS3APIMockRecorder{mock}
|
||||||
|
return mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPECT returns an object that allows the caller to indicate expected use
|
||||||
|
func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {
|
||||||
|
return m.recorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// AbortMultipartUploadWithContext mocks base method
|
||||||
|
func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "AbortMultipartUploadWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompleteMultipartUploadWithContext mocks base method
|
||||||
|
func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "CompleteMultipartUploadWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateMultipartUploadWithContext mocks base method
|
||||||
|
func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "CreateMultipartUploadWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteObjectWithContext mocks base method
|
||||||
|
func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "DeleteObjectWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.DeleteObjectOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteObjectsWithContext mocks base method
|
||||||
|
func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "DeleteObjectsWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.DeleteObjectsOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetObjectWithContext mocks base method
|
||||||
|
func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "GetObjectWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.GetObjectOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetObjectWithContext indicates an expected call of GetObjectWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListPartsWithContext mocks base method
|
||||||
|
func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "ListPartsWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.ListPartsOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListPartsWithContext indicates an expected call of ListPartsWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutObjectWithContext mocks base method
|
||||||
|
func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "PutObjectWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.PutObjectOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutObjectWithContext indicates an expected call of PutObjectWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadPartCopyWithContext mocks base method
|
||||||
|
func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "UploadPartCopyWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.UploadPartCopyOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadPartWithContext mocks base method
|
||||||
|
func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
varargs := []interface{}{arg0, arg1}
|
||||||
|
for _, a := range arg2 {
|
||||||
|
varargs = append(varargs, a)
|
||||||
|
}
|
||||||
|
ret := m.ctrl.Call(m, "UploadPartWithContext", varargs...)
|
||||||
|
ret0, _ := ret[0].(*s3.UploadPartOutput)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadPartWithContext indicates an expected call of UploadPartWithContext
|
||||||
|
func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartWithContext), varargs...)
|
||||||
|
}
|
|
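The generated mock above pairs each context-aware S3API method with a recorder, so tests can declare which calls they expect and what each should return. A minimal sketch of wiring it together, mirroring the expectations the test file below sets up (the bucket, key, and upload IDs are placeholders taken from those tests):

package s3store

import (
	"context"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/golang/mock/gomock"
	"github.com/tus/tusd/pkg/handler"
)

func TestMockSketch(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	// The generated mock stands in for the real S3 client.
	s3obj := NewMockS3API(mockCtrl)

	// Expect exactly one GetObjectWithContext call for the .info object
	// and fail it with the AWS "NoSuchKey" code, as several tests below do.
	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("uploadId.info"),
	}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))

	store := New("bucket", s3obj)
	upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
	if err != nil {
		t.Fatal(err)
	}

	// The missing .info object surfaces as the handler's not-found error.
	if _, err := upload.GetInfo(context.Background()); err != handler.ErrNotFound {
		t.Fatalf("expected handler.ErrNotFound, got %v", err)
	}
}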
@@ -2,6 +2,7 @@ package s3store

 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -13,17 +14,16 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/tus/tusd"
+	"github.com/tus/tusd/pkg/handler"
 )

-//go:generate mockgen -destination=./s3store_mock_test.go -package=s3store github.com/tus/tusd/s3store S3API
+//go:generate mockgen -destination=./s3store_mock_test.go -package=s3store github.com/tus/tusd/pkg/s3store S3API

 // Test interface implementations
-var _ tusd.DataStore = S3Store{}
-var _ tusd.GetReaderDataStore = S3Store{}
-var _ tusd.TerminaterDataStore = S3Store{}
-var _ tusd.FinisherDataStore = S3Store{}
-var _ tusd.ConcaterDataStore = S3Store{}
+var _ handler.DataStore = S3Store{}
+var _ handler.TerminaterDataStore = S3Store{}
+var _ handler.ConcaterDataStore = S3Store{}
+var _ handler.LengthDeferrerDataStore = S3Store{}
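The interface assertions above are a standard Go idiom: assigning to the blank identifier costs nothing at runtime, but the build fails the moment S3Store stops satisfying one of the declared handler interfaces. A self-contained illustration of the idiom, using hypothetical names rather than tusd's:

package main

import "fmt"

type Terminater interface {
	Terminate() error
}

type store struct{}

func (store) Terminate() error { return nil }

// Compile-time check only: the value is discarded, but removing the
// Terminate method from store now breaks the build instead of a caller.
var _ Terminater = store{}

func main() {
	fmt.Println("store still satisfies Terminater")
}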
func TestNewUpload(t *testing.T) {
|
func TestNewUpload(t *testing.T) {
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
|
@ -40,7 +40,7 @@ func TestNewUpload(t *testing.T) {
|
||||||
s2 := "men???hi"
|
s2 := "men???hi"
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().CreateMultipartUpload(&s3.CreateMultipartUploadInput{
|
s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
Metadata: map[string]*string{
|
Metadata: map[string]*string{
|
||||||
|
@ -50,15 +50,15 @@ func TestNewUpload(t *testing.T) {
|
||||||
}).Return(&s3.CreateMultipartUploadOutput{
|
}).Return(&s3.CreateMultipartUploadOutput{
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().PutObject(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü\r\nhi","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
|
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü\r\nhi","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
||||||
ContentLength: aws.Int64(int64(177)),
|
ContentLength: aws.Int64(int64(241)),
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
info := tusd.FileInfo{
|
info := handler.FileInfo{
|
||||||
ID: "uploadId",
|
ID: "uploadId",
|
||||||
Size: 500,
|
Size: 500,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -67,9 +67,9 @@ func TestNewUpload(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
id, err := store.NewUpload(info)
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal("uploadId+multipartId", id)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewUploadWithObjectPrefix(t *testing.T) {
|
func TestNewUploadWithObjectPrefix(t *testing.T) {
|
||||||
|
@ -88,7 +88,7 @@ func TestNewUploadWithObjectPrefix(t *testing.T) {
|
||||||
s2 := "men?"
|
s2 := "men?"
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().CreateMultipartUpload(&s3.CreateMultipartUploadInput{
|
s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("my/uploaded/files/uploadId"),
|
Key: aws.String("my/uploaded/files/uploadId"),
|
||||||
Metadata: map[string]*string{
|
Metadata: map[string]*string{
|
||||||
|
@ -98,15 +98,15 @@ func TestNewUploadWithObjectPrefix(t *testing.T) {
|
||||||
}).Return(&s3.CreateMultipartUploadOutput{
|
}).Return(&s3.CreateMultipartUploadOutput{
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().PutObject(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("my/uploaded/files/uploadId.info"),
|
Key: aws.String("my/uploaded/files/uploadId.info"),
|
||||||
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
|
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`)),
|
||||||
ContentLength: aws.Int64(int64(171)),
|
ContentLength: aws.Int64(int64(253)),
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
info := tusd.FileInfo{
|
info := handler.FileInfo{
|
||||||
ID: "uploadId",
|
ID: "uploadId",
|
||||||
Size: 500,
|
Size: 500,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -115,9 +115,9 @@ func TestNewUploadWithObjectPrefix(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
id, err := store.NewUpload(info)
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal("uploadId+multipartId", id)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewUploadLargerMaxObjectSize(t *testing.T) {
|
func TestNewUploadLargerMaxObjectSize(t *testing.T) {
|
||||||
|
@ -131,15 +131,15 @@ func TestNewUploadLargerMaxObjectSize(t *testing.T) {
|
||||||
assert.Equal("bucket", store.Bucket)
|
assert.Equal("bucket", store.Bucket)
|
||||||
assert.Equal(s3obj, store.Service)
|
assert.Equal(s3obj, store.Service)
|
||||||
|
|
||||||
info := tusd.FileInfo{
|
info := handler.FileInfo{
|
||||||
ID: "uploadId",
|
ID: "uploadId",
|
||||||
Size: store.MaxObjectSize + 1,
|
Size: store.MaxObjectSize + 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
id, err := store.NewUpload(info)
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.NotNil(err)
|
assert.NotNil(err)
|
||||||
assert.EqualError(err, fmt.Sprintf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize))
|
assert.EqualError(err, fmt.Sprintf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize))
|
||||||
assert.Equal("", id)
|
assert.Nil(upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetInfoNotFound(t *testing.T) {
|
func TestGetInfoNotFound(t *testing.T) {
|
||||||
|
@ -150,13 +150,16 @@ func TestGetInfoNotFound(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
||||||
|
|
||||||
_, err := store.GetInfo("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Equal(tusd.ErrNotFound, err)
|
assert.Nil(err)
|
||||||
|
|
||||||
|
_, err = upload.GetInfo(context.Background())
|
||||||
|
assert.Equal(handler.ErrNotFound, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetInfo(t *testing.T) {
|
func TestGetInfo(t *testing.T) {
|
||||||
|
@ -168,13 +171,13 @@ func TestGetInfo(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -191,7 +194,7 @@ func TestGetInfo(t *testing.T) {
|
||||||
NextPartNumberMarker: aws.Int64(2),
|
NextPartNumberMarker: aws.Int64(2),
|
||||||
IsTruncated: aws.Bool(true),
|
IsTruncated: aws.Bool(true),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -203,19 +206,25 @@ func TestGetInfo(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
||||||
)
|
)
|
||||||
|
|
||||||
info, err := store.GetInfo("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(500), info.Size)
|
assert.Equal(int64(500), info.Size)
|
||||||
assert.Equal(int64(400), info.Offset)
|
assert.Equal(int64(400), info.Offset)
|
||||||
assert.Equal("uploadId+multipartId", info.ID)
|
assert.Equal("uploadId+multipartId", info.ID)
|
||||||
assert.Equal("hello", info.MetaData["foo"])
|
assert.Equal("hello", info.MetaData["foo"])
|
||||||
assert.Equal("menü", info.MetaData["bar"])
|
assert.Equal("menü", info.MetaData["bar"])
|
||||||
|
assert.Equal("s3store", info.Storage["Type"])
|
||||||
|
assert.Equal("bucket", info.Storage["Bucket"])
|
||||||
|
assert.Equal("my/uploaded/files/uploadId", info.Storage["Key"])
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetInfoWithIncompletePart(t *testing.T) {
|
func TestGetInfoWithIncompletePart(t *testing.T) {
|
||||||
|
@ -227,19 +236,19 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
|
@ -248,7 +257,10 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
|
||||||
}, nil),
|
}, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
info, err := store.GetInfo("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(10), info.Offset)
|
assert.Equal(int64(10), info.Offset)
|
||||||
assert.Equal("uploadId+multipartId", info.ID)
|
assert.Equal("uploadId+multipartId", info.ID)
|
||||||
|
@ -263,13 +275,13 @@ func TestGetInfoFinished(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -277,7 +289,10 @@ func TestGetInfoFinished(t *testing.T) {
|
||||||
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
||||||
)
|
)
|
||||||
|
|
||||||
info, err := store.GetInfo("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(500), info.Size)
|
assert.Equal(int64(500), info.Size)
|
||||||
assert.Equal(int64(500), info.Offset)
|
assert.Equal(int64(500), info.Offset)
|
||||||
|
@ -291,14 +306,17 @@ func TestGetReader(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))),
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
content, err := store.GetReader("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))), content)
|
assert.Equal(ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))), content)
|
||||||
}
|
}
|
||||||
|
@ -312,11 +330,11 @@ func TestGetReaderNotFound(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -324,9 +342,12 @@ func TestGetReaderNotFound(t *testing.T) {
|
||||||
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
||||||
)
|
)
|
||||||
|
|
||||||
content, err := store.GetReader("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(content)
|
assert.Nil(content)
|
||||||
assert.Equal(tusd.ErrNotFound, err)
|
assert.Equal(handler.ErrNotFound, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetReaderNotFinished(t *testing.T) {
|
func TestGetReaderNotFinished(t *testing.T) {
|
||||||
|
@ -338,11 +359,11 @@ func TestGetReaderNotFinished(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -352,7 +373,10 @@ func TestGetReaderNotFinished(t *testing.T) {
|
||||||
}, nil),
|
}, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
content, err := store.GetReader("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(content)
|
assert.Nil(content)
|
||||||
assert.Equal("cannot stream non-finished upload", err.Error())
|
assert.Equal("cannot stream non-finished upload", err.Error())
|
||||||
}
|
}
|
||||||
|
@ -366,13 +390,13 @@ func TestDeclareLength(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":true,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":true,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -380,19 +404,22 @@ func TestDeclareLength(t *testing.T) {
|
||||||
}).Return(&s3.ListPartsOutput{
|
}).Return(&s3.ListPartsOutput{
|
||||||
Parts: []*s3.Part{},
|
Parts: []*s3.Part{},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(nil, awserr.New("NotFound", "Not Found", nil)),
|
}).Return(nil, awserr.New("NotFound", "Not Found", nil)),
|
||||||
s3obj.EXPECT().PutObject(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
|
Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
|
||||||
ContentLength: aws.Int64(int64(144)),
|
ContentLength: aws.Int64(int64(208)),
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
err := store.DeclareLength("uploadId+multipartId", 500)
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsLengthDeclarableUpload(upload).DeclareLength(context.Background(), 500)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
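TestDeclareLength above is the first test to reach DeclareLength through store.AsLengthDeclarableUpload rather than calling a store method directly: the store converts a generic upload handle into its length-declaring view, keeping the type assertion in one place. A rough, self-contained sketch of that conversion pattern, using hypothetical local types (the real handler.Upload and length-declaration interfaces live in github.com/tus/tusd/pkg/handler and may differ in detail):

package main

import (
	"context"
	"fmt"
)

type Upload interface {
	FinishUpload(ctx context.Context) error
}

type LengthDeclarableUpload interface {
	DeclareLength(ctx context.Context, length int64) error
}

type s3Upload struct{ size int64 }

func (u *s3Upload) FinishUpload(ctx context.Context) error { return nil }

func (u *s3Upload) DeclareLength(ctx context.Context, length int64) error {
	u.size = length
	return nil
}

type Store struct{}

// The store vouches for its own concrete upload type, so callers never
// type-assert on an Upload themselves.
func (Store) AsLengthDeclarableUpload(upload Upload) LengthDeclarableUpload {
	return upload.(*s3Upload)
}

func main() {
	var store Store
	var upload Upload = &s3Upload{}
	if err := store.AsLengthDeclarableUpload(upload).DeclareLength(context.Background(), 500); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("length declared")
}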
|
@ -405,7 +432,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -426,7 +453,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
NextPartNumberMarker: aws.Int64(2),
|
NextPartNumberMarker: aws.Int64(2),
|
||||||
IsTruncated: aws.Bool(true),
|
IsTruncated: aws.Bool(true),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -440,7 +467,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
|
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -463,7 +490,10 @@ func TestFinishUpload(t *testing.T) {
|
||||||
}).Return(nil, nil),
|
}).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
err := store.FinishUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = upload.FinishUpload(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -480,13 +510,13 @@ func TestWriteChunk(t *testing.T) {
|
||||||
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -501,11 +531,11 @@ func TestWriteChunk(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -520,39 +550,42 @@ func TestWriteChunk(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(3),
|
PartNumber: aws.Int64(3),
|
||||||
Body: bytes.NewReader([]byte("1234")),
|
Body: bytes.NewReader([]byte("1234")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(4),
|
PartNumber: aws.Int64(4),
|
||||||
Body: bytes.NewReader([]byte("5678")),
|
Body: bytes.NewReader([]byte("5678")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(5),
|
PartNumber: aws.Int64(5),
|
||||||
Body: bytes.NewReader([]byte("90AB")),
|
Body: bytes.NewReader([]byte("90AB")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
s3obj.EXPECT().PutObject(NewPutObjectInputMatcher(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
Body: bytes.NewReader([]byte("CD")),
|
Body: bytes.NewReader([]byte("CD")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
bytesRead, err := store.WriteChunk("uploadId+multipartId", 300, bytes.NewReader([]byte("1234567890ABCD")))
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890ABCD")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(14), bytesRead)
|
assert.Equal(int64(14), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -573,13 +606,13 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
|
||||||
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -594,11 +627,11 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -613,11 +646,11 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
||||||
s3obj.EXPECT().PutObject(NewPutObjectInputMatcher(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
Body: bytes.NewReader([]byte("1234567890ABCD")),
|
Body: bytes.NewReader([]byte("1234567890ABCD")),
|
||||||
|
@ -631,7 +664,10 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
|
||||||
writer.CloseWithError(io.ErrUnexpectedEOF)
|
writer.CloseWithError(io.ErrUnexpectedEOF)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
bytesRead, err := store.WriteChunk("uploadId+multipartId", 300, reader)
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
bytesRead, err := upload.WriteChunk(context.Background(), 300, reader)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(14), bytesRead)
|
assert.Equal(int64(14), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -645,13 +681,13 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -666,11 +702,11 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil)),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -685,18 +721,21 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
||||||
s3obj.EXPECT().PutObject(NewPutObjectInputMatcher(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
Body: bytes.NewReader([]byte("1234567890")),
|
Body: bytes.NewReader([]byte("1234567890")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
bytesRead, err := store.WriteChunk("uploadId+multipartId", 300, bytes.NewReader([]byte("1234567890")))
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(10), bytesRead)
|
assert.Equal(int64(10), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -714,50 +753,50 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
|
||||||
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
ContentLength: aws.Int64(3),
|
ContentLength: aws.Int64(3),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
ContentLength: aws.Int64(3),
|
ContentLength: aws.Int64(3),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().DeleteObject(&s3.DeleteObjectInput{
|
s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.DeleteObjectOutput{}, nil),
|
}).Return(&s3.DeleteObjectOutput{}, nil),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(1),
|
PartNumber: aws.Int64(1),
|
||||||
Body: bytes.NewReader([]byte("1234")),
|
Body: bytes.NewReader([]byte("1234")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -766,7 +805,10 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
bytesRead, err := store.WriteChunk("uploadId+multipartId", 3, bytes.NewReader([]byte("45")))
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
bytesRead, err := upload.WriteChunk(context.Background(), 3, bytes.NewReader([]byte("45")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(2), bytesRead)
|
assert.Equal(int64(2), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -784,57 +826,60 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.
|
||||||
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":10,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":10,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
ContentLength: aws.Int64(3),
|
ContentLength: aws.Int64(3),
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumberMarker: aws.Int64(0),
|
PartNumberMarker: aws.Int64(0),
|
||||||
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
|
||||||
ContentLength: aws.Int64(3),
|
ContentLength: aws.Int64(3),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().DeleteObject(&s3.DeleteObjectInput{
|
s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.DeleteObjectOutput{}, nil),
|
}).Return(&s3.DeleteObjectOutput{}, nil),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
PartNumber: aws.Int64(1),
|
PartNumber: aws.Int64(1),
|
||||||
Body: bytes.NewReader([]byte("1234")),
|
Body: bytes.NewReader([]byte("1234")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
s3obj.EXPECT().PutObject(NewPutObjectInputMatcher(&s3.PutObjectInput{
|
s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
Body: bytes.NewReader([]byte("5")),
|
Body: bytes.NewReader([]byte("5")),
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
bytesRead, err := store.WriteChunk("uploadId+multipartId", 3, bytes.NewReader([]byte("45")))
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
bytesRead, err := upload.WriteChunk(context.Background(), 3, bytes.NewReader([]byte("45")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(2), bytesRead)
|
assert.Equal(int64(2), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -849,13 +894,13 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
|
||||||
store.MinPartSize = 20
|
store.MinPartSize = 20
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(&s3.GetObjectOutput{
|
}).Return(&s3.GetObjectOutput{
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -870,11 +915,11 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil)),
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -889,11 +934,11 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().GetObject(&s3.GetObjectInput{
|
s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId.part"),
|
Key: aws.String("uploadId.part"),
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
|
||||||
s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
|
s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -902,10 +947,13 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
// 10 bytes are missing for the upload to be finished (offset at 490 for 500
|
// 10 bytes are missing for the upload to be finished (offset at 490 for 500
|
||||||
// bytes file) but the minimum chunk size is higher (20). The chunk is
|
// bytes file) but the minimum chunk size is higher (20). The chunk is
|
||||||
// still uploaded since the last part may be smaller than the minimum.
|
// still uploaded since the last part may be smaller than the minimum.
|
||||||
bytesRead, err := store.WriteChunk("uploadId+multipartId", 490, bytes.NewReader([]byte("1234567890")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 490, bytes.NewReader([]byte("1234567890")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(10), bytesRead)
|
assert.Equal(int64(10), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -919,13 +967,13 @@ func TestTerminate(t *testing.T) {
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
// Order is not important in this situation.
|
// Order is not important in this situation.
|
||||||
s3obj.EXPECT().AbortMultipartUpload(&s3.AbortMultipartUploadInput{
|
s3obj.EXPECT().AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
}).Return(nil, nil)
|
}).Return(nil, nil)
|
||||||
|
|
||||||
s3obj.EXPECT().DeleteObjects(&s3.DeleteObjectsInput{
|
s3obj.EXPECT().DeleteObjectsWithContext(context.Background(), &s3.DeleteObjectsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Delete: &s3.Delete{
|
Delete: &s3.Delete{
|
||||||
Objects: []*s3.ObjectIdentifier{
|
Objects: []*s3.ObjectIdentifier{
|
||||||
|
@ -943,7 +991,10 @@ func TestTerminate(t *testing.T) {
|
||||||
},
|
},
|
||||||
}).Return(&s3.DeleteObjectsOutput{}, nil)
|
}).Return(&s3.DeleteObjectsOutput{}, nil)
|
||||||
|
|
||||||
err := store.Terminate("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -957,13 +1008,13 @@ func TestTerminateWithErrors(t *testing.T) {
|
||||||
|
|
||||||
// Order is not important in this situation.
|
// Order is not important in this situation.
|
||||||
// NoSuchUpload errors should be ignored
|
// NoSuchUpload errors should be ignored
|
||||||
s3obj.EXPECT().AbortMultipartUpload(&s3.AbortMultipartUploadInput{
|
s3obj.EXPECT().AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil))
|
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil))
|
||||||
|
|
||||||
s3obj.EXPECT().DeleteObjects(&s3.DeleteObjectsInput{
|
s3obj.EXPECT().DeleteObjectsWithContext(context.Background(), &s3.DeleteObjectsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Delete: &s3.Delete{
|
Delete: &s3.Delete{
|
||||||
Objects: []*s3.ObjectIdentifier{
|
Objects: []*s3.ObjectIdentifier{
|
||||||
|
@ -989,7 +1040,10 @@ func TestTerminateWithErrors(t *testing.T) {
|
||||||
},
|
},
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
err := store.Terminate("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
|
||||||
assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
|
assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1001,7 +1055,7 @@ func TestConcatUploads(t *testing.T) {
|
||||||
s3obj := NewMockS3API(mockCtrl)
|
s3obj := NewMockS3API(mockCtrl)
|
||||||
store := New("bucket", s3obj)
|
store := New("bucket", s3obj)
|
||||||
|
|
||||||
s3obj.EXPECT().UploadPartCopy(&s3.UploadPartCopyInput{
|
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -1009,7 +1063,7 @@ func TestConcatUploads(t *testing.T) {
|
||||||
PartNumber: aws.Int64(1),
|
PartNumber: aws.Int64(1),
|
||||||
}).Return(nil, nil)
|
}).Return(nil, nil)
|
||||||
|
|
||||||
s3obj.EXPECT().UploadPartCopy(&s3.UploadPartCopyInput{
|
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -1017,7 +1071,7 @@ func TestConcatUploads(t *testing.T) {
|
||||||
PartNumber: aws.Int64(2),
|
PartNumber: aws.Int64(2),
|
||||||
}).Return(nil, nil)
|
}).Return(nil, nil)
|
||||||
|
|
||||||
s3obj.EXPECT().UploadPartCopy(&s3.UploadPartCopyInput{
|
s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -1027,7 +1081,7 @@ func TestConcatUploads(t *testing.T) {
|
||||||
|
|
||||||
// Output from s3Store.FinishUpload
|
// Output from s3Store.FinishUpload
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
s3obj.EXPECT().ListParts(&s3.ListPartsInput{
|
s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -1048,7 +1102,7 @@ func TestConcatUploads(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, nil),
|
}, nil),
|
||||||
s3obj.EXPECT().CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
|
s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
|
||||||
Bucket: aws.String("bucket"),
|
Bucket: aws.String("bucket"),
|
||||||
Key: aws.String("uploadId"),
|
Key: aws.String("uploadId"),
|
||||||
UploadId: aws.String("multipartId"),
|
UploadId: aws.String("multipartId"),
|
||||||
|
@ -1071,10 +1125,20 @@ func TestConcatUploads(t *testing.T) {
|
||||||
}).Return(nil, nil),
|
}).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
err := store.ConcatUploads("uploadId+multipartId", []string{
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
"aaa+AAA",
|
assert.Nil(err)
|
||||||
"bbb+BBB",
|
|
||||||
"ccc+CCC",
|
uploadA, err := store.GetUpload(context.Background(), "aaa+AAA")
|
||||||
|
assert.Nil(err)
|
||||||
|
uploadB, err := store.GetUpload(context.Background(), "bbb+BBB")
|
||||||
|
assert.Nil(err)
|
||||||
|
uploadC, err := store.GetUpload(context.Background(), "ccc+CCC")
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsConcatableUpload(upload).ConcatUploads(context.Background(), []handler.Upload{
|
||||||
|
uploadA,
|
||||||
|
uploadB,
|
||||||
|
uploadC,
|
||||||
})
|
})
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
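The test changes above all make the same migration: 1.x calls that took an upload ID directly (store.WriteChunk, store.Terminate, store.ConcatUploads) become methods on an upload handle obtained from store.GetUpload, and every call now threads a context.Context. A minimal sketch of the new calling convention, assuming the store and mock set up as in the tests; the ID, offset, and payload are illustrative only:

	ctx := context.Background()
	upload, err := store.GetUpload(ctx, "uploadId+multipartId")
	if err != nil {
		t.Fatal(err)
	}
	// WriteChunk now lives on the upload handle and takes a context.
	bytesRead, err := upload.WriteChunk(ctx, 300, bytes.NewReader([]byte("1234567890ABCD")))
	// Optional capabilities are reached through explicit conversions on the store:
	err = store.AsTerminatableUpload(upload).Terminate(ctx)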
@@ -1,140 +0,0 @@
-// Automatically generated by MockGen. DO NOT EDIT!
-// Source: github.com/tus/tusd/s3store (interfaces: S3API)
-
-package s3store
-
-import (
-	s3 "github.com/aws/aws-sdk-go/service/s3"
-	gomock "github.com/golang/mock/gomock"
-)
-
-// Mock of S3API interface
-type MockS3API struct {
-	ctrl     *gomock.Controller
-	recorder *_MockS3APIRecorder
-}
-
-// Recorder for MockS3API (not exported)
-type _MockS3APIRecorder struct {
-	mock *MockS3API
-}
-
-func NewMockS3API(ctrl *gomock.Controller) *MockS3API {
-	mock := &MockS3API{ctrl: ctrl}
-	mock.recorder = &_MockS3APIRecorder{mock}
-	return mock
-}
-
-func (_m *MockS3API) EXPECT() *_MockS3APIRecorder {
-	return _m.recorder
-}
-
-func (_m *MockS3API) AbortMultipartUpload(_param0 *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
-	ret := _m.ctrl.Call(_m, "AbortMultipartUpload", _param0)
-	ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) AbortMultipartUpload(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "AbortMultipartUpload", arg0)
-}
-
-func (_m *MockS3API) CompleteMultipartUpload(_param0 *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) {
-	ret := _m.ctrl.Call(_m, "CompleteMultipartUpload", _param0)
-	ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) CompleteMultipartUpload(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "CompleteMultipartUpload", arg0)
-}
-
-func (_m *MockS3API) CreateMultipartUpload(_param0 *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) {
-	ret := _m.ctrl.Call(_m, "CreateMultipartUpload", _param0)
-	ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) CreateMultipartUpload(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateMultipartUpload", arg0)
-}
-
-func (_m *MockS3API) DeleteObject(_param0 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
-	ret := _m.ctrl.Call(_m, "DeleteObject", _param0)
-	ret0, _ := ret[0].(*s3.DeleteObjectOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) DeleteObject(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteObject", arg0)
-}
-
-func (_m *MockS3API) DeleteObjects(_param0 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) {
-	ret := _m.ctrl.Call(_m, "DeleteObjects", _param0)
-	ret0, _ := ret[0].(*s3.DeleteObjectsOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) DeleteObjects(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteObjects", arg0)
-}
-
-func (_m *MockS3API) GetObject(_param0 *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
-	ret := _m.ctrl.Call(_m, "GetObject", _param0)
-	ret0, _ := ret[0].(*s3.GetObjectOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) GetObject(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "GetObject", arg0)
-}
-
-func (_m *MockS3API) ListParts(_param0 *s3.ListPartsInput) (*s3.ListPartsOutput, error) {
-	ret := _m.ctrl.Call(_m, "ListParts", _param0)
-	ret0, _ := ret[0].(*s3.ListPartsOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) ListParts(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "ListParts", arg0)
-}
-
-func (_m *MockS3API) PutObject(_param0 *s3.PutObjectInput) (*s3.PutObjectOutput, error) {
-	ret := _m.ctrl.Call(_m, "PutObject", _param0)
-	ret0, _ := ret[0].(*s3.PutObjectOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) PutObject(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "PutObject", arg0)
-}
-
-func (_m *MockS3API) UploadPart(_param0 *s3.UploadPartInput) (*s3.UploadPartOutput, error) {
-	ret := _m.ctrl.Call(_m, "UploadPart", _param0)
-	ret0, _ := ret[0].(*s3.UploadPartOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) UploadPart(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "UploadPart", arg0)
-}
-
-func (_m *MockS3API) UploadPartCopy(_param0 *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) {
-	ret := _m.ctrl.Call(_m, "UploadPartCopy", _param0)
-	ret0, _ := ret[0].(*s3.UploadPartCopyOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) UploadPartCopy(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "UploadPartCopy", arg0)
-}
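The file deleted above is the old MockGen output for the context-free S3API interface. The context-aware tests require a regenerated mock, which is not shown in this hunk. A hedged sketch of the interface shape such a mock would have to satisfy, assuming the *WithContext signatures of aws-sdk-go's s3iface package (the exact interface in the new code may differ):

	// Assumed shape only, inferred from the expectations recorded above.
	type S3API interface {
		GetObjectWithContext(ctx aws.Context, in *s3.GetObjectInput, opts ...request.Option) (*s3.GetObjectOutput, error)
		PutObjectWithContext(ctx aws.Context, in *s3.PutObjectInput, opts ...request.Option) (*s3.PutObjectOutput, error)
		ListPartsWithContext(ctx aws.Context, in *s3.ListPartsInput, opts ...request.Option) (*s3.ListPartsOutput, error)
		// ...plus the analogous *WithContext variants for the multipart
		// upload, delete, and copy calls exercised by the tests.
	}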
@@ -11,14 +11,14 @@ compile linux amd64
 compile linux arm
 compile darwin 386
 compile darwin amd64
-#compile windows 386 .exe
-#compile windows amd64 .exe
+compile windows 386 .exe
+compile windows amd64 .exe

 maketar linux 386
 maketar linux amd64
 maketar linux arm
 makezip darwin 386
 makezip darwin amd64
-#makezip windows 386 .exe
-#makezip windows amd64 .exe
+makezip windows 386 .exe
+makezip windows amd64 .exe
 makedep amd64
@@ -16,6 +16,7 @@ function compile {
 	rm -rf "$dir"
 	mkdir -p "$dir"
 	GOOS=$os GOARCH=$arch go build \
+		-trimpath \
 		-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
 		-o "$dir/tusd$ext" ./cmd/tusd/main.go
 }

@@ -57,7 +58,7 @@ function makedep {
 	echo "Maintainer: Marius <maerious@gmail.com>" >> "./$dir/DEBIAN/control"
 	echo "Section: devel" >> "./$dir/DEBIAN/control"
 	echo "Priority: optional" >> "./$dir/DEBIAN/control"
-	echo "Version: ${version}" >> "./$dir/DEBIAN/control"
+	echo "Version: ${version:1}" >> "./$dir/DEBIAN/control"
 	echo "Architecture: ${arch}" >> "./$dir/DEBIAN/control"
 	echo "Homepage: https://github.com/tus/tusd" >> "./$dir/DEBIAN/control"
 	echo "Built-Using: $(go version)" >> "./$dir/DEBIAN/control"
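Two small build changes appear above. The -trimpath flag added to go build (supported since Go 1.13) strips local filesystem paths from the compiled binary, making builds more reproducible. The ${version:1} change is plain bash substring expansion: it drops the first character of the value, presumably to turn a v-prefixed git tag into the digit-leading version string a Debian control file requires. For example:

	version="v1.0.0"     # hypothetical tag value
	echo "${version:1}"  # expands from index 1 onward -> 1.0.0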
@@ -26,7 +26,7 @@ echo $KUBECONFIGVAR | python -m base64 -d > ${HOME}/.kube/config
 echo "KUBECONFIG file written"

 sleep 10s # This cost me some precious debugging time.
-kubectl apply -f "${__root}/.infra/kube/tusd-kube.yaml"
+kubectl apply -f "${__root}/infra/kube/tusd-kube.yaml"


 kubectl set image deployment/tusd --namespace=tus tusd=docker.io/tusproject/tusd:$TRAVIS_COMMIT
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -e
+
+go test ./pkg/...
+go vet ./pkg/...
@@ -1,22 +0,0 @@
-// +build go1.7
-
-package tusd_test
-
-import (
-	"testing"
-
-	"github.com/golang/mock/gomock"
-)
-
-func SubTest(t *testing.T, name string, runTest func(*testing.T, *MockFullDataStore)) {
-	t.Run(name, func(subT *testing.T) {
-		//subT.Parallel()
-
-		ctrl := gomock.NewController(subT)
-		defer ctrl.Finish()
-
-		store := NewMockFullDataStore(ctrl)
-
-		runTest(subT, store)
-	})
-}
@@ -1,35 +0,0 @@
-// +build !go1.7
-
-package tusd_test
-
-import (
-	"fmt"
-	"strings"
-	"testing"
-
-	"github.com/golang/mock/gomock"
-)
-
-var subTestDepth = 0
-
-func SubTest(t *testing.T, name string, runTest func(*testing.T, *MockFullDataStore)) {
-	subTestDepth++
-	defer func() { subTestDepth-- }()
-	p := strings.Repeat("\t", subTestDepth)
-
-	fmt.Println(p, "=== RUN SubTest:", name)
-
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	store := NewMockFullDataStore(ctrl)
-
-	runTest(t, store)
-
-	if t.Failed() {
-		fmt.Println(p, "--- FAIL SubTest:", name)
-		t.FailNow()
-	} else {
-		fmt.Println(p, "--- PASS SubTest:", name)
-	}
-}
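Both SubTest variants above, the t.Run version gated on go1.7 and the manual fallback for older toolchains, are deleted together: every Go version the project still builds on has testing.T's Run method, so a single unconditional helper suffices. A minimal sketch of such a helper, reusing the names from the deleted files; where it actually lives in the new layout is not shown in this hunk:

	func SubTest(t *testing.T, name string, runTest func(*testing.T, *MockFullDataStore)) {
		t.Run(name, func(subT *testing.T) {
			ctrl := gomock.NewController(subT)
			defer ctrl.Finish()

			runTest(subT, NewMockFullDataStore(ctrl))
		})
	}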
@@ -1,8 +0,0 @@
-{
-	"folders": [
-		{
-			"path": "."
-		}
-	],
-	"settings": {}
-}
@@ -1,202 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   [remainder of the standard, unmodified Apache License 2.0 text omitted;
-   the full vendored license file is deleted in this commit]
-
-   Copyright 2014 Google Inc.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
@@ -1,437 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metadata provides access to Google Compute Engine (GCE)
-// metadata and API service accounts.
-//
-// This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
-package metadata // import "cloud.google.com/go/compute/metadata"
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-	"golang.org/x/net/context/ctxhttp"
-)
-
-const (
-	// metadataIP is the documented metadata server IP address.
-	metadataIP = "169.254.169.254"
-
-	// metadataHostEnv is the environment variable specifying the
-	// GCE metadata hostname. If empty, the default value of
-	// metadataIP ("169.254.169.254") is used instead.
-	// This is variable name is not defined by any spec, as far as
-	// I know; it was made up for the Go package.
-	metadataHostEnv = "GCE_METADATA_HOST"
-
-	userAgent = "gcloud-golang/0.1"
-)
-
-type cachedValue struct {
-	k    string
-	trim bool
-	mu   sync.Mutex
-	v    string
-}
-
-var (
-	projID  = &cachedValue{k: "project/project-id", trim: true}
-	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
-	instID  = &cachedValue{k: "instance/id", trim: true}
-)
-
-var (
-	metaClient = &http.Client{
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   2 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-			ResponseHeaderTimeout: 2 * time.Second,
-		},
-	}
-	subscribeClient = &http.Client{
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   2 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-		},
-	}
-)
-
-// NotDefinedError is returned when requested metadata is not defined.
-//
-// The underlying string is the suffix after "/computeMetadata/v1/".
-//
-// This error is not returned if the value is defined to be the empty
-// string.
-type NotDefinedError string
-
-func (suffix NotDefinedError) Error() string {
-	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
-}
-
-// Get returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-func Get(suffix string) (string, error) {
-	val, _, err := getETag(metaClient, suffix)
-	return val, err
-}
-
-// getETag returns a value from the metadata service as well as the associated
-// ETag using the provided client. This func is otherwise equivalent to Get.
-func getETag(client *http.Client, suffix string) (value, etag string, err error) {
-	// Using a fixed IP makes it very difficult to spoof the metadata service in
-	// a container, which is an important use-case for local testing of cloud
-	// deployments. To enable spoofing of the metadata service, the environment
-	// variable GCE_METADATA_HOST is first inspected to decide where metadata
-	// requests shall go.
-	host := os.Getenv(metadataHostEnv)
-	if host == "" {
-		// Using 169.254.169.254 instead of "metadata" here because Go
-		// binaries built with the "netgo" tag and without cgo won't
-		// know the search suffix for "metadata" is
-		// ".google.internal", and this IP address is documented as
-		// being stable anyway.
-		host = metadataIP
-	}
-	url := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", url, nil)
-	req.Header.Set("Metadata-Flavor", "Google")
-	req.Header.Set("User-Agent", userAgent)
-	res, err := client.Do(req)
-	if err != nil {
-		return "", "", err
-	}
-	defer res.Body.Close()
-	if res.StatusCode == http.StatusNotFound {
-		return "", "", NotDefinedError(suffix)
-	}
-	if res.StatusCode != 200 {
-		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
-	}
-	all, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return "", "", err
-	}
-	return string(all), res.Header.Get("Etag"), nil
-}
-
-func getTrimmed(suffix string) (s string, err error) {
-	s, err = Get(suffix)
-	s = strings.TrimSpace(s)
-	return
-}
-
-func (c *cachedValue) get() (v string, err error) {
-	defer c.mu.Unlock()
-	c.mu.Lock()
-	if c.v != "" {
-		return c.v, nil
-	}
-	if c.trim {
-		v, err = getTrimmed(c.k)
-	} else {
-		v, err = Get(c.k)
-	}
-	if err == nil {
-		c.v = v
-	}
-	return
-}
-
-var (
-	onGCEOnce sync.Once
-	onGCE     bool
-)
-
-// OnGCE reports whether this process is running on Google Compute Engine.
-func OnGCE() bool {
-	onGCEOnce.Do(initOnGCE)
-	return onGCE
-}
-
-func initOnGCE() {
-	onGCE = testOnGCE()
-}
-
-func testOnGCE() bool {
-	// The user explicitly said they're on GCE, so trust them.
-	if os.Getenv(metadataHostEnv) != "" {
-		return true
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	resc := make(chan bool, 2)
-
-	// Try two strategies in parallel.
-	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
-	go func() {
-		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
-		req.Header.Set("User-Agent", userAgent)
-		res, err := ctxhttp.Do(ctx, metaClient, req)
-		if err != nil {
-			resc <- false
-			return
-		}
-		defer res.Body.Close()
-		resc <- res.Header.Get("Metadata-Flavor") == "Google"
-	}()
-
-	go func() {
-		addrs, err := net.LookupHost("metadata.google.internal")
-		if err != nil || len(addrs) == 0 {
-			resc <- false
-			return
-		}
-		resc <- strsContains(addrs, metadataIP)
-	}()
-
-	tryHarder := systemInfoSuggestsGCE()
-	if tryHarder {
-		res := <-resc
-		if res {
-			// The first strategy succeeded, so let's use it.
-			return true
-		}
-		// Wait for either the DNS or metadata server probe to
-		// contradict the other one and say we are running on
-		// GCE. Give it a lot of time to do so, since the system
|
|
||||||
// info already suggests we're running on a GCE BIOS.
|
|
||||||
timer := time.NewTimer(5 * time.Second)
|
|
||||||
defer timer.Stop()
|
|
||||||
select {
|
|
||||||
case res = <-resc:
|
|
||||||
return res
|
|
||||||
case <-timer.C:
|
|
||||||
// Too slow. Who knows what this system is.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// There's no hint from the system info that we're running on
|
|
||||||
// GCE, so use the first probe's result as truth, whether it's
|
|
||||||
// true or false. The goal here is to optimize for speed for
|
|
||||||
// users who are NOT running on GCE. We can't assume that
|
|
||||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
|
||||||
// address is fast. Worst case this should return when the
|
|
||||||
// metaClient's Transport.ResponseHeaderTimeout or
|
|
||||||
// Transport.Dial.Timeout fires (in two seconds).
|
|
||||||
return <-resc
|
|
||||||
}
|
|
||||||
|
|
||||||
// systemInfoSuggestsGCE reports whether the local system (without
|
|
||||||
// doing network requests) suggests that we're running on GCE. If this
|
|
||||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
|
||||||
// server.
|
|
||||||
func systemInfoSuggestsGCE() bool {
|
|
||||||
if runtime.GOOS != "linux" {
|
|
||||||
// We don't have any non-Linux clues available, at least yet.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
|
||||||
name := strings.TrimSpace(string(slurp))
|
|
||||||
return name == "Google" || name == "Google Compute Engine"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe subscribes to a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
// The suffix may contain query parameters.
|
|
||||||
//
|
|
||||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
|
||||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
|
||||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
|
||||||
// is deleted. Subscribe returns the error value returned from the last call to
|
|
||||||
// fn, which may be nil when ok == false.
|
|
||||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
|
||||||
const failedSubscribeSleep = time.Second * 5
|
|
||||||
|
|
||||||
// First check to see if the metadata value exists at all.
|
|
||||||
val, lastETag, err := getETag(subscribeClient, suffix)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fn(val, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ok := true
|
|
||||||
if strings.ContainsRune(suffix, '?') {
|
|
||||||
suffix += "&wait_for_change=true&last_etag="
|
|
||||||
} else {
|
|
||||||
suffix += "?wait_for_change=true&last_etag="
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
|
|
||||||
if err != nil {
|
|
||||||
if _, deleted := err.(NotDefinedError); !deleted {
|
|
||||||
time.Sleep(failedSubscribeSleep)
|
|
||||||
continue // Retry on other errors.
|
|
||||||
}
|
|
||||||
ok = false
|
|
||||||
}
|
|
||||||
lastETag = etag
|
|
||||||
|
|
||||||
if err := fn(val, ok); err != nil || !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
|
|
||||||
func ProjectID() (string, error) { return projID.get() }
|
|
||||||
|
|
||||||
// NumericProjectID returns the current instance's numeric project ID.
|
|
||||||
func NumericProjectID() (string, error) { return projNum.get() }
|
|
||||||
|
|
||||||
// InternalIP returns the instance's primary internal IP address.
|
|
||||||
func InternalIP() (string, error) {
|
|
||||||
return getTrimmed("instance/network-interfaces/0/ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
|
|
||||||
func ExternalIP() (string, error) {
|
|
||||||
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname returns the instance's hostname. This will be of the form
|
|
||||||
// "<instanceID>.c.<projID>.internal".
|
|
||||||
func Hostname() (string, error) {
|
|
||||||
return getTrimmed("instance/hostname")
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func InstanceTags() ([]string, error) {
|
|
||||||
var s []string
|
|
||||||
j, err := Get("instance/tags")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceID returns the current VM's numeric instance ID.
|
|
||||||
func InstanceID() (string, error) {
|
|
||||||
return instID.get()
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func InstanceName() (string, error) {
|
|
||||||
host, err := Hostname()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return strings.Split(host, ".")[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func Zone() (string, error) {
|
|
||||||
zone, err := getTrimmed("instance/zone")
|
|
||||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributes returns the list of user-defined attributes,
|
|
||||||
// assigned when initially creating a GCE VM instance. The value of an
|
|
||||||
// attribute can be obtained with InstanceAttributeValue.
|
|
||||||
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
|
||||||
|
|
||||||
// ProjectAttributes returns the list of user-defined attributes
|
|
||||||
// applying to the project as a whole, not just this VM. The value of
|
|
||||||
// an attribute can be obtained with ProjectAttributeValue.
|
|
||||||
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
|
||||||
|
|
||||||
func lines(suffix string) ([]string, error) {
|
|
||||||
j, err := Get(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
|
||||||
for i := range s {
|
|
||||||
s[i] = strings.TrimSpace(s[i])
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributeValue returns the value of the provided VM
|
|
||||||
// instance attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func InstanceAttributeValue(attr string) (string, error) {
|
|
||||||
return Get("instance/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectAttributeValue returns the value of the provided
|
|
||||||
// project attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func ProjectAttributeValue(attr string) (string, error) {
|
|
||||||
return Get("project/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scopes returns the service account scopes for the given account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func Scopes(serviceAccount string) ([]string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
|
||||||
}
|
|
||||||
|
|
||||||
func strsContains(ss []string, s string) bool {
|
|
||||||
for _, v := range ss {
|
|
||||||
if v == s {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
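
// Example (editor's illustration, not part of the original file): a minimal
// sketch of consuming this package from a program running on GCE. The import
// path "cloud.google.com/go/compute/metadata" is an assumption based on the
// package contents.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"cloud.google.com/go/compute/metadata"
//	)
//
//	func main() {
//		if !metadata.OnGCE() {
//			log.Fatal("not on GCE; metadata server unreachable")
//		}
//		proj, err := metadata.ProjectID() // cached after the first fetch
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println("project:", proj)
//	}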
@ -1,256 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package iam supports the resource-specific operations of Google Cloud
// IAM (Identity and Access Management) for the Google Cloud Libraries.
// See https://cloud.google.com/iam for more about IAM.
//
// Users of the Google Cloud Libraries will typically not use this package
// directly. Instead they will begin with some resource that supports IAM, like
// a pubsub topic, and call its IAM method to get a Handle for that resource.
package iam

import (
	"golang.org/x/net/context"
	pb "google.golang.org/genproto/googleapis/iam/v1"
	"google.golang.org/grpc"
)

// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
	Get(ctx context.Context, resource string) (*pb.Policy, error)
	Set(ctx context.Context, resource string, p *pb.Policy) error
	Test(ctx context.Context, resource string, perms []string) ([]string, error)
}

// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
	c pb.IAMPolicyClient
}

func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
	proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
	if err != nil {
		return nil, err
	}
	return proto, nil
}

func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
	_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
		Resource: resource,
		Policy:   p,
	})
	return err
}

func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
		Resource:    resource,
		Permissions: perms,
	})
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}

// A Handle provides IAM operations for a resource.
type Handle struct {
	c        client
	resource string
}

// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
	return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
}

// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
	return &Handle{
		c:        c,
		resource: resource,
	}
}

// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
	proto, err := h.c.Get(ctx, h.resource)
	if err != nil {
		return nil, err
	}
	return &Policy{InternalProto: proto}, nil
}

// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
	return h.c.Set(ctx, h.resource, policy.InternalProto)
}

// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	return h.c.Test(ctx, h.resource, permissions)
}

// A RoleName is a name representing a collection of permissions.
type RoleName string

// Common role names.
const (
	Owner  RoleName = "roles/owner"
	Editor RoleName = "roles/editor"
	Viewer RoleName = "roles/viewer"
)

const (
	// AllUsers is a special member that denotes all users, even unauthenticated ones.
	AllUsers = "allUsers"

	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
	AllAuthenticatedUsers = "allAuthenticatedUsers"
)

// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
	// TODO(jba): when type aliases are available, put Policy into an internal package
	// and provide an exported alias here.

	// This field is exported for use by the Google Cloud Libraries only.
	// It may become unexported in a future release.
	InternalProto *pb.Policy
}

// Members returns the list of members with the supplied role.
// The return value should not be modified. Use Add and Remove
// to modify the members of a role.
func (p *Policy) Members(r RoleName) []string {
	b := p.binding(r)
	if b == nil {
		return nil
	}
	return b.Members
}

// HasRole reports whether member has role r.
func (p *Policy) HasRole(member string, r RoleName) bool {
	return memberIndex(member, p.binding(r)) >= 0
}

// Add adds member member to role r if it is not already present.
// A new binding is created if there is no binding for the role.
func (p *Policy) Add(member string, r RoleName) {
	b := p.binding(r)
	if b == nil {
		if p.InternalProto == nil {
			p.InternalProto = &pb.Policy{}
		}
		p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
			Role:    string(r),
			Members: []string{member},
		})
		return
	}
	if memberIndex(member, b) < 0 {
		b.Members = append(b.Members, member)
		return
	}
}

// Remove removes member from role r if it is present.
func (p *Policy) Remove(member string, r RoleName) {
	bi := p.bindingIndex(r)
	if bi < 0 {
		return
	}
	bindings := p.InternalProto.Bindings
	b := bindings[bi]
	mi := memberIndex(member, b)
	if mi < 0 {
		return
	}
	// Order doesn't matter for bindings or members, so to remove, move the last item
	// into the removed spot and shrink the slice.
	if len(b.Members) == 1 {
		// Remove binding.
		last := len(bindings) - 1
		bindings[bi] = bindings[last]
		bindings[last] = nil
		p.InternalProto.Bindings = bindings[:last]
		return
	}
	// Remove member.
	// TODO(jba): worry about multiple copies of m?
	last := len(b.Members) - 1
	b.Members[mi] = b.Members[last]
	b.Members[last] = ""
	b.Members = b.Members[:last]
}

// Roles returns the names of all the roles that appear in the Policy.
func (p *Policy) Roles() []RoleName {
	if p.InternalProto == nil {
		return nil
	}
	var rns []RoleName
	for _, b := range p.InternalProto.Bindings {
		rns = append(rns, RoleName(b.Role))
	}
	return rns
}

// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
	i := p.bindingIndex(r)
	if i < 0 {
		return nil
	}
	return p.InternalProto.Bindings[i]
}

func (p *Policy) bindingIndex(r RoleName) int {
	if p.InternalProto == nil {
		return -1
	}
	for i, b := range p.InternalProto.Bindings {
		if b.Role == string(r) {
			return i
		}
	}
	return -1
}

// memberIndex returns the index of m in b's Members, or -1 if not found.
func memberIndex(m string, b *pb.Binding) int {
	if b == nil {
		return -1
	}
	for i, mm := range b.Members {
		if mm == m {
			return i
		}
	}
	return -1
}
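
// Example (editor's illustration, not part of the original file): Policy's
// accessors are pure in-memory operations on the zero value, so local policy
// editing needs no RPCs. The import path "cloud.google.com/go/iam" is an
// assumption.
//
//	var p iam.Policy
//	p.Add("user:alice@example.com", iam.Editor)
//	p.Add("group:eng@example.com", iam.Viewer)
//	p.Remove("user:alice@example.com", iam.Editor) // binding drops to zero members, so it is removed
//	for _, r := range p.Roles() {
//		fmt.Println(r, p.Members(r))
//	}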
@ -1,54 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"fmt"

	"google.golang.org/api/googleapi"
	"google.golang.org/grpc/status"
)

// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
//   - "google.golang.org/grpc/status".Status
//   - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
//   fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
	if err == nil {
		panic("Annotate called with nil")
	}
	if s, ok := status.FromError(err); ok {
		p := s.Proto()
		p.Message = msg + ": " + p.Message
		return status.ErrorProto(p)
	}
	if g, ok := err.(*googleapi.Error); ok {
		g.Message = msg + ": " + g.Message
		return g
	}
	return fmt.Errorf("%s: %v", msg, err)
}

// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
	return Annotate(err, fmt.Sprintf(format, args...))
}
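
// Example (editor's illustration, not part of the original file): Annotate
// keeps a gRPC status code intact while prefixing the message, and falls back
// to fmt.Errorf for plain errors. Uses the well-known status/codes packages.
//
//	err := status.Error(codes.NotFound, "no such object")
//	err = internal.Annotate(err, "fetching attrs")
//	// status.Code(err) is still codes.NotFound;
//	// the message is now "fetching attrs: no such object".
//
//	plain := errors.New("boom")
//	fmt.Println(internal.Annotate(plain, "step 1")) // "step 1: boom"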
@ -1,108 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package optional provides versions of primitive types that can
// be nil. These are useful in methods that update some of an API object's
// fields.
package optional

import (
	"fmt"
	"strings"
	"time"
)

type (
	// Bool is either a bool or nil.
	Bool interface{}

	// String is either a string or nil.
	String interface{}

	// Int is either an int or nil.
	Int interface{}

	// Uint is either a uint or nil.
	Uint interface{}

	// Float64 is either a float64 or nil.
	Float64 interface{}

	// Duration is either a time.Duration or nil.
	Duration interface{}
)

// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
	x, ok := v.(bool)
	if !ok {
		doPanic("Bool", v)
	}
	return x
}

// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
	x, ok := v.(string)
	if !ok {
		doPanic("String", v)
	}
	return x
}

// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
	x, ok := v.(int)
	if !ok {
		doPanic("Int", v)
	}
	return x
}

// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
	x, ok := v.(uint)
	if !ok {
		doPanic("Uint", v)
	}
	return x
}

// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
	x, ok := v.(float64)
	if !ok {
		doPanic("Float64", v)
	}
	return x
}

// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
	x, ok := v.(time.Duration)
	if !ok {
		doPanic("Duration", v)
	}
	return x
}

func doPanic(capType string, v interface{}) {
	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
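
// Example (editor's illustration, not part of the original file): a nil
// optional field means "leave unchanged", while a set field carries the new
// value. UpdateAttrs and setEnabled below are hypothetical names.
//
//	type UpdateAttrs struct {
//		Enabled optional.Bool // nil means "do not change"
//	}
//
//	func apply(u UpdateAttrs) {
//		if u.Enabled != nil {
//			setEnabled(optional.ToBool(u.Enabled)) // panics if not a bool
//		}
//	}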
@ -1,55 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"time"

	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
)

// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Err() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	return retry(ctx, bo, f, gax.Sleep)
}

func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
	sleep func(context.Context, time.Duration) error) error {
	var lastErr error
	for {
		stop, err := f()
		if stop {
			return err
		}
		// Remember the last "real" error from f.
		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
			lastErr = err
		}
		p := bo.Pause()
		if cerr := sleep(ctx, p); cerr != nil {
			if lastErr != nil {
				return Annotatef(lastErr, "retry failed with %v; last error", cerr)
			}
			return cerr
		}
	}
}
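
// Example (editor's illustration, not part of the original file): a sketch of
// driving Retry with a gax.Backoff. doRPC and isRetryable are hypothetical;
// the Backoff fields shown are the standard gax-go ones.
//
//	bo := gax.Backoff{
//		Initial:    100 * time.Millisecond,
//		Max:        5 * time.Second,
//		Multiplier: 2,
//	}
//	err := internal.Retry(ctx, bo, func() (bool, error) {
//		err := doRPC()
//		if err != nil && isRetryable(err) {
//			return false, err // keep trying after a backoff pause
//		}
//		return true, err // stop, returning err (possibly nil)
//	})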
@ -1,6 +0,0 @@
#!/bin/bash

# Rewrite the Repo date constant in the file named by $GOFILE
# (set by "go generate") to today's date.
today=$(date +%Y%m%d)

sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' "$GOFILE"
@ -1,71 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate ./update_version.sh

// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version

import (
	"runtime"
	"strings"
	"unicode"
)

// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20170928"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}

var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:]
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return ""
}

func notSemverRune(r rune) bool {
	return strings.IndexRune("0123456789.", r) < 0
}
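
// Example (editor's illustration, not part of the original file): goVer
// normalizes runtime.Version() strings into a semver-like form by padding
// missing components and turning a trailing suffix into a prerelease tag:
//
//	goVer("go1.8")     // "1.8.0"
//	goVer("go1.10.2")  // "1.10.2"
//	goVer("go1.9rc2")  // "1.9.0-rc2"
//	goVer("devel +a1b2c3 linux/amd64") // "a1b2c3"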
@ -1,235 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"net/http"
	"reflect"

	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	raw "google.golang.org/api/storage/v1"
)

// ACLRole is the level of access to grant.
type ACLRole string

const (
	RoleOwner  ACLRole = "OWNER"
	RoleReader ACLRole = "READER"
	RoleWriter ACLRole = "WRITER"
)

// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

const (
	AllUsers              ACLEntity = "allUsers"
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)

// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
type ACLRule struct {
	Entity ACLEntity
	Role   ACLRole
}

// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
	c           *Client
	bucket      string
	object      string
	isDefault   bool
	userProject string // for requester-pays buckets
}

// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
	if a.object != "" {
		return a.objectDelete(ctx, entity)
	}
	if a.isDefault {
		return a.bucketDefaultDelete(ctx, entity)
	}
	return a.bucketDelete(ctx, entity)
}

// Set sets the permission level for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
	if a.object != "" {
		return a.objectSet(ctx, entity, role, false)
	}
	if a.isDefault {
		return a.objectSet(ctx, entity, role, true)
	}
	return a.bucketSet(ctx, entity, role)
}

// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
	if a.object != "" {
		return a.objectList(ctx)
	}
	if a.isDefault {
		return a.bucketDefaultList(ctx)
	}
	return a.bucketList(ctx)
}

func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toACLRules(acls.Items), nil
}

func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
}

func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.BucketAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	r := make([]ACLRule, len(acls.Items))
	for i, v := range acls.Items {
		r[i].Entity = ACLEntity(v.Entity)
		r[i].Role = ACLRole(v.Role)
	}
	return r, nil
}

func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
	acl := &raw.BucketAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
		a.configureCall(req, ctx)
		_, err := req.Do()
		return err
	})
	if err != nil {
		return err
	}
	return nil
}

func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
	if err != nil {
		return err
	}
	return nil
}

func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
		a.configureCall(req, ctx)
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toACLRules(acls.Items), nil
}

func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
	type setRequest interface {
		Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
		Header() http.Header
	}

	acl := &raw.ObjectAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	var req setRequest
	if isBucketDefault {
		req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
	} else {
		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
	}
	a.configureCall(req, ctx)
	return runWithRetry(ctx, func() error {
		_, err := req.Do()
		return err
	})
}

func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
}

func (a *ACLHandle) configureCall(call interface {
	Header() http.Header
}, ctx context.Context) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
	}
	setClientHeader(call.Header())
}

func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
	r := make([]ACLRule, 0, len(items))
	for _, item := range items {
		r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
	}
	return r
}
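
// Example (editor's illustration, not part of the original file): granting and
// listing object ACLs through a handle. Client construction is elided, and
// ObjectHandle.ACL() is assumed to exist elsewhere in this package.
//
//	obj := client.Bucket("my-bucket").Object("notes.txt")
//	if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
//		// handle error
//	}
//	rules, err := obj.ACL().List(ctx) // []storage.ACLRule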
@ -1,767 +0,0 @@
|
||||||
// Copyright 2014 Google Inc. LiveAndArchived Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal/optional"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/api/iterator"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BucketHandle provides operations on a Google Cloud Storage bucket.
|
|
||||||
// Use Client.Bucket to get a handle.
|
|
||||||
type BucketHandle struct {
|
|
||||||
c *Client
|
|
||||||
name string
|
|
||||||
acl ACLHandle
|
|
||||||
defaultObjectACL ACLHandle
|
|
||||||
conds *BucketConditions
|
|
||||||
userProject string // project for Requester Pays buckets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bucket returns a BucketHandle, which provides operations on the named bucket.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
//
|
|
||||||
// The supplied name must contain only lowercase letters, numbers, dashes,
|
|
||||||
// underscores, and dots. The full specification for valid bucket names can be
|
|
||||||
// found at:
|
|
||||||
// https://cloud.google.com/storage/docs/bucket-naming
|
|
||||||
func (c *Client) Bucket(name string) *BucketHandle {
|
|
||||||
return &BucketHandle{
|
|
||||||
c: c,
|
|
||||||
name: name,
|
|
||||||
acl: ACLHandle{
|
|
||||||
c: c,
|
|
||||||
bucket: name,
|
|
||||||
},
|
|
||||||
defaultObjectACL: ACLHandle{
|
|
||||||
c: c,
|
|
||||||
bucket: name,
|
|
||||||
isDefault: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create creates the Bucket in the project.
|
|
||||||
// If attrs is nil the API defaults will be used.
|
|
||||||
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
|
|
||||||
var bkt *raw.Bucket
|
|
||||||
if attrs != nil {
|
|
||||||
bkt = attrs.toRawBucket()
|
|
||||||
} else {
|
|
||||||
bkt = &raw.Bucket{}
|
|
||||||
}
|
|
||||||
bkt.Name = b.name
|
|
||||||
// If there is lifecycle information but no location, explicitly set
|
|
||||||
// the location. This is a GCS quirk/bug.
|
|
||||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
|
||||||
bkt.Location = "US"
|
|
||||||
}
|
|
||||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes the Bucket.
|
|
||||||
func (b *BucketHandle) Delete(ctx context.Context) error {
|
|
||||||
req, err := b.newDeleteCall()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
|
|
||||||
req := b.c.raw.Buckets.Delete(b.name)
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b.userProject != "" {
|
|
||||||
req.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
|
|
||||||
// This controls who can list, create or overwrite the objects in a bucket.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
func (b *BucketHandle) ACL() *ACLHandle {
|
|
||||||
return &b.acl
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
|
|
||||||
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
|
|
||||||
return &b.defaultObjectACL
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object returns an ObjectHandle, which provides operations on the named object.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
//
|
|
||||||
// name must consist entirely of valid UTF-8-encoded runes. The full specification
|
|
||||||
// for valid object names can be found at:
|
|
||||||
// https://cloud.google.com/storage/docs/bucket-naming
|
|
||||||
func (b *BucketHandle) Object(name string) *ObjectHandle {
|
|
||||||
return &ObjectHandle{
|
|
||||||
c: b.c,
|
|
||||||
bucket: b.name,
|
|
||||||
object: name,
|
|
||||||
acl: ACLHandle{
|
|
||||||
c: b.c,
|
|
||||||
bucket: b.name,
|
|
||||||
object: name,
|
|
||||||
userProject: b.userProject,
|
|
||||||
},
|
|
||||||
gen: -1,
|
|
||||||
userProject: b.userProject,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attrs returns the metadata for the bucket.
|
|
||||||
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
|
|
||||||
req, err := b.newGetCall()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var resp *raw.Bucket
|
|
||||||
err = runWithRetry(ctx, func() error {
|
|
||||||
resp, err = req.Context(ctx).Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
|
|
||||||
return nil, ErrBucketNotExist
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return newBucket(resp), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
|
|
||||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b.userProject != "" {
|
|
||||||
req.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
|
|
||||||
req, err := b.newPatchCall(&uattrs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// TODO(jba): retry iff metagen is set?
|
|
||||||
rb, err := req.Context(ctx).Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return newBucket(rb), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
|
|
||||||
rb := uattrs.toRawBucket()
|
|
||||||
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b.userProject != "" {
|
|
||||||
req.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
|
|
||||||
// Read-only fields are ignored by BucketHandle.Create.
|
|
||||||
type BucketAttrs struct {
|
|
||||||
// Name is the name of the bucket.
|
|
||||||
// This field is read-only.
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// ACL is the list of access control rules on the bucket.
|
|
||||||
ACL []ACLRule
|
|
||||||
|
|
||||||
// DefaultObjectACL is the list of access controls to
|
|
||||||
// apply to new objects when no object ACL is provided.
|
|
||||||
DefaultObjectACL []ACLRule
|
|
||||||
|
|
||||||
// Location is the location of the bucket. It defaults to "US".
|
|
||||||
Location string
|
|
||||||
|
|
||||||
// MetaGeneration is the metadata generation of the bucket.
|
|
||||||
// This field is read-only.
|
|
||||||
MetaGeneration int64
|
|
||||||
|
|
||||||
// StorageClass is the default storage class of the bucket. This defines
|
|
||||||
// how objects in the bucket are stored and determines the SLA
|
|
||||||
// and the cost of storage. Typical values are "MULTI_REGIONAL",
|
|
||||||
// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
|
|
||||||
// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
|
|
||||||
// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
|
|
||||||
// the bucket's location settings.
|
|
||||||
StorageClass string
|
|
||||||
|
|
||||||
// Created is the creation time of the bucket.
|
|
||||||
// This field is read-only.
|
|
||||||
Created time.Time
|
|
||||||
|
|
||||||
// VersioningEnabled reports whether this bucket has versioning enabled.
|
|
||||||
VersioningEnabled bool
|
|
||||||
|
|
||||||
// Labels are the bucket's labels.
|
|
||||||
Labels map[string]string
|
|
||||||
|
|
||||||
// RequesterPays reports whether the bucket is a Requester Pays bucket.
|
|
||||||
// Clients performing operations on Requester Pays buckets must provide
|
|
||||||
// a user project (see BucketHandle.UserProject), which will be billed
|
|
||||||
// for the operations.
|
|
||||||
RequesterPays bool
|
|
||||||
// Lifecycle is the lifecycle configuration for objects in the bucket.
|
|
||||||
Lifecycle Lifecycle
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lifecycle is the lifecycle configuration for objects in the bucket.
|
|
||||||
type Lifecycle struct {
|
|
||||||
Rules []LifecycleRule
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
|
|
||||||
rfc3339Date = "2006-01-02"
|
|
||||||
|
|
||||||
// DeleteAction is a lifecycle action that deletes a live and/or archived
|
|
||||||
// objects. Takes precendence over SetStorageClass actions.
|
|
||||||
DeleteAction = "Delete"
|
|
||||||
|
|
||||||
// SetStorageClassAction changes the storage class of live and/or archived
|
|
||||||
// objects.
|
|
||||||
SetStorageClassAction = "SetStorageClass"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LifecycleRule is a lifecycle configuration rule.
|
|
||||||
//
|
|
||||||
// When all the configured conditions are met by an object in the bucket, the
|
|
||||||
// configured action will automatically be taken on that object.
|
|
||||||
type LifecycleRule struct {
|
|
||||||
// Action is the action to take when all of the associated conditions are
|
|
||||||
// met.
|
|
||||||
Action LifecycleAction
|
|
||||||
|
|
||||||
// Condition is the set of conditions that must be met for the associated
|
|
||||||
// action to be taken.
|
|
||||||
Condition LifecycleCondition
|
|
||||||
}
|
|
||||||
|
|
||||||
// LifecycleAction is a lifecycle configuration action.
|
|
||||||
type LifecycleAction struct {
|
|
||||||
// Type is the type of action to take on matching objects.
|
|
||||||
//
|
|
||||||
// Acceptable values are "Delete" to delete matching objects and
|
|
||||||
// "SetStorageClass" to set the storage class defined in StorageClass on
|
|
||||||
// matching objects.
|
|
||||||
Type string
|
|
||||||
|
|
||||||
// StorageClass is the storage class to set on matching objects if the Action
|
|
||||||
// is "SetStorageClass".
|
|
||||||
StorageClass string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Liveness specifies whether the object is live or not.
|
|
||||||
type Liveness int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LiveAndArchived includes both live and archived objects.
|
|
||||||
LiveAndArchived Liveness = iota
|
|
||||||
// Live specifies that the object is still live.
|
|
||||||
Live
|
|
||||||
// Archived specifies that the object is archived.
|
|
||||||
Archived
|
|
||||||
)
|
|
||||||
|
|
||||||
// LifecycleCondition is a set of conditions used to match objects and take an
|
|
||||||
// action automatically.
|
|
||||||
//
|
|
||||||
// All configured conditions must be met for the associated action to be taken.
|
|
||||||
type LifecycleCondition struct {
|
|
||||||
// AgeInDays is the age of the object in days.
|
|
||||||
AgeInDays int64
|
|
||||||
|
|
||||||
// CreatedBefore is the time the object was created.
|
|
||||||
//
|
|
||||||
// This condition is satisfied when an object is created before midnight of
|
|
||||||
// the specified date in UTC.
|
|
||||||
CreatedBefore time.Time
|
|
||||||
|
|
||||||
// Liveness specifies the object's liveness. Relevant only for versioned objects
|
|
||||||
Liveness Liveness
|
|
||||||
|
|
||||||
// MatchesStorageClasses is the condition matching the object's storage
|
|
||||||
// class.
|
|
||||||
//
|
|
||||||
// Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE",
|
|
||||||
// "STANDARD", and "DURABLE_REDUCED_AVAILABILITY".
|
|
||||||
MatchesStorageClasses []string
|
|
||||||
|
|
||||||
// NumNewerVersions is the condition matching objects with a number of newer versions.
|
|
||||||
//
|
|
||||||
// If the value is N, this condition is satisfied when there are at least N
|
|
||||||
// versions (including the live version) newer than this version of the
|
|
||||||
// object.
|
|
||||||
NumNewerVersions int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBucket(b *raw.Bucket) *BucketAttrs {
|
|
||||||
if b == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
bucket := &BucketAttrs{
|
|
||||||
Name: b.Name,
|
|
||||||
Location: b.Location,
|
|
||||||
MetaGeneration: b.Metageneration,
|
|
||||||
StorageClass: b.StorageClass,
|
|
||||||
Created: convertTime(b.TimeCreated),
|
|
||||||
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
|
|
||||||
Labels: b.Labels,
|
|
||||||
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
|
|
||||||
Lifecycle: toLifecycle(b.Lifecycle),
|
|
||||||
}
|
|
||||||
acl := make([]ACLRule, len(b.Acl))
|
|
||||||
for i, rule := range b.Acl {
|
|
||||||
acl[i] = ACLRule{
|
|
||||||
Entity: ACLEntity(rule.Entity),
|
|
||||||
Role: ACLRole(rule.Role),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bucket.ACL = acl
|
|
||||||
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
|
|
||||||
for i, rule := range b.DefaultObjectAcl {
|
|
||||||
objACL[i] = ACLRule{
|
|
||||||
Entity: ACLEntity(rule.Entity),
|
|
||||||
Role: ACLRole(rule.Role),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bucket.DefaultObjectACL = objACL
|
|
||||||
return bucket
|
|
||||||
}

// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
	var acl []*raw.BucketAccessControl
	if len(b.ACL) > 0 {
		acl = make([]*raw.BucketAccessControl, len(b.ACL))
		for i, rule := range b.ACL {
			acl[i] = &raw.BucketAccessControl{
				Entity: string(rule.Entity),
				Role:   string(rule.Role),
			}
		}
	}
	dACL := toRawObjectACL(b.DefaultObjectACL)
	// Copy label map.
	var labels map[string]string
	if len(b.Labels) > 0 {
		labels = make(map[string]string, len(b.Labels))
		for k, v := range b.Labels {
			labels[k] = v
		}
	}
	// Ignore VersioningEnabled if it is false. This is OK because
	// we only call this method when creating a bucket, and by default
	// new buckets have versioning off.
	var v *raw.BucketVersioning
	if b.VersioningEnabled {
		v = &raw.BucketVersioning{Enabled: true}
	}
	var bb *raw.BucketBilling
	if b.RequesterPays {
		bb = &raw.BucketBilling{RequesterPays: true}
	}
	return &raw.Bucket{
		Name:             b.Name,
		DefaultObjectAcl: dACL,
		Location:         b.Location,
		StorageClass:     b.StorageClass,
		Acl:              acl,
		Versioning:       v,
		Labels:           labels,
		Billing:          bb,
		Lifecycle:        toRawLifecycle(b.Lifecycle),
	}
}

// BucketAttrsToUpdate contains the subset of bucket attributes that can be
// changed in a call to Bucket.Update.
type BucketAttrsToUpdate struct {
	// VersioningEnabled, if set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool

	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool

	setLabels    map[string]string
	deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified when ua is used
// in a call to Bucket.Update.
func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
	if ua.setLabels == nil {
		ua.setLabels = map[string]string{}
	}
	ua.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted when ua is used in a
// call to Bucket.Update.
func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
	if ua.deleteLabels == nil {
		ua.deleteLabels = map[string]bool{}
	}
	ua.deleteLabels[name] = true
}
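
// Illustrative usage (not part of the original file; the bucket handle and
// label names are hypothetical):
//
//	var ua BucketAttrsToUpdate
//	ua.SetLabel("team", "storage")
//	ua.DeleteLabel("deprecated-label")
//	attrs, err := client.Bucket("my-bucket").Update(ctx, ua)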

func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
	rb := &raw.Bucket{}
	if ua.VersioningEnabled != nil {
		rb.Versioning = &raw.BucketVersioning{
			Enabled:         optional.ToBool(ua.VersioningEnabled),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if ua.RequesterPays != nil {
		rb.Billing = &raw.BucketBilling{
			RequesterPays:   optional.ToBool(ua.RequesterPays),
			ForceSendFields: []string{"RequesterPays"},
		}
	}
	if ua.setLabels != nil || ua.deleteLabels != nil {
		rb.Labels = map[string]string{}
		for k, v := range ua.setLabels {
			rb.Labels[k] = v
		}
		if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
			rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
		}
		for l := range ua.deleteLabels {
			rb.NullFields = append(rb.NullFields, "Labels."+l)
		}
	}
	return rb
}
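
// Note (added for clarity, not in the original file): ForceSendFields makes
// the google-api-go-client JSON encoder send a field even when it holds its
// zero value, and NullFields makes it send an explicit JSON null. Here that
// distinguishes "set this flag or map to false/empty" from "leave it
// unchanged", and "delete this label" from "ignore it".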

// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
	b2 := *b
	b2.conds = &conds
	return &b2
}

// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints.
type BucketConditions struct {
	// MetagenerationMatch specifies that the bucket must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the bucket must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}
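
// Illustrative read-modify-write example (not part of the original file;
// bkt is a hypothetical BucketHandle): guard an Update against concurrent
// metadata changes by requiring that the bucket's metageneration has not
// moved since it was read.
//
//	attrs, err := bkt.Attrs(ctx)
//	if err != nil {
//		return err
//	}
//	var ua BucketAttrsToUpdate
//	ua.SetLabel("owner", "team-a")
//	_, err = bkt.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).Update(ctx, ua)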

func (c *BucketConditions) validate(method string) error {
	if *c == (BucketConditions{}) {
		return fmt.Errorf("storage: %s: empty conditions", method)
	}
	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}

// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. Calls with a user project will be billed to that
// project rather than to the bucket's owning project.
//
// A user project is required for all operations on Requester Pays buckets.
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
	b2 := *b
	b2.userProject = projectID
	b2.acl.userProject = projectID
	b2.defaultObjectACL.userProject = projectID
	return &b2
}
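
// Illustrative usage (not part of the original file; bucket, object, and
// project names are hypothetical): read from a Requester Pays bucket, billing
// the caller's own project.
//
//	bkt := client.Bucket("requester-pays-bucket").UserProject("my-billing-project")
//	r, err := bkt.Object("data.csv").NewReader(ctx)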

// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
	if conds == nil {
		return nil
	}
	if err := conds.validate(method); err != nil {
		return err
	}
	cval := reflect.ValueOf(call)
	switch {
	case conds.MetagenerationMatch != 0:
		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
		}
	case conds.MetagenerationNotMatch != 0:
		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
		}
	}
	return nil
}
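
// For context (added here, not in the original file): setConditionField is
// defined elsewhere in this package. A minimal sketch of the reflection it is
// assumed to perform: look up the named setter method on the call value and
// invoke it, reporting whether the method exists.
//
//	func setConditionField(call reflect.Value, name string, value interface{}) bool {
//		m := call.MethodByName(name)
//		if !m.IsValid() {
//			return false
//		}
//		m.Call([]reflect.Value{reflect.ValueOf(value)})
//		return true
//	}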

func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
	var rl raw.BucketLifecycle
	if len(l.Rules) == 0 {
		return nil
	}
	for _, r := range l.Rules {
		rr := &raw.BucketLifecycleRule{
			Action: &raw.BucketLifecycleRuleAction{
				Type:         r.Action.Type,
				StorageClass: r.Action.StorageClass,
			},
			Condition: &raw.BucketLifecycleRuleCondition{
				Age:                 r.Condition.AgeInDays,
				MatchesStorageClass: r.Condition.MatchesStorageClasses,
				NumNewerVersions:    r.Condition.NumNewerVersions,
			},
		}

		switch r.Condition.Liveness {
		case LiveAndArchived:
			rr.Condition.IsLive = nil
		case Live:
			rr.Condition.IsLive = googleapi.Bool(true)
		case Archived:
			rr.Condition.IsLive = googleapi.Bool(false)
		}

		if !r.Condition.CreatedBefore.IsZero() {
			rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
		}
		rl.Rule = append(rl.Rule, rr)
	}
	return &rl
}
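
// Note (added for clarity, not in the original file): rfc3339Date is a
// date-only layout constant defined elsewhere in this package, presumably
// "2006-01-02", so CreatedBefore round-trips as a calendar date with no time
// component.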

func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
	var l Lifecycle
	if rl == nil {
		return l
	}
	for _, rr := range rl.Rule {
		r := LifecycleRule{
			Action: LifecycleAction{
				Type:         rr.Action.Type,
				StorageClass: rr.Action.StorageClass,
			},
			Condition: LifecycleCondition{
				AgeInDays:             rr.Condition.Age,
				MatchesStorageClasses: rr.Condition.MatchesStorageClass,
				NumNewerVersions:      rr.Condition.NumNewerVersions,
			},
		}

		switch {
		case rr.Condition.IsLive == nil:
			r.Condition.Liveness = LiveAndArchived
		case *rr.Condition.IsLive:
			r.Condition.Liveness = Live
		default:
			r.Condition.Liveness = Archived
		}

		if rr.Condition.CreatedBefore != "" {
			// A malformed date from the server is ignored, leaving
			// CreatedBefore as the zero time.
			r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
		}
		l.Rules = append(l.Rules, r)
	}
	return l
}

// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
	it := &ObjectIterator{
		ctx:    ctx,
		bucket: b,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	if q != nil {
		it.query = *q
	}
	return it
}

// An ObjectIterator is an iterator over ObjectAttrs.
type ObjectIterator struct {
	ctx      context.Context
	bucket   *BucketHandle
	query    Query
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []*ObjectAttrs
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
// have a non-empty Prefix field, and a zero value for all other fields. These
// represent prefixes.
func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}
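
// Illustrative iteration loop (not part of the original file; the bucket name
// is hypothetical). With a "/" delimiter, entries whose Prefix field is set
// are synthetic directory-like prefixes rather than objects.
//
//	it := client.Bucket("my-bucket").Objects(ctx, &Query{Prefix: "logs/", Delimiter: "/"})
//	for {
//		attrs, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		if attrs.Prefix != "" {
//			fmt.Println("prefix:", attrs.Prefix)
//			continue
//		}
//		fmt.Println("object:", attrs.Name)
//	}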

func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
	req := it.bucket.c.raw.Objects.List(it.bucket.name)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Delimiter(it.query.Delimiter)
	req.Prefix(it.query.Prefix)
	req.Versions(it.query.Versions)
	req.PageToken(pageToken)
	if it.bucket.userProject != "" {
		req.UserProject(it.bucket.userProject)
	}
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Objects
	var err error
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
			err = ErrBucketNotExist
		}
		return "", err
	}
	for _, item := range resp.Items {
		it.items = append(it.items, newObject(item))
	}
	for _, prefix := range resp.Prefixes {
		it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
	}
	return resp.NextPageToken, nil
}

// TODO(jbd): Add storage.buckets.update.

// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
// are returned.
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
	it := &BucketIterator{
		ctx:       ctx,
		client:    c,
		projectID: projectID,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.buckets) },
		func() interface{} { b := it.buckets; it.buckets = nil; return b })
	return it
}

// A BucketIterator is an iterator over BucketAttrs.
type BucketIterator struct {
	// Prefix restricts the iterator to buckets whose names begin with it.
	Prefix string

	ctx       context.Context
	client    *Client
	projectID string
	buckets   []*BucketAttrs
	pageInfo  *iterator.PageInfo
	nextFunc  func() error
}

// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
func (it *BucketIterator) Next() (*BucketAttrs, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	b := it.buckets[0]
	it.buckets = it.buckets[1:]
	return b, nil
}
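
// Illustrative usage (not part of the original file; the project ID and
// prefix are hypothetical):
//
//	it := client.Buckets(ctx, "my-project")
//	it.Prefix = "prod-"
//	for {
//		battrs, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(battrs.Name)
//	}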

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
	req := it.client.raw.Buckets.List(it.projectID)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Prefix(it.Prefix)
	req.PageToken(pageToken)
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Buckets
	var err error
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		return "", err
	}
	for _, item := range resp.Items {
		it.buckets = append(it.buckets, newBucket(item))
	}
	return resp.NextPageToken, nil
}