Merge branch 'composer'

Conflicts:
	cmd/tusd/main.go
Marius 2016-03-11 22:05:01 +01:00
commit 8e6a6fbf5a
20 changed files with 602 additions and 206 deletions


@ -1,7 +1,10 @@
# tusd
[![Build Status](https://travis-ci.org/tus/tusd.svg?branch=master)](https://travis-ci.org/tus/tusd)
[![Build status](https://ci.appveyor.com/api/projects/status/2y6fa4nyknoxmyc8/branch/master?svg=true)](https://ci.appveyor.com/project/Acconut/tusd/branch/master)
> **tus** is a protocol based on HTTP for *resumable file uploads*. Resumable
> means that an upload can be interrupted at any moment and can be resumed without
> re-uploading the previous data again. An interruption may happen willingly, if
> the user wants to pause, or by accident in case of a network issue or server
> outage.
tusd is the official reference implementation of the [tus resumable upload
protocol](http://www.tus.io/protocols/resumable-upload.html). The protocol
@ -13,6 +16,14 @@ moment allowing to continue seamlessly after e.g. network interruptions.
## Getting started
### Download pre-built binaries (recommended)
You can download ready-to-use packages including binaries for OS X, Linux and
Windows in various formats of the
[latest release](https://github.com/tus/tusd/releases/latest).
### Compile from source
**Requirements:**
* [Go](http://golang.org/doc/install) (1.3 or newer)
@ -35,7 +46,7 @@ go run cmd/tusd/main.go
## Using tusd manually
Besides running tusd using the provided binary, you can embed it into
your own Golang program:
your own Go program:
```go
package main
@ -56,20 +67,26 @@ func main() {
Path: "./uploads",
}
// Create a new HTTP handler for the tusd server by providing
// a configuration object. The DataStore property must be set
// in order to allow the handler to function.
// A storage backend for tusd may consist of multiple different parts which
// handle upload creation, locking, termination and so on. The composer is a
// place where all those separate pieces are joined together. In this example
// we only use the file store but you may plug in multiple.
composer := tusd.NewStoreComposer()
store.UseIn(composer)
// Create a new HTTP handler for the tusd server by providing a configuration.
// The StoreComposer property must be set to allow the handler to function.
handler, err := tusd.NewHandler(tusd.Config{
BasePath: "files/",
DataStore: store,
BasePath: "files/",
StoreComposer: composer,
})
if err != nil {
panic("Unable to create handler: %s", err)
}
// Right now, nothing has happened since we need to start the
// HTTP server on our own. In the end, tusd will listen on
// and accept request at http://localhost:8080/files
// Right now, nothing has happened since we need to start the HTTP server on
// our own. In the end, tusd will start listening on and accept requests at
// http://localhost:8080/files
http.Handle("files/", http.StripPrefix("files/", handler))
err = http.ListenAndServe(":8080", nil)
if err != nil {
@ -78,8 +95,8 @@ func main() {
}
```
If you need to customize the GET and DELETE endpoints, use
`tusd.NewUnroutedHandler` instead of `tusd.NewHandler`.
Please consult the [online documentation](https://godoc.org/github.com/tus/tusd)
for more details about tusd's APIs and its sub-packages.
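If you go the `tusd.NewUnroutedHandler` route, the following sketch shows roughly how
the routing can be wired up by hand. It mirrors the routes which `tusd.NewHandler`
attaches internally, but leaves out the GET and DELETE endpoints. The
`bmizerany/pat` router used here is an assumption, not something this repository
prescribes; any mux works as long as it provides the `:id` parameter in the same way.

```go
package main

import (
	"net/http"

	"github.com/bmizerany/pat" // assumed router, swap in your mux of choice

	"github.com/tus/tusd"
	"github.com/tus/tusd/filestore"
)

func main() {
	composer := tusd.NewStoreComposer()
	filestore.New("./uploads").UseIn(composer)

	// NewUnroutedHandler only validates the configuration and prepares the
	// individual request handlers; it does not attach any routes on its own.
	handler, err := tusd.NewUnroutedHandler(tusd.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
	})
	if err != nil {
		panic("Unable to create handler: " + err.Error())
	}

	// Expose only the endpoints you want. GET and DELETE are left out here,
	// so downloads and termination are simply not available.
	mux := pat.New()
	mux.Post("", http.HandlerFunc(handler.PostFile))
	mux.Add("HEAD", ":id", http.HandlerFunc(handler.HeadFile))
	mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))

	http.Handle("/files/", http.StripPrefix("/files/", mux))
	http.ListenAndServe(":8080", nil)
}
```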
## Implementing your own storage
@ -95,8 +112,22 @@ interface and using the new struct in the [configuration object](https://godoc.o
Please consult the documentation for detailed information about the
required methods.
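To give an idea of the minimal surface, here is a sketch of a hypothetical,
non-persistent store which implements just the three methods the core
`DataStore` interface asks for. It is only an illustration; a real store would
need proper error handling, concurrency safety and persistence.

```go
package memorystore

import (
	"bytes"
	"io"
	"strconv"

	"github.com/tus/tusd"
)

// MemoryStore keeps uploads in process memory. It is a sketch, not meant for
// production use, and is not safe for concurrent access.
type MemoryStore struct {
	infos   map[string]tusd.FileInfo
	buffers map[string]*bytes.Buffer
}

func New() *MemoryStore {
	return &MemoryStore{
		infos:   make(map[string]tusd.FileInfo),
		buffers: make(map[string]*bytes.Buffer),
	}
}

// UseIn registers this store as the core of the passed composer. Extensions
// such as termination or locking would be added through the corresponding
// Use* methods once the matching interfaces are implemented.
func (store *MemoryStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
}

func (store *MemoryStore) NewUpload(info tusd.FileInfo) (string, error) {
	id := strconv.Itoa(len(store.infos))
	info.ID = id
	store.infos[id] = info
	store.buffers[id] = new(bytes.Buffer)
	return id, nil
}

func (store *MemoryStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
	n, err := io.Copy(store.buffers[id], src)
	info := store.infos[id]
	info.Offset += n
	store.infos[id] = info
	return n, err
}

func (store *MemoryStore) GetInfo(id string) (tusd.FileInfo, error) {
	return store.infos[id], nil
}
```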
## Packages
This repository contains not only the HTTP server's code but also other
useful tools:
* [**s3store**](https://godoc.org/github.com/tus/tusd/s3store): A storage backend using AWS S3
* [**filestore**](https://godoc.org/github.com/tus/tusd/filestore): A storage backend using the local file system
* [**memorylocker**](https://godoc.org/github.com/tus/tusd/memorylocker): An in-memory locker for handling concurrent uploads
* [**consullocker**](https://godoc.org/github.com/tus/tusd/consullocker): A locker using the distributed Consul service
* [**limitedstore**](https://godoc.org/github.com/tus/tusd/limitedstore): A storage wrapper limiting the total used space for uploads
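These pieces are meant to be combined through the store composer. The sketch
below wires three of them together; the bucket name and the storage limit are
placeholders, not values used anywhere in this repository.

```go
package main

import (
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/tus/tusd"
	"github.com/tus/tusd/limitedstore"
	"github.com/tus/tusd/memorylocker"
	"github.com/tus/tusd/s3store"
)

func main() {
	composer := tusd.NewStoreComposer()

	// Core storage on S3, credentials are read from the environment.
	config := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
	s3store.New("my-example-bucket", s3.New(session.New(), config)).UseIn(composer)

	// In-memory locks guard against concurrent access to the same upload.
	memorylocker.New().UseIn(composer)

	// Cap the total space used for uploads at 1GB by wrapping the core and
	// terminater which were registered above.
	limitedstore.New(1024*1024*1024, composer.Core, composer.Terminater).UseIn(composer)

	handler, err := tusd.NewHandler(tusd.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
	})
	if err != nil {
		panic("Unable to create handler: " + err.Error())
	}

	http.Handle("/files/", http.StripPrefix("/files/", handler))
	http.ListenAndServe(":8080", nil)
}
```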
## Running the testsuite
[![Build Status](https://travis-ci.org/tus/tusd.svg?branch=master)](https://travis-ci.org/tus/tusd)
[![Build status](https://ci.appveyor.com/api/projects/status/2y6fa4nyknoxmyc8/branch/master?svg=true)](https://ci.appveyor.com/project/Acconut/tusd/branch/master)
```bash
go test -v ./...
```


@ -17,6 +17,7 @@ import (
"github.com/tus/tusd"
"github.com/tus/tusd/filestore"
"github.com/tus/tusd/limitedstore"
"github.com/tus/tusd/memorylocker"
"github.com/tus/tusd/s3store"
"github.com/aws/aws-sdk-go/aws"
@ -50,7 +51,7 @@ var greeting string
func init() {
flag.StringVar(&httpHost, "host", "0.0.0.0", "Host to bind HTTP server to")
flag.StringVar(&httpPort, "port", "1080", "Port to bind HTTP server to")
flag.Int64Var(&maxSize, "max-size", 0, "Maximum size of uploads in bytes")
flag.Int64Var(&maxSize, "max-size", 0, "Maximum size of a single upload in bytes")
flag.StringVar(&dir, "dir", "./data", "Directory to store uploads in")
flag.Int64Var(&storeSize, "store-size", 0, "Size of space allowed for storage")
flag.StringVar(&basepath, "base-path", "/files/", "Basepath of the HTTP server")
@ -96,27 +97,32 @@ func main() {
return
}
var store tusd.TerminaterDataStore
// Attempt to use S3 as a backend if the -s3-bucket option has been supplied.
// If not, we default to storing uploads locally on disk.
composer := tusd.NewStoreComposer()
if s3Bucket == "" {
stdout.Printf("Using '%s' as directory storage.\n", dir)
if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
stderr.Fatalf("Unable to ensure directory exists: %s", err)
}
store = filestore.New(dir)
store := filestore.New(dir)
store.UseIn(composer)
} else {
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", s3Bucket)
// Derive credentials from AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID and
// AWS_REGION environment variables.
credentials := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
store = s3store.New(s3Bucket, s3.New(session.New(), credentials))
store := s3store.New(s3Bucket, s3.New(session.New(), credentials))
store.UseIn(composer)
locker := memorylocker.New()
locker.UseIn(composer)
}
if storeSize > 0 {
store = limitedstore.New(storeSize, store)
limitedstore.New(storeSize, composer.Core, composer.Terminater).UseIn(composer)
stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
// We need to ensure that a single upload can fit into the storage size
@ -130,7 +136,7 @@ func main() {
handler, err := tusd.NewHandler(tusd.Config{
MaxSize: maxSize,
BasePath: basepath,
DataStore: store,
StoreComposer: composer,
NotifyCompleteUploads: true,
})
if err != nil {
@ -140,6 +146,8 @@ func main() {
address := httpHost + ":" + httpPort
stdout.Printf("Using %s as address to listen.\n", address)
stdout.Printf(composer.Capabilities())
go func() {
for {
select {

composer.go (new file)

@ -0,0 +1,122 @@
package tusd
// StoreComposer represents a composable data store. It consists of the core
// data store and optional extensions. Please consult the package's overview
// for a more detailed introduction to how to use this structure.
type StoreComposer struct {
Core DataStore
UsesTerminater bool
Terminater TerminaterDataStore
UsesFinisher bool
Finisher FinisherDataStore
UsesLocker bool
Locker LockerDataStore
UsesGetReader bool
GetReader GetReaderDataStore
UsesConcater bool
Concater ConcaterDataStore
}
// NewStoreComposer creates a new and empty store composer.
func NewStoreComposer() *StoreComposer {
return &StoreComposer{}
}
// newStoreComposerFromDataStore creates a new store composer and attempts to
// extract the extensions from the provided store. This is intended to be used
// for transitioning from data stores to composers.
func newStoreComposerFromDataStore(store DataStore) *StoreComposer {
composer := NewStoreComposer()
composer.UseCore(store)
if mod, ok := store.(TerminaterDataStore); ok {
composer.UseTerminater(mod)
}
if mod, ok := store.(FinisherDataStore); ok {
composer.UseFinisher(mod)
}
if mod, ok := store.(LockerDataStore); ok {
composer.UseLocker(mod)
}
if mod, ok := store.(GetReaderDataStore); ok {
composer.UseGetReader(mod)
}
if mod, ok := store.(ConcaterDataStore); ok {
composer.UseConcater(mod)
}
return composer
}
// Capabilities returns a string representing the provided extensions in a
// human-readable format meant for debugging.
func (store *StoreComposer) Capabilities() string {
str := "Core: "
if store.Core != nil {
str += "✓"
} else {
str += "✗"
}
str += ` Terminater: `
if store.UsesTerminater {
str += "✓"
} else {
str += "✗"
}
str += ` Finisher: `
if store.UsesFinisher {
str += "✓"
} else {
str += "✗"
}
str += ` Locker: `
if store.UsesLocker {
str += "✓"
} else {
str += "✗"
}
str += ` GetReader: `
if store.UsesGetReader {
str += "✓"
} else {
str += "✗"
}
str += ` Concater: `
if store.UsesConcater {
str += "✓"
} else {
str += "✗"
}
return str
}
// UseCore will set the used core data store. If the argument is nil, the
// property will be unset.
func (store *StoreComposer) UseCore(core DataStore) {
store.Core = core
}
func (store *StoreComposer) UseTerminater(ext TerminaterDataStore) {
store.UsesTerminater = ext != nil
store.Terminater = ext
}
func (store *StoreComposer) UseFinisher(ext FinisherDataStore) {
store.UsesFinisher = ext != nil
store.Finisher = ext
}
func (store *StoreComposer) UseLocker(ext LockerDataStore) {
store.UsesLocker = ext != nil
store.Locker = ext
}
func (store *StoreComposer) UseGetReader(ext GetReaderDataStore) {
store.UsesGetReader = ext != nil
store.GetReader = ext
}
func (store *StoreComposer) UseConcater(ext ConcaterDataStore) {
store.UsesConcater = ext != nil
store.Concater = ext
}
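The Capabilities string is what cmd/tusd/main.go prints at startup (see the
stdout.Printf call above). As a rough illustration, assuming a composer filled
only by the file store, which registers everything except a finisher and a
concater, the output would look like this:

```go
package main

import (
	"fmt"

	"github.com/tus/tusd"
	"github.com/tus/tusd/filestore"
)

func main() {
	composer := tusd.NewStoreComposer()
	filestore.New("./data").UseIn(composer)

	// FileStore registers the core, terminater, locker and get-reader
	// extensions, but neither a finisher nor a concater.
	fmt.Println(composer.Capabilities())
	// Output (roughly): Core: ✓ Terminater: ✓ Finisher: ✗ Locker: ✓ GetReader: ✓ Concater: ✗
}
```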

composer.mgo (new file)

@ -0,0 +1,87 @@
package tusd
#define USE_FUNC(TYPE) \
func (store *StoreComposer) Use ## TYPE(ext TYPE ## DataStore) { \
store.Uses ## TYPE = ext != nil; \
store.TYPE = ext; \
}
#define USE_FIELD(TYPE) Uses ## TYPE bool; \
TYPE TYPE ## DataStore
#define USE_FROM(TYPE) if mod, ok := store.(TYPE ## DataStore); ok { \
composer.Use ## TYPE (mod) \
}
#define USE_CAP(TYPE) str += ` TYPE: `; \
if store.Uses ## TYPE { \
str += "✓" \
} else { \
str += "✗" \
}
// StoreComposer represents a composable data store. It consists of the core
// data store and optional extensions. Please consult the package's overview
// for a more detailed introduction to how to use this structure.
type StoreComposer struct {
Core DataStore
USE_FIELD(Terminater)
USE_FIELD(Finisher)
USE_FIELD(Locker)
USE_FIELD(GetReader)
USE_FIELD(Concater)
}
// NewStoreComposer creates a new and empty store composer.
func NewStoreComposer() *StoreComposer {
return &StoreComposer{}
}
// newStoreComposerFromDataStore creates a new store composer and attempts to
// extract the extensions from the provided store. This is intended to be used
// for transitioning from data stores to composers.
func newStoreComposerFromDataStore(store DataStore) *StoreComposer {
composer := NewStoreComposer()
composer.UseCore(store)
USE_FROM(Terminater)
USE_FROM(Finisher)
USE_FROM(Locker)
USE_FROM(GetReader)
USE_FROM(Concater)
return composer
}
// Capabilities returns a string representing the provided extensions in a
// human-readable format meant for debugging.
func (store *StoreComposer) Capabilities() string {
str := "Core: "
if store.Core != nil {
str += "✓"
} else {
str += "✗"
}
USE_CAP(Terminater)
USE_CAP(Finisher)
USE_CAP(Locker)
USE_CAP(GetReader)
USE_CAP(Concater)
return str
}
// UseCore will set the used core data store. If the argument is nil, the
// property will be unset.
func (store *StoreComposer) UseCore(core DataStore) {
store.Core = core
}
USE_FUNC(Terminater)
USE_FUNC(Finisher)
USE_FUNC(Locker)
USE_FUNC(GetReader)
USE_FUNC(Concater)

composer_test.go (new file)

@ -0,0 +1,27 @@
package tusd_test
import (
"github.com/tus/tusd"
"github.com/tus/tusd/consullocker"
"github.com/tus/tusd/filestore"
"github.com/tus/tusd/limitedstore"
)
func ExampleNewStoreComposer() {
composer := tusd.NewStoreComposer()
fs := filestore.New("./data")
fs.UseIn(composer)
cl := consullocker.New(nil)
cl.UseIn(composer)
ls := limitedstore.New(1024*1024*1024, composer.Core, composer.Terminater)
ls.UseIn(composer)
config := tusd.Config{
StoreComposer: composer,
}
_, _ = tusd.NewHandler(config)
}

config.go (new file)

@ -0,0 +1,70 @@
package tusd
import (
"errors"
"log"
"net/url"
"os"
)
// Config provides a way to configure the Handler depending on your needs.
type Config struct {
// DataStore implementation used to store and retrieve the single uploads.
// Must not be nil unless StoreComposer is set.
DataStore DataStore
StoreComposer *StoreComposer
// MaxSize defines how many bytes may be stored in one single upload. If its
// value is 0 or smaller no limit will be enforced.
MaxSize int64
// BasePath defines the URL path used for handling uploads, e.g. "/files/".
// If no trailing slash is present, it will be added. You may specify an
// absolute URL containing a scheme, e.g. "http://tus.io"
BasePath string
isAbs bool
// Initiate the CompleteUploads channel in the Handler struct in order to
// be notified about complete uploads
NotifyCompleteUploads bool
// Logger is the logger to use internally
Logger *log.Logger
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
// potentially set by proxies when generating an absolute URL in the
// response to POST requests.
RespectForwardedHeaders bool
}
func (config *Config) validate() error {
if config.Logger == nil {
config.Logger = log.New(os.Stdout, "[tusd] ", 0)
}
base := config.BasePath
uri, err := url.Parse(base)
if err != nil {
return err
}
// Ensure base path ends with slash to remove logic from absFileURL
if base != "" && string(base[len(base)-1]) != "/" {
base += "/"
}
// Ensure base path begins with slash if not absolute (starts with scheme)
if !uri.IsAbs() && len(base) > 0 && string(base[0]) != "/" {
base = "/" + base
}
config.BasePath = base
config.isAbs = uri.IsAbs()
if config.StoreComposer == nil {
config.StoreComposer = newStoreComposerFromDataStore(config.DataStore)
config.DataStore = nil
} else if config.DataStore != nil {
return errors.New("tusd: either StoreComposer or DataStore may be set in Config, but not both")
}
if config.StoreComposer.Core == nil {
return errors.New("tusd: StoreComposer in Config needs to contain a non-nil core")
}
return nil
}

config_test.go (new file)

@ -0,0 +1,58 @@
package tusd
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
)
type zeroStore struct{}
func (store zeroStore) NewUpload(info FileInfo) (string, error) {
return "", nil
}
func (store zeroStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
return 0, nil
}
func (store zeroStore) GetInfo(id string) (FileInfo, error) {
return FileInfo{}, nil
}
func TestConfig(t *testing.T) {
a := assert.New(t)
config := Config{
DataStore: zeroStore{},
BasePath: "files",
}
a.Nil(config.validate())
a.NotNil(config.Logger)
a.NotNil(config.StoreComposer)
a.Equal("/files/", config.BasePath)
}
func TestConfigEmptyCore(t *testing.T) {
a := assert.New(t)
config := Config{
StoreComposer: NewStoreComposer(),
}
a.Error(config.validate())
}
func TestConfigStoreAndComposer(t *testing.T) {
a := assert.New(t)
composer := NewStoreComposer()
composer.UseCore(zeroStore{})
config := Config{
StoreComposer: composer,
DataStore: zeroStore{},
}
a.Error(config.validate())
}


@ -45,6 +45,11 @@ func New(client *consul.Client) *ConsulLocker {
}
}
// UseIn adds this locker to the passed composer.
func (locker *ConsulLocker) UseIn(composer *tusd.StoreComposer) {
composer.UseLocker(locker)
}
// LockUpload tries to obtain the exclusive lock.
func (locker *ConsulLocker) LockUpload(id string) error {
lock, err := locker.Client.LockOpts(&consul.LockOptions{


@ -8,7 +8,11 @@ import (
)
func TestCORS(t *testing.T) {
handler, _ := NewHandler(Config{})
store := NewStoreComposer()
store.UseCore(zeroStore{})
handler, _ := NewHandler(Config{
StoreComposer: store,
})
(&httpTest{
Name: "Preflight request",


@ -49,8 +49,6 @@ type DataStore interface {
// if they want to receive DELETE requests using the Handler. If this interface
// is not implemented, no request handler for this method is attached.
type TerminaterDataStore interface {
DataStore
// Terminate an upload so any further requests to the resource, both reading
// and writing, must return os.ErrNotExist or similar.
Terminate(id string) error
@ -62,8 +60,6 @@ type TerminaterDataStore interface {
// resources or notifying other services. For example, S3Store uses this
// interface for removing a temporary object.
type FinisherDataStore interface {
DataStore
// FinishUpload executes additional operations for the finished upload which
// is specified by its ID.
FinishUpload(id string) error
@ -77,8 +73,6 @@ type FinisherDataStore interface {
// data corruption, especially to ensure correct offset values and the proper
// order of chunks inside a single upload.
type LockerDataStore interface {
DataStore
// LockUpload attempts to obtain an exclusive lock for the upload specified
// by its id.
// If this operation fails because the resource is already locked, the
@ -96,8 +90,6 @@ type LockerDataStore interface {
// Please, be aware that this feature is not part of the official tus
// specification. Instead it's a custom mechanism by tusd.
type GetReaderDataStore interface {
DataStore
// GetReader returns a reader which allows iterating over the content of an
// upload specified by its ID. It should attempt to provide a reader even if
// the upload has not been finished yet but it's not required.
@ -112,8 +104,6 @@ type GetReaderDataStore interface {
// Concatenation extension should be enabled. Only in this case, the handler
// will parse and respect the Upload-Concat header.
type ConcaterDataStore interface {
DataStore
// ConcatUploads concatenates the content from the provided partial uploads
// and writes the result into the destination upload which is specified by its
// ID. The caller (usually the handler) must and will ensure that this

doc.go (new file)

@ -0,0 +1,69 @@
/*
Package tusd provides ways to accept tus 1.0 calls using HTTP.
tus is a protocol based on HTTP for resumable file uploads. Resumable means that
an upload can be interrupted at any moment and can be resumed without
re-uploading the previous data again. An interruption may happen willingly, if
the user wants to pause, or by accident in case of a network issue or server
outage (http://tus.io).
The basics of tusd
tusd was designed in a way which allows flexible and customizable usage. We
wanted to avoid binding this package to a specific storage system, particularly
proprietary third-party software. Therefore tusd is an abstraction layer whose
only job is to accept incoming HTTP requests, validate them according to the
specification and finally pass them to the data store.
The data store is another important component in tusd's architecture whose
purpose is to do the actual file handling. It has to write the incoming upload
to a persistent storage system and retrieve information about an upload's
current state. Therefore it is the only part of the system which communicates
directly with the underlying storage system, whether it be the local disk, a
remote FTP server or cloud providers such as AWS S3.
Using a store composer
The only hard requirements for a data store can be found in the DataStore
interface. It contains methods for creating uploads (NewUpload), writing to
them (WriteChunk) and retrieving their status (GetInfo). However, there
are many more features which are not mandatory but may still be used.
These are contained in their own interfaces which all share the *DataStore
suffix. For example, GetReaderDataStore which enables downloading uploads or
TerminaterDataStore which allows uploads to be terminated.
The store composer offers a way to combine the basic data store - the core -
implementation and these additional extensions:
composer := tusd.NewStoreComposer()
composer.UseCore(dataStore) // Implements DataStore
composer.UseTerminater(terminater) // Implements TerminaterDataStore
composer.UseLocker(locker) // Implements LockerDataStore
The corresponding methods for adding an extension to the composer are prefixed
with Use* followed by the name of the corresponding interface. However, most
data stores provide multiple extensions and adding all of them manually can be
tedious and error-prone. Therefore, all data stores distributed with tusd provide
a UseIn() method which does this job automatically. For example, this is the
S3 store in action (see S3Store.UseIn):
store := s3store.New()
locker := memorylocker.New()
composer := tusd.NewStoreComposer()
store.UseIn(composer)
locker.UseIn(composer)
Finally, once you are done with composing your data store, you can pass it
inside the Config struct in order to create a new tusd HTTP handler:
config := tusd.Config{
StoreComposer: composer,
BasePath: "/files/",
}
handler, err := tusd.NewHandler(config)
This handler can then be mounted to a specific path, e.g. /files:
http.Handle("/files/", http.StripPrefix("/files/", handler))
*/
package tusd


@ -46,6 +46,15 @@ func New(path string) FileStore {
return FileStore{path}
}
// UseIn sets this store as the core data store in the passed composer and adds
// all possible extensions to it.
func (store FileStore) UseIn(composer *tusd.StoreComposer) {
composer.UseCore(store)
composer.UseGetReader(store)
composer.UseTerminater(store)
composer.UseLocker(store)
}
func (store FileStore) NewUpload(info tusd.FileInfo) (id string, err error) {
id = uid.Uid()
info.ID = id


@ -1,4 +1,3 @@
// Package tusd provides ways to accept tusd calls using HTTP.
package tusd
import (
@ -22,6 +21,10 @@ type Handler struct {
// endpoints to be customized. These are not part of the protocol so can be
// changed depending on your needs.
func NewHandler(config Config) (*Handler, error) {
if err := config.validate(); err != nil {
return nil, err
}
handler, err := NewUnroutedHandler(config)
if err != nil {
return nil, err
@ -41,12 +44,12 @@ func NewHandler(config Config) (*Handler, error) {
mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))
// Only attach the DELETE handler if the Terminate() method is provided
if _, ok := config.DataStore.(TerminaterDataStore); ok {
if config.StoreComposer.UsesTerminater {
mux.Del(":id", http.HandlerFunc(handler.DelFile))
}
// GET handler requires the GetReader() method
if _, ok := config.DataStore.(GetReaderDataStore); ok {
if config.StoreComposer.UsesGetReader {
mux.Get(":id", http.HandlerFunc(handler.GetFile))
}


@ -12,27 +12,19 @@
// properly. Two tusd.FileStore instances using the same directory, for example.
// In addition the limited store will keep a list of the uploads' IDs in memory
// which may create a growing memory leak.
//
// While LimitedStore implements the GetReader, LockUpload, UnlockUpload,
// FinishUpload and ConcatUploads methods, it does not contain proper definitions
// for them. When invoked, the call will be passed to the underlying
// data store as long as it provides these methods. If not, either an error
// is returned or nothing happens (see the specific methods for more
// detailed information). The motivation behind this decision was, that this
// allows to expose the additional extensions implemented using the
// interfaces, such as GetReaderDataStore.
package limitedstore
import (
"github.com/tus/tusd"
"io"
"sort"
"sync"
)
type LimitedStore struct {
tusd.DataStore
terminater tusd.TerminaterDataStore
StoreSize int64
tusd.TerminaterDataStore
uploads map[string]int64
usedSize int64
@ -55,15 +47,21 @@ func (p pairlist) Less(i, j int) bool { return p[i].value < p[j].value }
// New creates a new limited store with the given size as the maximum storage
// size. The passed data store is used to create and write uploads while the
// terminater is used to remove uploads once the storage limit would be exceeded.
func New(storeSize int64, dataStore tusd.TerminaterDataStore) *LimitedStore {
func New(storeSize int64, dataStore tusd.DataStore, terminater tusd.TerminaterDataStore) *LimitedStore {
return &LimitedStore{
StoreSize: storeSize,
TerminaterDataStore: dataStore,
uploads: make(map[string]int64),
mutex: new(sync.Mutex),
StoreSize: storeSize,
DataStore: dataStore,
terminater: terminater,
uploads: make(map[string]int64),
mutex: new(sync.Mutex),
}
}
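// UseIn adds this store to the passed composer as the core data store and terminater.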
func (store *LimitedStore) UseIn(composer *tusd.StoreComposer) {
composer.UseCore(store)
composer.UseTerminater(store)
}
func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
store.mutex.Lock()
defer store.mutex.Unlock()
@ -72,7 +70,7 @@ func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
return "", err
}
id, err := store.TerminaterDataStore.NewUpload(info)
id, err := store.DataStore.NewUpload(info)
if err != nil {
return "", err
}
@ -91,7 +89,7 @@ func (store *LimitedStore) Terminate(id string) error {
}
func (store *LimitedStore) terminate(id string) error {
err := store.TerminaterDataStore.Terminate(id)
err := store.terminater.Terminate(id)
if err != nil {
return err
}
@ -135,55 +133,3 @@ func (store *LimitedStore) ensureSpace(size int64) error {
return nil
}
// GetReader will pass the call to the underlying data store if it implements
// the tusd.GetReaderDataStore interface. Else tusd.ErrNotImplemented will be
// returned.
func (store *LimitedStore) GetReader(id string) (io.Reader, error) {
if s, ok := store.TerminaterDataStore.(tusd.GetReaderDataStore); ok {
return s.GetReader(id)
} else {
return nil, tusd.ErrNotImplemented
}
}
// LockUpload will pass the call to the underlying data store if it implements
// the tusd.LockerDataStore interface. Else this function simply returns nil.
func (store *LimitedStore) LockUpload(id string) error {
if s, ok := store.TerminaterDataStore.(tusd.LockerDataStore); ok {
return s.LockUpload(id)
}
return nil
}
// UnlockUpload will pass the call to the underlying data store if it implements
// the tusd.LockerDataStore interface. Else this function simply returns nil.
func (store *LimitedStore) UnlockUpload(id string) error {
if s, ok := store.TerminaterDataStore.(tusd.LockerDataStore); ok {
return s.UnlockUpload(id)
}
return nil
}
// FinishUpload will pass the call to the underlying data store if it implements
// the tusd.FinisherDataStore interface. Else this function simply returns nil.
func (store *LimitedStore) FinishUpload(id string) error {
if s, ok := store.TerminaterDataStore.(tusd.FinisherDataStore); ok {
return s.FinishUpload(id)
}
return nil
}
// ConcatUploads will pass the call to the underlying data store if it implements
// the tusd.ConcaterDataStore interface. Else tusd.ErrNotImplemented will be
// returned.
func (store *LimitedStore) ConcatUploads(dest string, src []string) error {
if s, ok := store.TerminaterDataStore.(tusd.ConcaterDataStore); ok {
return s.ConcatUploads(dest, src)
} else {
return tusd.ErrNotImplemented
}
}


@ -11,11 +11,7 @@ import (
)
var _ tusd.DataStore = &LimitedStore{}
var _ tusd.GetReaderDataStore = &LimitedStore{}
var _ tusd.TerminaterDataStore = &LimitedStore{}
var _ tusd.LockerDataStore = &LimitedStore{}
var _ tusd.ConcaterDataStore = &LimitedStore{}
var _ tusd.FinisherDataStore = &LimitedStore{}
type dataStore struct {
t *assert.Assertions
@ -45,10 +41,6 @@ func (store *dataStore) GetInfo(id string) (tusd.FileInfo, error) {
return tusd.FileInfo{}, nil
}
func (store *dataStore) GetReader(id string) (io.Reader, error) {
return nil, tusd.ErrNotImplemented
}
func (store *dataStore) Terminate(id string) error {
// We expect the uploads to be terminated in a specific order (the bigger
// come first)
@ -66,7 +58,7 @@ func TestLimitedStore(t *testing.T) {
dataStore := &dataStore{
t: a,
}
store := New(100, dataStore)
store := New(100, dataStore, dataStore)
// Create new upload (30 bytes)
id, err := store.NewUpload(tusd.FileInfo{


@ -11,6 +11,8 @@
package memorylocker
import (
"sync"
"github.com/tus/tusd"
)
@ -18,20 +20,34 @@ import (
// cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits.
type MemoryLocker struct {
tusd.DataStore
locks map[string]bool
mutex *sync.Mutex
}
// New creates a new lock memory wrapper around the provided storage.
func NewMemoryLocker(store tusd.DataStore) *MemoryLocker {
// NewMemoryLocker creates a new in-memory locker. The DataStore parameter
// is only provided for backwards compatibility and is ignored. Please
// use the New() function instead.
func NewMemoryLocker(_ tusd.DataStore) *MemoryLocker {
return New()
}
// New creates a new in-memory locker.
func New() *MemoryLocker {
return &MemoryLocker{
DataStore: store,
locks: make(map[string]bool),
locks: make(map[string]bool),
mutex: new(sync.Mutex),
}
}
// UseIn adds this locker to the passed composer.
func (locker *MemoryLocker) UseIn(composer *tusd.StoreComposer) {
composer.UseLocker(locker)
}
// LockUpload tries to obtain the exclusive lock.
func (locker *MemoryLocker) LockUpload(id string) error {
locker.mutex.Lock()
defer locker.mutex.Unlock()
// Ensure file is not locked
if _, ok := locker.locks[id]; ok {
@ -45,6 +61,9 @@ func (locker *MemoryLocker) LockUpload(id string) error {
// UnlockUpload releases a lock. If no such lock exists, no error will be returned.
func (locker *MemoryLocker) UnlockUpload(id string) error {
locker.mutex.Lock()
defer locker.mutex.Unlock()
// Deleting a non-existing key does not end in unexpected errors or panic
// since this operation results in a no-op
delete(locker.locks, id)


@ -1,7 +1,6 @@
package memorylocker
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
@ -9,28 +8,11 @@ import (
"github.com/tus/tusd"
)
type zeroStore struct{}
func (store zeroStore) NewUpload(info tusd.FileInfo) (string, error) {
return "", nil
}
func (store zeroStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
return 0, nil
}
func (store zeroStore) GetInfo(id string) (tusd.FileInfo, error) {
return tusd.FileInfo{}, nil
}
func (store zeroStore) GetReader(id string) (io.Reader, error) {
return nil, tusd.ErrNotImplemented
}
func TestMemoryLocker(t *testing.T) {
a := assert.New(t)
var locker tusd.LockerDataStore
locker = NewMemoryLocker(&zeroStore{})
locker = New()
a.NoError(locker.LockUpload("one"))
a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))


@ -8,8 +8,11 @@ import (
)
func TestOptions(t *testing.T) {
store := NewStoreComposer()
store.UseCore(zeroStore{})
handler, _ := NewHandler(Config{
MaxSize: 400,
StoreComposer: store,
MaxSize: 400,
})
(&httpTest{


@ -131,6 +131,16 @@ func New(bucket string, service s3iface.S3API) S3Store {
}
}
// UseIn sets this store as the core data store in the passed composer and adds
// all possible extensions to it.
func (store S3Store) UseIn(composer *tusd.StoreComposer) {
composer.UseCore(store)
composer.UseTerminater(store)
composer.UseFinisher(store)
composer.UseGetReader(store)
composer.UseConcater(store)
}
func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
var uploadId string
if info.ID == "" {


@ -6,7 +6,6 @@ import (
"io"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
@ -52,35 +51,12 @@ var ErrStatusCodes = map[error]int{
ErrModifyFinal: http.StatusForbidden,
}
// Config provides a way to configure the Handler depending on your needs.
type Config struct {
// DataStore implementation used to store and retrieve the single uploads.
// Must no be nil.
DataStore DataStore
// MaxSize defines how many bytes may be stored in one single upload. If its
// value is is 0 or smaller no limit will be enforced.
MaxSize int64
// BasePath defines the URL path used for handling uploads, e.g. "/files/".
// If no trailing slash is presented it will be added. You may specify an
// absolute URL containing a scheme, e.g. "http://tus.io"
BasePath string
// Initiate the CompleteUploads channel in the Handler struct in order to
// be notified about complete uploads
NotifyCompleteUploads bool
// Logger the logger to use internally
Logger *log.Logger
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
// potentially set by proxies when generating an absolute URL in the
// reponse to POST requests.
RespectForwardedHeaders bool
}
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
// is provided which is, however, not part of the specification.
type UnroutedHandler struct {
config Config
dataStore DataStore
composer *StoreComposer
isBasePathAbs bool
basePath string
logger *log.Logger
@ -97,42 +73,26 @@ type UnroutedHandler struct {
// a router (aka mux) of your choice. If you are looking for a preconfigured
// handler, see NewHandler.
func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
logger := config.Logger
if logger == nil {
logger = log.New(os.Stdout, "[tusd] ", 0)
}
base := config.BasePath
uri, err := url.Parse(base)
if err != nil {
if err := config.validate(); err != nil {
return nil, err
}
// Ensure base path ends with slash to remove logic from absFileURL
if base != "" && string(base[len(base)-1]) != "/" {
base += "/"
}
// Ensure base path begins with slash if not absolute (starts with scheme)
if !uri.IsAbs() && len(base) > 0 && string(base[0]) != "/" {
base = "/" + base
}
// Only promote extensions using the Tus-Extension header which are implemented
extensions := "creation"
if _, ok := config.DataStore.(TerminaterDataStore); ok {
if config.StoreComposer.UsesTerminater {
extensions += ",termination"
}
if _, ok := config.DataStore.(ConcaterDataStore); ok {
if config.StoreComposer.UsesConcater {
extensions += ",concatenation"
}
handler := &UnroutedHandler{
config: config,
dataStore: config.DataStore,
basePath: base,
isBasePathAbs: uri.IsAbs(),
composer: config.StoreComposer,
basePath: config.BasePath,
isBasePathAbs: config.isAbs,
CompleteUploads: make(chan FileInfo),
logger: logger,
logger: config.Logger,
extensions: extensions,
}
@ -211,8 +171,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
// Only use the proper Upload-Concat header if the concatenation extension
// is even supported by the data store.
var concatHeader string
concatStore, ok := handler.dataStore.(ConcaterDataStore)
if ok {
if handler.composer.UsesConcater {
concatHeader = r.Header.Get("Upload-Concat")
}
@ -258,14 +217,14 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
PartialUploads: partialUploads,
}
id, err := handler.dataStore.NewUpload(info)
id, err := handler.composer.Core.NewUpload(info)
if err != nil {
handler.sendError(w, r, err)
return
}
if isFinal {
if err := concatStore.ConcatUploads(id, partialUploads); err != nil {
if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
handler.sendError(w, r, err)
return
}
@ -290,7 +249,8 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -299,7 +259,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
defer locker.UnlockUpload(id)
}
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -350,7 +310,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -359,7 +320,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
defer locker.UnlockUpload(id)
}
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -393,7 +354,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// Limit the
reader := io.LimitReader(r.Body, maxSize)
bytesWritten, err := handler.dataStore.WriteChunk(id, offset, reader)
bytesWritten, err := handler.composer.Core.WriteChunk(id, offset, reader)
if err != nil {
handler.sendError(w, r, err)
return
@ -406,8 +367,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// If the upload is completed, ...
if newOffset == info.Size {
// ... allow custom mechanism to finish and cleanup the upload
if store, ok := handler.dataStore.(FinisherDataStore); ok {
if err := store.FinishUpload(id); err != nil {
if handler.composer.UsesFinisher {
if err := handler.composer.Finisher.FinishUpload(id); err != nil {
handler.sendError(w, r, err)
return
}
@ -426,8 +387,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// GetFile handles requests to download a file using a GET request. This is not
// part of the specification.
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
dataStore, ok := handler.dataStore.(GetReaderDataStore)
if !ok {
if !handler.composer.UsesGetReader {
handler.sendError(w, r, ErrNotImplemented)
return
}
@ -438,7 +398,8 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -447,7 +408,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
defer locker.UnlockUpload(id)
}
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -460,7 +421,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
}
// Get reader
src, err := dataStore.GetReader(id)
src, err := handler.composer.GetReader.GetReader(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -479,8 +440,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
// DelFile terminates an upload permanently.
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
// Abort the request handling if the required interface is not implemented
tstore, ok := handler.config.DataStore.(TerminaterDataStore)
if !ok {
if !handler.composer.UsesTerminater {
handler.sendError(w, r, ErrNotImplemented)
return
}
@ -491,7 +451,8 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -500,7 +461,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
defer locker.UnlockUpload(id)
}
err = tstore.Terminate(id)
err = handler.composer.Terminater.Terminate(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -591,7 +552,7 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto strin
// of a final resource.
func (handler *UnroutedHandler) sizeOfUploads(ids []string) (size int64, err error) {
for _, id := range ids {
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
return size, err
}