Add initial draft of StoreComposer

commit 21ae1c45a7 (parent 13c27e1c19)
@@ -12,6 +12,7 @@ import (
     "github.com/tus/tusd"
     "github.com/tus/tusd/filestore"
     "github.com/tus/tusd/limitedstore"
+    "github.com/tus/tusd/memorylocker"
     "github.com/tus/tusd/s3store"
 
     "github.com/aws/aws-sdk-go/aws"
@@ -58,25 +59,30 @@ func main() {
         return
     }
 
-    var store tusd.TerminaterDataStore
+    composer := tusd.NewStoreComposer()
     if s3Bucket == "" {
         stdout.Printf("Using '%s' as directory storage.\n", dir)
         if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
             stderr.Fatalf("Unable to ensure directory exists: %s", err)
         }
 
-        store = filestore.New(dir)
+        store := filestore.New(dir)
+        store.UseIn(composer)
     } else {
         stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", s3Bucket)
 
         // Derive credentials from AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID and
         // AWS_REGION environment variables.
         credentials := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
-        store = s3store.New(s3Bucket, s3.New(session.New(), credentials))
+        store := s3store.New(s3Bucket, s3.New(session.New(), credentials))
+        store.UseIn(composer)
+
+        locker := memorylocker.New()
+        locker.UseIn(composer)
     }
 
     if storeSize > 0 {
-        store = limitedstore.New(storeSize, store)
+        limitedstore.New(storeSize, composer.Core, composer.Terminater).UseIn(composer)
         stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
 
         // We need to ensure that a single upload can fit into the storage size
@@ -90,7 +96,7 @@ func main() {
     handler, err := tusd.NewHandler(tusd.Config{
         MaxSize:               maxSize,
         BasePath:              "files/",
-        DataStore:             store,
+        StoreComposer:         composer,
         NotifyCompleteUploads: true,
     })
     if err != nil {
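The hunks above replace the tusd command's single store variable with a StoreComposer that each storage backend registers itself into. A minimal sketch of the same wiring in a standalone program, assuming tusd.Handler can be mounted as an http.Handler the way the full command does, and using a hypothetical ./data directory and port:

package main

import (
    "log"
    "net/http"

    "github.com/tus/tusd"
    "github.com/tus/tusd/filestore"
    "github.com/tus/tusd/memorylocker"
)

func main() {
    composer := tusd.NewStoreComposer()

    // The file store registers itself for every capability it provides
    // (core storage, get-reader, terminater and locker).
    store := filestore.New("./data")
    store.UseIn(composer)

    // Registering another locker afterwards overwrites the composer's
    // Locker field, so the in-memory locker is the one the handler uses.
    locker := memorylocker.New()
    locker.UseIn(composer)

    handler, err := tusd.NewHandler(tusd.Config{
        BasePath:      "files/",
        StoreComposer: composer,
    })
    if err != nil {
        log.Fatal(err)
    }

    log.Fatal(http.ListenAndServe(":1080", http.StripPrefix("/files/", handler)))
}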
@@ -0,0 +1,68 @@
+package tusd
+
+type StoreComposer struct {
+    Core DataStore
+
+    UsesTerminater bool
+    Terminater     TerminaterDataStore
+    UsesFinisher   bool
+    Finisher       FinisherDataStore
+    UsesLocker     bool
+    Locker         LockerDataStore
+    UsesGetReader  bool
+    GetReader      GetReaderDataStore
+    UsesConcater   bool
+    Concater       ConcaterDataStore
+}
+
+func NewStoreComposer() *StoreComposer {
+    return &StoreComposer{}
+}
+
+func NewStoreComposerFromDataStore(store DataStore) *StoreComposer {
+    composer := NewStoreComposer()
+    composer.UseCore(store)
+
+    if mod, ok := store.(TerminaterDataStore); ok {
+        composer.UseTerminater(mod)
+    }
+    if mod, ok := store.(FinisherDataStore); ok {
+        composer.UseFinisher(mod)
+    }
+    if mod, ok := store.(LockerDataStore); ok {
+        composer.UseLocker(mod)
+    }
+    if mod, ok := store.(GetReaderDataStore); ok {
+        composer.UseGetReader(mod)
+    }
+    if mod, ok := store.(ConcaterDataStore); ok {
+        composer.UseConcater(mod)
+    }
+
+    return composer
+}
+
+func (store *StoreComposer) UseCore(core DataStore) {
+    store.Core = core
+}
+
+func (store *StoreComposer) UseTerminater(x TerminaterDataStore) {
+    store.UsesTerminater = x != nil
+    store.Terminater = x
+}
+func (store *StoreComposer) UseFinisher(x FinisherDataStore) {
+    store.UsesFinisher = x != nil
+    store.Finisher = x
+}
+func (store *StoreComposer) UseLocker(x LockerDataStore) {
+    store.UsesLocker = x != nil
+    store.Locker = x
+}
+func (store *StoreComposer) UseGetReader(x GetReaderDataStore) {
+    store.UsesGetReader = x != nil
+    store.GetReader = x
+}
+func (store *StoreComposer) UseConcater(x ConcaterDataStore) {
+    store.UsesConcater = x != nil
+    store.Concater = x
+}
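NewStoreComposerFromDataStore keeps the old single-DataStore configuration working by probing the store with type assertions. A sketch of the resulting flags, assuming the core DataStore interface consists of the NewUpload, WriteChunk and GetInfo methods visible in the test stubs touched by this commit; terminateOnlyStore is a hypothetical type used only for illustration:

package main

import (
    "fmt"
    "io"

    "github.com/tus/tusd"
)

// terminateOnlyStore implements the core DataStore methods plus Terminate,
// but none of the other optional interfaces.
type terminateOnlyStore struct{}

func (terminateOnlyStore) NewUpload(info tusd.FileInfo) (string, error) { return "id", nil }
func (terminateOnlyStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
    return 0, nil
}
func (terminateOnlyStore) GetInfo(id string) (tusd.FileInfo, error) { return tusd.FileInfo{}, nil }
func (terminateOnlyStore) Terminate(id string) error                { return nil }

func main() {
    composer := tusd.NewStoreComposerFromDataStore(terminateOnlyStore{})

    // Only the flags for the interfaces the store actually satisfies are set.
    fmt.Println(composer.UsesTerminater) // true
    fmt.Println(composer.UsesGetReader)  // false
    fmt.Println(composer.UsesConcater)   // false
}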
@@ -0,0 +1,50 @@
+package tusd
+
+#define USE_FUNC(TYPE) func (store *StoreComposer) Use ## TYPE(x TYPE ## DataStore) { \
+    store.Uses ## TYPE = x != nil; \
+    store.TYPE = x; \
+}
+
+#define USE_FIELD(TYPE) Uses ## TYPE bool; \
+    TYPE TYPE ## DataStore
+
+#define USE_FROM(TYPE) if mod, ok := store.(TYPE ## DataStore); ok { \
+    composer.Use ## TYPE (mod) \
+}
+
+type StoreComposer struct {
+    Core DataStore
+
+    USE_FIELD(Terminater)
+    USE_FIELD(Finisher)
+    USE_FIELD(Locker)
+    USE_FIELD(GetReader)
+    USE_FIELD(Concater)
+}
+
+func NewStoreComposer() *StoreComposer {
+    return &StoreComposer{}
+}
+
+func NewStoreComposerFromDataStore(store DataStore) *StoreComposer {
+    composer := NewStoreComposer()
+    composer.UseCore(store)
+
+    USE_FROM(Terminater)
+    USE_FROM(Finisher)
+    USE_FROM(Locker)
+    USE_FROM(GetReader)
+    USE_FROM(Concater)
+
+    return composer
+}
+
+func (store *StoreComposer) UseCore(core DataStore) {
+    store.Core = core
+}
+
+USE_FUNC(Terminater)
+USE_FUNC(Finisher)
+USE_FUNC(Locker)
+USE_FUNC(GetReader)
+USE_FUNC(Concater)
@@ -0,0 +1,64 @@
+package tusd
+
+import (
+    "log"
+    "net/url"
+    "os"
+)
+
+// Config provides a way to configure the Handler depending on your needs.
+type Config struct {
+    // DataStore implementation used to store and retrieve the single uploads.
+    // Must not be nil.
+    DataStore DataStore
+    StoreComposer *StoreComposer
+    // MaxSize defines how many bytes may be stored in one single upload. If its
+    // value is 0 or smaller no limit will be enforced.
+    MaxSize int64
+    // BasePath defines the URL path used for handling uploads, e.g. "/files/".
+    // If no trailing slash is presented it will be added. You may specify an
+    // absolute URL containing a scheme, e.g. "http://tus.io"
+    BasePath string
+    isAbs    bool
+    // Initiate the CompleteUploads channel in the Handler struct in order to
+    // be notified about complete uploads
+    NotifyCompleteUploads bool
+    // Logger is the logger to use internally
+    Logger *log.Logger
+    // Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
+    // potentially set by proxies when generating an absolute URL in the
+    // response to POST requests.
+    RespectForwardedHeaders bool
+}
+
+func (config *Config) validate() error {
+    if config.Logger == nil {
+        config.Logger = log.New(os.Stdout, "[tusd] ", 0)
+    }
+
+    base := config.BasePath
+    uri, err := url.Parse(base)
+    if err != nil {
+        return err
+    }
+
+    // Ensure base path ends with slash to remove logic from absFileURL
+    if base != "" && string(base[len(base)-1]) != "/" {
+        base += "/"
+    }
+
+    // Ensure base path begins with slash if not absolute (starts with scheme)
+    if !uri.IsAbs() && len(base) > 0 && string(base[0]) != "/" {
+        base = "/" + base
+    }
+    config.BasePath = base
+    config.isAbs = uri.IsAbs()
+
+    if config.StoreComposer == nil {
+        config.StoreComposer = NewStoreComposerFromDataStore(config.DataStore)
+    } else if config.DataStore != nil {
+        // TODO: consider returning an error
+    }
+
+    return nil
+}
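validate() is unexported and runs inside NewHandler and NewUnroutedHandler (see the handler hunks below), so its observable effects are the BasePath normalization and the fallback from DataStore to a derived StoreComposer. A sketch of both configuration styles, assuming a local ./data directory:

package main

import (
    "log"

    "github.com/tus/tusd"
    "github.com/tus/tusd/filestore"
)

func main() {
    store := filestore.New("./data")

    // Legacy configuration: only DataStore is set. validate() builds a
    // StoreComposer from it via NewStoreComposerFromDataStore, so existing
    // callers keep working.
    legacy, err := tusd.NewHandler(tusd.Config{
        BasePath:  "files/", // normalized to "/files/" by validate()
        DataStore: store,
    })
    if err != nil {
        log.Fatal(err)
    }

    // New configuration: an explicit composer. If both fields are set, the
    // composer is used; the TODO in validate() leaves open whether that
    // should become an error.
    composer := tusd.NewStoreComposer()
    store.UseIn(composer)

    modern, err := tusd.NewHandler(tusd.Config{
        BasePath:      "files/",
        StoreComposer: composer,
    })
    if err != nil {
        log.Fatal(err)
    }

    _ = legacy
    _ = modern
}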
datastore.go
@@ -49,8 +49,6 @@ type DataStore interface {
 // if they want to receive DELETE requests using the Handler. If this interface
 // is not implemented, no request handler for this method is attached.
 type TerminaterDataStore interface {
-    DataStore
-
     // Terminate an upload so any further requests to the resource, both reading
     // and writing, must return os.ErrNotExist or similar.
     Terminate(id string) error
@@ -62,8 +60,6 @@ type TerminaterDataStore interface {
 // resources or notifying other services. For example, S3Store uses this
 // interface for removing a temporary object.
 type FinisherDataStore interface {
-    DataStore
-
     // FinishUpload executes additional operations for the finished upload which
     // is specified by its ID.
     FinishUpload(id string) error
@@ -77,8 +73,6 @@ type FinisherDataStore interface {
 // data corruption, especially to ensure correct offset values and the proper
 // order of chunks inside a single upload.
 type LockerDataStore interface {
-    DataStore
-
     // LockUpload attempts to obtain an exclusive lock for the upload specified
     // by its id.
     // If this operation fails because the resource is already locked, the
@@ -96,8 +90,6 @@ type LockerDataStore interface {
 // Please, be aware that this feature is not part of the official tus
 // specification. Instead it's a custom mechanism by tusd.
 type GetReaderDataStore interface {
-    DataStore
-
     // GetReader returns a reader which allows iterating of the content of an
     // upload specified by its ID. It should attempt to provide a reader even if
     // the upload has not been finished yet but it's not required.
@@ -112,8 +104,6 @@ type GetReaderDataStore interface {
 // Concatenation extension should be enabled. Only in this case, the handler
 // will parse and respect the Upload-Concat header.
 type ConcaterDataStore interface {
-    DataStore
-
     // ConcatUploads concatenations the content from the provided partial uploads
     // and write the result in the destination upload which is specified by its
     // ID. The caller (usually the handler) must and will ensure that this
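Dropping the embedded DataStore means each extension interface can now be satisfied on its own, which is what lets memorylocker below stop wrapping a store. A sketch with a hypothetical terminate-only type:

package main

import (
    "fmt"

    "github.com/tus/tusd"
)

// trashCan only knows how to terminate uploads. Before this commit it could
// not have implemented TerminaterDataStore, because that interface embedded
// the full DataStore.
type trashCan struct{}

func (trashCan) Terminate(id string) error {
    fmt.Printf("terminating %s\n", id)
    return nil
}

func main() {
    composer := tusd.NewStoreComposer()
    composer.UseTerminater(trashCan{})

    fmt.Println(composer.UsesTerminater) // true
}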
@@ -46,6 +46,13 @@ func New(path string) FileStore {
     return FileStore{path}
 }
 
+func (store FileStore) UseIn(composer *tusd.StoreComposer) {
+    composer.UseCore(store)
+    composer.UseGetReader(store)
+    composer.UseTerminater(store)
+    composer.UseLocker(store)
+}
+
 func (store FileStore) NewUpload(info tusd.FileInfo) (id string, err error) {
     id = uid.Uid()
     info.ID = id
@@ -22,6 +22,10 @@ type Handler struct {
 // endpoints to be customized. These are not part of the protocol so can be
 // changed depending on your needs.
 func NewHandler(config Config) (*Handler, error) {
+    if err := config.validate(); err != nil {
+        return nil, err
+    }
+
     handler, err := NewUnroutedHandler(config)
     if err != nil {
         return nil, err
@@ -41,12 +45,12 @@ func NewHandler(config Config) (*Handler, error) {
     mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))
 
     // Only attach the DELETE handler if the Terminate() method is provided
-    if _, ok := config.DataStore.(TerminaterDataStore); ok {
+    if config.StoreComposer.UsesTerminater {
         mux.Del(":id", http.HandlerFunc(handler.DelFile))
     }
 
     // GET handler requires the GetReader() method
-    if _, ok := config.DataStore.(GetReaderDataStore); ok {
+    if config.StoreComposer.UsesGetReader {
         mux.Get(":id", http.HandlerFunc(handler.GetFile))
     }
 
@@ -12,27 +12,19 @@
 // properly. Two tusd.FileStore instances using the same directory, for example.
 // In addition the limited store will keep a list of the uploads' IDs in memory
 // which may create a growing memory leak.
-//
-// While LimitedStore implements the GetReader, LockUpload, UnlockUpload,
-// FinishUpload and ConcatUploads methods, it does not contain proper definitions
-// for them. When invoked, the call will be passed to the underlying
-// data store as long as it provides these methods. If not, either an error
-// is returned or nothing happens (see the specific methods for more
-// detailed information). The motivation behind this decision was, that this
-// allows to expose the additional extensions implemented using the
-// interfaces, such as GetReaderDataStore.
 package limitedstore
 
 import (
     "github.com/tus/tusd"
-    "io"
     "sort"
     "sync"
 )
 
 type LimitedStore struct {
+    tusd.DataStore
+    terminater tusd.TerminaterDataStore
+
     StoreSize int64
-    tusd.TerminaterDataStore
 
     uploads  map[string]int64
     usedSize int64
@@ -55,15 +47,21 @@ func (p pairlist) Less(i, j int) bool { return p[i].value < p[j].value }
 // New creates a new limited store with the given size as the maximum storage
 // size. The wrapped data store needs to implement the TerminaterDataStore
 // interface, in order to provide the required Terminate method.
-func New(storeSize int64, dataStore tusd.TerminaterDataStore) *LimitedStore {
+func New(storeSize int64, dataStore tusd.DataStore, terminater tusd.TerminaterDataStore) *LimitedStore {
     return &LimitedStore{
         StoreSize: storeSize,
-        TerminaterDataStore: dataStore,
-        uploads: make(map[string]int64),
-        mutex: new(sync.Mutex),
+        DataStore:  dataStore,
+        terminater: terminater,
+        uploads:    make(map[string]int64),
+        mutex:      new(sync.Mutex),
     }
 }
 
+func (store *LimitedStore) UseIn(composer *tusd.StoreComposer) {
+    composer.UseCore(store)
+    composer.UseTerminater(store)
+}
+
 func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
     store.mutex.Lock()
     defer store.mutex.Unlock()
@@ -72,7 +70,7 @@ func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
         return "", err
     }
 
-    id, err := store.TerminaterDataStore.NewUpload(info)
+    id, err := store.DataStore.NewUpload(info)
     if err != nil {
         return "", err
     }
@@ -91,7 +89,7 @@ func (store *LimitedStore) Terminate(id string) error {
 }
 
 func (store *LimitedStore) terminate(id string) error {
-    err := store.TerminaterDataStore.Terminate(id)
+    err := store.terminater.Terminate(id)
     if err != nil {
         return err
     }
@@ -135,55 +133,3 @@ func (store *LimitedStore) ensureSpace(size int64) error {
 
     return nil
 }
-
-// GetReader will pass the call to the underlying data store if it implements
-// the tusd.GetReaderDataStore interface. Else tusd.ErrNotImplemented will be
-// returned.
-func (store *LimitedStore) GetReader(id string) (io.Reader, error) {
-    if s, ok := store.TerminaterDataStore.(tusd.GetReaderDataStore); ok {
-        return s.GetReader(id)
-    } else {
-        return nil, tusd.ErrNotImplemented
-    }
-}
-
-// LockUpload will pass the call to the underlying data store if it implements
-// the tusd.LockerDataStore interface. Else this function simply returns nil.
-func (store *LimitedStore) LockUpload(id string) error {
-    if s, ok := store.TerminaterDataStore.(tusd.LockerDataStore); ok {
-        return s.LockUpload(id)
-    }
-
-    return nil
-}
-
-// UnlockUpload will pass the call to the underlying data store if it implements
-// the tusd.LockerDataStore interface. Else this function simply returns nil.
-func (store *LimitedStore) UnlockUpload(id string) error {
-    if s, ok := store.TerminaterDataStore.(tusd.LockerDataStore); ok {
-        return s.UnlockUpload(id)
-    }
-
-    return nil
-}
-
-// FinishUpload will pass the call to the underlying data store if it implements
-// the tusd.FinisherDataStore interface. Else this function simply returns nil.
-func (store *LimitedStore) FinishUpload(id string) error {
-    if s, ok := store.TerminaterDataStore.(tusd.FinisherDataStore); ok {
-        return s.FinishUpload(id)
-    }
-
-    return nil
-}
-
-// ConcatUploads will pass the call to the underlying data store if it implements
-// the tusd.ConcaterDataStore interface. Else tusd.ErrNotImplemented will be
-// returned.
-func (store *LimitedStore) ConcatUploads(dest string, src []string) error {
-    if s, ok := store.TerminaterDataStore.(tusd.ConcaterDataStore); ok {
-        return s.ConcatUploads(dest, src)
-    } else {
-        return tusd.ErrNotImplemented
-    }
-}
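LimitedStore now wraps a plain DataStore plus a separate TerminaterDataStore instead of a single TerminaterDataStore, and registers itself only as core store and terminater. A sketch of the new wiring, mirroring the main-program hunk above and assuming a hypothetical ./data directory and 1 GB limit:

package main

import (
    "github.com/tus/tusd"
    "github.com/tus/tusd/filestore"
    "github.com/tus/tusd/limitedstore"
)

func main() {
    composer := tusd.NewStoreComposer()

    // Register the underlying store first so composer.Core and
    // composer.Terminater are populated.
    store := filestore.New("./data")
    store.UseIn(composer)

    // Wrap core and terminater in a size limit; UseIn overwrites the
    // composer's Core and Terminater with the limited wrapper, while the
    // file store's GetReader and Locker registrations stay in place.
    limited := limitedstore.New(1024*1024*1024, composer.Core, composer.Terminater)
    limited.UseIn(composer)

    _ = composer
}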
@@ -11,11 +11,7 @@ import (
 )
 
 var _ tusd.DataStore = &LimitedStore{}
-var _ tusd.GetReaderDataStore = &LimitedStore{}
 var _ tusd.TerminaterDataStore = &LimitedStore{}
-var _ tusd.LockerDataStore = &LimitedStore{}
-var _ tusd.ConcaterDataStore = &LimitedStore{}
-var _ tusd.FinisherDataStore = &LimitedStore{}
 
 type dataStore struct {
     t *assert.Assertions
@@ -45,10 +41,6 @@ func (store *dataStore) GetInfo(id string) (tusd.FileInfo, error) {
     return tusd.FileInfo{}, nil
 }
 
-func (store *dataStore) GetReader(id string) (io.Reader, error) {
-    return nil, tusd.ErrNotImplemented
-}
-
 func (store *dataStore) Terminate(id string) error {
     // We expect the uploads to be terminated in a specific order (the bigger
     // come first)
@@ -66,7 +58,7 @@ func TestLimitedStore(t *testing.T) {
     dataStore := &dataStore{
         t: a,
     }
-    store := New(100, dataStore)
+    store := New(100, dataStore, dataStore)
 
     // Create new upload (30 bytes)
     id, err := store.NewUpload(tusd.FileInfo{
@@ -18,21 +18,26 @@ import (
 // cheap mechansim. Locks will only exist as long as this object is kept in
 // reference and will be erased if the program exits.
 type MemoryLocker struct {
-    tusd.DataStore
     locks map[string]bool
 }
 
-// New creates a new lock memory wrapper around the provided storage.
-func NewMemoryLocker(store tusd.DataStore) *MemoryLocker {
+// New creates a new lock memory.
+func NewMemoryLocker(_ tusd.DataStore) *MemoryLocker {
+    return New()
+}
+
+func New() *MemoryLocker {
     return &MemoryLocker{
-        DataStore: store,
-        locks:     make(map[string]bool),
+        locks: make(map[string]bool),
     }
 }
 
+func (locker *MemoryLocker) UseIn(composer *tusd.StoreComposer) {
+    composer.UseLocker(locker)
+}
+
 // LockUpload tries to obtain the exclusive lock.
 func (locker *MemoryLocker) LockUpload(id string) error {
 
     // Ensure file is not locked
     if _, ok := locker.locks[id]; ok {
         return tusd.ErrFileLocked
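memorylocker.New now takes no arguments, and NewMemoryLocker survives only for backwards compatibility, ignoring the store it is handed. A sketch of the locking behaviour, using a hypothetical upload ID:

package main

import (
    "fmt"

    "github.com/tus/tusd"
    "github.com/tus/tusd/memorylocker"
)

func main() {
    composer := tusd.NewStoreComposer()

    locker := memorylocker.New()
    locker.UseIn(composer)

    // The locker is independent of any data store: it only tracks lock
    // state in memory, keyed by upload ID.
    if err := composer.Locker.LockUpload("my-upload"); err != nil {
        fmt.Println(err)
    }
    fmt.Println(composer.Locker.LockUpload("my-upload")) // tusd.ErrFileLocked
}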
@@ -1,7 +1,6 @@
 package memorylocker
 
 import (
-    "io"
     "testing"
 
     "github.com/stretchr/testify/assert"
@@ -9,28 +8,11 @@ import (
     "github.com/tus/tusd"
 )
 
-type zeroStore struct{}
-
-func (store zeroStore) NewUpload(info tusd.FileInfo) (string, error) {
-    return "", nil
-}
-func (store zeroStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
-    return 0, nil
-}
-
-func (store zeroStore) GetInfo(id string) (tusd.FileInfo, error) {
-    return tusd.FileInfo{}, nil
-}
-
-func (store zeroStore) GetReader(id string) (io.Reader, error) {
-    return nil, tusd.ErrNotImplemented
-}
-
 func TestMemoryLocker(t *testing.T) {
     a := assert.New(t)
 
     var locker tusd.LockerDataStore
-    locker = NewMemoryLocker(&zeroStore{})
+    locker = New()
 
     a.NoError(locker.LockUpload("one"))
     a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))
@@ -131,6 +131,14 @@ func New(bucket string, service s3iface.S3API) S3Store {
     }
 }
 
+func (store S3Store) UseIn(composer *tusd.StoreComposer) {
+    composer.UseCore(store)
+    composer.UseTerminater(store)
+    composer.UseFinisher(store)
+    composer.UseGetReader(store)
+    composer.UseConcater(store)
+}
+
 func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
     var uploadId string
     if info.ID == "" {
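S3Store registers every capability except locking, so the main program pairs it with memorylocker. A sketch of that pairing, assuming the aws-sdk-go credential and session setup already used by the command and a hypothetical bucket name:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"

    "github.com/tus/tusd"
    "github.com/tus/tusd/memorylocker"
    "github.com/tus/tusd/s3store"
)

func main() {
    composer := tusd.NewStoreComposer()

    // Credentials and region come from the usual AWS_* environment variables.
    config := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
    store := s3store.New("my-bucket", s3.New(session.New(), config))
    store.UseIn(composer) // core, terminater, finisher, get-reader, concater

    // S3Store does not provide locking, so add one explicitly.
    memorylocker.New().UseIn(composer)

    _ = composer
}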
@@ -6,7 +6,6 @@ import (
     "io"
     "log"
     "net/http"
-    "net/url"
     "os"
     "regexp"
     "strconv"
@@ -52,35 +51,12 @@ var ErrStatusCodes = map[error]int{
     ErrModifyFinal: http.StatusForbidden,
 }
 
-// Config provides a way to configure the Handler depending on your needs.
-type Config struct {
-    // DataStore implementation used to store and retrieve the single uploads.
-    // Must no be nil.
-    DataStore DataStore
-    // MaxSize defines how many bytes may be stored in one single upload. If its
-    // value is is 0 or smaller no limit will be enforced.
-    MaxSize int64
-    // BasePath defines the URL path used for handling uploads, e.g. "/files/".
-    // If no trailing slash is presented it will be added. You may specify an
-    // absolute URL containing a scheme, e.g. "http://tus.io"
-    BasePath string
-    // Initiate the CompleteUploads channel in the Handler struct in order to
-    // be notified about complete uploads
-    NotifyCompleteUploads bool
-    // Logger the logger to use internally
-    Logger *log.Logger
-    // Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
-    // potentially set by proxies when generating an absolute URL in the
-    // reponse to POST requests.
-    RespectForwardedHeaders bool
-}
-
 // UnroutedHandler exposes methods to handle requests as part of the tus protocol,
 // such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
 // is provided which is, however, not part of the specification.
 type UnroutedHandler struct {
     config        Config
-    dataStore     DataStore
+    composer      *StoreComposer
     isBasePathAbs bool
     basePath      string
     logger        *log.Logger
@@ -97,42 +73,26 @@ type UnroutedHandler struct {
 // a router (aka mux) of your choice. If you are looking for preconfigured
 // handler see NewHandler.
 func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
-    logger := config.Logger
-    if logger == nil {
-        logger = log.New(os.Stdout, "[tusd] ", 0)
-    }
-    base := config.BasePath
-    uri, err := url.Parse(base)
-    if err != nil {
+    if err := config.validate(); err != nil {
         return nil, err
     }
 
-    // Ensure base path ends with slash to remove logic from absFileURL
-    if base != "" && string(base[len(base)-1]) != "/" {
-        base += "/"
-    }
-
-    // Ensure base path begins with slash if not absolute (starts with scheme)
-    if !uri.IsAbs() && len(base) > 0 && string(base[0]) != "/" {
-        base = "/" + base
-    }
-
     // Only promote extesions using the Tus-Extension header which are implemented
     extensions := "creation"
-    if _, ok := config.DataStore.(TerminaterDataStore); ok {
+    if config.StoreComposer.UsesTerminater {
         extensions += ",termination"
     }
-    if _, ok := config.DataStore.(ConcaterDataStore); ok {
+    if config.StoreComposer.UsesConcater {
         extensions += ",concatenation"
     }
 
     handler := &UnroutedHandler{
         config:          config,
-        dataStore:       config.DataStore,
-        basePath:        base,
-        isBasePathAbs:   uri.IsAbs(),
+        composer:        config.StoreComposer,
+        basePath:        config.BasePath,
+        isBasePathAbs:   config.isAbs,
         CompleteUploads: make(chan FileInfo),
-        logger:          logger,
+        logger:          config.Logger,
         extensions:      extensions,
     }
 
@@ -211,8 +171,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
     // Only use the proper Upload-Concat header if the concatenation extension
    // is even supported by the data store.
     var concatHeader string
-    concatStore, ok := handler.dataStore.(ConcaterDataStore)
-    if ok {
+    if handler.composer.UsesConcater {
        concatHeader = r.Header.Get("Upload-Concat")
     }
 
@@ -258,14 +217,14 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
         PartialUploads: partialUploads,
     }
 
-    id, err := handler.dataStore.NewUpload(info)
+    id, err := handler.composer.Core.NewUpload(info)
     if err != nil {
         handler.sendError(w, r, err)
         return
     }
 
     if isFinal {
-        if err := concatStore.ConcatUploads(id, partialUploads); err != nil {
+        if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
             handler.sendError(w, r, err)
             return
         }
@@ -290,7 +249,8 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    if locker, ok := handler.dataStore.(LockerDataStore); ok {
+    if handler.composer.UsesLocker {
+        locker := handler.composer.Locker
         if err := locker.LockUpload(id); err != nil {
             handler.sendError(w, r, err)
             return
@@ -299,7 +259,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
         defer locker.UnlockUpload(id)
     }
 
-    info, err := handler.dataStore.GetInfo(id)
+    info, err := handler.composer.Core.GetInfo(id)
     if err != nil {
         handler.sendError(w, r, err)
         return
@@ -350,7 +310,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    if locker, ok := handler.dataStore.(LockerDataStore); ok {
+    if handler.composer.UsesLocker {
+        locker := handler.composer.Locker
         if err := locker.LockUpload(id); err != nil {
             handler.sendError(w, r, err)
             return
@@ -359,7 +320,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
         defer locker.UnlockUpload(id)
     }
 
-    info, err := handler.dataStore.GetInfo(id)
+    info, err := handler.composer.Core.GetInfo(id)
     if err != nil {
         handler.sendError(w, r, err)
         return
@@ -393,7 +354,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
     // Limit the
     reader := io.LimitReader(r.Body, maxSize)
 
-    bytesWritten, err := handler.dataStore.WriteChunk(id, offset, reader)
+    bytesWritten, err := handler.composer.Core.WriteChunk(id, offset, reader)
     if err != nil {
         handler.sendError(w, r, err)
         return
@@ -406,8 +367,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
     // If the upload is completed, ...
     if newOffset == info.Size {
         // ... allow custom mechanism to finish and cleanup the upload
-        if store, ok := handler.dataStore.(FinisherDataStore); ok {
-            if err := store.FinishUpload(id); err != nil {
+        if handler.composer.UsesFinisher {
+            if err := handler.composer.Finisher.FinishUpload(id); err != nil {
                 handler.sendError(w, r, err)
                 return
             }
@@ -426,8 +387,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
 // GetFile handles requests to download a file using a GET request. This is not
 // part of the specification.
 func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
-    dataStore, ok := handler.dataStore.(GetReaderDataStore)
-    if !ok {
+    if !handler.composer.UsesGetReader {
         handler.sendError(w, r, ErrNotImplemented)
         return
     }
@@ -438,7 +398,8 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    if locker, ok := handler.dataStore.(LockerDataStore); ok {
+    if handler.composer.UsesLocker {
+        locker := handler.composer.Locker
         if err := locker.LockUpload(id); err != nil {
             handler.sendError(w, r, err)
             return
@@ -447,7 +408,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
         defer locker.UnlockUpload(id)
     }
 
-    info, err := handler.dataStore.GetInfo(id)
+    info, err := handler.composer.Core.GetInfo(id)
     if err != nil {
         handler.sendError(w, r, err)
         return
@@ -460,7 +421,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
     }
 
     // Get reader
-    src, err := dataStore.GetReader(id)
+    src, err := handler.composer.GetReader.GetReader(id)
     if err != nil {
         handler.sendError(w, r, err)
         return
@@ -479,8 +440,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
 // DelFile terminates an upload permanently.
 func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
     // Abort the request handling if the required interface is not implemented
-    tstore, ok := handler.config.DataStore.(TerminaterDataStore)
-    if !ok {
+    if !handler.composer.UsesTerminater {
         handler.sendError(w, r, ErrNotImplemented)
         return
     }
@@ -491,7 +451,8 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    if locker, ok := handler.dataStore.(LockerDataStore); ok {
+    if handler.composer.UsesLocker {
+        locker := handler.composer.Locker
         if err := locker.LockUpload(id); err != nil {
             handler.sendError(w, r, err)
             return
@@ -500,7 +461,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
         defer locker.UnlockUpload(id)
     }
 
-    err = tstore.Terminate(id)
+    err = handler.composer.Terminater.Terminate(id)
     if err != nil {
         handler.sendError(w, r, err)
         return
@@ -591,7 +552,7 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto string) {
     // of a final resource.
 func (handler *UnroutedHandler) sizeOfUploads(ids []string) (size int64, err error) {
     for _, id := range ids {
-        info, err := handler.dataStore.GetInfo(id)
+        info, err := handler.composer.Core.GetInfo(id)
         if err != nil {
             return size, err
         }
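Throughout the handler, the old handler.dataStore type assertions are replaced by a check of the composer's Uses* flag followed by a call through the matching typed field. A sketch of the same pattern applied to the Tus-Extension string, using only exported API; capabilityReport is a hypothetical helper, not part of tusd:

package main

import (
    "fmt"

    "github.com/tus/tusd"
    "github.com/tus/tusd/filestore"
)

// capabilityReport mimics the handler's new dispatch style: instead of
// asserting interfaces on a single DataStore value, it consults the
// composer's Uses* flags.
func capabilityReport(composer *tusd.StoreComposer) string {
    extensions := "creation"
    if composer.UsesTerminater {
        extensions += ",termination"
    }
    if composer.UsesConcater {
        extensions += ",concatenation"
    }
    return extensions
}

func main() {
    composer := tusd.NewStoreComposer()
    filestore.New("./data").UseIn(composer)

    // The file store provides termination but not concatenation.
    fmt.Println(capabilityReport(composer)) // "creation,termination"
}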