Add initial draft of StoreComposer

This commit is contained in:
Marius 2016-02-21 23:25:35 +01:00
parent 13c27e1c19
commit 21ae1c45a7
13 changed files with 273 additions and 190 deletions

View File

@ -12,6 +12,7 @@ import (
"github.com/tus/tusd"
"github.com/tus/tusd/filestore"
"github.com/tus/tusd/limitedstore"
"github.com/tus/tusd/memorylocker"
"github.com/tus/tusd/s3store"
"github.com/aws/aws-sdk-go/aws"
@ -58,25 +59,30 @@ func main() {
return
}
var store tusd.TerminaterDataStore
composer := tusd.NewStoreComposer()
if s3Bucket == "" {
stdout.Printf("Using '%s' as directory storage.\n", dir)
if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
stderr.Fatalf("Unable to ensure directory exists: %s", err)
}
store = filestore.New(dir)
store := filestore.New(dir)
store.UseIn(composer)
} else {
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", s3Bucket)
// Derive credentials from AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID and
// AWS_REGION environment variables.
credentials := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
store = s3store.New(s3Bucket, s3.New(session.New(), credentials))
store := s3store.New(s3Bucket, s3.New(session.New(), credentials))
store.UseIn(composer)
locker := memorylocker.New()
locker.UseIn(composer)
}
if storeSize > 0 {
store = limitedstore.New(storeSize, store)
limitedstore.New(storeSize, composer.Core, composer.Terminater).UseIn(composer)
stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
// We need to ensure that a single upload can fit into the storage size
@ -90,7 +96,7 @@ func main() {
handler, err := tusd.NewHandler(tusd.Config{
MaxSize: maxSize,
BasePath: "files/",
DataStore: store,
StoreComposer: composer,
NotifyCompleteUploads: true,
})
if err != nil {

68
composer.go Normal file
View File

@ -0,0 +1,68 @@
package tusd
// StoreComposer represents a composable data store. It consists of the
// core data store and optional extensions. Each Uses* flag reports
// whether the corresponding extension field holds a usable (non-nil)
// implementation; the flags are maintained by the Use* methods.
type StoreComposer struct {
	// Core is the base data store used for creating, writing and reading
	// uploads. It must be set before the composer is handed to a handler.
	Core DataStore

	// UsesTerminater is true if Terminater holds a non-nil extension.
	UsesTerminater bool
	Terminater     TerminaterDataStore
	// UsesFinisher is true if Finisher holds a non-nil extension.
	UsesFinisher bool
	Finisher     FinisherDataStore
	// UsesLocker is true if Locker holds a non-nil extension.
	UsesLocker bool
	Locker     LockerDataStore
	// UsesGetReader is true if GetReader holds a non-nil extension.
	UsesGetReader bool
	GetReader     GetReaderDataStore
	// UsesConcater is true if Concater holds a non-nil extension.
	UsesConcater bool
	Concater     ConcaterDataStore
}
// NewStoreComposer creates a new and empty store composer. Extensions can
// be attached afterwards using the Use* methods.
func NewStoreComposer() *StoreComposer {
	composer := new(StoreComposer)
	return composer
}
// NewStoreComposerFromDataStore creates a new store composer around the
// provided data store. It inspects the store for each optional extension
// interface and registers every extension the store implements.
func NewStoreComposerFromDataStore(store DataStore) *StoreComposer {
	composer := NewStoreComposer()
	composer.UseCore(store)

	if terminater, ok := store.(TerminaterDataStore); ok {
		composer.UseTerminater(terminater)
	}
	if finisher, ok := store.(FinisherDataStore); ok {
		composer.UseFinisher(finisher)
	}
	if locker, ok := store.(LockerDataStore); ok {
		composer.UseLocker(locker)
	}
	if getReader, ok := store.(GetReaderDataStore); ok {
		composer.UseGetReader(getReader)
	}
	if concater, ok := store.(ConcaterDataStore); ok {
		composer.UseConcater(concater)
	}

	return composer
}
// UseCore sets the core data store. Passing nil unsets the property.
func (store *StoreComposer) UseCore(core DataStore) {
	store.Core = core
}
// UseTerminater sets the TerminaterDataStore extension. Passing nil unsets
// the extension and clears the UsesTerminater flag.
func (store *StoreComposer) UseTerminater(x TerminaterDataStore) {
	store.Terminater = x
	store.UsesTerminater = x != nil
}
// UseFinisher sets the FinisherDataStore extension. Passing nil unsets
// the extension and clears the UsesFinisher flag.
func (store *StoreComposer) UseFinisher(x FinisherDataStore) {
	store.Finisher = x
	store.UsesFinisher = x != nil
}
// UseLocker sets the LockerDataStore extension. Passing nil unsets
// the extension and clears the UsesLocker flag.
func (store *StoreComposer) UseLocker(x LockerDataStore) {
	store.Locker = x
	store.UsesLocker = x != nil
}
// UseGetReader sets the GetReaderDataStore extension. Passing nil unsets
// the extension and clears the UsesGetReader flag.
func (store *StoreComposer) UseGetReader(x GetReaderDataStore) {
	store.GetReader = x
	store.UsesGetReader = x != nil
}
// UseConcater sets the ConcaterDataStore extension. Passing nil unsets
// the extension and clears the UsesConcater flag.
func (store *StoreComposer) UseConcater(x ConcaterDataStore) {
	store.Concater = x
	store.UsesConcater = x != nil
}

50
composer.mgo Normal file
View File

@ -0,0 +1,50 @@
package tusd
// NOTE(review): this .mgo file appears to be the macro template from which
// composer.go is generated via the C preprocessor — keep both in sync.

// USE_FUNC(TYPE) expands to the Use<TYPE> setter, which stores the
// extension and records its presence in the Uses<TYPE> flag.
#define USE_FUNC(TYPE) func (store *StoreComposer) Use ## TYPE(x TYPE ## DataStore) { \
	store.Uses ## TYPE = x != nil; \
	store.TYPE = x; \
}

// USE_FIELD(TYPE) expands to the Uses<TYPE> flag plus the <TYPE> extension
// field inside the StoreComposer struct.
#define USE_FIELD(TYPE) Uses ## TYPE bool; \
	TYPE TYPE ## DataStore

// USE_FROM(TYPE) expands to a type assertion which registers the store as
// the <TYPE> extension if it implements the corresponding interface.
#define USE_FROM(TYPE) if mod, ok := store.(TYPE ## DataStore); ok { \
	composer.Use ## TYPE (mod) \
}
type StoreComposer struct {
Core DataStore
USE_FIELD(Terminater)
USE_FIELD(Finisher)
USE_FIELD(Locker)
USE_FIELD(GetReader)
USE_FIELD(Concater)
}
func NewStoreComposer() *StoreComposer {
return &StoreComposer{}
}
func NewStoreComposerFromDataStore(store DataStore) *StoreComposer {
composer := NewStoreComposer()
composer.UseCore(store)
USE_FROM(Terminater)
USE_FROM(Finisher)
USE_FROM(Locker)
USE_FROM(GetReader)
USE_FROM(Concater)
return composer
}
func (store *StoreComposer) UseCore(core DataStore) {
store.Core = core
}
USE_FUNC(Terminater)
USE_FUNC(Finisher)
USE_FUNC(Locker)
USE_FUNC(GetReader)
USE_FUNC(Concater)

64
config.go Normal file
View File

@ -0,0 +1,64 @@
package tusd
import (
"log"
"net/url"
"os"
)
// Config provides a way to configure the Handler depending on your needs.
type Config struct {
	// DataStore implementation used to store and retrieve the single uploads.
	// Must not be nil. If StoreComposer is nil, a composer is derived from
	// this store during validation.
	DataStore DataStore
	// StoreComposer is consulted by the handler to decide which extensions
	// are enabled. If nil, it is created from DataStore by validate().
	StoreComposer *StoreComposer
	// MaxSize defines how many bytes may be stored in one single upload. If its
	// value is 0 or smaller no limit will be enforced.
	MaxSize int64
	// BasePath defines the URL path used for handling uploads, e.g. "/files/".
	// If no trailing slash is presented it will be added. You may specify an
	// absolute URL containing a scheme, e.g. "http://tus.io"
	BasePath string
	// isAbs is set by validate() and reports whether BasePath is an absolute
	// URL (i.e. contains a scheme).
	isAbs bool
	// Initiate the CompleteUploads channel in the Handler struct in order to
	// be notified about complete uploads
	NotifyCompleteUploads bool
	// Logger is the logger to use internally. Defaults to a stdout logger
	// with the "[tusd] " prefix when left nil.
	Logger *log.Logger
	// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
	// potentially set by proxies when generating an absolute URL in the
	// response to POST requests.
	RespectForwardedHeaders bool
}
// validate applies defaults to the configuration: it installs a stdout
// logger if none is set, normalizes BasePath (trailing slash always,
// leading slash for relative paths), records whether BasePath is absolute
// and derives a StoreComposer from DataStore when no composer is supplied.
// It returns an error only if BasePath cannot be parsed as a URL.
func (config *Config) validate() error {
	if config.Logger == nil {
		config.Logger = log.New(os.Stdout, "[tusd] ", 0)
	}

	base := config.BasePath
	uri, err := url.Parse(base)
	if err != nil {
		return err
	}

	// Ensure base path ends with slash to remove logic from absFileURL
	if base != "" && base[len(base)-1] != '/' {
		base += "/"
	}

	// Ensure base path begins with slash if not absolute (starts with scheme)
	if !uri.IsAbs() && len(base) > 0 && base[0] != '/' {
		base = "/" + base
	}
	config.BasePath = base
	config.isAbs = uri.IsAbs()

	if config.StoreComposer == nil {
		// Fall back to the plain DataStore field; the composer inspects the
		// store for the optional extension interfaces.
		config.StoreComposer = NewStoreComposerFromDataStore(config.DataStore)
	} else if config.DataStore != nil {
		// Both StoreComposer and DataStore are set; the composer wins.
		// TODO: consider returning an error
	}

	return nil
}

View File

@ -49,8 +49,6 @@ type DataStore interface {
// if they want to receive DELETE requests using the Handler. If this interface
// is not implemented, no request handler for this method is attached.
type TerminaterDataStore interface {
DataStore
// Terminate an upload so any further requests to the resource, both reading
// and writing, must return os.ErrNotExist or similar.
Terminate(id string) error
@ -62,8 +60,6 @@ type TerminaterDataStore interface {
// resources or notifying other services. For example, S3Store uses this
// interface for removing a temporary object.
type FinisherDataStore interface {
DataStore
// FinishUpload executes additional operations for the finished upload which
// is specified by its ID.
FinishUpload(id string) error
@ -77,8 +73,6 @@ type FinisherDataStore interface {
// data corruption, especially to ensure correct offset values and the proper
// order of chunks inside a single upload.
type LockerDataStore interface {
DataStore
// LockUpload attempts to obtain an exclusive lock for the upload specified
// by its id.
// If this operation fails because the resource is already locked, the
@ -96,8 +90,6 @@ type LockerDataStore interface {
// Please, be aware that this feature is not part of the official tus
// specification. Instead it's a custom mechanism by tusd.
type GetReaderDataStore interface {
DataStore
// GetReader returns a reader which allows iterating of the content of an
// upload specified by its ID. It should attempt to provide a reader even if
// the upload has not been finished yet but it's not required.
@ -112,8 +104,6 @@ type GetReaderDataStore interface {
// Concatenation extension should be enabled. Only in this case, the handler
// will parse and respect the Upload-Concat header.
type ConcaterDataStore interface {
DataStore
// ConcatUploads concatenates the content from the provided partial uploads
// and writes the result into the destination upload which is specified by its
// ID. The caller (usually the handler) must and will ensure that this

View File

@ -46,6 +46,13 @@ func New(path string) FileStore {
return FileStore{path}
}
// UseIn registers this store as the core of the composer and additionally
// enables every extension FileStore implements: termination, locking and
// reading back upload content.
func (store FileStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseTerminater(store)
	composer.UseLocker(store)
	composer.UseGetReader(store)
}
func (store FileStore) NewUpload(info tusd.FileInfo) (id string, err error) {
id = uid.Uid()
info.ID = id

View File

@ -22,6 +22,10 @@ type Handler struct {
// endpoints to be customized. These are not part of the protocol so can be
// changed depending on your needs.
func NewHandler(config Config) (*Handler, error) {
if err := config.validate(); err != nil {
return nil, err
}
handler, err := NewUnroutedHandler(config)
if err != nil {
return nil, err
@ -41,12 +45,12 @@ func NewHandler(config Config) (*Handler, error) {
mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))
// Only attach the DELETE handler if the Terminate() method is provided
if _, ok := config.DataStore.(TerminaterDataStore); ok {
if config.StoreComposer.UsesTerminater {
mux.Del(":id", http.HandlerFunc(handler.DelFile))
}
// GET handler requires the GetReader() method
if _, ok := config.DataStore.(GetReaderDataStore); ok {
if config.StoreComposer.UsesGetReader {
mux.Get(":id", http.HandlerFunc(handler.GetFile))
}

View File

@ -12,27 +12,19 @@
// properly. Two tusd.FileStore instances using the same directory, for example.
// In addition the limited store will keep a list of the uploads' IDs in memory
// which may create a growing memory leak.
//
// While LimitedStore implements the GetReader, LockUpload, UnlockUpload,
// FinishUpload and ConcatUploads methods, it does not contain proper definitions
// for them. When invoked, the call will be passed to the underlying
// data store as long as it provides these methods. If not, either an error
// is returned or nothing happens (see the specific methods for more
// detailed information). The motivation behind this decision was, that this
// allows to expose the additional extensions implemented using the
// interfaces, such as GetReaderDataStore.
package limitedstore
import (
"github.com/tus/tusd"
"io"
"sort"
"sync"
)
type LimitedStore struct {
tusd.DataStore
terminater tusd.TerminaterDataStore
StoreSize int64
tusd.TerminaterDataStore
uploads map[string]int64
usedSize int64
@ -55,15 +47,21 @@ func (p pairlist) Less(i, j int) bool { return p[i].value < p[j].value }
// New creates a new limited store with the given size as the maximum storage
// size. The wrapped data store holds the uploads, while the separately
// provided TerminaterDataStore is used to remove uploads when space is needed.
func New(storeSize int64, dataStore tusd.TerminaterDataStore) *LimitedStore {
func New(storeSize int64, dataStore tusd.DataStore, terminater tusd.TerminaterDataStore) *LimitedStore {
return &LimitedStore{
StoreSize: storeSize,
TerminaterDataStore: dataStore,
uploads: make(map[string]int64),
mutex: new(sync.Mutex),
StoreSize: storeSize,
DataStore: dataStore,
terminater: terminater,
uploads: make(map[string]int64),
mutex: new(sync.Mutex),
}
}
// UseIn registers this store as the composer's core and as its
// termination extension, replacing whatever was registered before.
func (store *LimitedStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseTerminater(store)
}
func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
store.mutex.Lock()
defer store.mutex.Unlock()
@ -72,7 +70,7 @@ func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
return "", err
}
id, err := store.TerminaterDataStore.NewUpload(info)
id, err := store.DataStore.NewUpload(info)
if err != nil {
return "", err
}
@ -91,7 +89,7 @@ func (store *LimitedStore) Terminate(id string) error {
}
func (store *LimitedStore) terminate(id string) error {
err := store.TerminaterDataStore.Terminate(id)
err := store.terminater.Terminate(id)
if err != nil {
return err
}
@ -135,55 +133,3 @@ func (store *LimitedStore) ensureSpace(size int64) error {
return nil
}
// GetReader will pass the call to the underlying data store if it implements
// the tusd.GetReaderDataStore interface. Else tusd.ErrNotImplemented will be
// returned.
func (store *LimitedStore) GetReader(id string) (io.Reader, error) {
if s, ok := store.TerminaterDataStore.(tusd.GetReaderDataStore); ok {
return s.GetReader(id)
} else {
return nil, tusd.ErrNotImplemented
}
}
// LockUpload will pass the call to the underlying data store if it implements
// the tusd.LockerDataStore interface. Else this function simply returns nil.
func (store *LimitedStore) LockUpload(id string) error {
if s, ok := store.TerminaterDataStore.(tusd.LockerDataStore); ok {
return s.LockUpload(id)
}
return nil
}
// UnlockUpload will pass the call to the underlying data store if it implements
// the tusd.LockerDataStore interface. Else this function simply returns nil.
func (store *LimitedStore) UnlockUpload(id string) error {
if s, ok := store.TerminaterDataStore.(tusd.LockerDataStore); ok {
return s.UnlockUpload(id)
}
return nil
}
// FinishUpload will pass the call to the underlying data store if it implements
// the tusd.FinisherDataStore interface. Else this function simply returns nil.
func (store *LimitedStore) FinishUpload(id string) error {
if s, ok := store.TerminaterDataStore.(tusd.FinisherDataStore); ok {
return s.FinishUpload(id)
}
return nil
}
// ConcatUploads will pass the call to the underlying data store if it implements
// the tusd.ConcaterDataStore interface. Else tusd.ErrNotImplemented will be
// returned.
func (store *LimitedStore) ConcatUploads(dest string, src []string) error {
if s, ok := store.TerminaterDataStore.(tusd.ConcaterDataStore); ok {
return s.ConcatUploads(dest, src)
} else {
return tusd.ErrNotImplemented
}
}

View File

@ -11,11 +11,7 @@ import (
)
var _ tusd.DataStore = &LimitedStore{}
var _ tusd.GetReaderDataStore = &LimitedStore{}
var _ tusd.TerminaterDataStore = &LimitedStore{}
var _ tusd.LockerDataStore = &LimitedStore{}
var _ tusd.ConcaterDataStore = &LimitedStore{}
var _ tusd.FinisherDataStore = &LimitedStore{}
type dataStore struct {
t *assert.Assertions
@ -45,10 +41,6 @@ func (store *dataStore) GetInfo(id string) (tusd.FileInfo, error) {
return tusd.FileInfo{}, nil
}
func (store *dataStore) GetReader(id string) (io.Reader, error) {
return nil, tusd.ErrNotImplemented
}
func (store *dataStore) Terminate(id string) error {
// We expect the uploads to be terminated in a specific order (the bigger
// come first)
@ -66,7 +58,7 @@ func TestLimitedStore(t *testing.T) {
dataStore := &dataStore{
t: a,
}
store := New(100, dataStore)
store := New(100, dataStore, dataStore)
// Create new upload (30 bytes)
id, err := store.NewUpload(tusd.FileInfo{

View File

@ -18,21 +18,26 @@ import (
// cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits.
type MemoryLocker struct {
tusd.DataStore
locks map[string]bool
}
// New creates a new lock memory wrapper around the provided storage.
func NewMemoryLocker(store tusd.DataStore) *MemoryLocker {
// New creates a new lock memory.
func NewMemoryLocker(_ tusd.DataStore) *MemoryLocker {
return New()
}
// New creates a new in-memory locker holding no locks. The diff residue
// here duplicated the locks initializer and referenced an undefined
// `store`; this is the coherent new version without the embedded store.
func New() *MemoryLocker {
	return &MemoryLocker{
		locks: make(map[string]bool),
	}
}
// UseIn registers this locker as the composer's locking extension.
func (locker *MemoryLocker) UseIn(composer *tusd.StoreComposer) {
	composer.UseLocker(locker)
}
// LockUpload tries to obtain the exclusive lock.
func (locker *MemoryLocker) LockUpload(id string) error {
// Ensure file is not locked
if _, ok := locker.locks[id]; ok {
return tusd.ErrFileLocked

View File

@ -1,7 +1,6 @@
package memorylocker
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
@ -9,28 +8,11 @@ import (
"github.com/tus/tusd"
)
type zeroStore struct{}
func (store zeroStore) NewUpload(info tusd.FileInfo) (string, error) {
return "", nil
}
func (store zeroStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
return 0, nil
}
func (store zeroStore) GetInfo(id string) (tusd.FileInfo, error) {
return tusd.FileInfo{}, nil
}
func (store zeroStore) GetReader(id string) (io.Reader, error) {
return nil, tusd.ErrNotImplemented
}
func TestMemoryLocker(t *testing.T) {
a := assert.New(t)
var locker tusd.LockerDataStore
locker = NewMemoryLocker(&zeroStore{})
locker = New()
a.NoError(locker.LockUpload("one"))
a.Equal(tusd.ErrFileLocked, locker.LockUpload("one"))

View File

@ -131,6 +131,14 @@ func New(bucket string, service s3iface.S3API) S3Store {
}
}
// UseIn registers this store as the core of the composer and enables
// every extension S3Store implements: concatenation, finishing,
// reading back upload content and termination.
func (store S3Store) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseConcater(store)
	composer.UseFinisher(store)
	composer.UseGetReader(store)
	composer.UseTerminater(store)
}
func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
var uploadId string
if info.ID == "" {

View File

@ -6,7 +6,6 @@ import (
"io"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
@ -52,35 +51,12 @@ var ErrStatusCodes = map[error]int{
ErrModifyFinal: http.StatusForbidden,
}
// Config provides a way to configure the Handler depending on your needs.
type Config struct {
// DataStore implementation used to store and retrieve the single uploads.
// Must no be nil.
DataStore DataStore
// MaxSize defines how many bytes may be stored in one single upload. If its
// value is is 0 or smaller no limit will be enforced.
MaxSize int64
// BasePath defines the URL path used for handling uploads, e.g. "/files/".
// If no trailing slash is presented it will be added. You may specify an
// absolute URL containing a scheme, e.g. "http://tus.io"
BasePath string
// Initiate the CompleteUploads channel in the Handler struct in order to
// be notified about complete uploads
NotifyCompleteUploads bool
// Logger the logger to use internally
Logger *log.Logger
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
// potentially set by proxies when generating an absolute URL in the
// reponse to POST requests.
RespectForwardedHeaders bool
}
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
// is provided which is, however, not part of the specification.
type UnroutedHandler struct {
config Config
dataStore DataStore
composer *StoreComposer
isBasePathAbs bool
basePath string
logger *log.Logger
@ -97,42 +73,26 @@ type UnroutedHandler struct {
// a router (aka mux) of your choice. If you are looking for preconfigured
// handler see NewHandler.
func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
logger := config.Logger
if logger == nil {
logger = log.New(os.Stdout, "[tusd] ", 0)
}
base := config.BasePath
uri, err := url.Parse(base)
if err != nil {
if err := config.validate(); err != nil {
return nil, err
}
// Ensure base path ends with slash to remove logic from absFileURL
if base != "" && string(base[len(base)-1]) != "/" {
base += "/"
}
// Ensure base path begins with slash if not absolute (starts with scheme)
if !uri.IsAbs() && len(base) > 0 && string(base[0]) != "/" {
base = "/" + base
}
// Only advertise extensions in the Tus-Extension header which are actually implemented
extensions := "creation"
if _, ok := config.DataStore.(TerminaterDataStore); ok {
if config.StoreComposer.UsesTerminater {
extensions += ",termination"
}
if _, ok := config.DataStore.(ConcaterDataStore); ok {
if config.StoreComposer.UsesConcater {
extensions += ",concatenation"
}
handler := &UnroutedHandler{
config: config,
dataStore: config.DataStore,
basePath: base,
isBasePathAbs: uri.IsAbs(),
composer: config.StoreComposer,
basePath: config.BasePath,
isBasePathAbs: config.isAbs,
CompleteUploads: make(chan FileInfo),
logger: logger,
logger: config.Logger,
extensions: extensions,
}
@ -211,8 +171,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
// Only use the proper Upload-Concat header if the concatenation extension
// is even supported by the data store.
var concatHeader string
concatStore, ok := handler.dataStore.(ConcaterDataStore)
if ok {
if handler.composer.UsesConcater {
concatHeader = r.Header.Get("Upload-Concat")
}
@ -258,14 +217,14 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
PartialUploads: partialUploads,
}
id, err := handler.dataStore.NewUpload(info)
id, err := handler.composer.Core.NewUpload(info)
if err != nil {
handler.sendError(w, r, err)
return
}
if isFinal {
if err := concatStore.ConcatUploads(id, partialUploads); err != nil {
if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
handler.sendError(w, r, err)
return
}
@ -290,7 +249,8 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -299,7 +259,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
defer locker.UnlockUpload(id)
}
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -350,7 +310,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -359,7 +320,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
defer locker.UnlockUpload(id)
}
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -393,7 +354,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// Limit the
reader := io.LimitReader(r.Body, maxSize)
bytesWritten, err := handler.dataStore.WriteChunk(id, offset, reader)
bytesWritten, err := handler.composer.Core.WriteChunk(id, offset, reader)
if err != nil {
handler.sendError(w, r, err)
return
@ -406,8 +367,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// If the upload is completed, ...
if newOffset == info.Size {
// ... allow custom mechanism to finish and cleanup the upload
if store, ok := handler.dataStore.(FinisherDataStore); ok {
if err := store.FinishUpload(id); err != nil {
if handler.composer.UsesFinisher {
if err := handler.composer.Finisher.FinishUpload(id); err != nil {
handler.sendError(w, r, err)
return
}
@ -426,8 +387,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// GetFile handles requests to download a file using a GET request. This is not
// part of the specification.
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
dataStore, ok := handler.dataStore.(GetReaderDataStore)
if !ok {
if !handler.composer.UsesGetReader {
handler.sendError(w, r, ErrNotImplemented)
return
}
@ -438,7 +398,8 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -447,7 +408,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
defer locker.UnlockUpload(id)
}
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -460,7 +421,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
}
// Get reader
src, err := dataStore.GetReader(id)
src, err := handler.composer.GetReader.GetReader(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -479,8 +440,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
// DelFile terminates an upload permanently.
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
// Abort the request handling if the required interface is not implemented
tstore, ok := handler.config.DataStore.(TerminaterDataStore)
if !ok {
if !handler.composer.UsesTerminater {
handler.sendError(w, r, ErrNotImplemented)
return
}
@ -491,7 +451,8 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
return
}
if locker, ok := handler.dataStore.(LockerDataStore); ok {
if handler.composer.UsesLocker {
locker := handler.composer.Locker
if err := locker.LockUpload(id); err != nil {
handler.sendError(w, r, err)
return
@ -500,7 +461,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
defer locker.UnlockUpload(id)
}
err = tstore.Terminate(id)
err = handler.composer.Terminater.Terminate(id)
if err != nil {
handler.sendError(w, r, err)
return
@ -591,7 +552,7 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto strin
// of a final resource.
func (handler *UnroutedHandler) sizeOfUploads(ids []string) (size int64, err error) {
for _, id := range ids {
info, err := handler.dataStore.GetInfo(id)
info, err := handler.composer.Core.GetInfo(id)
if err != nil {
return size, err
}