limitedstore: Remove package due to bad functionality

This commit is contained in:
Marius 2019-06-11 17:06:33 +02:00
parent dc092ddd46
commit 32cf95aefe
5 changed files with 0 additions and 244 deletions

View File

@ -227,7 +227,6 @@ useful tools:
* [**gcsstore**](https://godoc.org/github.com/tus/tusd/gcsstore): A storage backend using Google cloud storage
* [**memorylocker**](https://godoc.org/github.com/tus/tusd/memorylocker): An in-memory locker for handling concurrent uploads
* [**etcd3locker**](https://godoc.org/github.com/tus/tusd/etcd3locker): A locker using the distributed KV etcd3 store
* [**limitedstore**](https://godoc.org/github.com/tus/tusd/limitedstore): A storage wrapper limiting the total used space for uploads
### 3rd-Party tusd Packages

View File

@ -6,7 +6,6 @@ import (
"github.com/tus/tusd" "github.com/tus/tusd"
"github.com/tus/tusd/filestore" "github.com/tus/tusd/filestore"
"github.com/tus/tusd/gcsstore" "github.com/tus/tusd/gcsstore"
"github.com/tus/tusd/limitedstore"
"github.com/tus/tusd/memorylocker" "github.com/tus/tusd/memorylocker"
"github.com/tus/tusd/s3store" "github.com/tus/tusd/s3store"
@ -73,18 +72,5 @@ func CreateComposer() {
store.UseIn(Composer) store.UseIn(Composer)
} }
storeSize := Flags.StoreSize
maxSize := Flags.MaxSize
if storeSize > 0 {
limitedstore.New(storeSize, Composer.Core, Composer.Terminater).UseIn(Composer)
stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
// We need to ensure that a single upload can fit into the storage size
if maxSize > storeSize || maxSize == 0 {
Flags.MaxSize = storeSize
}
}
stdout.Printf("Using %.2fMB as maximum size.\n", float64(Flags.MaxSize)/1024/1024) stdout.Printf("Using %.2fMB as maximum size.\n", float64(Flags.MaxSize)/1024/1024)
} }

View File

@ -12,7 +12,6 @@ var Flags struct {
HttpSock string HttpSock string
MaxSize int64 MaxSize int64
UploadDir string UploadDir string
StoreSize int64
Basepath string Basepath string
Timeout int64 Timeout int64
S3Bucket string S3Bucket string
@ -41,7 +40,6 @@ func ParseFlags() {
flag.StringVar(&Flags.HttpSock, "unix-sock", "", "If set, will listen to a UNIX socket at this location instead of a TCP socket") flag.StringVar(&Flags.HttpSock, "unix-sock", "", "If set, will listen to a UNIX socket at this location instead of a TCP socket")
flag.Int64Var(&Flags.MaxSize, "max-size", 0, "Maximum size of a single upload in bytes") flag.Int64Var(&Flags.MaxSize, "max-size", 0, "Maximum size of a single upload in bytes")
flag.StringVar(&Flags.UploadDir, "dir", "./data", "Directory to store uploads in") flag.StringVar(&Flags.UploadDir, "dir", "./data", "Directory to store uploads in")
flag.Int64Var(&Flags.StoreSize, "store-size", 0, "Size of space allowed for storage")
flag.StringVar(&Flags.Basepath, "base-path", "/files/", "Basepath of the HTTP server") flag.StringVar(&Flags.Basepath, "base-path", "/files/", "Basepath of the HTTP server")
flag.Int64Var(&Flags.Timeout, "timeout", 30*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout") flag.Int64Var(&Flags.Timeout, "timeout", 30*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)") flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")

View File

@ -1,140 +0,0 @@
// Package limitedstore provides a storage with a limited space.
//
// This goal is achieved by using a simple wrapper around existing
// datastores (tusd.DataStore) while limiting the used storage size.
// It will start terminating existing uploads if not enough space is left in
// order to create a new upload.
// The order in which the uploads will be terminated is defined by their size,
// whereas the biggest ones are deleted first.
// This package's functionality is very limited and naive. It will terminate
// uploads whether they are finished yet or not. Only one datastore is allowed to
// access the underlying storage else the limited store will not function
// properly. Two tusd.FileStore instances using the same directory, for example.
// In addition the limited store will keep a list of the uploads' IDs in memory
// which may create a growing memory leak.
package limitedstore
import (
"os"
"sort"
"sync"
"github.com/tus/tusd"
)
// LimitedStore wraps a tusd.DataStore and caps the combined size of all
// uploads it knows about at StoreSize bytes. Bookkeeping is kept purely in
// memory, so only a single LimitedStore may manage the underlying storage
// (see the package documentation for the other caveats).
type LimitedStore struct {
	tusd.DataStore
	// terminater is the wrapped store's termination interface, used to
	// delete uploads when space must be reclaimed.
	terminater tusd.TerminaterDataStore
	// StoreSize is the maximum total number of bytes allowed for uploads.
	StoreSize int64
	// uploads maps an upload ID to its declared size in bytes.
	uploads map[string]int64
	// usedSize is the sum of all sizes currently recorded in uploads.
	usedSize int64
	// mutex guards uploads and usedSize.
	mutex *sync.Mutex
}
// pair associates an upload ID (key) with its size (value) so that the
// uploads map can be flattened into a sortable slice.
type pair struct {
	key string
	value int64
}

// pairlist implements sort.Interface over pairs, ordering ascending by size;
// ensureSpace wraps it in sort.Reverse to evict the biggest uploads first.
type pairlist []pair

func (p pairlist) Len() int { return len(p) }
func (p pairlist) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p pairlist) Less(i, j int) bool { return p[i].value < p[j].value }
// New creates a limited store that enforces storeSize bytes as the maximum
// combined size of all uploads. The wrapped data store needs to implement
// the TerminaterDataStore interface, in order to provide the required
// Terminate method for evicting uploads.
func New(storeSize int64, dataStore tusd.DataStore, terminater tusd.TerminaterDataStore) *LimitedStore {
	store := &LimitedStore{
		DataStore:  dataStore,
		terminater: terminater,
		StoreSize:  storeSize,
		uploads:    map[string]int64{},
		mutex:      &sync.Mutex{},
	}
	return store
}
// UseIn registers the limited store as both the core data store and the
// terminater on the given composer, so all creations and terminations pass
// through the size bookkeeping.
func (store *LimitedStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseTerminater(store)
}
// NewUpload frees enough space for the upload described by info — possibly
// terminating existing uploads — then delegates creation to the wrapped
// store and records the new upload's size. Safe for concurrent use.
func (store *LimitedStore) NewUpload(info tusd.FileInfo) (string, error) {
	store.mutex.Lock()
	defer store.mutex.Unlock()

	err := store.ensureSpace(info.Size)
	if err != nil {
		return "", err
	}

	id, err := store.DataStore.NewUpload(info)
	if err != nil {
		return "", err
	}

	// Track the upload so its space can be reclaimed on termination.
	store.uploads[id] = info.Size
	store.usedSize += info.Size

	return id, nil
}
// Terminate removes the upload with the given id from the wrapped store and
// reclaims its recorded space. Safe for concurrent use; the actual work is
// done by the lock-free internal terminate helper.
func (store *LimitedStore) Terminate(id string) error {
	store.mutex.Lock()
	defer store.mutex.Unlock()
	return store.terminate(id)
}
// terminate deletes the upload from the wrapped store and updates the
// in-memory bookkeeping. The caller must hold store.mutex.
func (store *LimitedStore) terminate(id string) error {
	err := store.terminater.Terminate(id)
	// Ignore the error if the upload could not be found. In this case, the upload
	// has likely already been removed by another service (e.g. a cron job) and we
	// just remove the upload from our internal list and claim the used space back.
	if err != nil && err != tusd.ErrNotFound && !os.IsNotExist(err) {
		return err
	}
	// An untracked id yields size 0, so the accounting below stays consistent
	// even for uploads this store never recorded.
	size := store.uploads[id]
	delete(store.uploads, id)
	store.usedSize -= size
	return nil
}
// ensureSpace makes room for an upload of the given size by terminating
// existing uploads, biggest first, until the new upload fits. The caller
// must hold store.mutex.
//
// Best-effort by design: if terminating every tracked upload still cannot
// free enough space, nil is returned anyway and the limit may be exceeded.
func (store *LimitedStore) ensureSpace(size int64) error {
	if store.usedSize+size <= store.StoreSize {
		// The new upload already fits; nothing to evict.
		return nil
	}

	// Snapshot the bookkeeping map so it can be ordered by upload size.
	candidates := make(pairlist, 0, len(store.uploads))
	for id, uploadSize := range store.uploads {
		candidates = append(candidates, pair{id, uploadSize})
	}
	// Biggest uploads first.
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].value > candidates[j].value
	})

	for _, candidate := range candidates {
		if err := store.terminate(candidate.key); err != nil {
			return err
		}
		if store.usedSize+size <= store.StoreSize {
			// Enough space has been freed to store the new upload.
			return nil
		}
	}

	return nil
}

View File

@ -1,87 +0,0 @@
package limitedstore
import (
"io"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tus/tusd"
)
var _ tusd.DataStore = &LimitedStore{}
var _ tusd.TerminaterDataStore = &LimitedStore{}
// dataStore is a mock implementing both tusd.DataStore and
// tusd.TerminaterDataStore; it asserts the order in which uploads are
// created and terminated by the LimitedStore under test.
type dataStore struct {
	t *assert.Assertions
	// numCreatedUploads counts NewUpload calls; it doubles as the next
	// upload's numeric ID.
	numCreatedUploads int
	// numTerminatedUploads counts Terminate calls.
	numTerminatedUploads int
}
// NewUpload asserts that uploads arrive in the expected order (identified
// by their sizes) and hands back a sequential numeric ID.
func (store *dataStore) NewUpload(info tusd.FileInfo) (string, error) {
	id := store.numCreatedUploads
	store.numCreatedUploads++

	// We expect the uploads to be created in a specific order.
	// These sizes correlate to this order.
	wantSizes := []int64{30, 60, 80}
	store.t.Equal(wantSizes[id], info.Size)

	return strconv.Itoa(id), nil
}
// WriteChunk is a no-op stub; the test never writes upload data.
func (store *dataStore) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
	return 0, nil
}

// GetInfo is a no-op stub returning an empty FileInfo.
func (store *dataStore) GetInfo(id string) (tusd.FileInfo, error) {
	return tusd.FileInfo{}, nil
}
// Terminate asserts that uploads are evicted biggest-first: the 60-byte
// upload ("1") must go before the 30-byte upload ("0").
func (store *dataStore) Terminate(id string) error {
	wantOrder := []string{"1", "0"}
	store.t.Equal(wantOrder[store.numTerminatedUploads], id)
	store.numTerminatedUploads++
	return nil
}
// TestLimitedStore drives a 100-byte store past its limit and relies on
// the dataStore mock to verify both creation order and the biggest-first
// termination order.
func TestLimitedStore(t *testing.T) {
	a := assert.New(t)
	mock := &dataStore{
		t: a,
	}
	store := New(100, mock, mock)

	// Creating these uploads in order pushes the store over its limit on
	// the last one, forcing the two earlier uploads to be terminated.
	cases := []struct {
		size   int64
		wantID string
	}{
		{30, "0"},
		{60, "1"},
		{80, "2"},
	}
	for _, c := range cases {
		id, err := store.NewUpload(tusd.FileInfo{
			Size: c.size,
		})
		a.NoError(err)
		a.Equal(c.wantID, id)
	}

	if mock.numTerminatedUploads != 2 {
		t.Error("expected two uploads to be terminated")
	}
}