rewrite tusd

* expose tusd.DataStore and extracted FileStore
* use pat for routing
* allow absolute BasePaths
* requires StripPrefix
* add support for 1.0 core
* update date in license
This commit is contained in:
Acconut 2015-02-01 14:57:57 +01:00
parent 3db2976bd5
commit a70bd4cfa3
20 changed files with 804 additions and 1244 deletions

4
.gitignore vendored
View File

@ -1,2 +1,2 @@
/tus_data
/gopath
tusd/data
cover.out

View File

@ -1,4 +1,4 @@
Copyright (c) 2013 Transloadit Ltd and Contributors
Copyright (c) 2013-2015 Transloadit Ltd and Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in

View File

@ -10,7 +10,7 @@ In the future tusd may be extended with additional functionality to make it
suitable as a standalone production upload server, but for now this is not a
priority.
**Protocol version:** 0.2.1
**Protocol version:** 1.0.0
## Getting started

20
datastore.go Normal file
View File

@ -0,0 +1,20 @@
package tusd

import (
	"io"
)

// MetaData holds arbitrary string key/value pairs attached to an upload.
type MetaData map[string]string

// FileInfo describes the current state of a single upload.
type FileInfo struct {
	// Id is the unique identifier of the upload.
	Id string
	// Size is the total size of the upload in bytes (Entity-Length).
	Size int64
	// Offset is the number of bytes that have been stored so far.
	Offset int64
	// MetaData holds additional key/value pairs supplied on creation.
	MetaData MetaData
}

// DataStore is the storage backend used by Handler to persist uploads.
type DataStore interface {
	// NewUpload creates a new upload with the given total size and metadata
	// and returns its unique id.
	NewUpload(size int64, metaData MetaData) (string, error)
	// WriteChunk stores the data read from src in the upload id, starting at
	// the given offset.
	WriteChunk(id string, offset int64, src io.Reader) error
	// GetInfo returns the current state of the upload id.
	GetInfo(id string) (FileInfo, error)
}

9
dev.sh
View File

@ -1,9 +0,0 @@
#!/usr/bin/bash
# usage: source dev.sh
#
# dev.sh simplifies development by setting up a local GOPATH.
export GOPATH=`pwd`/gopath
src_dir="${GOPATH}/src/github.com/tus/tusd"
mkdir -p "${src_dir}"
ln -fs "`pwd`/src" "${src_dir}"

95
filestore/filestore.go Normal file
View File

@ -0,0 +1,95 @@
package filestore
import (
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/tus/tusd"
	"github.com/tus/tusd/uid"
)
// defaultFilePerm is the permission used for newly created .bin and .info files.
var defaultFilePerm = os.FileMode(0666)

// FileStore is a DataStore implementation which keeps each upload as a pair of
// files inside the directory Path: "<id>.bin" for the content and "<id>.info"
// for the JSON-encoded FileInfo.
type FileStore struct {
	// Path is the directory in which upload files are stored.
	// NOTE(review): the directory is assumed to already exist and be writable;
	// FileStore does not create it — confirm with callers.
	Path string
}
// NewUpload creates a new upload with the given total size and metadata. It
// generates a unique id, creates an empty "<id>.bin" content file and persists
// the initial FileInfo in the matching "<id>.info" file.
func (store FileStore) NewUpload(size int64, metaData tusd.MetaData) (id string, err error) {
	id = uid.Uid()
	info := tusd.FileInfo{
		Id:       id,
		Size:     size,
		Offset:   0,
		MetaData: metaData,
	}
	// Create .bin file with no content
	file, err := os.OpenFile(store.binPath(id), os.O_CREATE|os.O_WRONLY, defaultFilePerm)
	if err != nil {
		return
	}
	defer file.Close()
	// writeInfo creates the file by itself if necessary
	// NOTE(review): if writeInfo fails, the empty .bin file created above is
	// left behind — consider removing it on error.
	err = store.writeInfo(id, info)
	return
}
// WriteChunk appends the data read from src to the upload's .bin file and
// advances the stored offset by the number of bytes actually written.
// NOTE(review): the file is opened with O_APPEND, so the offset argument is
// trusted to equal the current end of the file — confirm callers verify this
// (the HTTP handler compares it against the stored offset).
func (store FileStore) WriteChunk(id string, offset int64, src io.Reader) error {
	file, err := os.OpenFile(store.binPath(id), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	if err != nil {
		return err
	}
	defer file.Close()
	n, err := io.Copy(file, src)
	// Persist whatever made it to disk even if io.Copy failed, so the upload
	// can be resumed from the correct offset; the copy error is still returned.
	if n > 0 {
		if err := store.setOffset(id, offset+n); err != nil {
			return err
		}
	}
	return err
}
// GetInfo reads the upload's .info file and decodes the JSON-encoded FileInfo
// stored inside it.
func (store FileStore) GetInfo(id string) (tusd.FileInfo, error) {
	var info tusd.FileInfo

	raw, err := ioutil.ReadFile(store.infoPath(id))
	if err != nil {
		return info, err
	}

	if err := json.Unmarshal(raw, &info); err != nil {
		return info, err
	}
	return info, nil
}
// binPath returns the path of the file holding the binary content of the
// upload with the given id.
func (store FileStore) binPath(id string) string {
	// filepath.Join produces an OS-correct path and tolerates a Path with or
	// without a trailing separator, unlike manual "/" concatenation.
	return filepath.Join(store.Path, id+".bin")
}

// infoPath returns the path of the file holding the JSON-encoded FileInfo of
// the upload with the given id.
func (store FileStore) infoPath(id string) string {
	return filepath.Join(store.Path, id+".info")
}
// writeInfo encodes info as JSON and writes it to the upload's .info file,
// creating the file if it does not exist yet.
func (store FileStore) writeInfo(id string, info tusd.FileInfo) error {
	data, err := json.Marshal(info)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(store.infoPath(id), data, defaultFilePerm)
}
// setOffset updates the stored offset of the upload, never decreasing it.
// NOTE(review): this is an unsynchronized read-modify-write of the .info file;
// concurrent calls for the same id could race — confirm callers serialize
// writes per upload (the HTTP handler does via its locks map).
func (store FileStore) setOffset(id string, offset int64) error {
	info, err := store.GetInfo(id)
	if err != nil {
		return err
	}
	// never decrement the offset
	if info.Offset >= offset {
		return nil
	}
	info.Offset = offset
	return store.writeInfo(id, info)
}

273
handler.go Normal file
View File

@ -0,0 +1,273 @@
package tusd
import (
"errors"
"io"
"log"
"net/http"
"net/url"
"os"
"strconv"
"github.com/bmizerany/pat"
)
// logger writes request and error output to stdout with a "[tusd] " prefix.
var logger = log.New(os.Stdout, "[tusd] ", 0)

// Errors returned by the handler; they are translated to HTTP status codes
// via ErrStatusCodes.
var (
	ErrUnsupportedVersion = errors.New("unsupported version")
	ErrMaxSizeExceeded    = errors.New("maximum size exceeded")
	ErrInvalidEntityLength = errors.New("missing or invalid Entity-Length header")
	ErrInvalidOffset      = errors.New("missing or invalid Offset header")
	ErrNotFound           = errors.New("upload not found")
	ErrFileLocked         = errors.New("file currently locked")
	ErrIllegalOffset      = errors.New("illegal offset")
	ErrSizeExceeded       = errors.New("resource's size exceeded")
)

// ErrStatusCodes maps the errors above to the HTTP status code sent to the
// client. Errors not listed here are reported as 500 Internal Server Error
// (see sendError).
var ErrStatusCodes = map[error]int{
	ErrUnsupportedVersion:  http.StatusPreconditionFailed,
	ErrMaxSizeExceeded:     http.StatusRequestEntityTooLarge,
	ErrInvalidEntityLength: http.StatusBadRequest,
	ErrInvalidOffset:       http.StatusBadRequest,
	ErrNotFound:            http.StatusNotFound,
	ErrFileLocked:          423, // Locked (WebDAV) (RFC 4918)
	ErrIllegalOffset:       http.StatusConflict,
	ErrSizeExceeded:        http.StatusRequestEntityTooLarge,
}
// Config holds the configuration for a tusd Handler.
type Config struct {
	// DataStore is the storage backend used to persist uploads. Required.
	DataStore DataStore
	// MaxSize defines how many bytes may be stored in one single upload. If its
	// value is 0 or smaller no limit will be enforced.
	MaxSize int64
	// BasePath defines the URL path used for handling uploads, e.g. "/files/".
	// If no trailing slash is presented it will be added. You may specify an
	// absolute URL containing a scheme, e.g. "http://tus.io"
	BasePath string
}

// Handler is an http.Handler implementing the tus resumable upload protocol
// (1.0 core) on top of a DataStore.
type Handler struct {
	// config is the configuration passed to NewHandler.
	config Config
	// dataStore is a shortcut for config.DataStore.
	dataStore DataStore
	// isBasePathAbs reports whether basePath contains a scheme (absolute URL).
	isBasePathAbs bool
	// basePath is the normalized BasePath, always ending in a slash.
	basePath string
	// routeHandler dispatches requests which passed the version check.
	routeHandler http.Handler
	// locks tracks uploads with a PATCH currently in progress.
	// NOTE(review): read and written from concurrent request goroutines
	// without a mutex — confirm this or guard it with sync.Mutex.
	locks map[string]bool
}
// NewHandler creates a routed tus protocol handler for the given
// configuration. The configured BasePath is normalized so that it always ends
// with a slash and, unless it is an absolute URL, also begins with one.
func NewHandler(config Config) (*Handler, error) {
	base := config.BasePath

	uri, err := url.Parse(base)
	if err != nil {
		return nil, err
	}

	// A trailing slash keeps absFileUrl free of join logic.
	if base != "" && base[len(base)-1] != '/' {
		base += "/"
	}

	// Relative base paths must be rooted; absolute URLs (with a scheme) are
	// left untouched.
	if !uri.IsAbs() && len(base) > 0 && base[0] != '/' {
		base = "/" + base
	}

	mux := pat.New()
	handler := &Handler{
		config:        config,
		dataStore:     config.DataStore,
		basePath:      base,
		isBasePathAbs: uri.IsAbs(),
		routeHandler:  mux,
		locks:         make(map[string]bool),
	}

	mux.Post("", http.HandlerFunc(handler.postFile))
	mux.Head(":id", http.HandlerFunc(handler.headFile))
	mux.Add("PATCH", ":id", http.HandlerFunc(handler.patchFile))

	return handler, nil
}
// ServeHTTP implements http.Handler. It answers CORS preflight and OPTIONS
// discovery requests itself, validates the TUS-Resumable version header on all
// other requests and dispatches them to the internal router.
func (handler *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Log synchronously: the previous `go logger.Println(...)` spawned a
	// goroutine per request for no benefit and made log ordering
	// nondeterministic.
	logger.Println(r.Method, r.URL.Path)

	header := w.Header()

	if origin := r.Header.Get("Origin"); origin != "" {
		header.Set("Access-Control-Allow-Origin", origin)

		if r.Method == "OPTIONS" {
			// Preflight request
			header.Set("Access-Control-Allow-Methods", "POST, HEAD, PATCH, OPTIONS")
			header.Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Entity-Length, Offset, TUS-Resumable")
			header.Set("Access-Control-Max-Age", "86400")
		} else {
			// Actual request
			header.Set("Access-Control-Expose-Headers", "Offset, Location, Entity-Length, TUS-Version, TUS-Resumable, TUS-Max-Size, TUS-Extension")
		}
	}

	// Set current version used by the server
	header.Set("TUS-Resumable", "1.0.0")

	// Set appropriated headers in case of OPTIONS method allowing protocol
	// discovery and end with an 204 No Content
	if r.Method == "OPTIONS" {
		if handler.config.MaxSize > 0 {
			header.Set("TUS-Max-Size", strconv.FormatInt(handler.config.MaxSize, 10))
		}
		header.Set("TUS-Version", "1.0.0")
		header.Set("TUS-Extension", "file-creation")
		w.WriteHeader(http.StatusNoContent)
		return
	}

	// Test if the version sent by the client is supported
	if r.Header.Get("TUS-Resumable") != "1.0.0" {
		handler.sendError(w, ErrUnsupportedVersion)
		return
	}

	// Proceed with routing the request
	handler.routeHandler.ServeHTTP(w, r)
}
// postFile creates a new upload of the size announced in the Entity-Length
// header and responds with 201 Created and its absolute URL in the Location
// header.
func (handler *Handler) postFile(w http.ResponseWriter, r *http.Request) {
	size, err := strconv.ParseInt(r.Header.Get("Entity-Length"), 10, 64)
	if err != nil || size < 0 {
		handler.sendError(w, ErrInvalidEntityLength)
		return
	}

	// Test whether the size is still allowed. Config.MaxSize documents that a
	// value of 0 or smaller disables the limit; the previous unconditional
	// comparison rejected every upload when MaxSize was left at its zero value.
	if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
		handler.sendError(w, ErrMaxSizeExceeded)
		return
	}

	// @TODO: implement metadata extension
	meta := make(map[string]string)

	id, err := handler.dataStore.NewUpload(size, meta)
	if err != nil {
		handler.sendError(w, err)
		return
	}

	url := handler.absFileUrl(r, id)
	w.Header().Set("Location", url)
	w.WriteHeader(http.StatusCreated)
}
// headFile reports the total size and current offset of an upload via the
// Entity-Length and Offset response headers, ending with 204 No Content.
func (handler *Handler) headFile(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get(":id")

	info, err := handler.dataStore.GetInfo(id)
	if err != nil {
		// A file missing on disk is reported as 404 Not Found.
		if os.IsNotExist(err) {
			err = ErrNotFound
		}
		handler.sendError(w, err)
		return
	}

	header := w.Header()
	header.Set("Entity-Length", strconv.FormatInt(info.Size, 10))
	header.Set("Offset", strconv.FormatInt(info.Offset, 10))
	w.WriteHeader(http.StatusNoContent)
}
// patchFile appends a chunk of data to an existing upload. The request's
// Offset header must match the upload's current offset, and the body may not
// extend past the upload's announced total size.
func (handler *Handler) patchFile(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get(":id")
	// Ensure file is not locked
	// NOTE(review): the locks map is read and written from concurrent request
	// goroutines without a mutex — this is a data race; confirm or add locking.
	if _, ok := handler.locks[id]; ok {
		handler.sendError(w, ErrFileLocked)
		return
	}
	// Lock file for further writes (heads are allowed)
	handler.locks[id] = true
	// File will be unlocked regardless of an error or success
	defer func() {
		delete(handler.locks, id)
	}()
	info, err := handler.dataStore.GetInfo(id)
	if err != nil {
		// Interpret os.ErrNotExist as 404 Not Found
		if os.IsNotExist(err) {
			err = ErrNotFound
		}
		handler.sendError(w, err)
		return
	}
	// Ensure the offsets match
	offset, err := strconv.ParseInt(r.Header.Get("Offset"), 10, 64)
	if err != nil {
		handler.sendError(w, ErrInvalidOffset)
		return
	}
	if offset != info.Offset {
		handler.sendError(w, ErrIllegalOffset)
		return
	}
	// Get Content-Length if possible (-1 when the length is unknown)
	length := r.ContentLength
	// Test if this upload fits into the file's size
	if offset+length > info.Size {
		handler.sendError(w, ErrSizeExceeded)
		return
	}
	// Cap the number of consumed bytes at the announced Content-Length or, if
	// that is unknown, at the upload's remaining size.
	maxSize := info.Size - offset
	if length > 0 {
		maxSize = length
	}
	// Limit the body so a client can never write past the upload's total size.
	reader := io.LimitReader(r.Body, maxSize)
	err = handler.dataStore.WriteChunk(id, offset, reader)
	if err != nil {
		handler.sendError(w, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// sendError writes the error message as a plain-text response body using the
// status code registered in ErrStatusCodes, defaulting to 500 for unknown
// errors.
func (handler *Handler) sendError(w http.ResponseWriter, err error) {
	status := 500
	if code, ok := ErrStatusCodes[err]; ok {
		status = code
	}

	w.Header().Set("Content-Type", "text/plain")
	w.WriteHeader(status)
	w.Write([]byte(err.Error() + "\n"))
}
// absFileUrl returns the absolute URL of the upload with the given id. If the
// configured base path is already an absolute URL it is used directly;
// otherwise the scheme and host are taken from the incoming request.
func (handler *Handler) absFileUrl(r *http.Request, id string) string {
	if handler.isBasePathAbs {
		return handler.basePath + id
	}

	// Derive the scheme from the request's TLS state.
	scheme := "http://"
	if r.TLS != nil {
		scheme = "https://"
	}
	return scheme + r.Host + handler.basePath + id
}

381
handler_test.go Normal file
View File

@ -0,0 +1,381 @@
package tusd
import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
)
// zeroStore is a DataStore stub whose methods all succeed and return zero
// values. It serves as an embeddable base for the more specific test stores
// below, which override only the methods they care about.
type zeroStore struct{}

func (store zeroStore) NewUpload(size int64, metaData MetaData) (string, error) {
	return "", nil
}

func (store zeroStore) WriteChunk(id string, offset int64, src io.Reader) error {
	return nil
}

func (store zeroStore) GetInfo(id string) (FileInfo, error) {
	return FileInfo{}, nil
}
func TestCORS(t *testing.T) {
handler, _ := NewHandler(Config{})
// Test preflight request
req, _ := http.NewRequest("OPTIONS", "", nil)
req.Header.Set("Origin", "tus.io")
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
t.Errorf("Expected 204 No Content for OPTIONS request (got %v)", w.Code)
}
headers := []string{
"Access-Control-Allow-Headers",
"Access-Control-Allow-Methods",
"Access-Control-Max-Age",
}
for _, header := range headers {
if _, ok := w.HeaderMap[header]; !ok {
t.Errorf("Header '%s' not contained in response", header)
}
}
origin := w.HeaderMap.Get("Access-Control-Allow-Origin")
if origin != "tus.io" {
t.Errorf("Allowed origin not 'tus.io' but '%s'", origin)
}
// Test actual request
req, _ = http.NewRequest("GET", "", nil)
req.Header.Set("Origin", "tus.io")
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
origin = w.HeaderMap.Get("Access-Control-Allow-Origin")
if origin != "tus.io" {
t.Errorf("Allowed origin not 'tus.io' but '%s'", origin)
}
if _, ok := w.HeaderMap["Access-Control-Expose-Headers"]; !ok {
t.Error("Expose-Headers not contained in response")
}
}
// TestProtocolDiscovery verifies the OPTIONS discovery response (status code
// and TUS-* headers) and that an unsupported TUS-Resumable version is rejected
// with 412 Precondition Failed.
func TestProtocolDiscovery(t *testing.T) {
	handler, _ := NewHandler(Config{
		MaxSize: 400,
	})

	// Test successful OPTIONS request
	req, _ := http.NewRequest("OPTIONS", "", nil)
	w := httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	if w.Code != http.StatusNoContent {
		t.Errorf("Expected 204 No Content for OPTIONS request (got %v)", w.Code)
	}

	headers := map[string]string{
		"TUS-Extension": "file-creation",
		"TUS-Version":   "1.0.0",
		"TUS-Resumable": "1.0.0",
		"TUS-Max-Size":  "400",
	}
	for header, value := range headers {
		if v := w.HeaderMap.Get(header); value != v {
			// The previous message claimed the header was missing and did not
			// show the mismatching values.
			t.Errorf("Expected header '%s' to be '%s' (got '%s')", header, value, v)
		}
	}

	// Invalid or unsupported version
	req, _ = http.NewRequest("GET", "", nil)
	req.Header.Set("TUS-Resumable", "foo")
	w = httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	if w.Code != http.StatusPreconditionFailed {
		t.Errorf("Expected 412 Precondition Failed (got %v)", w.Code)
	}
}
type postStore struct {
t *testing.T
zeroStore
}
func (s postStore) NewUpload(size int64, metaData MetaData) (string, error) {
if size != 300 {
s.t.Errorf("Expected size to be 300 (got %v)", size)
}
return "foo", nil
}
// TestFileCreation verifies that a POST request creates an upload, returns the
// absolute Location header and that uploads exceeding MaxSize are rejected.
func TestFileCreation(t *testing.T) {
	handler, _ := NewHandler(Config{
		MaxSize:  400,
		BasePath: "files",
		DataStore: postStore{
			t: t,
		},
	})

	// Test successful request
	req, _ := http.NewRequest("POST", "", nil)
	req.Header.Set("TUS-Resumable", "1.0.0")
	req.Header.Set("Entity-Length", "300")
	req.Host = "tus.io"
	w := httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	if w.Code != http.StatusCreated {
		// The previous messages said "OPTIONS request" although these are POSTs.
		t.Errorf("Expected 201 Created for POST request (got %v)", w.Code)
	}
	if location := w.HeaderMap.Get("Location"); location != "http://tus.io/files/foo" {
		t.Errorf("Unexpected location header (got '%v')", location)
	}

	// Test exceeding MaxSize
	req, _ = http.NewRequest("POST", "", nil)
	req.Header.Set("TUS-Resumable", "1.0.0")
	req.Header.Set("Entity-Length", "500")
	w = httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	if w.Code != http.StatusRequestEntityTooLarge {
		t.Errorf("Expected %v for POST request (got %v)", http.StatusRequestEntityTooLarge, w.Code)
	}
}
type headStore struct {
zeroStore
}
func (s headStore) GetInfo(id string) (FileInfo, error) {
if id != "yes" {
return FileInfo{}, os.ErrNotExist
}
return FileInfo{
Offset: 11,
Size: 44,
}, nil
}
func TestGetInfo(t *testing.T) {
handler, _ := NewHandler(Config{
BasePath: "https://buy.art/",
DataStore: headStore{},
})
// Test successful request
req, _ := http.NewRequest("HEAD", "yes", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
t.Errorf("Expected %v (got %v)", http.StatusNoContent, w.Code)
}
headers := map[string]string{
"Offset": "11",
"Entity-Length": "44",
}
for header, value := range headers {
if v := w.HeaderMap.Get(header); value != v {
t.Errorf("Unexpected header value '%s': %v", header, v)
}
}
// Test non-existing file
req, _ = http.NewRequest("HEAD", "no", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
t.Errorf("Expected %v (got %v)", http.StatusNotFound, w.Code)
}
}
// patchStore simulates an upload of size 20 with current offset 5 under the
// id "yes" and records whether WriteChunk was invoked.
type patchStore struct {
	zeroStore
	t *testing.T
	// called is meant to flag a second WriteChunk invocation.
	called bool
}

func (s patchStore) GetInfo(id string) (FileInfo, error) {
	if id != "yes" {
		return FileInfo{}, os.ErrNotExist
	}
	return FileInfo{
		Offset: 5,
		Size:   20,
	}, nil
}

// WriteChunk asserts it receives offset 5 and the body "hello".
// NOTE(review): the value receiver means the assignment to s.called below is
// lost after each call, so the "called only once" guard can never fire —
// consider a pointer receiver or a shared *bool.
func (s patchStore) WriteChunk(id string, offset int64, src io.Reader) error {
	if s.called {
		s.t.Errorf("WriteChunk must be called only once")
	}
	s.called = true
	if offset != 5 {
		s.t.Errorf("Expected offset to be 5 (got %v)", offset)
	}
	data, err := ioutil.ReadAll(src)
	if err != nil {
		s.t.Error(err)
	}
	if string(data) != "hello" {
		s.t.Errorf("Expected source to be 'hello'")
	}
	return nil
}
func TestPatch(t *testing.T) {
handler, _ := NewHandler(Config{
MaxSize: 100,
DataStore: patchStore{
t: t,
},
})
// Test successful request
req, _ := http.NewRequest("PATCH", "yes", strings.NewReader("hello"))
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Offset", "5")
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
t.Errorf("Expected %v (got %v)", http.StatusNoContent, w.Code)
}
// Test non-existing file
req, _ = http.NewRequest("PATCH", "no", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Offset", "0")
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
t.Errorf("Expected %v (got %v)", http.StatusNotFound, w.Code)
}
// Test wrong offset
req, _ = http.NewRequest("PATCH", "yes", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Offset", "4")
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
t.Errorf("Expected %v (got %v)", http.StatusConflict, w.Code)
}
// Test exceeding file size
req, _ = http.NewRequest("PATCH", "yes", strings.NewReader("hellothisismorethan15bytes"))
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Offset", "5")
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusRequestEntityTooLarge {
t.Errorf("Expected %v (got %v)", http.StatusRequestEntityTooLarge, w.Code)
}
}
// overflowPatchStore simulates an upload of size 20 with offset 5 under the id
// "yes" and asserts that WriteChunk receives at most the 15 remaining bytes,
// even when the request body is longer.
type overflowPatchStore struct {
	zeroStore
	t *testing.T
	// called is meant to flag a second WriteChunk invocation.
	called bool
}

func (s overflowPatchStore) GetInfo(id string) (FileInfo, error) {
	if id != "yes" {
		return FileInfo{}, os.ErrNotExist
	}
	return FileInfo{
		Offset: 5,
		Size:   20,
	}, nil
}

// WriteChunk asserts offset 5 and a body truncated to 15 bytes.
// NOTE(review): as with patchStore, the value receiver makes the s.called
// guard ineffective — the write to it is discarded when the method returns.
func (s overflowPatchStore) WriteChunk(id string, offset int64, src io.Reader) error {
	if s.called {
		s.t.Errorf("WriteChunk must be called only once")
	}
	s.called = true
	if offset != 5 {
		s.t.Errorf("Expected offset to be 5 (got %v)", offset)
	}
	data, err := ioutil.ReadAll(src)
	if err != nil {
		s.t.Error(err)
	}
	if len(data) != 15 {
		s.t.Errorf("Expected 15 bytes got %v", len(data))
	}
	return nil
}
// noEOFReader implements io.Reader, io.Writer, io.Closer but does not return
// an io.EOF when the internal buffer is empty. This way we can simulate slow
// networks.
type noEOFReader struct {
closed bool
buffer []byte
}
func (r *noEOFReader) Read(dst []byte) (int, error) {
if r.closed && len(r.buffer) == 0 {
return 0, io.EOF
}
n := copy(dst, r.buffer)
r.buffer = r.buffer[n:]
return n, nil
}
func (r *noEOFReader) Close() error {
r.closed = true
return nil
}
func (r *noEOFReader) Write(src []byte) (int, error) {
r.buffer = append(r.buffer, src...)
return len(src), nil
}
func TestPatchOverflow(t *testing.T) {
handler, _ := NewHandler(Config{
MaxSize: 100,
DataStore: overflowPatchStore{
t: t,
},
})
body := &noEOFReader{}
go func() {
body.Write([]byte("hellothisismorethan15bytes"))
body.Close()
}()
// Test too big body exceeding file size
req, _ := http.NewRequest("PATCH", "yes", body)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Offset", "5")
req.Header.Set("Content-Length", "3")
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
t.Errorf("Expected %v (got %v)", http.StatusNoContent, w.Code)
}
}

View File

@ -1,72 +0,0 @@
#!/bin/bash
#
# This script demonstrates basic interaction with tusd from BASH/curl.
# Can also be used as a simple way to test, or extend to see how it
# responds to edge cases or learn the basic tech.
#
# Compatible with tus resumable upload protocol 0.1
# Constants
SERVICE="localhost:1080"
# Environment
set -e
__FILE__="$(test -L "${0}" && readlink "${0}" || echo "${0}")"
__DIR__="$(cd "$(dirname "${__FILE__}")"; echo $(pwd);)"
# POST requests the upload location
echo -ne "POST '${SERVICE}' \t\t\t\t\t\t\t"
location=$(curl -s \
--include \
--request POST \
--header 'Content-Range: bytes */26' \
${SERVICE}/files |awk -F': ' '/^Location/ {print $2}' |tr -d '\r')
# `tr -d '\r'` is required or location will have one in it ---^
echo "<-- Location: ${location}"
# PUT some data
echo -ne "PUT '${SERVICE}${location}' \t\t"
status=$(curl -s \
--include \
--request PUT \
--header 'Content-Length: 3' \
--header 'Content-Range: bytes 0-2/26' \
--data 'abc' \
${SERVICE}${location} |head -1 |tr -d '\r')
echo "<-- ${status}"
# check that data with HEAD
echo -ne "HEAD '${SERVICE}${location}' \t\t"
has_range=$(curl -s -I -X HEAD ${SERVICE}${location} |awk -F': ' '/^Range/ {print $2}' |tr -d '\r')
echo "<-- Range: ${has_range}"
# NB: getting partials is not supported and results in a
# CopyN of size %!s(int64=26) failed with: EOF
# should you try uncommenting this:
#echo -ne "GET '${SERVICE}${location}' \t\t"
#has_content=$(curl -s ${SERVICE}${location})
#echo "<-- ${has_content}"
# PUT some data
echo -ne "PUT '${SERVICE}${location}' \t\t"
status=$(curl -s \
--include \
--request PUT \
--header 'Content-Length: 3' \
--header 'Content-Range: bytes 23-25/26' \
--data 'xyz' \
${SERVICE}${location} |head -1 |tr -d '\r')
echo "<-- ${status}"
# check that data with HEAD
echo -ne "HEAD '${SERVICE}${location}' \t\t"
has_range=$(curl -s -I -X HEAD ${SERVICE}${location} |awk -F': ' '/^Range/ {print $2}' |tr -d '\r')
echo "<-- Range: ${has_range}"
# get that data with GET
echo -ne "GET '${SERVICE}${location}' \t\t"
has_content=$(curl -s ${SERVICE}${location})
echo "<-- ${has_content}"

View File

@ -1,100 +0,0 @@
package main
import (
tushttp "github.com/tus/tusd/src/http"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
const basePath = "/files/"
func main() {
log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
log.Printf("tusd started")
addr := ":1080"
if envPort := os.Getenv("TUSD_PORT"); envPort != "" {
addr = ":" + envPort
}
maxSize := int64(1024 * 1024 * 1024)
if envMaxSize := os.Getenv("TUSD_DATA_STORE_MAXSIZE"); envMaxSize != "" {
parsed, err := strconv.ParseInt(envMaxSize, 10, 64)
if err != nil {
panic("bad TUSD_DATA_STORE_MAXSIZE: " + err.Error())
}
maxSize = parsed
}
dir := os.Getenv("TUSD_DATA_DIR")
if dir == "" {
if workingDir, err := os.Getwd(); err != nil {
panic(err)
} else {
dir = filepath.Join(workingDir, "tus_data")
}
}
tusConfig := tushttp.HandlerConfig{
Dir: dir,
MaxSize: maxSize,
BasePath: basePath,
}
log.Printf("handler config: %+v", tusConfig)
tusHandler, err := tushttp.NewHandler(tusConfig)
if err != nil {
panic(err)
}
http.HandleFunc(basePath, func(w http.ResponseWriter, r *http.Request) {
// Allow CORS for almost everything. This needs to be revisted / limited to
// routes and methods that need it.
// Domains allowed to make requests
w.Header().Add("Access-Control-Allow-Origin", "*")
// Methods clients are allowed to use
w.Header().Add("Access-Control-Allow-Methods", "HEAD,GET,PUT,POST,PATCH,DELETE")
// Headers clients are allowed to send
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Content-Disposition, Final-Length, Offset")
// Headers clients are allowed to receive
w.Header().Add("Access-Control-Expose-Headers", "Location, Range, Content-Disposition, Offset")
if r.Method == "OPTIONS" {
return
}
tusHandler.ServeHTTP(w, r)
})
go handleUploads(tusHandler)
// On http package's default action, a broken http connection will cause io.Copy() stuck because it always suppose more data will coming and wait for them infinitely
// To prevent it happen, we should set a specific timeout value on http server
s := &http.Server{
Addr: addr,
Handler: nil,
ReadTimeout: 8 * time.Second,
WriteTimeout: 8 * time.Second,
MaxHeaderBytes: 0,
}
log.Printf("servering clients at http://localhost%s", addr)
if err := s.ListenAndServe(); err != nil {
panic(err)
}
}
func handleUploads(tus *tushttp.Handler) {
for {
select {
case err := <-tus.Error:
log.Printf("error: %s", err)
}
}
}

View File

@ -1,66 +0,0 @@
package http
import (
"sort"
)
// chunk holds the offsets for a partial piece of data
type chunk struct {
Start int64 `json:"start"`
End int64 `json:"end"`
}
// Size returns the number of bytes between Start and End.
func (c chunk) Size() int64 {
return c.End - c.Start + 1
}
// chunkSet holds a set of chunks and helps with adding/merging new chunks into
// set set.
type chunkSet []chunk
// Add merges a newChunk into a chunkSet. This may lead to the chunk being
// combined with one or more adjecent chunks, possibly shrinking the chunkSet
// down to a single member.
func (c *chunkSet) Add(newChunk chunk) {
if newChunk.Size() <= 0 {
return
}
*c = append(*c, newChunk)
sort.Sort(c)
// merge chunks that can be combined
for i := 0; i < len(*c)-1; i++ {
current := (*c)[i]
next := (*c)[i+1]
if current.End+1 < next.Start {
continue
}
*c = append((*c)[0:i], (*c)[i+1:]...)
if current.End > next.End {
(*c)[i].End = current.End
}
if current.Start < next.Start {
(*c)[i].Start = current.Start
}
i--
}
}
func (c chunkSet) Len() int {
return len(c)
}
func (c chunkSet) Less(i, j int) bool {
return c[i].Start < c[j].Start
}
func (c chunkSet) Swap(i, j int) {
c[i], c[j] = c[j], c[i]
}

View File

@ -1,99 +0,0 @@
package http
import (
"fmt"
"testing"
)
var chunkSet_AddTests = []struct {
Name string
Add []chunk
Expect []chunk
}{
{
Name: "add one",
Add: []chunk{{Start: 1, End: 5}},
Expect: []chunk{{Start: 1, End: 5}},
},
{
Name: "add twice",
Add: []chunk{{Start: 1, End: 5}, {Start: 1, End: 5}},
Expect: []chunk{{Start: 1, End: 5}},
},
{
Name: "append",
Add: []chunk{{Start: 1, End: 5}, {Start: 7, End: 10}},
Expect: []chunk{{Start: 1, End: 5}, {Start: 7, End: 10}},
},
{
Name: "insert",
Add: []chunk{{Start: 0, End: 5}, {Start: 12, End: 15}, {Start: 7, End: 10}},
Expect: []chunk{{Start: 0, End: 5}, {Start: 7, End: 10}, {Start: 12, End: 15}},
},
{
Name: "prepend",
Add: []chunk{{Start: 5, End: 10}, {Start: 1, End: 3}},
Expect: []chunk{{Start: 1, End: 3}, {Start: 5, End: 10}},
},
{
Name: "grow start",
Add: []chunk{{Start: 1, End: 5}, {Start: 0, End: 5}},
Expect: []chunk{{Start: 0, End: 5}},
},
{
Name: "grow end",
Add: []chunk{{Start: 1, End: 5}, {Start: 1, End: 6}},
Expect: []chunk{{Start: 1, End: 6}},
},
{
Name: "grow end with multiple items",
Add: []chunk{{Start: 1, End: 5}, {Start: 7, End: 10}, {Start: 8, End: 15}},
Expect: []chunk{{Start: 1, End: 5}, {Start: 7, End: 15}},
},
{
Name: "grow exact end match",
Add: []chunk{{Start: 1, End: 5}, {Start: 6, End: 6}},
Expect: []chunk{{Start: 1, End: 6}},
},
{
Name: "sink",
Add: []chunk{{Start: 1, End: 5}, {Start: 2, End: 3}},
Expect: []chunk{{Start: 1, End: 5}},
},
{
Name: "swallow",
Add: []chunk{{Start: 1, End: 5}, {Start: 6, End: 10}, {Start: 0, End: 11}},
Expect: []chunk{{Start: 0, End: 11}},
},
{
Name: "ignore 0 byte chunks",
Add: []chunk{{Start: 0, End: -1}},
Expect: []chunk{},
},
{
Name: "ignore invalid chunks",
Add: []chunk{{Start: 0, End: -2}},
Expect: []chunk{},
},
}
func Test_chunkSet_Add(t *testing.T) {
for _, test := range chunkSet_AddTests {
var chunks chunkSet
for _, chunk := range test.Add {
chunks.Add(chunk)
}
expected := fmt.Sprintf("%+v", test.Expect)
got := fmt.Sprintf("%+v", chunks)
if got != expected {
t.Errorf(
"Failed test '%s':\nexpected: %s\ngot: %s",
test.Name,
expected,
got,
)
}
}
}

View File

@ -1 +0,0 @@
package http

View File

@ -1,261 +0,0 @@
package http
import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"log"
"os"
"path"
"sort"
"sync"
"time"
)
const defaultFilePerm = 0666
// @TODO should not be exported for now, the API isn't stable / done well
type dataStore struct {
dir string
maxSize int64
// infoLocksLock locks the infosLocks map
infoLocksLock *sync.Mutex
// infoLocks locks the .info files
infoLocks map[string]*sync.RWMutex
}
func newDataStore(dir string, maxSize int64) *dataStore {
store := &dataStore{
dir: dir,
maxSize: maxSize,
infoLocksLock: &sync.Mutex{},
infoLocks: make(map[string]*sync.RWMutex),
}
go store.gcLoop()
return store
}
// infoLock returns the lock for the .info file of the given file id.
func (s *dataStore) infoLock(id string) *sync.RWMutex {
s.infoLocksLock.Lock()
defer s.infoLocksLock.Unlock()
lock := s.infoLocks[id]
if lock == nil {
lock = &sync.RWMutex{}
s.infoLocks[id] = lock
}
return lock
}
func (s *dataStore) CreateFile(id string, finalLength int64, meta map[string]string) error {
file, err := os.OpenFile(s.filePath(id), os.O_CREATE|os.O_WRONLY, defaultFilePerm)
if err != nil {
return err
}
defer file.Close()
s.infoLock(id).Lock()
defer s.infoLock(id).Unlock()
return s.writeInfo(id, FileInfo{FinalLength: finalLength, Meta: meta})
}
func (s *dataStore) WriteFileChunk(id string, offset int64, src io.Reader) error {
file, err := os.OpenFile(s.filePath(id), os.O_WRONLY, defaultFilePerm)
if err != nil {
return err
}
defer file.Close()
if n, err := file.Seek(offset, os.SEEK_SET); err != nil {
return err
} else if n != offset {
return errors.New("WriteFileChunk: seek failure")
}
n, err := io.Copy(file, src)
if n > 0 {
if err := s.setOffset(id, offset+n); err != nil {
return err
}
}
return err
}
func (s *dataStore) ReadFile(id string) (io.ReadCloser, error) {
return os.Open(s.filePath(id))
}
func (s *dataStore) GetInfo(id string) (FileInfo, error) {
s.infoLock(id).RLock()
defer s.infoLock(id).RUnlock()
return s.getInfo(id)
}
// getInfo is the same as GetInfo, but does not apply any locks, requiring
// the caller to take care of this.
func (s *dataStore) getInfo(id string) (FileInfo, error) {
info := FileInfo{}
data, err := ioutil.ReadFile(s.infoPath(id))
if err != nil {
return info, err
}
err = json.Unmarshal(data, &info)
return info, err
}
func (s *dataStore) writeInfo(id string, info FileInfo) error {
data, err := json.Marshal(info)
if err != nil {
return err
}
return ioutil.WriteFile(s.infoPath(id), data, defaultFilePerm)
}
// setOffset updates the offset of a file, unless the current offset on disk is
// already greater.
func (s *dataStore) setOffset(id string, offset int64) error {
s.infoLock(id).Lock()
defer s.infoLock(id).Unlock()
info, err := s.getInfo(id)
if err != nil {
return err
}
// never decrement the offset
if info.Offset >= offset {
return nil
}
info.Offset = offset
return s.writeInfo(id, info)
}
func (s *dataStore) filePath(id string) string {
return path.Join(s.dir, id) + ".bin"
}
func (s *dataStore) infoPath(id string) string {
return path.Join(s.dir, id) + ".info"
}
// TODO: This works for now, but it would be better if we would trigger gc()
// manually whenever a storage operation will need more space, telling gc() how
// much space we need. If the amount of space required fits into the max, we
// can simply ignore the gc request, otherwise delete just as much as we need.
func (s *dataStore) gcLoop() {
for {
if before, after, err := s.gc(); err != nil {
log.Printf("dataStore: gc error: %s", err)
} else if before != after {
log.Printf("dataStore: gc before: %d, after: %d", before, after)
}
time.Sleep(1 * time.Second)
}
}
// BUG: gc could interfer with active uploads if storage pressure is high. To
// fix this we need a mechanism to detect this scenario and reject new storage
// ops if the current storage ops require all of the available dataStore size.
// gc shrinks the amount of bytes used by the dataStore to <= maxSize by
// deleting the oldest files according to their mtime.
func (s *dataStore) gc() (before int64, after int64, err error) {
dataDir, err := os.Open(s.dir)
if err != nil {
return
}
defer dataDir.Close()
stats, err := dataDir.Readdir(-1)
if err != nil {
return
}
sortableStats := sortableFiles(stats)
sort.Sort(sortableStats)
deleted := make(map[string]bool, len(sortableStats))
// Delete enough files so that we are <= maxSize
for _, stat := range sortableStats {
size := stat.Size()
before += size
if before <= s.maxSize {
after += size
continue
}
name := stat.Name()
fullPath := path.Join(s.dir, name)
if err = os.Remove(fullPath); err != nil {
return
}
deleted[fullPath] = true
}
// Make sure we did not delete a .info file but forgot the .bin or vice-versa.
for fullPath, _ := range deleted {
ext := path.Ext(fullPath)
base := fullPath[0 : len(fullPath)-len(ext)]
counterPath := ""
if ext == ".bin" {
counterPath = base + ".info"
} else if ext == ".info" {
counterPath = base + ".bin"
}
if counterPath == "" || deleted[counterPath] {
continue
}
stat, statErr := os.Stat(counterPath)
if statErr != nil {
if os.IsNotExist(statErr) {
continue
}
err = statErr
return
}
err = os.Remove(counterPath)
if err != nil {
return
}
after -= stat.Size()
}
return
}
type sortableFiles []os.FileInfo
func (s sortableFiles) Len() int {
return len(s)
}
func (s sortableFiles) Less(i, j int) bool {
return s[i].ModTime().After(s[j].ModTime())
}
func (s sortableFiles) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// FileInfo describes the state of a single upload as tracked by the
// dataStore.
type FileInfo struct {
	// Offset is the number of bytes received and written so far.
	Offset int64
	// FinalLength is the total size in bytes the upload will have once it
	// is complete.
	FinalLength int64
	// Meta holds arbitrary key/value pairs attached to the upload.
	Meta map[string]string
}

View File

@ -1,3 +0,0 @@
// Package http contains a client and server implementation of the tus protocol
// and is meant to be used by other applications.
package http

View File

@ -1,252 +0,0 @@
package http
import (
"errors"
"fmt"
"io"
"net/http"
"os"
"path"
"regexp"
"strconv"
"strings"
)
var fileUrlMatcher = regexp.MustCompile("^/([a-z0-9]{32})$")
// HandlerConfig holds the configuration for a tus Handler.
type HandlerConfig struct {
	// Dir points to a filesystem path used by tus to store uploaded and partial
	// files. Will be created if does not exist yet. Required.
	Dir string

	// MaxSize defines how many bytes may be stored inside Dir. Exceeding this
	// limit will cause the oldest upload files to be deleted until enough space
	// is available again. Required.
	MaxSize int64

	// BasePath defines the url path used for handling uploads, e.g. "/files/".
	// Must contain a trailing "/". Requests not matching this base path will
	// cause a 404, so make sure you dispatch only appropriate requests to the
	// handler. Required.
	BasePath string
}
// NewHandler returns an initialized Handler. An error may occur if the
// config.Dir is not writable.
func NewHandler(config HandlerConfig) (*Handler, error) {
	// The data store directory must exist before any upload can be accepted.
	if err := os.MkdirAll(config.Dir, 0777); err != nil {
		return nil, err
	}

	// A single unbuffered channel backs both the public receive-only Error
	// field and the private send side.
	errs := make(chan error)

	handler := &Handler{
		store:     newDataStore(config.Dir, config.MaxSize),
		config:    config,
		Error:     errs,
		sendError: errs,
	}
	return handler, nil
}
// Handler is a http.Handler that implements tus resumable upload protocol.
type Handler struct {
	// store persists upload data and metadata inside config.Dir.
	store *dataStore
	// config is the HandlerConfig this Handler was created with.
	config HandlerConfig
	// Error provides error events for logging purposes.
	Error <-chan error
	// same chan as Error, used for sending.
	sendError chan<- error
}
// ServeHTTP processes an incoming request according to the tus protocol.
// POST against BasePath creates a file; PATCH/HEAD/GET against a file url
// operate on an existing upload. Anything else is a 404 or 405.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Verify that url matches BasePath
	absPath := r.URL.Path
	if !strings.HasPrefix(absPath, h.config.BasePath) {
		err := errors.New("unknown url: " + absPath + " - does not match BasePath: " + h.config.BasePath)
		h.err(err, w, http.StatusNotFound)
		return
	}

	// example relPath results: "/", "/f81d4fae7dec11d0a76500a0c91e6bf6", etc.
	relPath := absPath[len(h.config.BasePath)-1:]

	// file creation request
	if relPath == "/" {
		if r.Method == "POST" {
			h.createFile(w, r)
			return
		}

		// handle invalid method
		w.Header().Set("Allow", "POST")
		err := errors.New(r.Method + " used against file creation url. Only POST is allowed.")
		h.err(err, w, http.StatusMethodNotAllowed)
		return
	}

	if matches := fileUrlMatcher.FindStringSubmatch(relPath); matches != nil {
		id := matches[1]

		switch r.Method {
		case "PATCH":
			h.patchFile(w, r, id)
			return
		case "HEAD":
			h.headFile(w, r, id)
			return
		case "GET":
			h.getFile(w, r, id)
			return
		}

		// handle invalid method
		// BUG FIX: this message previously claimed "file creation url",
		// copied from the branch above; this is the file url branch.
		allowed := "HEAD,PATCH"
		w.Header().Set("Allow", allowed)
		err := errors.New(r.Method + " used against file url. Allowed: " + allowed)
		h.err(err, w, http.StatusMethodNotAllowed)
		return
	}

	// handle unknown url
	err := errors.New("unknown url: " + absPath + " - does not match file pattern")
	h.err(err, w, http.StatusNotFound)
}
// createFile handles a POST against the creation url: it validates the
// Final-Length header, allocates a new upload in the data store and replies
// with 201 plus a Location header pointing at the new file url.
func (h *Handler) createFile(w http.ResponseWriter, r *http.Request) {
	finalLength, err := getPositiveIntHeader(r, "Final-Length")
	if err != nil {
		h.err(err, w, http.StatusBadRequest)
		return
	}

	id := uid()

	// @TODO: Define meta data extension and implement it here
	// @TODO: Make max finalLength configurable, reply with error if exceeded.
	// This should go into the protocol as well.
	if err := h.store.CreateFile(id, finalLength, nil); err != nil {
		h.err(err, w, http.StatusInternalServerError)
		return
	}

	w.Header().Set("Location", h.absUrl(r, "/"+id))
	w.WriteHeader(http.StatusCreated)
}
// patchFile appends the request body to the upload identified by id, after
// checking that the client-supplied Offset header does not exceed the offset
// currently stored for the upload.
func (h *Handler) patchFile(w http.ResponseWriter, r *http.Request, id string) {
	offset, err := getPositiveIntHeader(r, "Offset")
	if err != nil {
		h.err(err, w, http.StatusBadRequest)
		return
	}

	info, err := h.store.GetInfo(id)
	if err != nil {
		h.err(err, w, http.StatusInternalServerError)
		return
	}

	if offset > info.Offset {
		h.err(fmt.Errorf("Offset: %d exceeds current offset: %d", offset, info.Offset), w, http.StatusForbidden)
		return
	}

	// @TODO Test offset < current offset
	// @TODO handle 404 properly (goes for all h.err calls)
	if err := h.store.WriteFileChunk(id, offset, r.Body); err != nil {
		h.err(err, w, http.StatusInternalServerError)
		return
	}
}
// headFile answers a HEAD request with the current Offset of the upload, or
// with an empty 404 if the upload is unknown.
func (h *Handler) headFile(w http.ResponseWriter, r *http.Request, id string) {
	info, err := h.store.GetInfo(id)
	if err != nil {
		w.Header().Set("Content-Length", "0")
		w.WriteHeader(http.StatusNotFound)
		return
	}

	w.Header().Set("Offset", strconv.FormatInt(info.Offset, 10))
}
// GET requests on files aren't part of the protocol yet,
// but it is implemented here anyway for the demo. It still lacks the meta data
// extension in order to send the proper content type header.
//
// NOTE(review): Content-Length is set to info.FinalLength, but for an upload
// still in progress the stored file may hold only info.Offset bytes. Whether
// the store pre-allocates the full length is not visible here - confirm,
// otherwise clients of partial uploads receive a short body.
func (h *Handler) getFile(w http.ResponseWriter, r *http.Request, fileId string) {
	info, err := h.store.GetInfo(fileId)
	if os.IsNotExist(err) {
		h.err(err, w, http.StatusNotFound)
		return
	}
	if err != nil {
		h.err(err, w, http.StatusInternalServerError)
		return
	}

	data, err := h.store.ReadFile(fileId)
	if os.IsNotExist(err) {
		h.err(err, w, http.StatusNotFound)
		return
	}
	if err != nil {
		h.err(err, w, http.StatusInternalServerError)
		return
	}
	defer data.Close()

	w.Header().Set("Offset", strconv.FormatInt(info.Offset, 10))
	// @TODO: Once the meta extension is done, send the proper content type here
	//w.Header().Set("Content-Type", info.Meta.ContentType)
	w.Header().Set("Content-Length", strconv.FormatInt(info.FinalLength, 10))
	// A short or failed copy cannot be reported to the client anymore at this
	// point since the headers have already been written.
	if _, err := io.CopyN(w, data, info.FinalLength); err != nil {
		return
	}
}
func getPositiveIntHeader(r *http.Request, key string) (int64, error) {
val := r.Header.Get(key)
if val == "" {
return 0, errors.New(key + " header must not be empty")
}
intVal, err := strconv.ParseInt(val, 10, 64)
if err != nil {
return 0, errors.New("invalid " + key + " header: " + err.Error())
} else if intVal < 0 {
return 0, errors.New(key + " header must be > 0")
}
return intVal, nil
}
// absUrl turns a relPath (e.g. "/foo") into an absolute url (e.g.
// "http://example.com/foo") using the Host of the incoming request.
//
// @TODO: Look at r.TLS to determine the url scheme.
// @TODO: Make url prefix user configurable (optional) to deal with reverse
//        proxies. This could be done by turning BasePath into BaseURL that
//        that could be relative or absolute.
func (h *Handler) absUrl(r *http.Request, relPath string) string {
	cleaned := path.Clean(h.config.BasePath + relPath)
	return "http://" + r.Host + cleaned
}
// err sends a http error response with the given status and publishes the
// error to the Error channel for logging purposes.
func (h *Handler) err(err error, w http.ResponseWriter, status int) {
	w.WriteHeader(status)
	w.Write([]byte(err.Error() + "\n"))

	// Publish without blocking so a missing channel reader never stalls
	// request handling.
	select {
	case h.sendError <- err:
	default:
	}
}

View File

@ -1,353 +0,0 @@
// handler_test.go focuses on functional tests that verify that the Handler
// implements the tus protocol correctly.
package http
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"regexp"
"strings"
"testing"
)
const basePath = "/files/"
// Setup creates a Handler backed by a fresh temporary directory and exposes
// it through an httptest server. It panics on any initialization error;
// callers must invoke Teardown when done.
func Setup() *TestSetup {
	dir, err := ioutil.TempDir("", "tus_handler_test")
	if err != nil {
		panic(err)
	}

	config := HandlerConfig{
		Dir:      dir,
		MaxSize:  1024 * 1024,
		BasePath: basePath,
	}

	handler, err := NewHandler(config)
	if err != nil {
		panic(err)
	}

	server := httptest.NewServer(handler)
	return &TestSetup{
		Handler: handler,
		Server:  server,
	}
}
// TestSetup bundles the handler under test with the httptest server that
// serves it.
type TestSetup struct {
	Handler *Handler
	Server  *httptest.Server
}
// Teardown stops the test server and removes the temporary data directory
// created by Setup. It panics if the directory cannot be removed.
func (s *TestSetup) Teardown() {
	s.Server.Close()
	if err := os.RemoveAll(s.Handler.config.Dir); err != nil {
		panic(err)
	}
}
// Protocol_FileCreation_Tests covers the file creation endpoint: an invalid
// method, missing/invalid/negative Final-Length headers, and a valid request
// that must yield a Location header pointing at the new file url.
var Protocol_FileCreation_Tests = []struct {
	Description string
	*TestRequest
}{
	{
		Description: "Bad method",
		TestRequest: &TestRequest{
			Method:           "PUT",
			ExpectStatusCode: http.StatusMethodNotAllowed,
			ExpectHeaders:    map[string]string{"Allow": "POST"},
		},
	},
	{
		Description: "Missing Final-Length header",
		TestRequest: &TestRequest{
			ExpectStatusCode: http.StatusBadRequest,
		},
	},
	{
		// Any non-integer value must be rejected with a 400.
		// (Fixture cleanup: replaced an unprofessional placeholder value.)
		Description: "Invalid Final-Length header",
		TestRequest: &TestRequest{
			Headers:          map[string]string{"Final-Length": "not-a-number"},
			ExpectStatusCode: http.StatusBadRequest,
		},
	},
	{
		Description: "Negative Final-Length header",
		TestRequest: &TestRequest{
			Headers:          map[string]string{"Final-Length": "-10"},
			ExpectStatusCode: http.StatusBadRequest,
		},
	},
	{
		Description: "Valid Request",
		TestRequest: &TestRequest{
			Headers:          map[string]string{"Final-Length": "1024"},
			ExpectStatusCode: http.StatusCreated,
			MatchHeaders: map[string]*regexp.Regexp{
				"Location": regexp.MustCompile("^http://.+" + regexp.QuoteMeta(basePath) + "[a-z0-9]{32}$"),
			},
		},
	},
}
// TestProtocol_FileCreation replays every Protocol_FileCreation_Tests entry
// against the creation url of a fresh server, defaulting the method to POST.
func TestProtocol_FileCreation(t *testing.T) {
	setup := Setup()
	defer setup.Teardown()

	for _, test := range Protocol_FileCreation_Tests {
		t.Logf("test: %s", test.Description)

		// The table only describes the request; the target url and default
		// method are filled in here.
		test.Url = setup.Server.URL + setup.Handler.config.BasePath
		if test.Method == "" {
			test.Method = "POST"
		}

		if err := test.Do(); err != nil {
			t.Error(err)
			continue
		}
	}
}
// Protocol_Core_Tests covers the core upload protocol against an existing
// file url: method validation, Offset header validation, a single-request
// upload, resuming (including an overlapping chunk) and an offset beyond the
// current one. ExpectFileContent, when non-empty, is checked against the
// stored file after all requests ran.
var Protocol_Core_Tests = []struct {
	Description       string
	FinalLength       int64
	Requests          []TestRequest
	ExpectFileContent string
}{
	{
		Description: "Bad method",
		FinalLength: 1024,
		Requests: []TestRequest{
			{
				Method:           "PUT",
				ExpectStatusCode: http.StatusMethodNotAllowed,
				ExpectHeaders:    map[string]string{"Allow": "HEAD,PATCH"},
			},
		},
	},
	{
		Description: "Missing Offset header",
		FinalLength: 5,
		Requests: []TestRequest{
			{Method: "PATCH", Body: "hello", ExpectStatusCode: http.StatusBadRequest},
		},
	},
	{
		Description: "Negative Offset header",
		FinalLength: 5,
		Requests: []TestRequest{
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "-10"},
				Body:             "hello",
				ExpectStatusCode: http.StatusBadRequest,
			},
		},
	},
	{
		Description: "Invalid Offset header",
		FinalLength: 5,
		Requests: []TestRequest{
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "lalala"},
				Body:             "hello",
				ExpectStatusCode: http.StatusBadRequest,
			},
		},
	},
	{
		Description:       "Single PATCH Upload",
		FinalLength:       5,
		ExpectFileContent: "hello",
		Requests: []TestRequest{
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "0"},
				Body:             "hello",
				ExpectStatusCode: http.StatusOK,
			},
		},
	},
	{
		Description:       "Simple Resume",
		FinalLength:       11,
		ExpectFileContent: "hello world",
		Requests: []TestRequest{
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "0"},
				Body:             "hello",
				ExpectStatusCode: http.StatusOK,
			},
			{
				Method:           "HEAD",
				ExpectStatusCode: http.StatusOK,
				ExpectHeaders:    map[string]string{"Offset": "5"},
			},
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "5"},
				Body:             " world",
				ExpectStatusCode: http.StatusOK,
			},
		},
	},
	{
		// The second chunk starts at offset 5 although 8 bytes are already
		// stored, so bytes 5-7 are rewritten with identical data.
		Description:       "Overlapping Resume",
		FinalLength:       11,
		ExpectFileContent: "hello world",
		Requests: []TestRequest{
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "0"},
				Body:             "hello wo",
				ExpectStatusCode: http.StatusOK,
			},
			{
				Method:           "HEAD",
				ExpectStatusCode: http.StatusOK,
				ExpectHeaders:    map[string]string{"Offset": "8"},
			},
			{
				Method:           "PATCH",
				Headers:          map[string]string{"Offset": "5"},
				Body:             " world",
				ExpectStatusCode: http.StatusOK,
			},
		},
	},
	{
		Description: "Offset exceeded",
		FinalLength: 5,
		Requests: []TestRequest{
			{
				Method:  "PATCH",
				Headers: map[string]string{"Offset": "1"},
				// Not sure if this is the right status to use. Once the parallel
				// chunks protocol spec is done, we can use NotImplemented as a
				// status until we implement support for this.
				ExpectStatusCode: http.StatusForbidden,
			},
		},
	},
}
// TestProtocol_Core creates a fresh file for every Protocol_Core_Tests entry,
// replays the recorded requests against its url and finally verifies the
// stored file content where the entry expects any.
func TestProtocol_Core(t *testing.T) {
	setup := Setup()
	defer setup.Teardown()

	// Labelled loop so a failing sub-request can skip the rest of its entry.
Tests:
	for _, test := range Protocol_Core_Tests {
		t.Logf("test: %s", test.Description)

		location := createFile(setup, test.FinalLength)
		for i, request := range test.Requests {
			t.Logf("- request #%d: %s", i+1, request.Method)
			request.Url = location
			if err := request.Do(); err != nil {
				t.Error(err)
				continue Tests
			}
		}

		if test.ExpectFileContent != "" {
			// The file id is the trailing 32-char hex part of the Location url.
			id := regexp.MustCompile("[a-z0-9]{32}$").FindString(location)
			reader, err := setup.Handler.store.ReadFile(id)
			if err != nil {
				t.Error(err)
				continue Tests
			}
			content, err := ioutil.ReadAll(reader)
			if err != nil {
				t.Error(err)
				continue Tests
			}
			if string(content) != test.ExpectFileContent {
				t.Errorf("expected content: %s, got: %s", test.ExpectFileContent, content)
				continue Tests
			}
		}
	}
}
// TestRequest is a test helper that performs and validates requests according
// to the struct fields below.
type TestRequest struct {
Method string
Url string
Headers map[string]string
ExpectStatusCode int
ExpectHeaders map[string]string
MatchHeaders map[string]*regexp.Regexp
Response *http.Response
Body string
}
func (r *TestRequest) Do() error {
req, err := http.NewRequest(r.Method, r.Url, strings.NewReader(r.Body))
if err != nil {
return err
}
for key, val := range r.Headers {
req.Header.Set(key, val)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != r.ExpectStatusCode {
return fmt.Errorf("unexpected status code: %d, expected: %d", res.StatusCode, r.ExpectStatusCode)
}
for key, val := range r.ExpectHeaders {
if got := res.Header.Get(key); got != val {
return fmt.Errorf("expected \"%s: %s\" header, but got: \"%s: %s\"", key, val, key, got)
}
}
for key, matcher := range r.MatchHeaders {
got := res.Header.Get(key)
if !matcher.MatchString(got) {
return fmt.Errorf("expected %s header to match: %s but got: %s", key, matcher.String(), got)
}
}
r.Response = res
return nil
}
// createFile is a test helper that creates a new file and returns the url.
// It panics (rather than failing the test) if the creation request fails or
// no Location header is returned.
func createFile(setup *TestSetup, finalLength int64) (url string) {
	req := TestRequest{
		Method:           "POST",
		Url:              setup.Server.URL + setup.Handler.config.BasePath,
		Headers:          map[string]string{"Final-Length": fmt.Sprintf("%d", finalLength)},
		ExpectStatusCode: http.StatusCreated,
	}
	if err := req.Do(); err != nil {
		panic(err)
	}

	location := req.Response.Header.Get("Location")
	if location == "" {
		panic("empty Location header")
	}
	return location
}

View File

@ -1,21 +0,0 @@
package http
import (
"encoding/hex"
"fmt"
"testing"
)
// BenchmarkFmtString measures converting a 16-byte id to its hexadecimal
// string form via fmt.Sprintf("%x", ...), for comparison with the direct
// hex.EncodeToString approach benchmarked below.
func BenchmarkFmtString(b *testing.B) {
	id := []byte("1234567891234567")
	for n := 0; n < b.N; n++ {
		_ = fmt.Sprintf("%x", id)
	}
}
// BenchmarkHexString measures converting a 16-byte id to its hexadecimal
// string form via hex.EncodeToString, for comparison with the fmt.Sprintf
// approach benchmarked above.
func BenchmarkHexString(b *testing.B) {
	id := []byte("1234567891234567")
	for n := 0; n < b.N; n++ {
		_ = hex.EncodeToString(id)
	}
}

29
tusd/main.go Normal file
View File

@ -0,0 +1,29 @@
package main
import (
"github.com/tus/tusd"
"github.com/tus/tusd/filestore"
"net/http"
)
// main starts a tusd server that stores uploads in ./data/, serves them
// under the /files/ base path and listens on port 1080.
func main() {
	store := filestore.FileStore{Path: "./data/"}

	config := tusd.Config{
		MaxSize:   1024 * 1024 * 1024,
		BasePath:  "files/",
		DataStore: store,
	}

	handler, err := tusd.NewHandler(config)
	if err != nil {
		panic(err)
	}

	// The handler expects paths relative to its BasePath, hence StripPrefix.
	http.Handle("/files/", http.StripPrefix("/files/", handler))
	if err := http.ListenAndServe(":1080", nil); err != nil {
		panic(err)
	}
}

View File

@ -1,4 +1,4 @@
package http
package uid
import (
"crypto/rand"
@ -11,7 +11,7 @@ import (
// without the dashes and significant bits.
//
// See: http://en.wikipedia.org/wiki/UUID#Random_UUID_probability_of_duplicates
func uid() string {
func Uid() string {
id := make([]byte, 16)
_, err := io.ReadFull(rand.Reader, id)
if err != nil {
@ -19,6 +19,5 @@ func uid() string {
// for random bits.
panic(err)
}
return hex.EncodeToString(id)
}