Correct linting issue and misspellings
parent 055406a783
commit 513fe9fcf3

@@ -35,7 +35,7 @@ type Config struct {
 	Logger *log.Logger
 	// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
 	// potentially set by proxies when generating an absolute URL in the
-	// reponse to POST requests.
+	// response to POST requests.
 	RespectForwardedHeaders bool
 }

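For reference, a minimal sketch of setting the two Config fields visible in this hunk. Only Logger and RespectForwardedHeaders are taken from the diff; the import path github.com/tus/tusd and the omission of all other Config fields (data store, base path, and so on) are assumptions, and constructing a handler is not part of this change.

package main

import (
	"log"
	"os"

	"github.com/tus/tusd"
)

func main() {
	config := tusd.Config{
		// Logger receives the handler's log output.
		Logger: log.New(os.Stdout, "[tusd] ", log.LstdFlags),
		// Trust X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
		// when building absolute URLs in responses to POST requests.
		RespectForwardedHeaders: true,
	}
	_ = config
}
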
@@ -69,7 +69,7 @@ type FinisherDataStore interface {
 // Common ways to store this information is in memory, on disk or using an
 // external service, such as ZooKeeper.
 // When multiple processes are attempting to access an upload, whether it be
-// by reading or writing, a syncronization mechanism is required to prevent
+// by reading or writing, a synchronization mechanism is required to prevent
 // data corruption, especially to ensure correct offset values and the proper
 // order of chunks inside a single upload.
 type LockerDataStore interface {

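The hunk only touches the interface's doc comment; its method set is not shown here. As a hedged illustration, assuming the interface exposes LockUpload and UnlockUpload methods keyed by upload ID, a caller-side helper could look like this:

package example

import "github.com/tus/tusd"

// withUploadLock runs fn while holding the lock for the given upload ID.
// The LockUpload/UnlockUpload method names are assumptions; the hunk above
// only shows the doc comment of LockerDataStore.
func withUploadLock(locker tusd.LockerDataStore, id string, fn func() error) error {
	if err := locker.LockUpload(id); err != nil {
		return err
	}
	// Release the lock even if fn fails, so other processes are not blocked.
	defer locker.UnlockUpload(id)
	return fn()
}
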
@@ -7,9 +7,9 @@
 // No cleanup is performed so you may want to run a cronjob to ensure your disk
 // is not filled up with old and finished uploads.
 //
-// In addition, it provides an exclusive upload locking mechansim using lock files
+// In addition, it provides an exclusive upload locking mechanism using lock files
 // which are stored on disk. Each of them stores the PID of the process which
-// aquired the lock. This allows locks to be automatically freed when a process
+// acquired the lock. This allows locks to be automatically freed when a process
 // is unable to release it on its own because the process is not alive anymore.
 // For more information, consult the documentation for tusd.LockerDataStore
 // interface, which is implemented by FileStore

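The doc comment describes lock files that record the owning PID. Purely as an illustration of that idea, and not FileStore's actual implementation, a PID-bearing lock file could be created roughly like this:

package example

import (
	"fmt"
	"os"
	"path/filepath"
)

// createLockFile sketches the idea from the doc comment above: create
// <id>.lock exclusively and record the owning PID so a stale lock left by a
// dead process can later be detected and removed.
func createLockFile(dir, id string) error {
	path := filepath.Join(dir, id+".lock")
	f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
	if err != nil {
		// The file already exists (lock held) or another I/O error occurred.
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%d", os.Getpid())
	return err
}
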
@@ -161,7 +161,7 @@ func (store FileStore) UnlockUpload(id string) error {

 	// A "no such file or directory" will be returned if no lockfile was found.
 	// Since this means that the file has never been locked, we drop the error
-	// and continue as if nothing happend.
+	// and continue as if nothing happened.
 	if os.IsNotExist(err) {
 		err = nil
 	}

@@ -1,12 +1,12 @@
 // Package memorylocker provides an in-memory locking mechanism.
 //
 // When multiple processes are attempting to access an upload, whether it be
-// by reading or writing, a syncronization mechanism is required to prevent
+// by reading or writing, a synchronization mechanism is required to prevent
 // data corruption, especially to ensure correct offset values and the proper
 // order of chunks inside a single upload.
 //
 // MemoryLocker persists locks using memory and therefore allowing a simple and
-// cheap mechansim. Locks will only exist as long as this object is kept in
+// cheap mechanism. Locks will only exist as long as this object is kept in
 // reference and will be erased if the program exits.
 package memorylocker

@@ -17,7 +17,7 @@ import (
 )

 // MemoryLocker persists locks using memory and therefore allowing a simple and
-// cheap mechansim. Locks will only exist as long as this object is kept in
+// cheap mechanism. Locks will only exist as long as this object is kept in
 // reference and will be erased if the program exits.
 type MemoryLocker struct {
 	locks map[string]bool

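As context for the locks map[string]bool field, here is a hedged sketch of what lock and unlock operations over such a map can look like. The mutex and the method names are illustrative additions and not necessarily what the memorylocker package itself does:

package example

import (
	"errors"
	"sync"
)

// sketchLocker mirrors the idea of MemoryLocker's locks map[string]bool,
// with an added mutex for safe concurrent use within one process.
type sketchLocker struct {
	mu    sync.Mutex
	locks map[string]bool
}

func (l *sketchLocker) lock(id string) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.locks[id] {
		return errors.New("upload is already locked")
	}
	l.locks[id] = true
	return nil
}

func (l *sketchLocker) unlock(id string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.locks, id)
}
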
@@ -80,7 +80,7 @@ func newMetrics() Metrics {
 func newErrorsTotalMap() map[string]*uint64 {
 	m := make(map[string]*uint64, len(ErrStatusCodes)+1)

-	for err, _ := range ErrStatusCodes {
+	for err := range ErrStatusCodes {
 		m[err.Error()] = new(uint64)
 	}

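The lint fix here is the idiomatic range form: when only the key is needed, the blank identifier for the value can be dropped entirely. A standalone illustration with hypothetical data:

package main

import "fmt"

func main() {
	codes := map[string]int{"NotFound": 404, "Gone": 410}

	// Preferred: omit the unused second variable instead of writing
	// `for name, _ := range codes`, which linters flag as redundant.
	for name := range codes {
		fmt.Println(name)
	}

	// When the value is needed, bind both key and value.
	for name, status := range codes {
		fmt.Println(name, status)
	}
}
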
@@ -5,7 +5,7 @@ import (
 )

 func newMultiError(errs []error) error {
-	message := "Multiple errors occured:\n"
+	message := "Multiple errors occurred:\n"
 	for _, err := range errs {
 		message += "\t" + err.Error() + "\n"
 	}
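A small usage illustration of the message format built here. newMultiError is unexported, so this would sit in the same package; the example error strings are made up, and only the message layout is taken from the hunk and from the test further below:

errs := []error{
	errors.New("copy part 1 failed"),
	errors.New("copy part 2 failed"),
}
err := newMultiError(errs)
fmt.Print(err.Error())
// Expected output, given the format built above:
// Multiple errors occurred:
//	copy part 1 failed
//	copy part 2 failed
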
@@ -272,7 +272,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
 	uploadId, multipartId := splitIds(id)

-	// Get file info stored in seperate object
+	// Get file info stored in separate object
 	res, err := store.Service.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(store.Bucket),
		Key:    aws.String(uploadId + ".info"),

@@ -335,7 +335,7 @@ func (store S3Store) GetReader(id string) (io.Reader, error) {
 		Key:    aws.String(uploadId),
 	})
 	if err == nil {
-		// No error occured, and we are able to stream the object
+		// No error occurred, and we are able to stream the object
 		return res.Body, nil
 	}

@@ -538,7 +538,7 @@ func TestTerminateWithErrors(t *testing.T) {
 	}, nil)

 	err := store.Terminate("uploadId+multipartId")
-	assert.Equal("Multiple errors occured:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
+	assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
 }

 func TestConcatUploads(t *testing.T) {

@@ -15,7 +15,7 @@ func Uid() string {
 	id := make([]byte, 16)
 	_, err := io.ReadFull(rand.Reader, id)
 	if err != nil {
-		// This is probably an appropiate way to handle errors from our source
+		// This is probably an appropriate way to handle errors from our source
 		// for random bits.
 		panic(err)
 	}

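For completeness, a sketch of a Uid-style helper following the approach shown in this hunk. The final hex encoding is an assumption, since that step is not part of the diff:

package example

import (
	"crypto/rand"
	"encoding/hex"
	"io"
)

// uid returns a random 16-byte identifier as a hex string. Only the
// random-read and error path mirror the hunk above; the encoding is assumed.
func uid() string {
	id := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, id); err != nil {
		// If the OS randomness source fails there is no safe fallback,
		// so panicking is an appropriate way to handle the error.
		panic(err)
	}
	return hex.EncodeToString(id)
}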