refactor: add performance tracking and logging

Derrick Hammer 2024-02-28 12:13:26 -05:00
parent b030de9714
commit 5f0e2d2e15
Signed by: pcfreak30
GPG Key ID: C997C339BE476FF2
1 changed file with 16 additions and 1 deletion


@@ -9,6 +9,7 @@ import (
"math"
"net/http"
"sort"
"time"
"git.lumeweb.com/LumeWeb/portal/db/models"
@@ -356,6 +357,10 @@ func (s StorageServiceDefault) S3MultipartUpload(ctx context.Context, data io.Re
s3Upload.Bucket = bucket
s3Upload.Key = key
startTime := time.Now()
var totalUploadDuration time.Duration
var currentAverageDuration time.Duration
ret := s.db.Model(&s3Upload).First(&s3Upload)
if ret.Error != nil {
if !errors.Is(ret.Error, gorm.ErrRecordNotFound) {
@@ -415,6 +420,7 @@ func (s StorageServiceDefault) S3MultipartUpload(ctx context.Context, data io.Re
if partNum <= int(lastPartNumber) {
continue
}
partStartTime := time.Now()
uploadPartOutput, err := client.UploadPart(ctx, &s3.UploadPartInput{
Bucket: aws.String(bucket),
@@ -441,7 +447,13 @@ func (s StorageServiceDefault) S3MultipartUpload(ctx context.Context, data io.Re
PartNumber: aws.Int32(int32(partNum)),
})
s.logger.Debug("Completed part", zap.Int("partNum", partNum), zap.Int("totalParts", totalParts), zap.Uint64("partSize", partSize), zap.Int("readSize", readSize), zap.Int("size", int(size)), zap.Int("totalParts", totalParts), zap.Int("partNum", partNum), zap.String("key", key), zap.String("bucket", bucket))
partDuration := time.Since(partStartTime)
totalUploadDuration += partDuration
currentAverageDuration = totalUploadDuration / time.Duration(partNum)
s.logger.Debug("Completed part", zap.Int("partNum", partNum), zap.Int("totalParts", totalParts), zap.Uint64("partSize", partSize), zap.Int("readSize", readSize), zap.Int("size", int(size)), zap.Int("totalParts", totalParts), zap.Int("partNum", partNum), zap.String("key", key), zap.String("bucket", bucket), zap.Duration("durationMs", partDuration),
zap.Duration("currentAverageDurationMs", currentAverageDuration))
}
// Ensure parts are ordered by part number before completing the upload
@@ -465,6 +477,9 @@ func (s StorageServiceDefault) S3MultipartUpload(ctx context.Context, data io.Re
return tx.Error
}
endTime := time.Now()
s.logger.Debug("S3 multipart upload complete", zap.String("key", key), zap.String("bucket", bucket), zap.Duration("duration", endTime.Sub(startTime)))
return nil
}
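For reference, a minimal, self-contained sketch of the timing pattern this commit introduces, with the S3 call replaced by a hypothetical uploadPart placeholder; only the duration bookkeeping mirrors the change above, everything else is illustrative:

package main

import (
	"fmt"
	"time"
)

// uploadPart is a hypothetical stand-in for client.UploadPart; it only
// simulates the time a real part upload would take.
func uploadPart(partNum int) {
	time.Sleep(10 * time.Millisecond)
}

func main() {
	const totalParts = 5

	startTime := time.Now()
	var totalUploadDuration time.Duration
	var currentAverageDuration time.Duration

	for partNum := 1; partNum <= totalParts; partNum++ {
		partStartTime := time.Now()
		uploadPart(partNum)

		// Accumulate the elapsed time and keep a running average over the
		// parts seen so far, as the commit does after each UploadPart call.
		partDuration := time.Since(partStartTime)
		totalUploadDuration += partDuration
		currentAverageDuration = totalUploadDuration / time.Duration(partNum)

		fmt.Printf("part %d took %s (running average %s)\n",
			partNum, partDuration, currentAverageDuration)
	}

	fmt.Printf("upload complete in %s\n", time.Since(startTime))
}

One thing worth noting about the change itself: the running average divides the accumulated time by partNum, so when an upload resumes and earlier parts are skipped via the continue on partNum <= lastPartNumber, those parts contribute nothing to totalUploadDuration but still inflate the divisor, diluting the reported average for the first few parts after a restart.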