store only chunk, record file

celogeek 2022-03-04 16:59:26 +01:00
parent 8a8a6e50b4
commit 5621e59e42
Signed by: celogeek
GPG Key ID: E6B7BDCFC446233A
8 changed files with 133 additions and 121 deletions
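Note on the change: the upload flow moves from per-file temporary chunks (POST /file/tmp/:origsum/:sum followed by an on-disk CommitTemp) to content-addressed chunks (POST /file/chunk) plus a final POST /file that records the file and its ordered chunk list in the database. A minimal client-side sketch of the new flow follows; it is an illustration only — the base URL, the input filename and the panic-style error handling are assumptions, and the session authentication required by the routes is omitted.

    package main

    import (
        "bytes"
        "crypto/sha1"
        "encoding/hex"
        "encoding/json"
        "fmt"
        "net/http"
        "os"
    )

    const chunkSize = 1 << 20 // matches CHUNK_SIZE in the API

    func main() {
        data, err := os.ReadFile("photo.jpg") // hypothetical input file
        if err != nil {
            panic(err)
        }

        full := sha1.Sum(data) // whole-file checksum the server recomputes via Combine
        var chunks []string

        // 1. Upload every chunk (at most 1 MB) as a raw body to POST /file/chunk.
        for off := 0; off < len(data); off += chunkSize {
            end := off + chunkSize
            if end > len(data) {
                end = len(data)
            }
            part := data[off:end]
            sum := sha1.Sum(part)
            chunks = append(chunks, hex.EncodeToString(sum[:]))

            resp, err := http.Post("http://localhost:8080/file/chunk", // base URL assumed
                "application/octet-stream", bytes.NewReader(part))
            if err != nil {
                panic(err)
            }
            resp.Body.Close()
        }

        // 2. Register the file: name, whole-file checksum, ordered chunk checksums.
        body, _ := json.Marshal(map[string]interface{}{
            "name":     "photo.jpg",
            "checksum": hex.EncodeToString(full[:]),
            "chunks":   chunks,
        })
        resp, err := http.Post("http://localhost:8080/file/", "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("register:", resp.Status)
    }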

View File

@@ -16,6 +16,7 @@ func (s *Service) Migrate() {
     tx.AutoMigrate(&models.Account{})
     tx.AutoMigrate(&models.Session{})
     tx.AutoMigrate(&models.File{})
+    tx.AutoMigrate(&models.FileChunk{})
 }
 
 func (s *Service) DBConfig() {

View File

@@ -5,8 +5,15 @@ import (
 )
 
 func (s *Service) Error(c *gin.Context, code int, err error) {
+    var status string
+    if code >= 200 && code < 400 {
+        status = "success"
+    } else {
+        status = "failed"
+    }
     c.AbortWithStatusJSON(code, gin.H{
-        "status": "failed",
+        "status": status,
         "error":  err.Error(),
     })
 }

View File

@@ -2,20 +2,23 @@ package api
 
 import (
     "bytes"
-    "fmt"
+    "errors"
     "io"
     "net/http"
-    "path/filepath"
+    "strings"
 
     "github.com/gin-gonic/gin"
+    "gitlab.celogeek.com/photos/api/internal/photos/models"
     "gitlab.celogeek.com/photos/api/internal/photoserrors"
+    "gorm.io/gorm"
 )
 
 var CHUNK_SIZE int64 = 1 << 20
 
 type File struct {
-    Sum    string   `json:"sum"`
-    Chunks []string `json:"chunks"`
+    Name     string   `json:"name"`
+    Checksum string   `json:"checksum"`
+    Chunks   []string `json:"chunks"`
 }
 
 func (s *Service) FileCreate(c *gin.Context) {
@@ -25,7 +28,12 @@ func (s *Service) FileCreate(c *gin.Context) {
         return
     }
-    if len(file.Sum) != 40 {
+    if len(file.Name) < 1 {
+        s.Error(c, http.StatusBadRequest, photoserrors.ErrStoreMissingName)
+        return
+    }
+    if len(file.Checksum) != 40 {
         s.Error(c, http.StatusBadRequest, photoserrors.ErrStoreBadChecksum)
         return
     }
@@ -41,48 +49,61 @@ func (s *Service) FileCreate(c *gin.Context)
         }
     }
 
-    r, rs, err := s.Store.CombineTemp(file.Sum, file.Chunks)
+    r, rs, err := s.Store.Combine(file.Chunks)
     if err != nil {
-        s.Error(c, http.StatusInternalServerError, err)
+        if strings.HasPrefix(err.Error(), "chunk") && strings.HasSuffix(err.Error(), "doesn't exists") {
+            s.Error(c, http.StatusBadRequest, err)
+        } else {
+            s.Error(c, http.StatusInternalServerError, err)
+        }
         return
     }
-    if r != file.Sum {
-        fmt.Printf("R=%s, O=%s\n", r, file.Sum)
+    if r != file.Checksum {
         s.Error(c, http.StatusExpectationFailed, photoserrors.ErrStoreMismatchChecksum)
         return
     }
-    if err = s.Store.CommitTemp(file.Sum, file.Chunks); err != nil {
+    sess := s.CurrentSession(c)
+    f := &models.File{
+        Name:     file.Name,
+        Checksum: file.Checksum,
+        Size:     rs,
+        AuthorId: &sess.AccountId,
+    }
+
+    err = s.DB.Transaction(func(tx *gorm.DB) error {
+        if err := tx.Create(f).Error; err != nil {
+            return err
+        }
+        for i, chunk := range file.Chunks {
+            fc := &models.FileChunk{
+                FileId:   f.ID,
+                Part:     uint32(i + 1),
+                Checksum: chunk,
+            }
+            if err := tx.Create(fc).Error; err != nil {
+                return err
+            }
+        }
+        return nil
+    })
+    if err != nil {
         s.Error(c, http.StatusInternalServerError, err)
         return
     }
 
     c.JSON(http.StatusOK, gin.H{
         "status":   "success",
-        "sum":      file.Sum,
+        "sum":      file.Checksum,
         "nbChunks": len(file.Chunks),
         "size":     rs,
     })
 }
 
-func (s *Service) FileCreateTemp(c *gin.Context) {
-    var (
-        origsum = c.Param("origsum")
-        sumb    = c.Param("sum")
-    )
-    if len(origsum) != 40 || len(sumb) != 40 {
-        s.Error(c, http.StatusBadRequest, photoserrors.ErrStoreBadChecksum)
-        return
-    }
-
-    originaldir := s.Store.Dir("original", origsum)
-    originalname := filepath.Join(originaldir, origsum)
-    if s.Store.FileExists(originalname) {
-        s.Error(c, http.StatusBadRequest, photoserrors.ErrStoreAlreadyExists)
-        return
-    }
-
+func (s *Service) FileCreateChunk(c *gin.Context) {
     if c.Request.ContentLength > CHUNK_SIZE {
         s.Error(c, http.StatusBadRequest, photoserrors.ErrStoreBadChunkSize)
         return
@@ -92,8 +113,12 @@ func (s *Service) FileCreateTemp(c *gin.Context) {
     io.Copy(b, c.Request.Body)
     c.Request.Body.Close()
 
-    if err := s.Store.SaveTemp(origsum, sumb, b.Bytes()); err != nil {
-        s.Error(c, http.StatusBadRequest, err)
+    if err := s.Store.NewChunk(b.Bytes()).Save(); err != nil {
+        if errors.Is(err, photoserrors.ErrStoreChunkAlreadyExists) {
+            s.Error(c, http.StatusOK, err)
+        } else {
+            s.Error(c, http.StatusBadRequest, err)
+        }
         return
     }

View File

@@ -60,7 +60,7 @@ func (s *Service) SetupRoutes() {
     album := s.Gin.Group("/file")
     album.Use(s.RequireSession)
     album.POST("/", s.FileCreate)
-    album.POST("/tmp/:origsum/:sum", s.FileCreateTemp)
+    album.POST("/chunk", s.FileCreateChunk)
 
     s.Gin.NoRoute(func(c *gin.Context) {
         s.Error(c, http.StatusNotFound, photoserrors.ErrReqNotFound)
@@ -75,10 +75,8 @@ func (s *Service) PrepareStore() {
     if !d.IsDir() {
         s.LogErr.Fatal("Store", photoserrors.ErrStorePathNotADirectory)
     }
-    if err := s.Store.MkDirs([]string{"tmp", "original"}); err != nil {
-        s.LogErr.Fatal("Store", err)
-    }
 }
 
 func (s *Service) Run() error {
     rand.Seed(time.Now().UnixNano())
     s.PrepareStore()

View File

@@ -6,6 +6,7 @@ type File struct {
     ID        uint32    `gorm:"primary_key" json:"id"`
     Name      string    `gorm:"not null" json:"name"`
     Checksum  string    `gorm:"unique;size:44;not null"`
+    Size      uint64    `gorm:"not null"`
     Author    *Account  `gorm:"constraint:OnDelete:SET NULL,OnUpdate:CASCADE" json:"author"`
     AuthorId  *uint32   `json:"-"`
     CreatedAt time.Time `json:"created_at"`

View File

@@ -0,0 +1,12 @@
+package models
+
+import "time"
+
+type FileChunk struct {
+    FileId    uint32
+    File      *File     `gorm:"constraint:OnDelete:CASCADE,OnUpdate:CASCADE"`
+    Part      uint32
+    Checksum  string    `gorm:"unique;size:44;not null"`
+    CreatedAt time.Time `json:"created_at"`
+    UpdatedAt time.Time `json:"updated_at"`
+}
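A hedged sketch of how the new FileChunk model could be consumed (not part of this commit): a hypothetical helper, assumed to live in the models package above, that returns a file's chunk checksums ordered by part — the list Store.Combine expects.

    package models

    import "gorm.io/gorm"

    // ChunkChecksums is a hypothetical helper (not in this commit): it loads
    // the chunks recorded for a file, ordered by part, and returns their
    // checksums so the store can reassemble or verify the file.
    func ChunkChecksums(db *gorm.DB, fileID uint32) ([]string, error) {
        var chunks []FileChunk
        if err := db.Where("file_id = ?", fileID).Order("part").Find(&chunks).Error; err != nil {
            return nil, err
        }
        sums := make([]string, 0, len(chunks))
        for _, c := range chunks {
            sums = append(sums, c.Checksum)
        }
        return sums, nil
    }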

View File

@@ -4,11 +4,13 @@ import "errors"
 var (
     // Store
     ErrStorePathNotADirectory  = errors.New("store path is not a directory")
     ErrStoreBadChecksum        = errors.New("checksum should be sha1 in hex format")
     ErrStoreBadChunkSize       = errors.New("part file size should be 1MB max")
     ErrStoreMissingChunks      = errors.New("part checksum missing")
     ErrStoreWrongChecksum      = errors.New("wrong checksum")
     ErrStoreMismatchChecksum   = errors.New("part files doesn't match the original checksum")
     ErrStoreAlreadyExists      = errors.New("original file already exists")
+    ErrStoreChunkAlreadyExists = errors.New("chunk file already exists")
+    ErrStoreMissingName        = errors.New("name required")
 )

View File

@@ -15,115 +15,81 @@ type Store struct {
     Path string
 }
 
-func (s *Store) Dir(path, sum string) string {
-    return filepath.Join(s.Path, path, sum[0:1], sum[1:2])
+type Chunk struct {
+    *Store
+    Sum   string
+    Bytes []byte
 }
 
-func (s *Store) MkDirs(dirs []string) error {
-    for _, dir := range dirs {
-        if err := os.MkdirAll(filepath.Join(s.Path, dir), 0755); err != nil {
-            return err
-        }
-    }
-    return nil
+func (s *Store) NewChunk(b []byte) *Chunk {
+    sum := sha1.New()
+    sum.Write(b)
+    sumString := hex.EncodeToString(sum.Sum(nil))
+
+    return &Chunk{s, sumString, b}
+}
+
+func (s *Store) LoadChunk(sum string) (*Chunk, error) {
+    c := &Chunk{s, sum, nil}
+    if !c.FileExists() {
+        return nil, fmt.Errorf("chunk %s doesn't exists", sum)
+    }
+    b, err := os.ReadFile(c.Filename())
+    if err != nil {
+        return nil, err
+    }
+    c.Bytes = b
+    return c, nil
 }
 
-func (s *Store) FileExists(filename string) bool {
-    fs, err := os.Stat(filename)
+func (c *Chunk) Dir() string {
+    return filepath.Join(c.Path, "storage", c.Sum[0:1], c.Sum[1:2])
+}
+
+func (c *Chunk) Filename() string {
+    return filepath.Join(c.Dir(), c.Sum)
+}
+
+func (c *Chunk) FileExists() bool {
+    fs, err := os.Stat(c.Filename())
     if errors.Is(err, os.ErrNotExist) {
         return false
     }
     return !fs.IsDir()
 }
 
-func (s *Store) SaveTemp(path string, sumb string, b []byte) error {
-    sum := sha1.New()
-    sum.Write(b)
-    sumString := hex.EncodeToString(sum.Sum(nil))
-    if sumb != sumString {
-        return photoserrors.ErrStoreWrongChecksum
-    }
-    tmpdir := filepath.Join("tmp", path)
-    dir := s.Dir(tmpdir, sumString)
-    filename := filepath.Join(dir, sumString)
-    if s.FileExists(filename) {
-        return nil
-    }
-    if err := os.MkdirAll(dir, 0755); err != nil {
+func (c *Chunk) Mkdir() error {
+    return os.MkdirAll(c.Dir(), 0755)
+}
+
+func (c *Chunk) Save() error {
+    if c.FileExists() {
+        return photoserrors.ErrStoreChunkAlreadyExists
+    }
+    if err := c.Mkdir(); err != nil {
         return err
     }
-    fs, err := os.Create(filename)
+    fs, err := os.Create(c.Filename())
     if err != nil {
         return err
     }
     defer fs.Close()
-    _, err = fs.Write(b)
+    _, err = fs.Write(c.Bytes)
     return err
 }
 
-func (s *Store) CombineTemp(path string, sumb []string) (string, uint64, error) {
-    tmpdir := filepath.Join("tmp", path)
+func (s *Store) Combine(sumb []string) (string, uint64, error) {
     sum := sha1.New()
     size := uint64(0)
     for _, sb := range sumb {
-        dir := s.Dir(tmpdir, sb)
-        filename := filepath.Join(dir, sb)
-        if _, err := os.Stat(filename); errors.Is(err, os.ErrNotExist) {
-            return "", 0, fmt.Errorf("%s: chunk %s doesn't exists", path, sb)
-        }
-        b, err := os.ReadFile(filename)
+        c, err := s.LoadChunk(sb)
         if err != nil {
             return "", 0, err
         }
-        sum.Write(b)
-        size += uint64(len(b))
+        sum.Write(c.Bytes)
+        size += uint64(len(c.Bytes))
     }
     return hex.EncodeToString(sum.Sum(nil)), size, nil
 }
-
-func (s *Store) CommitTemp(path string, sumb []string) error {
-    tmpdir := filepath.Join("tmp", path)
-    originaldir := s.Dir("original", path)
-    originalname := filepath.Join(originaldir, path)
-    if s.FileExists(originalname) {
-        return photoserrors.ErrStoreAlreadyExists
-    }
-    os.MkdirAll(originaldir, 0755)
-    fs, err := os.Create(originalname)
-    if err != nil {
-        return err
-    }
-    defer fs.Close()
-    for _, sb := range sumb {
-        dir := s.Dir(tmpdir, sb)
-        filename := filepath.Join(dir, sb)
-        if _, err := os.Stat(filename); errors.Is(err, os.ErrNotExist) {
-            return fmt.Errorf("%s: chunk %s doesn't exists", path, sb)
-        }
-        b, err := os.ReadFile(filename)
-        if err != nil {
-            return err
-        }
-        if _, err := fs.Write(b); err != nil {
-            return err
-        }
-    }
-    for _, sb := range sumb {
-        dir := s.Dir(tmpdir, sb)
-        filename := filepath.Join(dir, sb)
-        if err := os.Remove(filename); err != nil {
-            return err
-        }
-    }
-    return os.RemoveAll(filepath.Join(s.Path, tmpdir))
-}
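For reference, a usage sketch of the new chunk API (not part of this commit). It is assumed to sit in a new file of the same package as Store, so it can rely on the errors and photoserrors imports the store file already uses; error handling is trimmed to the minimum.

    // exampleCombine is a hypothetical helper: it saves raw parts as
    // content-addressed chunks, then re-reads them and returns the SHA-1 of
    // the concatenated bytes plus the total size, mirroring what FileCreate does.
    func exampleCombine(s *Store, parts [][]byte) (string, uint64, error) {
        sums := make([]string, 0, len(parts))
        for _, part := range parts {
            c := s.NewChunk(part) // hashes the bytes; nothing is written yet
            if err := c.Save(); err != nil && !errors.Is(err, photoserrors.ErrStoreChunkAlreadyExists) {
                return "", 0, err
            }
            sums = append(sums, c.Sum)
        }
        // Combine reloads every chunk from disk under storage/<s[0:1]>/<s[1:2]>/.
        return s.Combine(sums)
    }

Because chunks are addressed by their own SHA-1, re-saving an identical chunk surfaces as ErrStoreChunkAlreadyExists, which FileCreateChunk above deliberately answers with HTTP 200.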