2023-03-12 18:00:57 +03:00
|
|
|
// GoToSocial
|
|
|
|
// Copyright (C) GoToSocial Authors admin@gotosocial.org
|
|
|
|
// SPDX-License-Identifier: AGPL-3.0-or-later
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2022-01-08 19:17:01 +03:00
|
|
|
|
2022-01-02 17:00:53 +03:00
|
|
|
package media
|
|
|
|
|
2022-01-03 19:37:38 +03:00
|
|
|
import (
|
2022-01-16 20:52:55 +03:00
|
|
|
"bytes"
|
2024-06-26 18:01:16 +03:00
|
|
|
"cmp"
|
2022-01-04 19:37:54 +03:00
|
|
|
"context"
|
2023-01-11 14:13:13 +03:00
|
|
|
"image/jpeg"
|
2022-01-16 20:52:55 +03:00
|
|
|
"io"
|
2022-01-08 19:17:01 +03:00
|
|
|
"time"
|
2022-01-03 19:37:38 +03:00
|
|
|
|
2023-11-10 21:29:26 +03:00
|
|
|
errorsv2 "codeberg.org/gruf/go-errors/v2"
|
2023-02-13 21:40:48 +03:00
|
|
|
"codeberg.org/gruf/go-runners"
|
2024-04-26 15:50:46 +03:00
|
|
|
terminator "codeberg.org/superseriousbusiness/exif-terminator"
|
2023-01-11 14:13:13 +03:00
|
|
|
"github.com/disintegration/imaging"
|
|
|
|
"github.com/h2non/filetype"
|
2024-06-26 18:01:16 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
|
2023-06-22 22:46:36 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
|
2022-01-03 19:37:38 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
2022-07-19 11:47:55 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/log"
|
2023-11-10 21:29:26 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
2022-01-11 19:49:14 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/uris"
|
2023-11-10 21:29:26 +03:00
|
|
|
"github.com/superseriousbusiness/gotosocial/internal/util"
|
2022-01-03 19:37:38 +03:00
|
|
|
)
|
2022-01-02 17:00:53 +03:00
|
|
|
|
2023-11-10 21:29:26 +03:00
|
|
|
// ProcessingMedia represents a piece of media
// currently being processed. It exposes functions
// for retrieving data from the process.
//
// Concurrency: all mutable fields (done, err, and the
// contents of media) are only read / written from inside
// p.proc.Process(), which guarantees at most one running
// processing instance. The only exception is media.ID,
// which is immutable and safe to read without the proc
// (see ID()).
type ProcessingMedia struct {
	media  *gtsmodel.MediaAttachment // processing media attachment details
	dataFn DataFunc                  // load-data function, returns media stream
	done   bool                      // done is set when process finishes with non ctx canceled type error
	proc   runners.Processor         // proc helps synchronize only a singular running processing instance
	err    error                     // error stores permanent error value when done
	mgr    *Manager                  // mgr instance (access to db / storage)
}
|
|
|
|
|
2024-06-26 18:01:16 +03:00
|
|
|
// ID returns the ID of the underlying media.
|
|
|
|
func (p *ProcessingMedia) ID() string {
|
2023-01-11 14:13:13 +03:00
|
|
|
return p.media.ID // immutable, safe outside mutex.
|
2022-01-11 19:49:14 +03:00
|
|
|
}
|
|
|
|
|
2023-11-10 21:29:26 +03:00
|
|
|
// LoadAttachment blocks until the thumbnail and
|
|
|
|
// fullsize content has been processed, and then
|
|
|
|
// returns the attachment.
|
|
|
|
//
|
|
|
|
// If processing could not be completed fully
|
|
|
|
// then an error will be returned. The attachment
|
|
|
|
// will still be returned in that case, but it will
|
|
|
|
// only be partially complete and should be treated
|
|
|
|
// as a placeholder.
|
2024-06-26 18:01:16 +03:00
|
|
|
func (p *ProcessingMedia) Load(ctx context.Context) (*gtsmodel.MediaAttachment, error) {
|
2023-02-13 21:40:48 +03:00
|
|
|
media, done, err := p.load(ctx)
|
|
|
|
if !done {
|
2024-06-26 18:01:16 +03:00
|
|
|
// On a context-canceled error (marked as !done), requeue for loading.
|
|
|
|
p.mgr.state.Workers.Dereference.Queue.Push(func(ctx context.Context) {
|
|
|
|
if _, _, err := p.load(ctx); err != nil {
|
|
|
|
log.Errorf(ctx, "error loading media: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2023-02-13 21:40:48 +03:00
|
|
|
}
|
2023-11-10 21:29:26 +03:00
|
|
|
return media, err
|
2023-02-13 21:40:48 +03:00
|
|
|
}
|
|
|
|
|
2024-06-26 18:01:16 +03:00
|
|
|
// load is the package private form of load() that is wrapped to catch context canceled.
|
|
|
|
func (p *ProcessingMedia) load(ctx context.Context) (
|
|
|
|
media *gtsmodel.MediaAttachment,
|
|
|
|
done bool,
|
|
|
|
err error,
|
|
|
|
) {
|
2023-02-13 21:40:48 +03:00
|
|
|
err = p.proc.Process(func() error {
|
2024-06-26 18:01:16 +03:00
|
|
|
if done = p.done; done {
|
2023-02-13 21:40:48 +03:00
|
|
|
// Already proc'd.
|
|
|
|
return p.err
|
|
|
|
}
|
2022-01-16 20:52:55 +03:00
|
|
|
|
2023-01-11 14:13:13 +03:00
|
|
|
defer func() {
|
2023-02-13 21:40:48 +03:00
|
|
|
// This is only done when ctx NOT cancelled.
|
2024-06-26 18:01:16 +03:00
|
|
|
done = (err == nil || !errorsv2.IsV2(err,
|
2023-02-13 21:40:48 +03:00
|
|
|
context.Canceled,
|
|
|
|
context.DeadlineExceeded,
|
2024-06-26 18:01:16 +03:00
|
|
|
))
|
2023-02-13 21:40:48 +03:00
|
|
|
|
|
|
|
if !done {
|
|
|
|
return
|
2023-01-11 14:13:13 +03:00
|
|
|
}
|
2022-01-11 19:49:14 +03:00
|
|
|
|
2024-06-26 18:01:16 +03:00
|
|
|
// Anything from here, we
|
|
|
|
// need to ensure happens
|
|
|
|
// (i.e. no ctx canceled).
|
|
|
|
ctx = gtscontext.WithValues(
|
|
|
|
context.Background(),
|
|
|
|
ctx, // values
|
|
|
|
)
|
|
|
|
|
|
|
|
// On error or unknown media types, perform error cleanup.
|
|
|
|
if err != nil || p.media.Type == gtsmodel.FileTypeUnknown {
|
|
|
|
p.cleanup(ctx)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update with latest details, whatever happened.
|
|
|
|
e := p.mgr.state.DB.UpdateAttachment(ctx, p.media)
|
|
|
|
if e != nil {
|
|
|
|
log.Errorf(ctx, "error updating media in db: %v", e)
|
|
|
|
}
|
|
|
|
|
2023-02-13 21:40:48 +03:00
|
|
|
// Store final values.
|
|
|
|
p.done = true
|
|
|
|
p.err = err
|
2023-01-11 14:13:13 +03:00
|
|
|
}()
|
|
|
|
|
2024-06-26 18:01:16 +03:00
|
|
|
// TODO: in time update this
|
|
|
|
// to perhaps follow a similar
|
|
|
|
// freshness window to statuses
|
|
|
|
// / accounts? But that's a big
|
|
|
|
// maybe, media don't change in
|
|
|
|
// the same way so this is largely
|
|
|
|
// just to slow down fail retries.
|
|
|
|
const maxfreq = 6 * time.Hour
|
|
|
|
|
|
|
|
// Check whether media is uncached but repeatedly failing,
|
|
|
|
// specifically limit the frequency at which we allow this.
|
|
|
|
if !p.media.UpdatedAt.Equal(p.media.CreatedAt) && // i.e. not new
|
|
|
|
p.media.UpdatedAt.Add(maxfreq).Before(time.Now()) {
|
|
|
|
return nil
|
|
|
|
}
|
2023-11-10 21:29:26 +03:00
|
|
|
|
2023-01-11 14:13:13 +03:00
|
|
|
// Attempt to store media and calculate
|
|
|
|
// full-size media attachment details.
|
2023-11-10 21:29:26 +03:00
|
|
|
//
|
|
|
|
// This will update p.media as it goes.
|
2024-06-26 18:01:16 +03:00
|
|
|
if err = p.store(ctx); err != nil {
|
|
|
|
return err
|
2023-01-11 14:13:13 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Finish processing by reloading media into
|
|
|
|
// memory to get dimension and generate a thumb.
|
2023-11-10 21:29:26 +03:00
|
|
|
//
|
|
|
|
// This will update p.media as it goes.
|
2024-06-26 18:01:16 +03:00
|
|
|
if err = p.finish(ctx); err != nil {
|
|
|
|
return err //nolint:revive
|
2023-01-11 14:13:13 +03:00
|
|
|
}
|
2022-01-11 19:49:14 +03:00
|
|
|
|
2024-06-26 18:01:16 +03:00
|
|
|
return nil
|
2023-01-11 14:13:13 +03:00
|
|
|
})
|
2024-06-26 18:01:16 +03:00
|
|
|
media = p.media
|
|
|
|
return
|
2022-01-11 19:49:14 +03:00
|
|
|
}
|
|
|
|
|
2023-01-11 14:13:13 +03:00
|
|
|
// store calls the data function attached to p if it hasn't been called yet,
// and updates the underlying attachment fields as necessary. It will then stream
// bytes from p's reader directly into storage so that it can be retrieved later.
//
// For unsupported file extensions nothing is written to storage and the
// attachment is left uncached, so viewers click through to the remote copy.
func (p *ProcessingMedia) store(ctx context.Context) error {
	// Load media from provided data function.
	rc, sz, err := p.dataFn(ctx)
	if err != nil {
		return gtserror.Newf("error executing data function: %w", err)
	}

	defer func() {
		// Ensure data reader gets closed on return.
		if err := rc.Close(); err != nil {
			log.Errorf(ctx, "error closing data reader: %v", err)
		}
	}()

	// Assume we're given correct file
	// size, we can overwrite this later
	// once we know THE TRUTH.
	fileSize := int(sz)
	p.media.File.FileSize = fileSize

	// Prepare to read bytes from
	// file header or magic number.
	hdrBuf := newHdrBuf(fileSize)

	// Read into buffer as much as possible.
	//
	// UnexpectedEOF means we couldn't read up to the
	// given size, but we may still have read something.
	//
	// EOF means we couldn't read anything at all.
	//
	// Any other error likely means the connection messed up.
	//
	// In other words, rather counterintuitively, we
	// can only proceed on no error or unexpected error!
	n, err := io.ReadFull(rc, hdrBuf)
	if err != nil {
		if err != io.ErrUnexpectedEOF {
			return gtserror.Newf("error reading first bytes of incoming media: %w", err)
		}

		// Initial file size was misreported, so we didn't read
		// fully into hdrBuf. Reslice it to the size we did read.
		hdrBuf = hdrBuf[:n]
		fileSize = n
		p.media.File.FileSize = fileSize
	}

	// Parse file type info from header buffer.
	// This should only ever error if the buffer
	// is empty (ie., the attachment is 0 bytes).
	info, err := filetype.Match(hdrBuf)
	if err != nil {
		return gtserror.Newf("error parsing file type: %w", err)
	}

	// Recombine header bytes with remaining stream,
	// so the full original content gets written out.
	r := io.MultiReader(bytes.NewReader(hdrBuf), rc)

	// Assume we'll put
	// this file in storage.
	store := true

	switch info.Extension {
	case "mp4":
		// No problem.

	case "gif":
		// No problem

	case "jpg", "jpeg", "png", "webp":
		if fileSize > 0 {
			// A file size was provided so we can clean
			// exif data from image as we're streaming it.
			r, err = terminator.Terminate(r, fileSize, info.Extension)
			if err != nil {
				return gtserror.Newf("error cleaning exif data: %w", err)
			}
		}

	default:
		// The file is not a supported format that we can process, so we can't do much with it.
		log.Warnf(ctx, "unsupported media extension '%s'; not caching locally", info.Extension)
		store = false
	}

	// Fill in correct attachment
	// data now we've parsed it.
	p.media.URL = uris.URIForAttachment(
		p.media.AccountID,
		string(TypeAttachment),
		string(SizeOriginal),
		p.media.ID,
		info.Extension,
	)

	// Prefer discovered MIME, fallback to generic data stream.
	mime := cmp.Or(info.MIME.Value, "application/octet-stream")
	p.media.File.ContentType = mime

	// Calculate final media attachment file path.
	p.media.File.Path = uris.StoragePathForAttachment(
		p.media.AccountID,
		string(TypeAttachment),
		string(SizeOriginal),
		p.media.ID,
		info.Extension,
	)

	// We should only try to store the file if it's
	// a format we can keep processing, otherwise be
	// a bit cheeky: don't store it and let users
	// click through to the remote server instead.
	if !store {
		return nil
	}

	// File shouldn't already exist in storage at this point,
	// but we do a check as it's worth logging / cleaning up.
	if have, _ := p.mgr.state.Storage.Has(ctx, p.media.File.Path); have {
		log.Warnf(ctx, "media already exists at: %s", p.media.File.Path)

		// Attempt to remove existing media at storage path (might be broken / out-of-date)
		if err := p.mgr.state.Storage.Delete(ctx, p.media.File.Path); err != nil {
			return gtserror.Newf("error removing media %s from storage: %v", p.media.File.Path, err)
		}
	}

	// Write the final reader stream to our storage driver.
	sz, err = p.mgr.state.Storage.PutStream(ctx, p.media.File.Path, r)
	if err != nil {
		return gtserror.Newf("error writing media to storage: %w", err)
	}

	// Set actual written size
	// as authoritative file size.
	p.media.File.FileSize = int(sz)

	// We can now consider this cached.
	p.media.Cached = util.Ptr(true)

	return nil
}
|
2023-01-11 14:13:13 +03:00
|
|
|
|
2023-11-10 21:29:26 +03:00
|
|
|
// finish decodes the now-stored media back into memory to determine
// its dimensions (and, for video, duration / framerate / bitrate),
// generates and stores a JPEG thumbnail (plus blurhash if not already
// set), and finally marks the attachment as processed. It is a no-op
// for media that was not cached by store().
func (p *ProcessingMedia) finish(ctx context.Context) error {
	// Nothing else to do if
	// media was not cached.
	if !*p.media.Cached {
		return nil
	}

	// Get a stream to the original file for further processing.
	rc, err := p.mgr.state.Storage.GetStream(ctx, p.media.File.Path)
	if err != nil {
		return gtserror.Newf("error loading file from storage: %w", err)
	}
	defer rc.Close()

	// fullImg is the processed version of
	// the original (stripped + reoriented).
	var fullImg *gtsImage

	// Depending on the content type, we
	// can do various types of decoding.
	//
	// NOTE(review): a content type not matched below leaves fullImg
	// nil, which would panic at the dimension reads further down;
	// presumably store() guarantees only these types reach here
	// cached — confirm against store()'s extension switch.
	switch p.media.File.ContentType {

	// .jpeg, .gif, .webp image type
	case mimeImageJpeg, mimeImageGif, mimeImageWebp:
		fullImg, err = decodeImage(rc,
			imaging.AutoOrientation(true),
		)
		if err != nil {
			return gtserror.Newf("error decoding image: %w", err)
		}

		// Mark as no longer unknown type now
		// we know for sure we can decode it.
		p.media.Type = gtsmodel.FileTypeImage

	// .png image (requires ancillary chunk stripping)
	case mimeImagePng:
		fullImg, err = decodeImage(
			&pngAncillaryChunkStripper{Reader: rc},
			imaging.AutoOrientation(true),
		)
		if err != nil {
			return gtserror.Newf("error decoding image: %w", err)
		}

		// Mark as no longer unknown type now
		// we know for sure we can decode it.
		p.media.Type = gtsmodel.FileTypeImage

	// .mp4 video type
	case mimeVideoMp4:
		video, err := decodeVideoFrame(rc)
		if err != nil {
			return gtserror.Newf("error decoding video: %w", err)
		}

		// Set video frame as image.
		fullImg = video.frame

		// Set video metadata in attachment info.
		p.media.FileMeta.Original.Duration = &video.duration
		p.media.FileMeta.Original.Framerate = &video.framerate
		p.media.FileMeta.Original.Bitrate = &video.bitrate

		// Mark as no longer unknown type now
		// we know for sure we can decode it.
		p.media.Type = gtsmodel.FileTypeVideo
	}

	// fullImg should be in-memory by
	// now so we're done with storage.
	if err := rc.Close(); err != nil {
		return gtserror.Newf("error closing file: %w", err)
	}

	// Set full-size dimensions in attachment info.
	p.media.FileMeta.Original.Width = fullImg.Width()
	p.media.FileMeta.Original.Height = fullImg.Height()
	p.media.FileMeta.Original.Size = fullImg.Size()
	p.media.FileMeta.Original.Aspect = fullImg.AspectRatio()

	// Get smaller thumbnail image
	thumbImg := fullImg.Thumbnail()

	// Garbage collector, you may
	// now take our large son.
	fullImg = nil

	// Only generate blurhash
	// from thumb if necessary.
	if p.media.Blurhash == "" {
		hash, err := thumbImg.Blurhash()
		if err != nil {
			return gtserror.Newf("error generating blurhash: %w", err)
		}

		// Set the attachment blurhash.
		p.media.Blurhash = hash
	}

	// Thumbnail shouldn't exist in storage at this point,
	// but we do a check as it's worth logging / cleaning up.
	if have, _ := p.mgr.state.Storage.Has(ctx, p.media.Thumbnail.Path); have {
		log.Warnf(ctx, "thumbnail already exists at: %s", p.media.Thumbnail.Path)

		// Attempt to remove existing thumbnail (might be broken / out-of-date).
		if err := p.mgr.state.Storage.Delete(ctx, p.media.Thumbnail.Path); err != nil {
			return gtserror.Newf("error removing thumbnail %s from storage: %v", p.media.Thumbnail.Path, err)
		}
	}

	// Create a thumbnail JPEG encoder stream.
	enc := thumbImg.ToJPEG(&jpeg.Options{

		// Good enough for
		// a thumbnail.
		Quality: 70,
	})

	// Stream-encode the JPEG thumbnail image into our storage driver.
	sz, err := p.mgr.state.Storage.PutStream(ctx, p.media.Thumbnail.Path, enc)
	if err != nil {
		return gtserror.Newf("error stream-encoding thumbnail to storage: %w", err)
	}

	// Set final written thumb size.
	p.media.Thumbnail.FileSize = int(sz)

	// Set thumbnail dimensions in attachment info.
	p.media.FileMeta.Small = gtsmodel.Small{
		Width:  thumbImg.Width(),
		Height: thumbImg.Height(),
		Size:   thumbImg.Size(),
		Aspect: thumbImg.AspectRatio(),
	}

	// Finally set the attachment as processed.
	p.media.Processing = gtsmodel.ProcessingStatusProcessed

	return nil
}
|
2024-06-26 18:01:16 +03:00
|
|
|
|
|
|
|
// cleanup will remove any traces of processing media from storage.
|
|
|
|
// and perform any other necessary cleanup steps after failure.
|
|
|
|
func (p *ProcessingMedia) cleanup(ctx context.Context) {
|
|
|
|
var err error
|
|
|
|
|
|
|
|
if p.media.File.Path != "" {
|
|
|
|
// Ensure media file at path is deleted from storage.
|
|
|
|
err = p.mgr.state.Storage.Delete(ctx, p.media.File.Path)
|
|
|
|
if err != nil && !storage.IsNotFound(err) {
|
|
|
|
log.Errorf(ctx, "error deleting %s: %v", p.media.File.Path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if p.media.Thumbnail.Path != "" {
|
|
|
|
// Ensure media thumbnail at path is deleted from storage.
|
|
|
|
err = p.mgr.state.Storage.Delete(ctx, p.media.Thumbnail.Path)
|
|
|
|
if err != nil && !storage.IsNotFound(err) {
|
|
|
|
log.Errorf(ctx, "error deleting %s: %v", p.media.Thumbnail.Path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Also ensure marked as unknown and finished
|
|
|
|
// processing so gets inserted as placeholder URL.
|
|
|
|
p.media.Processing = gtsmodel.ProcessingStatusProcessed
|
|
|
|
p.media.Type = gtsmodel.FileTypeUnknown
|
|
|
|
p.media.Cached = util.Ptr(false)
|
|
|
|
}
|