// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package bundb

import (
	"context"
	"errors"
	"fmt"
	"slices"
	"time"

	"github.com/superseriousbusiness/gotosocial/internal/db"
	"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/id"
	"github.com/superseriousbusiness/gotosocial/internal/state"
	"github.com/uptrace/bun"
)
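
// timelineDB provides the bun-backed implementations of the
// timeline queries (home, public, faved, list, and tag timelines),
// selecting status IDs with bun and loading the full statuses
// through the cache-aware state.DB methods.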
type timelineDB struct {
	db    *bun.DB
	state *state.State
}
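
// GetHomeTimeline returns a page of statuses for accountID's home
// timeline: posts authored by accounts that accountID follows
// (excluding follows in exclusive lists) plus accountID's own posts,
// paged by maxID / sinceID / minID and returned newest-first.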
func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	// As this is the home timeline, it should be
	// populated by statuses from accounts followed
	// by accountID, and posts from accountID itself.
	//
	// So, begin by seeing who accountID follows.
	// It should be a little cheaper to do this in
	// a separate query like this, rather than using
	// a join, since followIDs are cached in memory.
	follows, err := t.state.DB.GetAccountFollows(
		gtscontext.SetBarebones(ctx),
		accountID,
		nil, // select all
	)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return nil, gtserror.Newf("db error getting follows for account %s: %w", accountID, err)
	}

	// To take account of exclusive lists, get all of
	// this account's lists, so we can filter out follows
	// that are contained in exclusive lists.
	lists, err := t.state.DB.GetListsByAccountID(ctx, accountID)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return nil, gtserror.Newf("db error getting lists for account %s: %w", accountID, err)
	}

	// Index all follow IDs that fall in exclusive lists.
	ignoreFollowIDs := make(map[string]struct{})
	for _, list := range lists {
		if !*list.Exclusive {
			// Not exclusive,
			// we don't care.
			continue
		}

		// Fetch all follow IDs of the entries contained in this list.
		listFollowIDs, err := t.state.DB.GetFollowIDsInList(ctx, list.ID, nil)
		if err != nil && !errors.Is(err, db.ErrNoEntries) {
			return nil, gtserror.Newf("db error getting list entry follow ids: %w", err)
		}

		// Exclusive list, index all its follow IDs.
		for _, followID := range listFollowIDs {
			ignoreFollowIDs[followID] = struct{}{}
		}
	}

	// Extract just the target account ID from each follow,
	// ignoring follows that are in exclusive lists.
	targetAccountIDs := make([]string, 0, len(follows)+1)
	for _, f := range follows {
		_, ignore := ignoreFollowIDs[f.ID]
		if !ignore {
			targetAccountIDs = append(
				targetAccountIDs,
				f.TargetAccountID,
			)
		}
	}

	// Add accountID itself as a pseudo follow so that
	// accountID can see its own posts in the timeline.
	targetAccountIDs = append(targetAccountIDs, accountID)

	// Now start building the database query.
	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
		// Select only IDs from table
		Column("status.id")

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status.id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status.id"), minID)

		// page up
		frontToBack = false
	}

	if local {
		// return only statuses posted by local account havers
		q = q.Where("? = ?", bun.Ident("status.local"), local)
	}

	// Select only statuses authored by
	// accounts with IDs in the slice.
	q = q.Where(
		"? IN (?)",
		bun.Ident("status.account_id"),
		bun.In(targetAccountIDs),
	)

	// Only include statuses that aren't pending approval.
	q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status.id DESC")
	} else {
		// Page up.
		q = q.Order("status.id ASC")
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	// Return status IDs loaded from cache + db.
	return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}
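
// GetPublicTimeline returns a page of public, non-boost statuses
// for the public timeline, optionally restricted to local statuses,
// paged by maxID / sinceID / minID and returned newest-first.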
func (t *timelineDB) GetPublicTimeline(ctx context.Context, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
		// Public only.
		Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
		// Ignore boosts.
		Where("? IS NULL", bun.Ident("status.boost_of_id")).
		// Select only IDs from table
		Column("status.id")

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status.id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status.id"), minID)

		// page up
		frontToBack = false
	}

	if local {
		// return only statuses posted by local account havers
		q = q.Where("? = ?", bun.Ident("status.local"), local)
	}

	// Only include statuses that aren't pending approval.
	q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status.id DESC")
	} else {
		// Page up.
		q = q.Order("status.id ASC")
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	// Return status IDs loaded from cache + db.
	return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}
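
// GetFavedTimeline returns a page of statuses that accountID has
// favourited, sorted by favourite ID descending, along with the
// fave IDs to use as the next and previous paging parameters.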
// TODO optimize this query and the logic here, because it's slow as balls -- it takes like a literal second to return with a limit of 20!
// It might be worth serving it through a timeline instead of raw DB queries, like we do for Home feeds.
func (t *timelineDB) GetFavedTimeline(ctx context.Context, accountID string, maxID string, minID string, limit int) ([]*gtsmodel.Status, string, string, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	faves := make([]*gtsmodel.StatusFave, 0, limit)

	fq := t.db.
		NewSelect().
		Model(&faves).
		Where("? = ?", bun.Ident("status_fave.account_id"), accountID).
		Order("status_fave.id DESC")

	if maxID != "" {
		fq = fq.Where("? < ?", bun.Ident("status_fave.id"), maxID)
	}

	if minID != "" {
		fq = fq.Where("? > ?", bun.Ident("status_fave.id"), minID)
	}

	if limit > 0 {
		fq = fq.Limit(limit)
	}

	err := fq.Scan(ctx)
	if err != nil {
		return nil, "", "", err
	}

	if len(faves) == 0 {
		return nil, "", "", db.ErrNoEntries
	}

	// Sort by favourite ID rather than status ID
	slices.SortFunc(faves, func(a, b *gtsmodel.StatusFave) int {
		const k = -1
		switch {
		case a.ID > b.ID:
			return +k
		case a.ID < b.ID:
			return -k
		default:
			return 0
		}
	})

	// Convert the faves to their status IDs.
	statusIDs := make([]string, len(faves))
	for i, fave := range faves {
		statusIDs[i] = fave.StatusID
	}

	statuses, err := t.state.DB.GetStatusesByIDs(ctx, statusIDs)
	if err != nil {
		return nil, "", "", err
	}
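
	// Faves are sorted by fave ID descending (newest first), so the
	// last fave in the slice provides the maxID for the next (older)
	// page, and the first provides the minID for the previous (newer) page.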
	nextMaxID := faves[len(faves)-1].ID
	prevMinID := faves[0].ID
	return statuses, nextMaxID, prevMinID, nil
}
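
// GetListTimeline returns a page of statuses authored by accounts
// followed through the follows contained in the given list,
// paged by maxID / sinceID / minID and returned newest-first.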
func (t *timelineDB) GetListTimeline(
	ctx context.Context,
	listID string,
	maxID string,
	sinceID string,
	minID string,
	limit int,
) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	// Fetch all follow IDs contained in list from DB.
	followIDs, err := t.state.DB.GetFollowIDsInList(
		ctx, listID, nil,
	)
	if err != nil {
		return nil, fmt.Errorf("error getting follows in list: %w", err)
	}

	// If there are no list follows we can't
	// possibly return anything for this list.
	if len(followIDs) == 0 {
		return make([]*gtsmodel.Status, 0), nil
	}

	// Select target account IDs from follows.
	subQ := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("follows"), bun.Ident("follow")).
		Column("follow.target_account_id").
		Where("? IN (?)", bun.Ident("follow.id"), bun.In(followIDs))

	// Select only status IDs created
	// by one of the followed accounts.
	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
		// Select only IDs from table
		Column("status.id").
		Where("? IN (?)", bun.Ident("status.account_id"), subQ)

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status.id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status.id"), minID)

		// page up
		frontToBack = false
	}

	// Only include statuses that aren't pending approval.
	q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status.id DESC")
	} else {
		// Page up.
		q = q.Order("status.id ASC")
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	// Return status IDs loaded from cache + db.
	return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}
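
// GetTagTimeline returns a page of public statuses tagged with the
// given tagID, paged by maxID / sinceID / minID and returned newest-first.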
func (t *timelineDB) GetTagTimeline(
	ctx context.Context,
	tagID string,
	maxID string,
	sinceID string,
	minID string,
	limit int,
) ([]*gtsmodel.Status, error) {
	// Ensure reasonable
	if limit < 0 {
		limit = 0
	}

	// Make educated guess for slice size
	var (
		statusIDs   = make([]string, 0, limit)
		frontToBack = true
	)

	q := t.db.
		NewSelect().
		TableExpr("? AS ?", bun.Ident("status_to_tags"), bun.Ident("status_to_tag")).
		Column("status_to_tag.status_id").
		// Join with statuses for filtering.
		Join(
			"INNER JOIN ? AS ? ON ? = ?",
			bun.Ident("statuses"), bun.Ident("status"),
			bun.Ident("status.id"), bun.Ident("status_to_tag.status_id"),
		).
		// Public only.
		Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
		// This tag only.
		Where("? = ?", bun.Ident("status_to_tag.tag_id"), tagID)

	if maxID == "" || maxID >= id.Highest {
		const future = 24 * time.Hour

		var err error

		// don't return statuses more than 24hr in the future
		maxID, err = id.NewULIDFromTime(time.Now().Add(future))
		if err != nil {
			return nil, err
		}
	}

	// return only statuses LOWER (ie., older) than maxID
	q = q.Where("? < ?", bun.Ident("status_to_tag.status_id"), maxID)

	if sinceID != "" {
		// return only statuses HIGHER (ie., newer) than sinceID
		q = q.Where("? > ?", bun.Ident("status_to_tag.status_id"), sinceID)
	}

	if minID != "" {
		// return only statuses HIGHER (ie., newer) than minID
		q = q.Where("? > ?", bun.Ident("status_to_tag.status_id"), minID)

		// page up
		frontToBack = false
	}

	// Only include statuses that aren't pending approval.
	q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)

	if limit > 0 {
		// limit amount of statuses returned
		q = q.Limit(limit)
	}

	if frontToBack {
		// Page down.
		q = q.Order("status_to_tag.status_id DESC")
	} else {
		// Page up.
		q = q.Order("status_to_tag.status_id ASC")
	}

	if err := q.Scan(ctx, &statusIDs); err != nil {
		return nil, err
	}

	if len(statusIDs) == 0 {
		return nil, nil
	}

	// If we're paging up, we still want statuses
	// to be sorted by ID desc, so reverse ids slice.
	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
	if !frontToBack {
		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
		}
	}

	// Return status IDs loaded from cache + db.
	return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}