Mirror of https://codeberg.org/superseriousbusiness/gotosocial.git (synced 2024-12-24 09:58:17 +03:00)
00adf18c24
* add automatic cache max size generation based on ratios of a singular fixed memory target
* remove now-unused cache max-size config variables
* slight ratio tweak
* remove unused visibility config var
* add secret little ratio config trick
* fixed a word
* update cache library to remove use of TTL in result caches + slice cache
* update other cache usages to use correct interface
* update example config to explain the cache memory target
* update env parsing test with new config values
* do some ratio twiddling
* add missing header
* update envparsing with latest defaults
* update size calculations to take into account result cache, simple cache and extra map overheads
* tweak the ratios some more
* more nan rampaging
* fix envparsing script
* update cache library, add sweep function to keep caches trim
* sweep caches once a minute
* add a regular job to sweep caches and keep under 80% utilisation
* remove dead code
* add new size library used to libraries section of readme
* add better explanations for the mem-ratio numbers
* update go-cache
* library version bump
* update cache.result{} size model estimation

Signed-off-by: kim <grufwub@gmail.com>
142 lines
3.4 KiB
Go
// Package size implements run-time calculation of the size of a variable.
// Source code is based on the "binary.Size()" function from the Go standard library.
// size.Of() omits the size of the slice, array and map containers themselves (24, 24 and 8 bytes).
// When counting maps, separate calculations are done for keys and values.
package size

import (
	"reflect"
	"unsafe"
)

// Of returns the size of 'v' in bytes.
// If there is an error during calculation, Of returns -1.
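//
// A minimal usage sketch (the 'user' type below is purely illustrative and
// not part of this package):
//
//	type user struct {
//		Name  string
//		Likes []string
//	}
//	n := size.Of(user{Name: "kim", Likes: []string{"caching"}})
//	// n is the estimated number of bytes the value occupies, including
//	// the string bytes and the slice's backing array.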
func Of(v interface{}) int {
	// Cache with every visited pointer so we don't count two pointers
	// to the same memory twice.
	cache := make(map[uintptr]bool)
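	// reflect.Indirect dereferences a top-level pointer argument, so passing
	// either a value or a pointer to it measures the pointed-to data rather
	// than just the pointer word.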
	return sizeOf(reflect.Indirect(reflect.ValueOf(v)), cache)
}

// sizeOf returns the number of bytes the actual data represented by v occupies in memory.
// If there is an error, sizeOf returns -1.
func sizeOf(v reflect.Value, cache map[uintptr]bool) int {
	switch v.Kind() {

	case reflect.Array:
		sum := 0
		for i := 0; i < v.Len(); i++ {
			s := sizeOf(v.Index(i), cache)
			if s < 0 {
				return -1
			}
			sum += s
		}

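		// Unused capacity is counted at the element size; for arrays Cap()
		// always equals Len(), so this term is zero here and only matters
		// for the analogous slice logic below.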
		return sum + (v.Cap()-v.Len())*int(v.Type().Elem().Size())

	case reflect.Slice:
		// return 0 if this node has been visited already
		if cache[v.Pointer()] {
			return 0
		}
		cache[v.Pointer()] = true

		sum := 0
		for i := 0; i < v.Len(); i++ {
			s := sizeOf(v.Index(i), cache)
			if s < 0 {
				return -1
			}
			sum += s
		}

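		// Unused backing-array capacity is counted shallowly at the element
		// size; the final term below adds the slice header itself
		// (v.Type().Size(), i.e. 24 bytes on 64-bit platforms).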
		sum += (v.Cap() - v.Len()) * int(v.Type().Elem().Size())

		return sum + int(v.Type().Size())

	case reflect.Struct:
		sum := 0
		for i, n := 0, v.NumField(); i < n; i++ {
			s := sizeOf(v.Field(i), cache)
			if s < 0 {
				return -1
			}
			sum += s
		}

		// Look for struct padding.
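		// Padding is the struct's total size minus the sum of its field
		// sizes, i.e. whatever the compiler inserted for alignment.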
		padding := int(v.Type().Size())
		for i, n := 0, v.NumField(); i < n; i++ {
			padding -= int(v.Field(i).Type().Size())
		}

		return sum + padding

	case reflect.String:
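		// Strings are deduplicated by their backing-data pointer, so two
		// strings sharing the same bytes (e.g. a substring) are only counted
		// once; the string header (v.Type().Size(), 16 bytes on 64-bit) is
		// always counted.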
		s := v.String()
		hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
		if cache[hdr.Data] {
			return int(v.Type().Size())
		}
		cache[hdr.Data] = true
		return len(s) + int(v.Type().Size())

	case reflect.Ptr:
		// return Ptr size if this node has been visited already (infinite recursion)
		if cache[v.Pointer()] {
			return int(v.Type().Size())
		}
		cache[v.Pointer()] = true
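		// A nil pointer contributes only the pointer word itself.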
		if v.IsNil() {
			return int(reflect.New(v.Type()).Type().Size())
		}
		s := sizeOf(reflect.Indirect(v), cache)
		if s < 0 {
			return -1
		}
		return s + int(v.Type().Size())

	case reflect.Bool,
		reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Int, reflect.Uint,
		reflect.Chan,
		reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128,
		reflect.Func:
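		// Fixed-size kinds occupy exactly their type size. Channels and funcs
		// are counted as a single pointer word; what they reference is not
		// traversed.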
		return int(v.Type().Size())

	case reflect.Map:
		// return 0 if this node has been visited already (infinite recursion)
		if cache[v.Pointer()] {
			return 0
		}
		cache[v.Pointer()] = true
		sum := 0
		keys := v.MapKeys()
		for i := range keys {
			val := v.MapIndex(keys[i])
			// calculate size of key and value separately
			sv := sizeOf(val, cache)
			if sv < 0 {
				return -1
			}
			sum += sv
			sk := sizeOf(keys[i], cache)
			if sk < 0 {
				return -1
			}
			sum += sk
		}
		// Include overhead due to unused map buckets. 10.79 comes
		// from https://golang.org/src/runtime/map.go.
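		// v.Type().Size() for a map is just the 8-byte map value (a pointer
		// to the runtime's map header); the 10.79 bytes per key approximate
		// the bucket overhead.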
		return sum + int(v.Type().Size()) + int(float64(len(keys))*10.79)

	case reflect.Interface:
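		// An interface value is two words (type pointer and data pointer)
		// plus whatever the boxed value itself occupies.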
		return sizeOf(v.Elem(), cache) + int(v.Type().Size())

	}

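	// Unhandled kinds (such as reflect.UnsafePointer) cannot be measured and
	// are reported as an error.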
	return -1
}