[chore] update go-structr and go-mangler to no longer rely on modern-go/reflect2 (#3026)

* updates go-structr and go-mangler to no longer rely on modern-go/reflect2 (*phew* now we're go1.23 safe)

* update go-structr version

* bump go-structr to improve memory usage (v. slightly) in certain conditions
This commit is contained in:
kim 2024-06-21 15:43:17 +00:00 committed by GitHub
parent 7b1ccbd65a
commit b93087ceb4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 346 additions and 490 deletions

4
go.mod
View file

@ -21,7 +21,7 @@ require (
codeberg.org/gruf/go-runners v1.6.2 codeberg.org/gruf/go-runners v1.6.2
codeberg.org/gruf/go-sched v1.2.3 codeberg.org/gruf/go-sched v1.2.3
codeberg.org/gruf/go-storage v0.1.1 codeberg.org/gruf/go-storage v0.1.1
codeberg.org/gruf/go-structr v0.8.5 codeberg.org/gruf/go-structr v0.8.7
codeberg.org/superseriousbusiness/exif-terminator v0.7.0 codeberg.org/superseriousbusiness/exif-terminator v0.7.0
github.com/DmitriyVTitov/size v1.5.0 github.com/DmitriyVTitov/size v1.5.0
github.com/KimMachineGun/automemlimit v0.6.1 github.com/KimMachineGun/automemlimit v0.6.1
@ -88,7 +88,7 @@ require (
codeberg.org/gruf/go-atomics v1.1.0 // indirect codeberg.org/gruf/go-atomics v1.1.0 // indirect
codeberg.org/gruf/go-bitutil v1.1.0 // indirect codeberg.org/gruf/go-bitutil v1.1.0 // indirect
codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
codeberg.org/gruf/go-mangler v1.3.0 // indirect codeberg.org/gruf/go-mangler v1.4.0 // indirect
codeberg.org/gruf/go-maps v1.0.3 // indirect codeberg.org/gruf/go-maps v1.0.3 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect

8
go.sum
View file

@ -62,8 +62,8 @@ codeberg.org/gruf/go-logger/v2 v2.2.1 h1:RP2u059EQKTBFV3cN8X6xDxNk2RkzqdgXGKflKq
codeberg.org/gruf/go-logger/v2 v2.2.1/go.mod h1:m/vBfG5jNUmYXI8Hg9aVSk7Pn8YgEBITQB/B/CzdRss= codeberg.org/gruf/go-logger/v2 v2.2.1/go.mod h1:m/vBfG5jNUmYXI8Hg9aVSk7Pn8YgEBITQB/B/CzdRss=
codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4 h1:IXwfoU7f2whT6+JKIKskNl/hBlmWmnF1vZd84Eb3cyA= codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4 h1:IXwfoU7f2whT6+JKIKskNl/hBlmWmnF1vZd84Eb3cyA=
codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4/go.mod h1:fiO8HE1wjZCephcYmRRsVnNI/i0+mhy44Z5dQalS0rM= codeberg.org/gruf/go-loosy v0.0.0-20231007123304-bb910d1ab5c4/go.mod h1:fiO8HE1wjZCephcYmRRsVnNI/i0+mhy44Z5dQalS0rM=
codeberg.org/gruf/go-mangler v1.3.0 h1:cf0vuuLJuEhoIukPHj+MUBIQSWxZcfEYt2Eo/r7Rstk= codeberg.org/gruf/go-mangler v1.4.0 h1:yOQMygLgCnU0ERt1JDAtv/LsjDwJtAdRpwhm648rA/E=
codeberg.org/gruf/go-mangler v1.3.0/go.mod h1:jnOA76AQoaO2kTHi0DlTTVaFYfRM+9fzs8f4XO6MsOk= codeberg.org/gruf/go-mangler v1.4.0/go.mod h1:TVbrburPF+UjuRSwxH1tHP3pZZXzdyJJO8+PToTEiKg=
codeberg.org/gruf/go-maps v1.0.3 h1:VDwhnnaVNUIy5O93CvkcE2IZXnMB1+IJjzfop9V12es= codeberg.org/gruf/go-maps v1.0.3 h1:VDwhnnaVNUIy5O93CvkcE2IZXnMB1+IJjzfop9V12es=
codeberg.org/gruf/go-maps v1.0.3/go.mod h1:D5LNDxlC9rsDuVQVM6JObaVGAdHB6g2dTdOdkh1aXWA= codeberg.org/gruf/go-maps v1.0.3/go.mod h1:D5LNDxlC9rsDuVQVM6JObaVGAdHB6g2dTdOdkh1aXWA=
codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4=
@ -76,8 +76,8 @@ codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk
codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A= codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A=
codeberg.org/gruf/go-storage v0.1.1 h1:CSX1PMMg/7vqqK8aCFtq94xCrOB3xhj7eWIvzILdLpY= codeberg.org/gruf/go-storage v0.1.1 h1:CSX1PMMg/7vqqK8aCFtq94xCrOB3xhj7eWIvzILdLpY=
codeberg.org/gruf/go-storage v0.1.1/go.mod h1:145IWMUOc6YpIiZIiCIEwkkNZZPiSbwMnZxRjSc5q6c= codeberg.org/gruf/go-storage v0.1.1/go.mod h1:145IWMUOc6YpIiZIiCIEwkkNZZPiSbwMnZxRjSc5q6c=
codeberg.org/gruf/go-structr v0.8.5 h1:WQuvLSQFyFwMjdU7dCWvgcjuhk07oWdSl9guShekzGQ= codeberg.org/gruf/go-structr v0.8.7 h1:agYCI6tSXU4JHVYPwZk3Og5rrBePNVv5iPWsDu7ZJIw=
codeberg.org/gruf/go-structr v0.8.5/go.mod h1:c5UvVDSA3lZ1kv05V+7pXkO8u8Jea+VRWFDRFBCOxSA= codeberg.org/gruf/go-structr v0.8.7/go.mod h1:O0FTNgzUnUKwWey4dEW99QD8rPezKPi5sxCVxYOJ1Fg=
codeberg.org/superseriousbusiness/exif-terminator v0.7.0 h1:Y6VApSXhKqExG0H2hZ2JelRK4xmWdjDQjn13CpEfzko= codeberg.org/superseriousbusiness/exif-terminator v0.7.0 h1:Y6VApSXhKqExG0H2hZ2JelRK4xmWdjDQjn13CpEfzko=
codeberg.org/superseriousbusiness/exif-terminator v0.7.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE= codeberg.org/superseriousbusiness/exif-terminator v0.7.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=

View file

@ -2,13 +2,13 @@
[Documentation](https://pkg.go.dev/codeberg.org/gruf/go-mangler). [Documentation](https://pkg.go.dev/codeberg.org/gruf/go-mangler).
To put it simply is a bit of an odd library. It aims to provide incredibly fast, unique string outputs for all default supported input data types during a given runtime instance. To put it simply is a bit of an odd library. It aims to provide incredibly fast, unique string outputs for all default supported input data types during a given runtime instance. See `mangler.String()` for supported types.
It is useful, for example, for use as part of larger abstractions involving hashmaps. That was my particular usecase anyways... It is useful, for example, for use as part of larger abstractions involving hashmaps. That was my particular usecase anyways...
This package does make liberal use of the "unsafe" package. This package does make liberal use of the "unsafe" package.
Benchmarks are below. Those with missing values panicked during our set of benchmarks, usually a case of not handling nil values elegantly. Please note the more important thing to notice here is the relative difference in benchmark scores, the actual `ns/op`,`B/op`,`allocs/op` accounts for running through over 80 possible test cases, including some not-ideal situations. Benchmarks are below. Please note the more important thing to notice here is the relative difference in benchmark scores, the actual `ns/op`,`B/op`,`allocs/op` accounts for running through ~80 possible test cases, including some not-ideal situations.
The choice of libraries in the benchmark are just a selection of libraries that could be used in a similar manner to this one, i.e. serializing in some manner. The choice of libraries in the benchmark are just a selection of libraries that could be used in a similar manner to this one, i.e. serializing in some manner.
@ -18,24 +18,14 @@ goos: linux
goarch: amd64 goarch: amd64
pkg: codeberg.org/gruf/go-mangler pkg: codeberg.org/gruf/go-mangler
cpu: 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz cpu: 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz
BenchmarkMangle BenchmarkMangle-8 1278526 966.0 ns/op 0 B/op 0 allocs/op
BenchmarkMangle-8 877761 1323 ns/op 0 B/op 0 allocs/op BenchmarkMangleKnown-8 3443587 345.9 ns/op 0 B/op 0 allocs/op
BenchmarkMangleKnown BenchmarkJSON-8 228962 4717 ns/op 1849 B/op 99 allocs/op
BenchmarkMangleKnown-8 1462954 814.5 ns/op 0 B/op 0 allocs/op BenchmarkLoosy-8 307194 3447 ns/op 776 B/op 65 allocs/op
BenchmarkJSON BenchmarkFmt-8 150254 7405 ns/op 1377 B/op 143 allocs/op
BenchmarkJSON-8 199930 5910 ns/op 2698 B/op 119 allocs/op BenchmarkFxmackerCbor-8 364411 3037 ns/op 1224 B/op 105 allocs/op
BenchmarkLoosy BenchmarkMitchellhHashStructure-8 102272 11268 ns/op 8996 B/op 1000 allocs/op
BenchmarkLoosy-8 307575 3718 ns/op 664 B/op 53 allocs/op BenchmarkCnfStructhash-8 6789 168703 ns/op 288301 B/op 5779 allocs/op
BenchmarkBinary
BenchmarkBinary-8 413216 2640 ns/op 3824 B/op 116 allocs/op
BenchmarkFmt
BenchmarkFmt-8 133429 8568 ns/op 3010 B/op 207 allocs/op
BenchmarkFxmackerCbor
BenchmarkFxmackerCbor-8 258562 4268 ns/op 2118 B/op 134 allocs/op
BenchmarkMitchellhHashStructure
BenchmarkMitchellhHashStructure-8 88941 13049 ns/op 10269 B/op 1096 allocs/op
BenchmarkCnfStructhash
BenchmarkCnfStructhash-8 5586 179537 ns/op 290373 B/op 5863 allocs/op
PASS PASS
ok codeberg.org/gruf/go-mangler 12.469s ok codeberg.org/gruf/go-mangler 11.715s
``` ```

View file

@ -3,16 +3,6 @@ package mangler
import ( import (
"reflect" "reflect"
"unsafe" "unsafe"
"github.com/modern-go/reflect2"
)
type (
byteser interface{ Bytes() []byte }
stringer interface{ String() string }
binarymarshaler interface{ MarshalBinary() ([]byte, error) }
textmarshaler interface{ MarshalText() ([]byte, error) }
jsonmarshaler interface{ MarshalJSON() ([]byte, error) }
) )
func append_uint16(b []byte, u uint16) []byte { func append_uint16(b []byte, u uint16) []byte {
@ -44,21 +34,28 @@ func append_uint64(b []byte, u uint64) []byte {
) )
} }
func deref_ptr_mangler(rtype reflect.Type, mangle Mangler, count int) Mangler { type typecontext struct {
if rtype == nil || mangle == nil || count == 0 { ntype reflect.Type
rtype reflect.Type
}
func deref_ptr_mangler(ctx typecontext, mangle Mangler, n uint) Mangler {
if mangle == nil || n == 0 {
panic("bad input") panic("bad input")
} }
// Get reflect2's type for later // Non-nested value types,
// unsafe interface data repacking, // i.e. just direct ptrs to
type2 := reflect2.Type2(rtype) // primitives require one
// less dereference to ptr.
if ctx.ntype == nil {
n--
}
return func(buf []byte, value any) []byte { return func(buf []byte, ptr unsafe.Pointer) []byte {
// Get raw value data.
ptr := eface_data(value)
// Deref n - 1 number times. // Deref n number times.
for i := 0; i < count-1; i++ { for i := n; i > 0; i-- {
if ptr == nil { if ptr == nil {
// Check for nil values // Check for nil values
@ -72,38 +69,63 @@ func deref_ptr_mangler(rtype reflect.Type, mangle Mangler, count int) Mangler {
} }
if ptr == nil { if ptr == nil {
// Final nil value check. // Check for nil values
buf = append(buf, '0') buf = append(buf, '0')
return buf return buf
} }
// Repack and mangle fully deref'd // Mangle fully deref'd
value = type2.UnsafeIndirect(ptr)
buf = append(buf, '1') buf = append(buf, '1')
return mangle(buf, value) buf = mangle(buf, ptr)
return buf
} }
} }
func iter_slice_mangler(rtype reflect.Type, mangle Mangler) Mangler { func iter_slice_mangler(ctx typecontext, mangle Mangler) Mangler {
if rtype == nil || mangle == nil { if ctx.rtype == nil || mangle == nil {
panic("bad input") panic("bad input")
} }
// Get reflect2's type for later // memory size of elem.
// unsafe slice data manipulation. esz := ctx.rtype.Size()
slice2 := reflect2.Type2(rtype).(*reflect2.UnsafeSliceType)
return func(buf []byte, value any) []byte { return func(buf []byte, ptr unsafe.Pointer) []byte {
// Get raw value data. // Get data as slice hdr.
ptr := eface_data(value) hdr := (*slice_header)(ptr)
// Get length of slice value. for i := 0; i < hdr.len; i++ {
n := slice2.UnsafeLengthOf(ptr) // Mangle data at slice index.
eptr := array_at(hdr.data, esz, i)
buf = mangle(buf, eptr)
buf = append(buf, ',')
}
if hdr.len > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
func iter_array_mangler(ctx typecontext, mangle Mangler) Mangler {
if ctx.rtype == nil || mangle == nil {
panic("bad input")
}
// no. array elements.
n := ctx.ntype.Len()
// memory size of elem.
esz := ctx.rtype.Size()
return func(buf []byte, ptr unsafe.Pointer) []byte {
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
// Mangle data at each slice index. // Mangle data at array index.
e := slice2.UnsafeGetIndex(ptr, i) offset := esz * uintptr(i)
buf = mangle(buf, e) eptr := add(ptr, offset)
buf = mangle(buf, eptr)
buf = append(buf, ',') buf = append(buf, ',')
} }
@ -116,118 +138,34 @@ func iter_slice_mangler(rtype reflect.Type, mangle Mangler) Mangler {
} }
} }
func iter_array_mangler(rtype reflect.Type, mangle Mangler) Mangler { func iter_struct_mangler(ctx typecontext, manglers []Mangler) Mangler {
if rtype == nil || mangle == nil { if ctx.rtype == nil || len(manglers) != ctx.rtype.NumField() {
panic("bad input")
}
// Get reflect2's type for later
// unsafe slice data manipulation.
array2 := reflect2.Type2(rtype).(*reflect2.UnsafeArrayType)
n := array2.Len()
return func(buf []byte, value any) []byte {
// Get raw value data.
ptr := eface_data(value)
for i := 0; i < n; i++ {
// Mangle data at each slice index.
e := array2.UnsafeGetIndex(ptr, i)
buf = mangle(buf, e)
buf = append(buf, ',')
}
if n > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
func iter_map_mangler(rtype reflect.Type, kmangle, emangle Mangler) Mangler {
if rtype == nil || kmangle == nil || emangle == nil {
panic("bad input")
}
// Get reflect2's type for later
// unsafe map data manipulation.
map2 := reflect2.Type2(rtype).(*reflect2.UnsafeMapType)
key2, elem2 := map2.Key(), map2.Elem()
return func(buf []byte, value any) []byte {
// Get raw value data.
ptr := eface_data(value)
ptr = indirect_ptr(ptr)
// Create iterator for map value.
iter := map2.UnsafeIterate(ptr)
// Check if empty map.
empty := !iter.HasNext()
for iter.HasNext() {
// Get key + elem data as ifaces.
kptr, eptr := iter.UnsafeNext()
key := key2.UnsafeIndirect(kptr)
elem := elem2.UnsafeIndirect(eptr)
// Mangle data for key + elem.
buf = kmangle(buf, key)
buf = append(buf, ':')
buf = emangle(buf, elem)
buf = append(buf, ',')
}
if !empty {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
func iter_struct_mangler(rtype reflect.Type, manglers []Mangler) Mangler {
if rtype == nil || len(manglers) != rtype.NumField() {
panic("bad input") panic("bad input")
} }
type field struct { type field struct {
type2 reflect2.Type offset uintptr
field *reflect2.UnsafeStructField
mangle Mangler mangle Mangler
} }
// Get reflect2's type for later
// unsafe struct field data access.
struct2 := reflect2.Type2(rtype).(*reflect2.UnsafeStructType)
// Bundle together the fields and manglers. // Bundle together the fields and manglers.
fields := make([]field, rtype.NumField()) fields := make([]field, ctx.rtype.NumField())
for i := range fields { for i := range fields {
fields[i].field = struct2.Field(i).(*reflect2.UnsafeStructField) rfield := ctx.rtype.FieldByIndex([]int{i})
fields[i].type2 = fields[i].field.Type() fields[i].offset = rfield.Offset
fields[i].mangle = manglers[i] fields[i].mangle = manglers[i]
if fields[i].type2 == nil || if fields[i].mangle == nil {
fields[i].field == nil ||
fields[i].mangle == nil {
panic("bad input") panic("bad input")
} }
} }
return func(buf []byte, value any) []byte { return func(buf []byte, ptr unsafe.Pointer) []byte {
// Get raw value data.
ptr := eface_data(value)
for i := range fields { for i := range fields {
// Get struct field as iface via offset. // Get struct field ptr via offset.
fptr := fields[i].field.UnsafeGet(ptr) fptr := add(ptr, fields[i].offset)
field := fields[i].type2.UnsafeIndirect(fptr)
// Mangle the struct field data. // Mangle the struct field data.
buf = fields[i].mangle(buf, field) buf = fields[i].mangle(buf, fptr)
buf = append(buf, ',') buf = append(buf, ',')
} }
@ -240,8 +178,20 @@ func iter_struct_mangler(rtype reflect.Type, manglers []Mangler) Mangler {
} }
} }
func indirect_ptr(p unsafe.Pointer) unsafe.Pointer { // array_at returns ptr to index in array at ptr, given element size.
return unsafe.Pointer(&p) func array_at(ptr unsafe.Pointer, esz uintptr, i int) unsafe.Pointer {
return unsafe.Pointer(uintptr(ptr) + esz*uintptr(i))
}
// add returns the ptr addition of starting ptr and a delta.
func add(ptr unsafe.Pointer, delta uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(ptr) + delta)
}
type slice_header struct {
data unsafe.Pointer
len int
cap int
} }
func eface_data(a any) unsafe.Pointer { func eface_data(a any) unsafe.Pointer {

View file

@ -6,9 +6,11 @@ import (
// loadMangler is the top-most Mangler load function. It guarantees that a Mangler // loadMangler is the top-most Mangler load function. It guarantees that a Mangler
// function will be returned for given value interface{} and reflected type. Else panics. // function will be returned for given value interface{} and reflected type. Else panics.
func loadMangler(a any, t reflect.Type) Mangler { func loadMangler(t reflect.Type) Mangler {
ctx := typecontext{rtype: t}
// Load mangler fn // Load mangler fn
mng := load(a, t) mng := load(ctx)
if mng != nil { if mng != nil {
return mng return mng
} }
@ -19,31 +21,14 @@ func loadMangler(a any, t reflect.Type) Mangler {
// load will load a Mangler or reflect Mangler for given type and iface 'a'. // load will load a Mangler or reflect Mangler for given type and iface 'a'.
// Note: allocates new interface value if nil provided, i.e. if coming via reflection. // Note: allocates new interface value if nil provided, i.e. if coming via reflection.
func load(a any, t reflect.Type) Mangler { func load(ctx typecontext) Mangler {
if t == nil { if ctx.rtype == nil {
// There is no reflect type to search by // There is no reflect type to search by
panic("cannot mangle nil interface{} type") panic("cannot mangle nil interface{} type")
} }
if a == nil { // Search by reflection.
// Alloc new iface instance mng := loadReflect(ctx)
v := reflect.New(t).Elem()
a = v.Interface()
}
// Check for Mangled implementation.
if _, ok := a.(Mangled); ok {
return mangle_mangled
}
// Search mangler by reflection.
mng := loadReflect(t)
if mng != nil {
return mng
}
// Prefer iface mangler.
mng = loadIface(a)
if mng != nil { if mng != nil {
return mng return mng
} }
@ -51,46 +36,24 @@ func load(a any, t reflect.Type) Mangler {
return nil return nil
} }
// loadIface is used as a near-last-resort interface{} type switch
// loader for types implementating other known (slower) functions.
func loadIface(a any) Mangler {
switch a.(type) {
case binarymarshaler:
return mangle_binary
case byteser:
return mangle_byteser
case stringer:
return mangle_stringer
case textmarshaler:
return mangle_text
case jsonmarshaler:
return mangle_json
default:
return nil
}
}
// loadReflect will load a Mangler (or rMangler) function for the given reflected type info. // loadReflect will load a Mangler (or rMangler) function for the given reflected type info.
// NOTE: this is used as the top level load function for nested reflective searches. // NOTE: this is used as the top level load function for nested reflective searches.
func loadReflect(t reflect.Type) Mangler { func loadReflect(ctx typecontext) Mangler {
switch t.Kind() { switch ctx.rtype.Kind() {
case reflect.Pointer: case reflect.Pointer:
return loadReflectPtr(t) return loadReflectPtr(ctx)
case reflect.String: case reflect.String:
return mangle_string return mangle_string
case reflect.Struct: case reflect.Struct:
return loadReflectStruct(t) return loadReflectStruct(ctx)
case reflect.Array: case reflect.Array:
return loadReflectArray(t) return loadReflectArray(ctx)
case reflect.Slice: case reflect.Slice:
return loadReflectSlice(t) return loadReflectSlice(ctx)
case reflect.Map:
return loadReflectMap(t)
case reflect.Bool: case reflect.Bool:
return mangle_bool return mangle_bool
@ -98,7 +61,7 @@ func loadReflect(t reflect.Type) Mangler {
case reflect.Int, case reflect.Int,
reflect.Uint, reflect.Uint,
reflect.Uintptr: reflect.Uintptr:
return mangle_platform_int() return mangle_int
case reflect.Int8, reflect.Uint8: case reflect.Int8, reflect.Uint8:
return mangle_8bit return mangle_8bit
@ -131,21 +94,18 @@ func loadReflect(t reflect.Type) Mangler {
// loadReflectPtr loads a Mangler (or rMangler) function for a ptr's element type. // loadReflectPtr loads a Mangler (or rMangler) function for a ptr's element type.
// This also handles further dereferencing of any further ptr indirections (e.g. ***int). // This also handles further dereferencing of any further ptr indirections (e.g. ***int).
func loadReflectPtr(t reflect.Type) Mangler { func loadReflectPtr(ctx typecontext) Mangler {
var count int var n uint
// Elem
et := t
// Iteratively dereference ptrs // Iteratively dereference ptrs
for et.Kind() == reflect.Pointer { for ctx.rtype.Kind() == reflect.Pointer {
et = et.Elem() ctx.rtype = ctx.rtype.Elem()
count++ n++
} }
// Search for ptr elem type mangler. // Search for elem type mangler.
if mng := load(nil, et); mng != nil { if mng := load(ctx); mng != nil {
return deref_ptr_mangler(et, mng, count) return deref_ptr_mangler(ctx, mng, n)
} }
return nil return nil
@ -153,8 +113,8 @@ func loadReflectPtr(t reflect.Type) Mangler {
// loadReflectKnownSlice loads a Mangler function for a // loadReflectKnownSlice loads a Mangler function for a
// known slice-of-element type (in this case, primitives). // known slice-of-element type (in this case, primitives).
func loadReflectKnownSlice(et reflect.Type) Mangler { func loadReflectKnownSlice(ctx typecontext) Mangler {
switch et.Kind() { switch ctx.rtype.Kind() {
case reflect.String: case reflect.String:
return mangle_string_slice return mangle_string_slice
@ -164,7 +124,7 @@ func loadReflectKnownSlice(et reflect.Type) Mangler {
case reflect.Int, case reflect.Int,
reflect.Uint, reflect.Uint,
reflect.Uintptr: reflect.Uintptr:
return mangle_platform_int_slice() return mangle_int_slice
case reflect.Int8, reflect.Uint8: case reflect.Int8, reflect.Uint8:
return mangle_8bit_slice return mangle_8bit_slice
@ -196,64 +156,60 @@ func loadReflectKnownSlice(et reflect.Type) Mangler {
} }
// loadReflectSlice ... // loadReflectSlice ...
func loadReflectSlice(t reflect.Type) Mangler { func loadReflectSlice(ctx typecontext) Mangler {
// Element type // Set nesting type.
et := t.Elem() ctx.ntype = ctx.rtype
// Get nested element type.
ctx.rtype = ctx.rtype.Elem()
// Preferably look for known slice mangler func // Preferably look for known slice mangler func
if mng := loadReflectKnownSlice(et); mng != nil { if mng := loadReflectKnownSlice(ctx); mng != nil {
return mng return mng
} }
// Fallback to nested mangler iteration. // Use nested mangler iteration.
if mng := load(nil, et); mng != nil { if mng := load(ctx); mng != nil {
return iter_slice_mangler(t, mng) return iter_slice_mangler(ctx, mng)
} }
return nil return nil
} }
// loadReflectArray ... // loadReflectArray ...
func loadReflectArray(t reflect.Type) Mangler { func loadReflectArray(ctx typecontext) Mangler {
// Element type. // Set nesting type.
et := t.Elem() ctx.ntype = ctx.rtype
// Get nested element type.
ctx.rtype = ctx.rtype.Elem()
// Use manglers for nested iteration. // Use manglers for nested iteration.
if mng := load(nil, et); mng != nil { if mng := load(ctx); mng != nil {
return iter_array_mangler(t, mng) return iter_array_mangler(ctx, mng)
}
return nil
}
// loadReflectMap ...
func loadReflectMap(t reflect.Type) Mangler {
// Map types.
kt := t.Key()
et := t.Elem()
// Load manglers.
kmng := load(nil, kt)
emng := load(nil, et)
// Use manglers for nested iteration.
if kmng != nil && emng != nil {
return iter_map_mangler(t, kmng, emng)
} }
return nil return nil
} }
// loadReflectStruct ... // loadReflectStruct ...
func loadReflectStruct(t reflect.Type) Mangler { func loadReflectStruct(ctx typecontext) Mangler {
var mngs []Mangler var mngs []Mangler
// Gather manglers for all fields. // Set nesting type.
for i := 0; i < t.NumField(); i++ { ctx.ntype = ctx.rtype
field := t.Field(i)
// Load mangler for field type. // Gather manglers for all fields.
mng := load(nil, field.Type) for i := 0; i < ctx.ntype.NumField(); i++ {
// Field typectx.
ctx := typecontext{
ntype: ctx.ntype,
rtype: ctx.ntype.Field(i).Type,
}
// Load mangler.
mng := load(ctx)
if mng == nil { if mng == nil {
return nil return nil
} }
@ -263,5 +219,5 @@ func loadReflectStruct(t reflect.Type) Mangler {
} }
// Use manglers for nested iteration. // Use manglers for nested iteration.
return iter_struct_mangler(t, mngs) return iter_struct_mangler(ctx, mngs)
} }

View file

@ -10,15 +10,11 @@ import (
// type ptrs => Mangler functions. // type ptrs => Mangler functions.
var manglers sync.Map var manglers sync.Map
// Mangled is an interface that allows any type to implement a custom
// Mangler function to improve performance when mangling this type.
type Mangled interface{ Mangle(buf []byte) []byte }
// Mangler is a function that will take an input interface value of known // Mangler is a function that will take an input interface value of known
// type, and append it in mangled serialized form to the given byte buffer. // type, and append it in mangled serialized form to the given byte buffer.
// While the value type is an interface, the Mangler functions are accessed // While the value type is an interface, the Mangler functions are accessed
// by the value's runtime type pointer, allowing the input value type to be known. // by the value's runtime type pointer, allowing the input value type to be known.
type Mangler func(buf []byte, value any) []byte type Mangler func(buf []byte, ptr unsafe.Pointer) []byte
// Get will fetch the Mangler function for given runtime type. // Get will fetch the Mangler function for given runtime type.
// Note that the returned mangler will be a no-op in the case // Note that the returned mangler will be a no-op in the case
@ -34,27 +30,19 @@ func Get(t reflect.Type) Mangler {
if !ok { if !ok {
// Load mangler function // Load mangler function
mng = loadMangler(nil, t) mng = loadMangler(t)
} else { } else {
// cast cached value // cast cached value
mng = v.(Mangler) mng = v.(Mangler)
} }
// Get platform int mangler func. return func(buf []byte, ptr unsafe.Pointer) []byte {
mangle_int := mangle_platform_int()
return func(buf []byte, value any) []byte {
// Type check passed against original type.
if vt := reflect.TypeOf(value); vt != t {
return buf
}
// First write the type ptr (this adds // First write the type ptr (this adds
// a unique prefix for each runtime type). // a unique prefix for each runtime type).
buf = mangle_int(buf, uptr) buf = append_uint64(buf, uint64(uptr))
// Finally, mangle value // Finally, mangle value
return mng(buf, value) return mng(buf, ptr)
} }
} }
@ -94,23 +82,21 @@ func Append(b []byte, a any) []byte {
v, ok := manglers.Load(uptr) v, ok := manglers.Load(uptr)
if !ok { if !ok {
// Load mangler into cache // Load into cache
mng = loadMangler(nil, t) mng = loadMangler(t)
manglers.Store(uptr, mng) manglers.Store(uptr, mng)
} else { } else {
// cast cached value // cast cached value
mng = v.(Mangler) mng = v.(Mangler)
} }
// Get platform int mangler func.
mangle_int := mangle_platform_int()
// First write the type ptr (this adds // First write the type ptr (this adds
// a unique prefix for each runtime type). // a unique prefix for each runtime type).
b = mangle_int(b, uptr) b = append_uint64(b, uint64(uptr))
// Finally, mangle value // Finally, mangle value
return mng(b, a) ptr := eface_data(a)
return mng(b, ptr)
} }
// String will return the mangled format of input value 'a'. This // String will return the mangled format of input value 'a'. This
@ -136,18 +122,8 @@ func Append(b []byte, a any) []byte {
// - complex64,complex128 // - complex64,complex128
// - arbitrary structs // - arbitrary structs
// - all type aliases of above // - all type aliases of above
// - time.Time{}
// - url.URL{}
// - net.IPAddr{}
// - netip.Addr{}, netip.AddrPort{}
// - mangler.Mangled{}
// - fmt.Stringer{}
// - json.Marshaler{}
// - encoding.BinaryMarshaler{}
// - encoding.TextMarshaler{}
// - all pointers to the above // - all pointers to the above
// - all slices / arrays of the above // - all slices / arrays of the above
// - all map keys / values of the above
func String(a any) string { func String(a any) string {
b := Append(make([]byte, 0, 32), a) b := Append(make([]byte, 0, 32), a)
return *(*string)(unsafe.Pointer(&b)) return *(*string)(unsafe.Pointer(&b))

View file

@ -1,7 +1,7 @@
package mangler package mangler
import ( import (
"math/bits" "unsafe"
_ "unsafe" _ "unsafe"
) )
@ -11,12 +11,12 @@ import (
// not only those types directly, but anything type-aliased to those // not only those types directly, but anything type-aliased to those
// types. e.g. `time.Duration` directly as int64. // types. e.g. `time.Duration` directly as int64.
func mangle_string(buf []byte, a any) []byte { func mangle_string(buf []byte, ptr unsafe.Pointer) []byte {
return append(buf, *(*string)(eface_data(a))...) return append(buf, *(*string)(ptr)...)
} }
func mangle_string_slice(buf []byte, a any) []byte { func mangle_string_slice(buf []byte, ptr unsafe.Pointer) []byte {
s := *(*[]string)(eface_data(a)) s := *(*[]string)(ptr)
for _, s := range s { for _, s := range s {
buf = append(buf, s...) buf = append(buf, s...)
buf = append(buf, ',') buf = append(buf, ',')
@ -27,15 +27,15 @@ func mangle_string_slice(buf []byte, a any) []byte {
return buf return buf
} }
func mangle_bool(buf []byte, a any) []byte { func mangle_bool(buf []byte, ptr unsafe.Pointer) []byte {
if *(*bool)(eface_data(a)) { if *(*bool)(ptr) {
return append(buf, '1') return append(buf, '1')
} }
return append(buf, '0') return append(buf, '0')
} }
func mangle_bool_slice(buf []byte, a any) []byte { func mangle_bool_slice(buf []byte, ptr unsafe.Pointer) []byte {
for _, b := range *(*[]bool)(eface_data(a)) { for _, b := range *(*[]bool)(ptr) {
if b { if b {
buf = append(buf, '1') buf = append(buf, '1')
} else { } else {
@ -45,146 +45,69 @@ func mangle_bool_slice(buf []byte, a any) []byte {
return buf return buf
} }
func mangle_8bit(buf []byte, a any) []byte { func mangle_8bit(buf []byte, ptr unsafe.Pointer) []byte {
return append(buf, *(*uint8)(eface_data(a))) return append(buf, *(*uint8)(ptr))
} }
func mangle_8bit_slice(buf []byte, a any) []byte { func mangle_8bit_slice(buf []byte, ptr unsafe.Pointer) []byte {
return append(buf, *(*[]uint8)(eface_data(a))...) return append(buf, *(*[]uint8)(ptr)...)
} }
func mangle_16bit(buf []byte, a any) []byte { func mangle_16bit(buf []byte, ptr unsafe.Pointer) []byte {
return append_uint16(buf, *(*uint16)(eface_data(a))) return append_uint16(buf, *(*uint16)(ptr))
} }
func mangle_16bit_slice(buf []byte, a any) []byte { func mangle_16bit_slice(buf []byte, ptr unsafe.Pointer) []byte {
for _, u := range *(*[]uint16)(eface_data(a)) { for _, u := range *(*[]uint16)(ptr) {
buf = append_uint16(buf, u) buf = append_uint16(buf, u)
} }
return buf return buf
} }
func mangle_32bit(buf []byte, a any) []byte { func mangle_32bit(buf []byte, ptr unsafe.Pointer) []byte {
return append_uint32(buf, *(*uint32)(eface_data(a))) return append_uint32(buf, *(*uint32)(ptr))
} }
func mangle_32bit_slice(buf []byte, a any) []byte { func mangle_32bit_slice(buf []byte, ptr unsafe.Pointer) []byte {
for _, u := range *(*[]uint32)(eface_data(a)) { for _, u := range *(*[]uint32)(ptr) {
buf = append_uint32(buf, u) buf = append_uint32(buf, u)
} }
return buf return buf
} }
func mangle_64bit(buf []byte, a any) []byte { func mangle_64bit(buf []byte, ptr unsafe.Pointer) []byte {
return append_uint64(buf, *(*uint64)(eface_data(a))) return append_uint64(buf, *(*uint64)(ptr))
} }
func mangle_64bit_slice(buf []byte, a any) []byte { func mangle_64bit_slice(buf []byte, ptr unsafe.Pointer) []byte {
for _, u := range *(*[]uint64)(eface_data(a)) { for _, u := range *(*[]uint64)(ptr) {
buf = append_uint64(buf, u) buf = append_uint64(buf, u)
} }
return buf return buf
} }
func mangle_platform_int() Mangler { func mangle_int(buf []byte, ptr unsafe.Pointer) []byte {
switch bits.UintSize { return append_uint64(buf, uint64(*(*uint)(ptr)))
case 32:
return mangle_32bit
case 64:
return mangle_64bit
default:
panic("unexpected platform int size")
}
} }
func mangle_platform_int_slice() Mangler { func mangle_int_slice(buf []byte, ptr unsafe.Pointer) []byte {
switch bits.UintSize { for _, u := range *(*[]uint)(ptr) {
case 32: buf = append_uint64(buf, uint64(u))
return mangle_32bit_slice
case 64:
return mangle_64bit_slice
default:
panic("unexpected platform int size")
} }
return buf
} }
func mangle_128bit(buf []byte, a any) []byte { func mangle_128bit(buf []byte, ptr unsafe.Pointer) []byte {
u2 := *(*[2]uint64)(eface_data(a)) u2 := *(*[2]uint64)(ptr)
buf = append_uint64(buf, u2[0]) buf = append_uint64(buf, u2[0])
buf = append_uint64(buf, u2[1]) buf = append_uint64(buf, u2[1])
return buf return buf
} }
func mangle_128bit_slice(buf []byte, a any) []byte { func mangle_128bit_slice(buf []byte, ptr unsafe.Pointer) []byte {
for _, u2 := range *(*[][2]uint64)(eface_data(a)) { for _, u2 := range *(*[][2]uint64)(ptr) {
buf = append_uint64(buf, u2[0]) buf = append_uint64(buf, u2[0])
buf = append_uint64(buf, u2[1]) buf = append_uint64(buf, u2[1])
} }
return buf return buf
} }
// mangle_mangled appends the value's own Mangle() serialization
// prefixed with '1', or a lone '0' placeholder when the value
// (as a Mangled interface) is nil.
func mangle_mangled(buf []byte, a any) []byte {
	v := a.(Mangled)
	if v == nil {
		// Nil: absence marker only.
		return append(buf, '0')
	}
	// Presence marker, then delegate to the type itself.
	buf = append(buf, '1')
	return v.Mangle(buf)
}
// mangle_binary appends the value's MarshalBinary() output
// prefixed with '1', or a lone '0' placeholder when the value
// is nil. Panics if MarshalBinary() returns an error.
func mangle_binary(buf []byte, a any) []byte {
	v := a.(binarymarshaler)
	if v == nil {
		// Nil: absence marker only.
		return append(buf, '0')
	}
	b, err := v.MarshalBinary()
	if err != nil {
		panic("mangle_binary: " + err.Error())
	}
	// Presence marker, then payload.
	buf = append(buf, '1')
	return append(buf, b...)
}
// mangle_byteser appends the value's Bytes() output prefixed
// with '1', or a lone '0' placeholder when the value is nil.
func mangle_byteser(buf []byte, a any) []byte {
	v := a.(byteser)
	if v == nil {
		// Nil: absence marker only.
		return append(buf, '0')
	}
	// Presence marker, then payload.
	buf = append(buf, '1')
	return append(buf, v.Bytes()...)
}
// mangle_stringer appends the value's String() output prefixed
// with '1', or a lone '0' placeholder when the value is nil.
func mangle_stringer(buf []byte, a any) []byte {
	v := a.(stringer)
	if v == nil {
		// Nil: absence marker only.
		return append(buf, '0')
	}
	// Presence marker, then payload.
	buf = append(buf, '1')
	return append(buf, v.String()...)
}
// mangle_text appends the value's MarshalText() output prefixed
// with '1', or a lone '0' placeholder when the value is nil.
// Panics if MarshalText() returns an error.
func mangle_text(buf []byte, a any) []byte {
	v := a.(textmarshaler)
	if v == nil {
		// Nil: absence marker only.
		return append(buf, '0')
	}
	b, err := v.MarshalText()
	if err != nil {
		panic("mangle_text: " + err.Error())
	}
	// Presence marker, then payload.
	buf = append(buf, '1')
	return append(buf, b...)
}
// mangle_json appends the value's MarshalJSON() output prefixed
// with '1', or a lone '0' placeholder when the value is nil.
// Panics if MarshalJSON() returns an error.
func mangle_json(buf []byte, a any) []byte {
	v := a.(jsonmarshaler)
	if v == nil {
		// Nil: absence marker only.
		return append(buf, '0')
	}
	b, err := v.MarshalJSON()
	if err != nil {
		panic("mangle_json: " + err.Error())
	}
	// Presence marker, then payload.
	buf = append(buf, '1')
	return append(buf, b...)
}

View file

@ -194,8 +194,7 @@ func (c *Cache[T]) Put(values ...T) {
// Store all passed values. // Store all passed values.
for i := range values { for i := range values {
c.store_value( c.store_value(
nil, nil, "",
Key{},
values[i], values[i],
) )
} }
@ -302,9 +301,9 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// the provided value, so it is // the provided value, so it is
// safe for us to return as-is. // safe for us to return as-is.
if err != nil { if err != nil {
c.store_error(index, key, err) c.store_error(index, key.key, err)
} else { } else {
c.store_value(index, key, val) c.store_value(index, key.key, val)
} }
// Done with lock. // Done with lock.
@ -388,8 +387,7 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Store all uncached values. // Store all uncached values.
for i := range uncached { for i := range uncached {
c.store_value( c.store_value(
nil, nil, "",
Key{},
uncached[i], uncached[i],
) )
} }
@ -511,6 +509,11 @@ func (c *Cache[T]) Trim(perc float64) {
c.delete(item) c.delete(item)
} }
// Compact index data stores.
for i := range c.indices {
c.indices[i].data.Compact()
}
// Done with lock. // Done with lock.
c.mutex.Unlock() c.mutex.Unlock()
} }
@ -535,10 +538,9 @@ func (c *Cache[T]) Debug() map[string]any {
m["indices"] = indices m["indices"] = indices
for i := range c.indices { for i := range c.indices {
var n uint64 var n uint64
c.indices[i].data.Iter(func(_ string, l *list) (stop bool) { for _, l := range c.indices[i].data.m {
n += uint64(l.len) n += uint64(l.len)
return }
})
indices[c.indices[i].name] = n indices[c.indices[i].name] = n
} }
c.mutex.Unlock() c.mutex.Unlock()
@ -553,7 +555,7 @@ func (c *Cache[T]) Cap() int {
return m return m
} }
func (c *Cache[T]) store_value(index *Index, key Key, value T) { func (c *Cache[T]) store_value(index *Index, key string, value T) {
// Alloc new index item. // Alloc new index item.
item := new_indexed_item() item := new_indexed_item()
if cap(item.indexed) < len(c.indices) { if cap(item.indexed) < len(c.indices) {
@ -569,7 +571,7 @@ func (c *Cache[T]) store_value(index *Index, key Key, value T) {
if index != nil { if index != nil {
// Append item to index. // Append item to index.
index.append(key.key, item) index.append(key, item)
} }
// Get ptr to value data. // Get ptr to value data.
@ -619,7 +621,7 @@ func (c *Cache[T]) store_value(index *Index, key Key, value T) {
} }
} }
func (c *Cache[T]) store_error(index *Index, key Key, err error) { func (c *Cache[T]) store_error(index *Index, key string, err error) {
if index == nil { if index == nil {
// nothing we // nothing we
// can do here. // can do here.
@ -639,7 +641,7 @@ func (c *Cache[T]) store_error(index *Index, key Key, err error) {
item.data = err item.data = err
// Append item to index. // Append item to index.
index.append(key.key, item) index.append(key, item)
// Add item to main lru list. // Add item to main lru list.
c.lru.push_front(&item.elem) c.lru.push_front(&item.elem)

View file

@ -7,8 +7,6 @@ import (
"unsafe" "unsafe"
"codeberg.org/gruf/go-byteutil" "codeberg.org/gruf/go-byteutil"
"github.com/dolthub/swiss"
) )
// IndexConfig defines config variables // IndexConfig defines config variables
@ -72,7 +70,7 @@ type Index struct {
// index_entry{} which also contains the exact // index_entry{} which also contains the exact
// key each result is stored under. the hash map // key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed. // only keys by the xxh3 hash checksum for speed.
data *swiss.Map[string, *list] data hashmap
// struct fields encompassed by // struct fields encompassed by
// keys (+ hashes) of this index. // keys (+ hashes) of this index.
@ -93,8 +91,12 @@ func (i *Index) Name() string {
// the type of lookup this Index uses in cache. // the type of lookup this Index uses in cache.
// NOTE: panics on incorrect no. parts / types given. // NOTE: panics on incorrect no. parts / types given.
func (i *Index) Key(parts ...any) Key { func (i *Index) Key(parts ...any) Key {
ptrs := make([]unsafe.Pointer, len(parts))
for x, part := range parts {
ptrs[x] = eface_data(part)
}
buf := new_buffer() buf := new_buffer()
key := i.key(buf, parts) key := i.key(buf, ptrs)
free_buffer(buf) free_buffer(buf)
return Key{ return Key{
raw: parts, raw: parts,
@ -109,7 +111,11 @@ func (i *Index) Keys(parts ...[]any) []Key {
keys := make([]Key, 0, len(parts)) keys := make([]Key, 0, len(parts))
buf := new_buffer() buf := new_buffer()
for _, parts := range parts { for _, parts := range parts {
key := i.key(buf, parts) ptrs := make([]unsafe.Pointer, len(parts))
for x, part := range parts {
ptrs[x] = eface_data(part)
}
key := i.key(buf, ptrs)
if key == "" { if key == "" {
continue continue
} }
@ -160,8 +166,9 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
i.fields[x] = find_field(t, names) i.fields[x] = find_field(t, names)
} }
// Initialize index_entry list store. // Initialize store for
i.data = swiss.NewMap[string, *list](uint32(cap)) // index_entry lists.
i.data.init(cap)
} }
// get_one will fetch one indexed item under key. // get_one will fetch one indexed item under key.
@ -203,7 +210,7 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
} }
// key uses hasher to generate Key{} from given raw parts. // key uses hasher to generate Key{} from given raw parts.
func (i *Index) key(buf *byteutil.Buffer, parts []any) string { func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
if len(parts) != len(i.fields) { if len(parts) != len(i.fields) {
panicf("incorrect number key parts: want=%d received=%d", panicf("incorrect number key parts: want=%d received=%d",
len(i.fields), len(i.fields),
@ -332,33 +339,6 @@ func (i *Index) delete_entry(entry *index_entry) {
entry.item.drop_index(entry) entry.item.drop_index(entry)
} }
// compact will reduce the size of underlying
// index map if the cap vastly exceeds len.
func (i *Index) compact() {
// Maximum load factor before
// 'swiss' allocates new hmap:
// maxLoad = 7 / 8
//
// So we apply the inverse/2, once
// $maxLoad/2 % of hmap is empty we
// compact the map to drop buckets.
len := i.data.Count()
cap := i.data.Capacity()
if cap-len > (cap*7)/(8*2) {
// Create a new map only as big as required.
data := swiss.NewMap[string, *list](uint32(len))
i.data.Iter(func(k string, v *list) (stop bool) {
data.Put(k, v)
return false
})
// Set new map.
i.data = data
}
}
// index_entry represents a single entry // index_entry represents a single entry
// in an Index{}, where it will be accessible // in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}. // by Key{} pointing to a containing list{}.

59
vendor/codeberg.org/gruf/go-structr/map.go generated vendored Normal file
View file

@ -0,0 +1,59 @@
package structr
// hashmap wraps a plain Go map of key strings to
// entry lists, while tracking the largest size the
// map is known to have held, so that Compact() can
// later shrink the backing storage after deletions.
type hashmap struct {
	m map[string]*list // underlying key -> entry-list map
	n int              // size high-water mark: initial cap, then max observed len(m)
}
// init allocates the underlying map sized to the given
// capacity hint, recording that hint as the starting
// size high-water mark.
func (m *hashmap) init(cap int) {
	m.n = cap
	m.m = make(map[string]*list, cap)
}
// Get returns the entry list stored under key, with an
// ok boolean indicating whether the key was present.
func (m *hashmap) Get(key string) (*list, bool) {
	l, ok := m.m[key]
	return l, ok
}
// Put stores l under key, raising the tracked size
// high-water mark whenever the map grows past it.
func (m *hashmap) Put(key string, l *list) {
	m.m[key] = l
	if sz := len(m.m); sz > m.n {
		m.n = sz
	}
}
// Delete removes the entry list stored under key.
// The size high-water mark is intentionally left
// untouched; Compact() uses it to detect shrinkage.
func (m *hashmap) Delete(key string) {
	delete(m.m, key)
}
// Compact reallocates the backing map when a large
// fraction of it has been emptied by deletions,
// releasing the excess bucket memory to the runtime.
func (m *hashmap) Compact() {
	// Small maps aren't worth
	// the cost of reallocating.
	if m.n < 2048 {
		return
	}

	// Entries freed relative to
	// the map's largest known size.
	unused := m.n - len(m.m)

	// The runtime grows a map bucket array once
	// its load factor exceeds 13/16. We compact
	// at half that ratio: i.e. once more than
	// 13/32 of the tracked maximum is empty.
	if 2*16*unused <= m.n*13 {
		return
	}

	// Rebuild at exactly the live entry count.
	fresh := make(map[string]*list, len(m.m))
	for key, val := range m.m {
		fresh[key] = val
	}

	// Swap in compacted map
	// and reset the size mark.
	m.m = fresh
	m.n = len(fresh)
}

View file

@ -214,10 +214,9 @@ func (q *Queue[T]) Debug() map[string]any {
m["indices"] = indices m["indices"] = indices
for i := range q.indices { for i := range q.indices {
var n uint64 var n uint64
q.indices[i].data.Iter(func(_ string, l *list) (stop bool) { for _, l := range q.indices[i].data.m {
n += uint64(l.len) n += uint64(l.len)
return }
})
indices[q.indices[i].name] = n indices[q.indices[i].name] = n
} }
q.mutex.Unlock() q.mutex.Unlock()
@ -331,8 +330,8 @@ func (q *Queue[T]) delete(item *indexed_item) {
// Drop this index_entry. // Drop this index_entry.
index.delete_entry(entry) index.delete_entry(entry)
// Check compact. // Check compact map.
index.compact() index.data.Compact()
} }
// Drop entry from queue list. // Drop entry from queue list.

View file

@ -73,10 +73,9 @@ func (q *QueueCtx[T]) Debug() map[string]any {
m["indices"] = indices m["indices"] = indices
for i := range q.indices { for i := range q.indices {
var n uint64 var n uint64
q.indices[i].data.Iter(func(_ string, l *list) (stop bool) { for _, l := range q.indices[i].data.m {
n += uint64(l.len) n += uint64(l.len)
return }
})
indices[q.indices[i].name] = n indices[q.indices[i].name] = n
} }
q.mutex.Unlock() q.mutex.Unlock()

View file

@ -8,18 +8,13 @@ import (
"unsafe" "unsafe"
"codeberg.org/gruf/go-mangler" "codeberg.org/gruf/go-mangler"
"github.com/modern-go/reflect2"
) )
// struct_field contains pre-prepared type // struct_field contains pre-prepared type
// information about a struct's field member, // information about a struct's field member,
// including memory offset and hash function. // including memory offset and hash function.
type struct_field struct { type struct_field struct {
rtype reflect.Type
// type2 contains the reflect2
// type information for this field,
// used in repacking it as eface.
type2 reflect2.Type
// offsets defines whereabouts in // offsets defines whereabouts in
// memory this field is located. // memory this field is located.
@ -109,25 +104,27 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
t = field.Type t = field.Type
} }
// Get field type as reflect2. // Set final type.
sfield.type2 = reflect2.Type2(t) sfield.rtype = t
// Find mangler for field type. // Find mangler for field type.
sfield.mangle = mangler.Get(t) sfield.mangle = mangler.Get(t)
// Set possible zero value and its string. // Get new zero value data ptr.
sfield.zero = sfield.type2.UnsafeNew() v := reflect.New(t).Elem()
i := sfield.type2.UnsafeIndirect(sfield.zero) zptr := eface_data(v.Interface())
sfield.zerostr = string(sfield.mangle(nil, i)) zstr := sfield.mangle(nil, zptr)
sfield.zerostr = string(zstr)
sfield.zero = zptr
return return
} }
// extract_fields extracts given structfields from the provided value type, // extract_fields extracts given structfields from the provided value type,
// this is done using predetermined struct field memory offset locations. // this is done using predetermined struct field memory offset locations.
func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any { func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer {
// Prepare slice of field ifaces. // Prepare slice of field value pointers.
ifaces := make([]any, len(fields)) ptrs := make([]unsafe.Pointer, len(fields))
for i, field := range fields { for i, field := range fields {
// loop scope. // loop scope.
@ -136,10 +133,7 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
for _, offset := range field.offsets { for _, offset := range field.offsets {
// Dereference any ptrs to offset. // Dereference any ptrs to offset.
fptr = deref(fptr, offset.derefs) fptr = deref(fptr, offset.derefs)
if fptr == nil { if fptr == nil {
// Use zero value.
fptr = field.zero
break break
} }
@ -148,11 +142,31 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
offset.offset) offset.offset)
} }
// Repack value data ptr as empty interface. if like_ptr(field.rtype) && fptr != nil {
ifaces[i] = field.type2.UnsafeIndirect(fptr) // Further dereference value ptr.
fptr = *(*unsafe.Pointer)(fptr)
} }
return ifaces if fptr == nil {
// Use zero value.
fptr = field.zero
}
ptrs[i] = fptr
}
return ptrs
}
// like_ptr returns whether type's kind is ptr-like.
func like_ptr(t reflect.Type) bool {
switch t.Kind() {
case reflect.Pointer,
reflect.Map,
reflect.Chan,
reflect.Func:
return true
}
return false
} }
// deref will dereference ptr 'n' times (or until nil). // deref will dereference ptr 'n' times (or until nil).

View file

@ -1,5 +1,7 @@
package structr package structr
import "unsafe"
// once only executes 'fn' once. // once only executes 'fn' once.
func once(fn func()) func() { func once(fn func()) func() {
var once int32 var once int32
@ -11,3 +13,9 @@ func once(fn func()) func() {
fn() fn()
} }
} }
// eface_data returns the data ptr from an empty interface.
func eface_data(a any) unsafe.Pointer {
type eface struct{ _, data unsafe.Pointer }
return (*eface)(unsafe.Pointer(&a)).data
}

4
vendor/modules.txt vendored
View file

@ -43,7 +43,7 @@ codeberg.org/gruf/go-list
# codeberg.org/gruf/go-logger/v2 v2.2.1 # codeberg.org/gruf/go-logger/v2 v2.2.1
## explicit; go 1.19 ## explicit; go 1.19
codeberg.org/gruf/go-logger/v2/level codeberg.org/gruf/go-logger/v2/level
# codeberg.org/gruf/go-mangler v1.3.0 # codeberg.org/gruf/go-mangler v1.4.0
## explicit; go 1.19 ## explicit; go 1.19
codeberg.org/gruf/go-mangler codeberg.org/gruf/go-mangler
# codeberg.org/gruf/go-maps v1.0.3 # codeberg.org/gruf/go-maps v1.0.3
@ -68,7 +68,7 @@ codeberg.org/gruf/go-storage/disk
codeberg.org/gruf/go-storage/internal codeberg.org/gruf/go-storage/internal
codeberg.org/gruf/go-storage/memory codeberg.org/gruf/go-storage/memory
codeberg.org/gruf/go-storage/s3 codeberg.org/gruf/go-storage/s3
# codeberg.org/gruf/go-structr v0.8.5 # codeberg.org/gruf/go-structr v0.8.7
## explicit; go 1.21 ## explicit; go 1.21
codeberg.org/gruf/go-structr codeberg.org/gruf/go-structr
# codeberg.org/superseriousbusiness/exif-terminator v0.7.0 # codeberg.org/superseriousbusiness/exif-terminator v0.7.0