update to cogent lab from leabra compatible branch #132

Merged: 27 commits, Dec 23, 2024
Commits
210dc22 update paths to new tensor (rcoreilly, Oct 8, 2024)
212ff08 most packages updated to new tensor (rcoreilly, Oct 8, 2024)
5fe0795 more tensor updates (rcoreilly, Oct 8, 2024)
78693e7 more goal updating (rcoreilly, Oct 14, 2024)
27a957c merged 2 commits from main fixing GetSampleShape, gen files (rcoreilly, Oct 31, 2024)
5e912d5 goal: fix actrf to use current tensor api. not going to be able to do… (rcoreilly, Oct 31, 2024)
5bb0ebf merge looper into goal (rcoreilly, Nov 5, 2024)
a0db352 Merge branch 'main' into goal (rcoreilly, Nov 14, 2024)
309c4b4 goal: first pass egui building on databrowser, env updated to new tab… (rcoreilly, Nov 15, 2024)
46b6fd7 goal: egui fix config (rcoreilly, Nov 15, 2024)
e500726 goal: looper doesn't include etime or stack directly (still uses etim… (rcoreilly, Nov 15, 2024)
dc33a5e remove ViewUpdate from egui. add OnStop function. (rcoreilly, Nov 15, 2024)
43c06fd looper uses level instead of time to index loops -- much clearer and … (rcoreilly, Nov 16, 2024)
e131ff4 major simplification and improvement for params -- use closures, so m… (rcoreilly, Nov 19, 2024)
7e5664e patgen first pass fix -- tests need work (rcoreilly, Nov 22, 2024)
4b9e697 goal: patgen test outputs updated to new format (rcoreilly, Nov 23, 2024)
0511a19 looper returns stop level, egui uses that to pass stop info, so count… (rcoreilly, Nov 23, 2024)
78a0d48 update to latest core goal revision and remove obsolete stuff -- full… (rcoreilly, Nov 25, 2024)
362e1fd fix tests (rcoreilly, Nov 25, 2024)
cd1be93 plot selected unit working, with styling (rcoreilly, Dec 1, 2024)
2bf1680 NetView plot selected unit uses tensorfs.CurRoot and databrowser.CurT… (rcoreilly, Dec 1, 2024)
4ebc8c8 netview does Current OnShow -- activated on tab switch. network goes … (rcoreilly, Dec 12, 2024)
1d210e2 netview layout fixes including counter text sizing using pw which eli… (rcoreilly, Dec 12, 2024)
ea37889 update to Dir instead of RecycleDir, and bigger min / max spinners (rcoreilly, Dec 20, 2024)
e41ccf1 go mod update to latest core (rcoreilly, Dec 20, 2024)
cdb9fc4 update to new cogent lab repo (rcoreilly, Dec 23, 2024)
38ee131 update to latest lab tabber (rcoreilly, Dec 23, 2024)
Files changed
2 changes: 1 addition & 1 deletion README.md
@@ -111,7 +111,7 @@ Here are the other packages from [Cogent Core](https://github.com/cogentcore/cor

* [envs](https://github.com/emer/envs) has misc standalone environments that can be good starting points, including managing files, visual images, etc.

- * [ttail](https://github.com/cogentcore/core/tree/main/tensor/cmd/ttail) is a `tail` program for interactively viewing tabular (csv, tsv, etc) log files in a terminal CLI environment! `go install cogentcore.org/core/tensor/cmd/ttail@latest` from anywhere to install.
+ * [ttail](https://github.com/cogentcore/core/tree/main/tensor/cmd/ttail) is a `tail` program for interactively viewing tabular (csv, tsv, etc) log files in a terminal CLI environment! `go install cogentcore.org/lab/tensor/cmd/ttail@latest` from anywhere to install.

* [eTorch](https://github.com/emer/etorch) is the emergent interface to PyTorch models, providing emergent GUI NetView etc for these models.

40 changes: 20 additions & 20 deletions actrf/actrf.go
@@ -7,8 +7,10 @@ package actrf
//go:generate core generate -add-types

import (
"cogentcore.org/core/tensor"
"cogentcore.org/core/tensor/stats/norm"
"slices"

"cogentcore.org/lab/stats/stats"
"cogentcore.org/lab/tensor"
)

// RF is used for computing an activation-based receptive field.
@@ -59,17 +61,15 @@ func (af *RF) InitShape(act, src tensor.Tensor) []int {
aNy, aNx, _, _ := tensor.Projection2DShape(act.Shape(), false)
sNy, sNx, _, _ := tensor.Projection2DShape(src.Shape(), false)
oshp := []int{aNy, aNx, sNy, sNx}
- if tensor.EqualInts(af.RF.Shp.Sizes, oshp) {
+ if slices.Equal(af.RF.Shape().Sizes, oshp) {
return oshp
}
- snm := []string{"ActY", "ActX", "SrcY", "SrcX"}
sshp := []int{sNy, sNx}
- ssnm := []string{"SrcY", "SrcX"}
- af.RF.SetShape(oshp, snm...)
- af.NormRF.SetShape(oshp, snm...)
- af.SumProd.SetShape(oshp, snm...)
- af.NormSrc.SetShape(sshp, ssnm...)
- af.SumSrc.SetShape(sshp, ssnm...)
+ af.RF.SetShapeSizes(oshp...)
+ af.NormRF.SetShapeSizes(oshp...)
+ af.SumProd.SetShapeSizes(oshp...)
+ af.NormSrc.SetShapeSizes(sshp...)
+ af.SumSrc.SetShapeSizes(sshp...)

af.ConfigView(&af.RF)
af.ConfigView(&af.NormRF)
@@ -81,10 +81,11 @@ func (af *RF) InitShape(act, src tensor.Tensor) []int {

// ConfigView configures the view params on the tensor
func (af *RF) ConfigView(tsr *tensor.Float32) {
tsr.SetMetaData("colormap", "Viridis")
tsr.SetMetaData("grid-fill", "1") // remove extra lines
tsr.SetMetaData("fix-min", "true")
tsr.SetMetaData("min", "0")
// todo:meta
// tsr.SetMetaData("colormap", "Viridis")
// tsr.SetMetaData("grid-fill", "1") // remove extra lines
// tsr.SetMetaData("fix-min", "true")
// tsr.SetMetaData("min", "0")
}

// Reset reinitializes the Sum accumulators -- must have called Init first
@@ -106,11 +107,11 @@ func (af *RF) Add(act, src tensor.Tensor, thr float32) {
if tv < thr {
continue
}
- af.SumSrc.AddScalar([]int{sy, sx}, float64(tv))
+ af.SumSrc.SetAdd(tv, sy, sx)
for ay := 0; ay < aNy; ay++ {
for ax := 0; ax < aNx; ax++ {
av := float32(tensor.Projection2DValue(act, false, ay, ax))
- af.SumProd.AddScalar([]int{ay, ax, sy, sx}, float64(av*tv))
+ af.SumProd.SetAdd(av*tv, ay, ax, sy, sx)
}
}
}
@@ -126,7 +127,7 @@ func (af *RF) Avg() {
var maxSrc float32
for sy := 0; sy < sNy; sy++ {
for sx := 0; sx < sNx; sx++ {
- src := af.SumSrc.Value([]int{sy, sx})
+ src := af.SumSrc.Value(sy, sx)
if src == 0 {
continue
}
@@ -135,7 +136,7 @@ func (af *RF) Avg() {
}
for ay := 0; ay < aNy; ay++ {
for ax := 0; ax < aNx; ax++ {
- oo := af.SumProd.Shape().Offset([]int{ay, ax, sy, sx})
+ oo := af.SumProd.Shape().IndexTo1D(ay, ax, sy, sx)
af.RF.Values[oo] = af.SumProd.Values[oo] / src
}
}
@@ -151,8 +152,7 @@ func (af *RF) Avg() {

// Norm computes unit norm of RF values -- must be called after Avg
func (af *RF) Norm() {
- af.NormRF.CopyFrom(&af.RF)
- norm.TensorUnit(&af.NormRF, 2) // 2 = norm within outer 2 dims = norm each src within
+ stats.UnitNormOut(&af.RF, &af.NormRF)
}

// AvgNorm computes RF as SumProd / SumTarg and then does Norm.
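Taken together, the actrf changes show the shape of the new cogentcore.org/lab/tensor API: variadic index arguments replace []int slices, and named dimensions drop out of shape setting. A minimal sketch assembled only from calls visible in this diff — illustrative of the migration pattern, not a verified tour of the lab API:

package main

import (
	"fmt"
	"slices"

	"cogentcore.org/lab/tensor"
)

func main() {
	// Variadic sizes replace SetShape([]int{...}, names...).
	rf := tensor.NewFloat32(2, 2, 3, 3) // ActY, ActX, SrcY, SrcX

	// slices.Equal replaces the old tensor.EqualInts helper.
	fmt.Println(slices.Equal(rf.Shape().Sizes, []int{2, 2, 3, 3}))

	// SetAdd(value, indices...) replaces AddScalar([]int{...}, value).
	rf.SetAdd(0.5, 1, 1, 2, 2)

	// Value takes variadic indices instead of an []int.
	fmt.Println(rf.Value(1, 1, 2, 2)) // 0.5

	// Shape().IndexTo1D replaces Shape().Offset for flat indexing.
	oo := rf.Shape().IndexTo1D(1, 1, 2, 2)
	fmt.Println(rf.Values[oo]) // 0.5
}

The same substitutions (SetShapeSizes, SetAdd, Value, IndexTo1D, SetShapeFrom) recur in every file below.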
2 changes: 1 addition & 1 deletion actrf/actrfs.go
@@ -8,7 +8,7 @@ import (
"fmt"

"cogentcore.org/core/base/errors"
"cogentcore.org/core/tensor"
"cogentcore.org/lab/tensor"
)

// RFs manages multiple named RF's -- each one must be initialized first
4 changes: 2 additions & 2 deletions actrf/mpi.go
@@ -5,8 +5,8 @@
package actrf

import (
"cogentcore.org/core/base/mpi"
"cogentcore.org/core/tensor/tensormpi"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/tensor/tensormpi"
)

// MPISum aggregates RF Sum data across all processors in given mpi communicator.
7 changes: 3 additions & 4 deletions actrf/running.go
@@ -4,7 +4,7 @@

package actrf

import "cogentcore.org/core/tensor"
import "cogentcore.org/lab/tensor"

// RunningAvg computes a running-average activation-based receptive field
// for activities act relative to source activations src (the thing we're projecting rf onto)
@@ -17,15 +17,14 @@ func RunningAvg(out *tensor.Float32, act, src tensor.Tensor, tau float32) {
aNy, aNx, _, _ := tensor.Projection2DShape(act.Shape(), false)
tNy, tNx, _, _ := tensor.Projection2DShape(src.Shape(), false)
oshp := []int{aNy, aNx, tNy, tNx}
- out.SetShape(oshp, "ActY", "ActX", "SrcY", "SrcX")
+ out.SetShapeSizes(oshp...)
for ay := 0; ay < aNy; ay++ {
for ax := 0; ax < aNx; ax++ {
av := float32(tensor.Projection2DValue(act, false, ay, ax))
for ty := 0; ty < tNy; ty++ {
for tx := 0; tx < tNx; tx++ {
tv := float32(tensor.Projection2DValue(src, false, ty, tx))
- oi := []int{ay, ax, ty, tx}
- oo := out.Shape().Offset(oi)
+ oo := out.Shape().IndexTo1D(ay, ax, ty, tx)
ov := out.Values[oo]
nv := cdt*ov + dt*tv*av
out.Values[oo] = nv
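The update in the inner loop of RunningAvg is a standard exponential moving average over the outer product of act and src. A hedged scalar sketch, assuming dt = 1/tau and cdt = 1 - dt (those definitions sit in lines elided from this hunk):

package main

import "fmt"

// ema mirrors the diff's per-cell update, nv := cdt*ov + dt*tv*av,
// under the assumption that dt = 1/tau and cdt = 1 - dt.
func ema(prev, av, tv, tau float32) float32 {
	dt := 1 / tau
	cdt := 1 - dt
	return cdt*prev + dt*av*tv
}

func main() {
	v := float32(0)
	for i := 0; i < 3; i++ {
		v = ema(v, 1, 1, 10) // converges toward av*tv = 1
	}
	fmt.Println(v) // 0.271 after three steps
}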
2 changes: 1 addition & 1 deletion chem/stater.go
@@ -4,7 +4,7 @@

package chem

import "cogentcore.org/core/tensor/table"
import "cogentcore.org/lab/table"

// The Stater interface defines the functions implemented for State
// structures containing chem state variables.
35 changes: 1 addition & 34 deletions confusion/confusion.go
@@ -6,14 +6,7 @@ package confusion

//go:generate core generate -add-types

- import (
- "fmt"
- "math"
-
- "cogentcore.org/core/core"
- "cogentcore.org/core/tensor"
- "cogentcore.org/core/tensor/stats/simat"
- )
+ /*

// Matrix computes the confusion matrix, with rows representing
// the ground truth correct class, and columns representing the
@@ -208,30 +201,4 @@ func (cm *Matrix) SaveCSV(fname core.Filename) {
func (cm *Matrix) OpenCSV(fname core.Filename) {
tensor.OpenCSV(&cm.Prob, fname, ',')
}

- /*
- var MatrixProps = tree.Props{
- "ToolBar": tree.PropSlice{
- {"SaveCSV", tree.Props{
- "label": "Save CSV...",
- "icon": "file-save",
- "desc": "Save CSV-formatted confusion probabilities (Probs)",
- "Args": tree.PropSlice{
- {"CSV File Name", tree.Props{
- "ext": ".csv",
- }},
- },
- }},
- {"OpenCSV", tree.Props{
- "label": "Open CSV...",
- "icon": "file-open",
- "desc": "Open CSV-formatted confusion probabilities (Probs)",
- "Args": tree.PropSlice{
- {"Weights File Name", tree.Props{
- "ext": ".csv",
- }},
- },
- }},
- },
- }
- */
9 changes: 0 additions & 9 deletions confusion/typegen.go

This file was deleted.

12 changes: 6 additions & 6 deletions decoder/linear.go
@@ -9,9 +9,9 @@ package decoder
import (
"fmt"

"cogentcore.org/core/base/mpi"
"cogentcore.org/core/math32"
"cogentcore.org/core/tensor"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/tensor"
)

type ActivationFunc func(float32) float32
@@ -61,7 +61,7 @@ type Linear struct {
// Layer is the subset of emer.Layer that is used by this code
type Layer interface {
Name() string
- UnitValuesTensor(tsr tensor.Tensor, varNm string, di int) error
+ UnitValuesTensor(tsr tensor.Values, varNm string, di int) error
Shape() *tensor.Shape
}

@@ -111,7 +111,7 @@ func (dec *Linear) Init(nOutputs, nInputs int, poolIndex int, activationFn Activ
dec.NOutputs = nOutputs
dec.Units = make([]LinearUnit, dec.NOutputs)
dec.Inputs = make([]float32, dec.NInputs)
- dec.Weights.SetShape([]int{dec.NOutputs, dec.NInputs}, "Outputs", "Inputs")
+ dec.Weights.SetShapeSizes(dec.NOutputs, dec.NInputs)
for i := range dec.Weights.Values {
dec.Weights.Values[i] = 0.1
}
@@ -207,7 +207,7 @@ func (dec *Linear) Input(varNm string, di int) {
shape := ly.Shape()
y := dec.PoolIndex / shape.DimSize(1)
x := dec.PoolIndex % shape.DimSize(1)
- tsr = tsr.SubSpace([]int{y, x}).(*tensor.Float32)
+ tsr = tsr.SubSpace(y, x).(*tensor.Float32)
}
for j, v := range tsr.Values {
dec.Inputs[off+j] = v
@@ -259,7 +259,7 @@ func (dec *Linear) Back() float32 {
// Returns SSE (sum squared error) of difference between targets and outputs.
func (dec *Linear) BackMPI() float32 {
if dec.MPIDWts.Len() != dec.Weights.Len() {
- dec.MPIDWts.CopyShapeFrom(&dec.Weights)
+ tensor.SetShapeFrom(&dec.MPIDWts, &dec.Weights)
}
var sse float32
for ui := range dec.Units {
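BackMPI above switches from the removed CopyShapeFrom method to the package-level tensor.SetShapeFrom, and the test code below pairs it with CopyFrom. A small sketch of the replacement pattern; the fillFrom helper is hypothetical, not part of the decoder package:

package main

import (
	"fmt"

	"cogentcore.org/lab/tensor"
)

// fillFrom resizes dst to match src and then copies the values --
// the two-step idiom this diff adopts in BackMPI and in the tests.
func fillFrom(dst, src tensor.Values) {
	tensor.SetShapeFrom(dst, src)
	dst.CopyFrom(src)
}

func main() {
	src := tensor.NewFloat32(2, 3)
	src.SetAdd(1.5, 1, 2)
	dst := tensor.NewFloat32(1) // any initial shape; fillFrom resizes it
	fillFrom(dst, src)
	fmt.Println(dst.Value(1, 2)) // 1.5
}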
32 changes: 16 additions & 16 deletions decoder/linear_test.go
@@ -8,25 +8,25 @@ import (
"fmt"
"testing"

"cogentcore.org/core/tensor"
"cogentcore.org/lab/tensor"
"github.com/stretchr/testify/assert"
)

// TestLayer implements a Layer
type TestLayer struct {
- tensors map[string]tensor.Tensor
+ tensors map[string]tensor.Values
}

func (tl *TestLayer) Name() string {
return "TestLayer"
}

- func (tl *TestLayer) UnitValuesTensor(tsr tensor.Tensor, varNm string, di int) error {
+ func (tl *TestLayer) UnitValuesTensor(tsr tensor.Values, varNm string, di int) error {
src, ok := tl.tensors[varNm]
if !ok {
return fmt.Errorf("bad key: %s", varNm)
}
- tsr.CopyShapeFrom(src)
+ tensor.SetShapeFrom(tsr, src)
tsr.CopyFrom(src)
return nil
}
@@ -94,58 +94,58 @@ func TestLinearLogistic(t *testing.T) {

func TestInputPool1D(t *testing.T) {
dec := Linear{}
- shape := tensor.NewShape([]int{1, 5, 6, 6})
+ shape := tensor.NewShape(1, 5, 6, 6)
vals := make([]float32, shape.Len())
for i := range vals {
vals[i] = float32(i)
}
- tsr := tensor.NewFloat32(shape.Sizes)
+ tsr := tensor.NewFloat32(shape.Sizes...)
tsr.SetNumRows(1)
for i := range tsr.Values {
tsr.Values[i] = vals[i]
}
- layer := TestLayer{tensors: map[string]tensor.Tensor{"var0": tsr}}
+ layer := TestLayer{tensors: map[string]tensor.Values{"var0": tsr}}
dec.InitPool(2, &layer, 0, IdentityFunc)
dec.Input("var0", 0)
- expected := tsr.SubSpace([]int{0, 0}).(*tensor.Float32).Values
+ expected := tsr.SubSpace(0, 0).(*tensor.Float32).Values
assert.Equal(t, expected, dec.Inputs)

dec.InitPool(2, &layer, 1, IdentityFunc)
dec.Input("var0", 0)
- expected = tsr.SubSpace([]int{0, 1}).(*tensor.Float32).Values
+ expected = tsr.SubSpace(0, 1).(*tensor.Float32).Values
assert.Equal(t, expected, dec.Inputs)
}

func TestInputPool2D(t *testing.T) {
dec := Linear{}
- shape := tensor.NewShape([]int{2, 5, 6, 6})
+ shape := tensor.NewShape(2, 5, 6, 6)
vals := make([]float32, shape.Len())
for i := range vals {
vals[i] = float32(i)
}
- tsr := tensor.NewFloat32(shape.Sizes)
+ tsr := tensor.NewFloat32(shape.Sizes...)
for i := range tsr.Values {
tsr.Values[i] = vals[i]
}

- layer := TestLayer{tensors: map[string]tensor.Tensor{"var0": tsr}}
+ layer := TestLayer{tensors: map[string]tensor.Values{"var0": tsr}}
dec.InitPool(2, &layer, 0, IdentityFunc)
dec.Input("var0", 0)
- expected := tsr.SubSpace([]int{0, 0}).(*tensor.Float32).Values
+ expected := tsr.SubSpace(0, 0).(*tensor.Float32).Values
assert.Equal(t, expected, dec.Inputs)

dec.InitPool(2, &layer, 1, IdentityFunc)
dec.Input("var0", 0)
- expected = tsr.SubSpace([]int{0, 1}).(*tensor.Float32).Values
+ expected = tsr.SubSpace(0, 1).(*tensor.Float32).Values
assert.Equal(t, expected, dec.Inputs)

dec.InitPool(2, &layer, 5, IdentityFunc)
dec.Input("var0", 0)
- expected = tsr.SubSpace([]int{1, 0}).(*tensor.Float32).Values
+ expected = tsr.SubSpace(1, 0).(*tensor.Float32).Values
assert.Equal(t, expected, dec.Inputs)

dec.InitPool(2, &layer, 9, IdentityFunc)
dec.Input("var0", 0)
- expected = tsr.SubSpace([]int{1, 4}).(*tensor.Float32).Values
+ expected = tsr.SubSpace(1, 4).(*tensor.Float32).Values
assert.Equal(t, expected, dec.Inputs)
}
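The pool tests above also pin down the index arithmetic: for a [2][5][6][6] layer, pool index 9 with 5 pools per row maps to outer coordinates (1, 4), since 9 = 1*5 + 4. A short sketch of the variadic constructors and SubSpace access, assuming (as the tests do) that SubSpace on a Float32 type-asserts back to *tensor.Float32:

package main

import (
	"fmt"

	"cogentcore.org/lab/tensor"
)

func main() {
	// NewShape and NewFloat32 now take variadic sizes.
	shape := tensor.NewShape(2, 5, 6, 6)
	tsr := tensor.NewFloat32(shape.Sizes...)

	// SubSpace takes the leading indices directly: the 6x6 pool at
	// outer position (1, 4) corresponds to pool index 9 above.
	pool := tsr.SubSpace(1, 4).(*tensor.Float32)
	fmt.Println(len(pool.Values)) // 36 values in the 6x6 pool
}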
8 changes: 4 additions & 4 deletions decoder/softmax.go
@@ -15,9 +15,9 @@ import (
"path/filepath"
"sort"

"cogentcore.org/core/base/mpi"
"cogentcore.org/core/math32"
"cogentcore.org/core/tensor"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/emer"
)

@@ -93,7 +93,7 @@ func (sm *SoftMax) Init(ncats, ninputs int) {
sm.Units = make([]SoftMaxUnit, ncats)
sm.Sorted = make([]int, ncats)
sm.Inputs = make([]float32, sm.NInputs)
- sm.Weights.SetShape([]int{sm.NCats, sm.NInputs}, "Cats", "Inputs")
+ sm.Weights.SetShapeSizes(sm.NCats, sm.NInputs)
for i := range sm.Weights.Values {
sm.Weights.Values[i] = .1
}
@@ -215,7 +215,7 @@ func (sm *SoftMax) Back() {
// MPI version shares weight changes across nodes
func (sm *SoftMax) BackMPI() {
if sm.MPIDWts.Len() != sm.Weights.Len() {
- sm.MPIDWts.CopyShapeFrom(&sm.Weights)
+ tensor.SetShapeFrom(&sm.MPIDWts, &sm.Weights)
}
lr := sm.Lrate
for ui := range sm.Units {