diff --git a/README.md b/README.md index 883d573a..afa538cc 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ As of v1.8.0, _data parallel_ processing of multiple input patterns in parallel ```Go net.InitExt(ctx) // clear any existing inputs -for di := uint32(0); di < ctx.NetIndexes.NData; di++ { +for di := uint32(0); di < ctx.NData; di++ { ev.Step() for _, lnm := range lays { ly := ss.Net.AxonLayerByName(lnm) @@ -128,7 +128,7 @@ func (ss *Sim) NetViewCounters(tm etime.Times) { case time == etime.Trial: trl := ss.Stats.Int("Trial") row = trl - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Stats.SetInt("Trial", trl+di) ss.TrialStats(di) ss.StatCounters(di) diff --git a/axon/act-path.go b/axon/act-path.go index 363d2136..d5049f80 100644 --- a/axon/act-path.go +++ b/axon/act-path.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line act-path.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -56,19 +56,29 @@ type SynComParams struct { // type of conductance (G) communicated by this pathway GType PathGTypes - // additional synaptic delay in msec for inputs arriving at this pathway. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Path in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value. + // additional synaptic delay in msec for inputs arriving at this pathway. + // Must be <= MaxDelay which is set during network building based on MaxDelay + // of any existing Path in the network. Delay = 0 means a spike reaches + // receivers in the next Cycle, which is the minimum time (1 msec). + // Biologically, subtract 1 from biological synaptic delay values to set + // corresponding Delay value. Delay uint32 `min:"0" default:"2"` - // maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any pathway in the network. + // maximum value of Delay, based on MaxDelay values when the BuildGBuf + // function was called during [Network.Build]. Cannot set it longer than this, + // except by calling BuildGBuf on network after changing MaxDelay to a larger + // value in any pathway in the network. MaxDelay uint32 `edit:"-"` - // probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt) + // probability of synaptic transmission failure: if > 0, then weights are + // turned off at random as a function of PFail (times 1-SWt if PFailSwt). PFail float32 - // if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt))) + // if true, then probability of failure is inversely proportional to SWt + // structural / slow weight value (i.e., multiply PFail * (1-SWt))). 
PFailSWt slbool.Bool - // delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed + // delay length = actual length of the GBuf buffer per neuron = Delay+1; just for speed DelLen uint32 `display:"-"` pad, pad1 float32 diff --git a/axon/act-path.goal b/axon/act-path.goal index 2d41464d..7e905438 100644 --- a/axon/act-path.goal +++ b/axon/act-path.goal @@ -54,19 +54,29 @@ type SynComParams struct { // type of conductance (G) communicated by this pathway GType PathGTypes - // additional synaptic delay in msec for inputs arriving at this pathway. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Path in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value. + // additional synaptic delay in msec for inputs arriving at this pathway. + // Must be <= MaxDelay which is set during network building based on MaxDelay + // of any existing Path in the network. Delay = 0 means a spike reaches + // receivers in the next Cycle, which is the minimum time (1 msec). + // Biologically, subtract 1 from biological synaptic delay values to set + // corresponding Delay value. Delay uint32 `min:"0" default:"2"` - // maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any pathway in the network. + // maximum value of Delay, based on MaxDelay values when the BuildGBuf + // function was called during [Network.Build]. Cannot set it longer than this, + // except by calling BuildGBuf on network after changing MaxDelay to a larger + // value in any pathway in the network. MaxDelay uint32 `edit:"-"` - // probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt) + // probability of synaptic transmission failure: if > 0, then weights are + // turned off at random as a function of PFail (times 1-SWt if PFailSwt). PFail float32 - // if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt))) + // if true, then probability of failure is inversely proportional to SWt + // structural / slow weight value (i.e., multiply PFail * (1-SWt))). PFailSWt slbool.Bool - // delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed + // delay length = actual length of the GBuf buffer per neuron = Delay+1; just for speed DelLen uint32 `display:"-"` pad, pad1 float32 diff --git a/axon/act.go b/axon/act.go index af07d8af..76b64d77 100644 --- a/axon/act.go +++ b/axon/act.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line act.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file.
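The change applied throughout this diff is that `NData` is now a direct field on `Context` rather than living under `ctx.NetIndexes`. A minimal sketch (not part of the patch) combining the README hunk above with the `basic_test.go` hunks below; `net`, `nData`, and the loop body are placeholders, and only the `ctx.NData` accesses reflect this patch:

```Go
// Sketch only: NData moved from ctx.NetIndexes.NData to a direct Context field.
// net, nData, and the loop body are placeholders in the style of the README
// snippet above.
net.Build(ctx)             // after Build, set the number of data-parallel inputs:
ctx.NData = uint32(nData)  // was: ctx.NetIndexes.NData = uint32(nData)
for di := uint32(0); di < ctx.NData; di++ {
	// apply external inputs / compute stats for data-parallel index di
}
```

The same substitution appears in the `NetViewCounters` loop in the README hunk and in `RunDebugAct` / `RunDebugLearn` in `basic_test.go` below.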
diff --git a/axon/basic_test.go b/axon/basic_test.go index 8cf74257..2953a5f3 100644 --- a/axon/basic_test.go +++ b/axon/basic_test.go @@ -106,7 +106,7 @@ func newTestNet(ctx *Context, nData int) *Network { testNet.Rubicon.Defaults() testNet.Build(ctx) - ctx.NetIndexes.NData = uint32(nData) + ctx.NData = uint32(nData) testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // false) // true) // no msg testNet.InitWeights(ctx) // get GScale here @@ -131,7 +131,7 @@ func newTestNetFull(ctx *Context, nData int) *Network { testNet.ConnectLayers(outLay, hidLay, full, BackPath) testNet.Build(ctx) - ctx.NetIndexes.NData = uint32(nData) + ctx.NData = uint32(nData) testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // false) // true) // no msg testNet.InitWeights(ctx) // get GScale here @@ -563,7 +563,7 @@ func NetDebugAct(t *testing.T, printValues bool, gpu bool, nData int, initWts bo // fine-grained diff test, e.g., see the GPU version. func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, gpu bool, initWts bool) map[string]float32 { - nData := int(ctx.NetIndexes.NData) + nData := int(ctx.NData) valMap := make(map[string]float32) inPats := newInPats() inLay := testNet.LayerByName("Input") @@ -1025,7 +1025,7 @@ func NetDebugLearn(t *testing.T, printValues bool, gpu bool, maxData, nData int, testNet.ApplyParams(ParamSets["SubMean"], false) } - ctx.NetIndexes.NData = uint32(nData) + ctx.NData = uint32(nData) return RunDebugLearn(t, ctx, testNet, printValues, gpu, initWts, slowAdapt) } @@ -1034,7 +1034,7 @@ func NetDebugLearn(t *testing.T, printValues bool, gpu bool, maxData, nData int, // fine-grained diff test, e.g., see the GPU version. func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues bool, gpu bool, initWts, slowAdapt bool) map[string]float32 { - nData := int(ctx.NetIndexes.NData) + nData := int(ctx.NData) valMap := make(map[string]float32) inPats := newInPats() inLay := testNet.LayerByName("Input") diff --git a/axon/enumgen.go b/axon/enumgen.go index db8498e8..2a6add4b 100644 --- a/axon/enumgen.go +++ b/axon/enumgen.go @@ -6,16 +6,16 @@ import ( "cogentcore.org/core/enums" ) -var _PathGTypesValues = []PathGTypes{0, 1, 2, 3, 4, 17} +var _PathGTypesValues = []PathGTypes{0, 1, 2, 3, 4, 26} // PathGTypesN is the highest valid value for type PathGTypes, plus one. -const PathGTypesN PathGTypes = 18 +const PathGTypesN PathGTypes = 27 -var _PathGTypesValueMap = map[string]PathGTypes{`ExcitatoryG`: 0, `InhibitoryG`: 1, `ModulatoryG`: 2, `MaintG`: 3, `ContextG`: 4, `PathGTypesN`: 17} +var _PathGTypesValueMap = map[string]PathGTypes{`ExcitatoryG`: 0, `InhibitoryG`: 1, `ModulatoryG`: 2, `MaintG`: 3, `ContextG`: 4, `PathGTypesN`: 26} -var _PathGTypesDescMap = map[PathGTypes]string{0: `Excitatory pathways drive Ge conductance on receiving neurons, which send to GiRaw and GiSyn neuron variables.`, 1: `Inhibitory pathways drive Gi inhibitory conductance, which send to GiRaw and GiSyn neuron variables.`, 2: `Modulatory pathways have a multiplicative effect on other inputs, which send to GModRaw and GModSyn neuron variables.`, 3: `Maintenance pathways drive unique set of NMDA channels that support strong active maintenance abilities. 
Send to GMaintRaw and GMaintSyn neuron variables.`, 4: `Context pathways are for inputs to CT layers, which update only at the end of the plus phase, and send to CtxtGe.`, 17: ``} +var _PathGTypesDescMap = map[PathGTypes]string{0: `Excitatory pathways drive Ge conductance on receiving neurons, which send to GiRaw and GiSyn neuron variables.`, 1: `Inhibitory pathways drive Gi inhibitory conductance, which send to GiRaw and GiSyn neuron variables.`, 2: `Modulatory pathways have a multiplicative effect on other inputs, which send to GModRaw and GModSyn neuron variables.`, 3: `Maintenance pathways drive unique set of NMDA channels that support strong active maintenance abilities. Send to GMaintRaw and GMaintSyn neuron variables.`, 4: `Context pathways are for inputs to CT layers, which update only at the end of the plus phase, and send to CtxtGe.`, 26: ``} -var _PathGTypesMap = map[PathGTypes]string{0: `ExcitatoryG`, 1: `InhibitoryG`, 2: `ModulatoryG`, 3: `MaintG`, 4: `ContextG`, 17: `PathGTypesN`} +var _PathGTypesMap = map[PathGTypes]string{0: `ExcitatoryG`, 1: `InhibitoryG`, 2: `ModulatoryG`, 3: `MaintG`, 4: `ContextG`, 26: `PathGTypesN`} // String returns the string representation of this PathGTypes value. func (i PathGTypes) String() string { return enums.String(i, _PathGTypesMap) } @@ -49,16 +49,16 @@ func (i *PathGTypes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "PathGTypes") } -var _GlobalScalarVarsValues = []GlobalScalarVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 69} +var _GlobalScalarVarsValues = []GlobalScalarVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 78} // GlobalScalarVarsN is the highest valid value for type GlobalScalarVars, plus one. 
-const GlobalScalarVarsN GlobalScalarVars = 70 +const GlobalScalarVarsN GlobalScalarVars = 79 -var _GlobalScalarVarsValueMap = map[string]GlobalScalarVars{`GvRew`: 0, `GvHasRew`: 1, `GvRewPred`: 2, `GvPrevPred`: 3, `GvHadRew`: 4, `GvDA`: 5, `GvDAtonic`: 6, `GvACh`: 7, `GvNE`: 8, `GvSer`: 9, `GvAChRaw`: 10, `GvGoalMaint`: 11, `GvVSMatrixJustGated`: 12, `GvVSMatrixHasGated`: 13, `GvCuriosityPoolGated`: 14, `GvTime`: 15, `GvEffort`: 16, `GvUrgencyRaw`: 17, `GvUrgency`: 18, `GvHasPosUS`: 19, `GvHadPosUS`: 20, `GvNegUSOutcome`: 21, `GvHadNegUSOutcome`: 22, `GvPVposSum`: 23, `GvPVpos`: 24, `GvPVnegSum`: 25, `GvPVneg`: 26, `GvPVposEst`: 27, `GvPVposVar`: 28, `GvPVnegEst`: 29, `GvPVnegVar`: 30, `GvGoalDistEst`: 31, `GvGoalDistPrev`: 32, `GvProgressRate`: 33, `GvGiveUpUtility`: 34, `GvContUtility`: 35, `GvGiveUpTiming`: 36, `GvContTiming`: 37, `GvGiveUpProgress`: 38, `GvContProgress`: 39, `GvGiveUpSum`: 40, `GvContSum`: 41, `GvGiveUpProb`: 42, `GvGiveUp`: 43, `GvGaveUp`: 44, `GvVSPatchPos`: 45, `GvVSPatchPosThr`: 46, `GvVSPatchPosRPE`: 47, `GvVSPatchPosSum`: 48, `GvVSPatchPosPrev`: 49, `GvVSPatchPosVar`: 50, `GvLHbDip`: 51, `GvLHbBurst`: 52, `GvLHbPVDA`: 53, `GvCeMpos`: 54, `GvCeMneg`: 55, `GvVtaDA`: 56, `GlobalScalarVarsN`: 69} +var _GlobalScalarVarsValueMap = map[string]GlobalScalarVars{`GvRew`: 0, `GvHasRew`: 1, `GvRewPred`: 2, `GvPrevPred`: 3, `GvHadRew`: 4, `GvDA`: 5, `GvDAtonic`: 6, `GvACh`: 7, `GvNE`: 8, `GvSer`: 9, `GvAChRaw`: 10, `GvGoalMaint`: 11, `GvVSMatrixJustGated`: 12, `GvVSMatrixHasGated`: 13, `GvCuriosityPoolGated`: 14, `GvTime`: 15, `GvEffort`: 16, `GvUrgencyRaw`: 17, `GvUrgency`: 18, `GvHasPosUS`: 19, `GvHadPosUS`: 20, `GvNegUSOutcome`: 21, `GvHadNegUSOutcome`: 22, `GvPVposSum`: 23, `GvPVpos`: 24, `GvPVnegSum`: 25, `GvPVneg`: 26, `GvPVposEst`: 27, `GvPVposVar`: 28, `GvPVnegEst`: 29, `GvPVnegVar`: 30, `GvGoalDistEst`: 31, `GvGoalDistPrev`: 32, `GvProgressRate`: 33, `GvGiveUpUtility`: 34, `GvContUtility`: 35, `GvGiveUpTiming`: 36, `GvContTiming`: 37, `GvGiveUpProgress`: 38, `GvContProgress`: 39, `GvGiveUpSum`: 40, `GvContSum`: 41, `GvGiveUpProb`: 42, `GvGiveUp`: 43, `GvGaveUp`: 44, `GvVSPatchPos`: 45, `GvVSPatchPosThr`: 46, `GvVSPatchPosRPE`: 47, `GvVSPatchPosSum`: 48, `GvVSPatchPosPrev`: 49, `GvVSPatchPosVar`: 50, `GvLHbDip`: 51, `GvLHbBurst`: 52, `GvLHbPVDA`: 53, `GvCeMpos`: 54, `GvCeMneg`: 55, `GvVtaDA`: 56, `GlobalScalarVarsN`: 78} -var _GlobalScalarVarsDescMap = map[GlobalScalarVars]string{0: `Rew is the external reward value. Must also set HasRew flag when Rew is set, otherwise it is ignored. This is computed by the Rubicon algorithm from US inputs set by Net.Rubicon methods, and can be directly set in simpler RL cases.`, 1: `HasRew must be set to true (1) when an external reward / US input is present, otherwise Rew is ignored. This is also set when Rubicon BOA model gives up. This drives ACh release in the Rubicon model.`, 2: `RewPred is the reward prediction, computed by a special reward prediction layer, e.g., the VSPatch layer in the Rubicon algorithm.`, 3: `PrevPred is previous time step reward prediction, e.g., for TDPredLayer`, 4: `HadRew is HasRew state from the previous trial, copied from HasRew in NewState. Used for updating Effort, Urgency at start of new trial.`, 5: `DA is phasic dopamine that drives learning moreso than performance, representing reward prediction error, signaled as phasic increases or decreases in activity relative to a tonic baseline, which is represented by a value of 0. 
Released by the VTA (ventral tegmental area), or SNc (substantia nigra pars compacta).`, 6: `DAtonic is tonic dopamine, which has modulatory instead of learning effects. Increases can drive greater propensity to engage in activities by biasing Go vs No pathways in the basal ganglia, for example as a function of Urgency.`, 7: `ACh is acetylcholine, activated by salient events, particularly at the onset of a reward / punishment outcome (US), or onset of a conditioned stimulus (CS). Driven by BLA -> PPtg that detects changes in BLA activity, via LDTLayer type.`, 8: `NE is norepinepherine -- not yet in use`, 9: `Ser is serotonin -- not yet in use`, 10: `AChRaw is raw ACh value used in updating global ACh value by LDTLayer.`, 11: `GoalMaint is the normalized (0-1) goal maintenance activity, set in ApplyRubicon function at start of trial. Drives top-down inhibition of LDT layer / ACh activity.`, 12: `VSMatrixJustGated is VSMatrix just gated (to engage goal maintenance in PFC areas), set at end of plus phase. This excludes any gating happening at time of US.`, 13: `VSMatrixHasGated is VSMatrix has gated since the last time HasRew was set (US outcome received or expected one failed to be received).`, 14: `CuriosityPoolGated is true if VSMatrixJustGated and the first pool representing the curiosity / novelty drive gated. This can change the giving up Effort.Max parameter.`, 15: `Time is the raw time counter, incrementing upward during goal engaged window. This is also copied directly into NegUS[0] which tracks time, but we maintain a separate effort value to make it clearer.`, 16: `Effort is the raw effort counter, incrementing upward for each effort step during goal engaged window. This is also copied directly into NegUS[1] which tracks effort, but we maintain a separate effort value to make it clearer.`, 17: `UrgencyRaw is the raw effort for urgency, incrementing upward from effort increments per step when _not_ goal engaged.`, 18: `Urgency is the overall urgency activity level (normalized 0-1), computed from logistic function of GvUrgencyRaw. 
This drives DAtonic activity to increasingly bias Go firing.`, 19: `HasPosUS indicates has positive US on this trial, drives goal accomplishment logic and gating.`, 20: `HadPosUS is state from the previous trial (copied from HasPosUS in NewState).`, 21: `NegUSOutcome indicates that a phasic negative US stimulus was experienced, driving phasic ACh, VSMatrix gating to reset current goal engaged plan (if any), and phasic dopamine based on the outcome.`, 22: `HadNegUSOutcome is state from the previous trial (copied from NegUSOutcome in NewState)`, 23: `PVposSum is the total weighted positive valence primary value = sum of Weight * USpos * Drive`, 24: `PVpos is the normalized positive valence primary value = (1 - 1/(1+PVposGain * PVposSum))`, 25: `PVnegSum is the total weighted negative valence primary values including costs = sum of Weight * Cost + Weight * USneg`, 26: `PVpos is the normalized negative valence primary values, including costs = (1 - 1/(1+PVnegGain * PVnegSum))`, 27: `PVposEst is the estimated PVpos final outcome value decoded from the network PVposFinal layer`, 28: `PVposVar is the estimated variance or uncertainty in the PVpos final outcome value decoded from the network PVposFinal layer.`, 29: `PVnegEst is the estimated PVneg final outcome value decoded from the network PVnegFinal layer.`, 30: `PVnegVar is the estimated variance or uncertainty in the PVneg final outcome value decoded from the network PVnegFinal layer.`, 31: `GoalDistEst is the estimate of distance to the goal, in trial step units, decreasing down to 0 as the goal approaches.`, 32: `GoalDistPrev is the previous estimate of distance to the goal, in trial step units, decreasing down to 0 as the goal approaches.`, 33: `ProgressRate is the negative time average change in GoalDistEst, i.e., positive values indicate continued approach to the goal, while negative values represent moving away from the goal.`, 34: `GiveUpUtility is total GiveUp weight as a function of Cost.`, 35: `ContUtility is total Continue weight as a function of expected positive outcome PVposEst.`, 36: `GiveUpTiming is total GiveUp weight as a function of VSPatchPosSum * (1 - VSPatchPosVar).`, 37: `ContTiming is total Continue weight as a function of (1 - VSPatchPosSum) * VSPatchPosVar.`, 38: `GiveUpProgress is total GiveUp weight as a function of ProgressRate.`, 39: `ContProgress is total Continue weight as a function of ProgressRate.`, 40: `GiveUpSum is total GiveUp weight: Utility + Timing + Progress.`, 41: `ContSum is total Continue weight: Utility + Timing + Progress.`, 42: `GiveUpProb is the probability of giving up: 1 / (1 + (GvContSum / GvGiveUpSum))`, 43: `GiveUp is true if a reset was triggered probabilistically based on GiveUpProb.`, 44: `GaveUp is copy of GiveUp from previous trial.`, 45: `VSPatchPos is the net shunting input from VSPatch (PosD1, named PVi in original Rubicon) computed as the Max of US-specific VSPatch saved values, subtracting D1 - D2. This is also stored as GvRewPred.`, 46: `VSPatchPosThr is a thresholded version of GvVSPatchPos, applying Rubicon.LHb.VSPatchNonRewThr threshold for non-reward trials. This is the version used for computing DA.`, 47: `VSPatchPosRPE is the reward prediction error for the VSPatchPos reward prediction without any thresholding applied, and only for PV events. 
This is used to train the VSPatch, assuming a local feedback circuit that does not have the effective thresholding used for the broadcast critic signal that trains the rest of the network.`, 48: `VSPatchPosSum is the sum of VSPatchPos over goal engaged trials, representing the integrated prediction that the US is going to occur`, 49: `VSPatchPosPrev is the previous trial VSPatchPosSum`, 50: `VSPatchPosVar is the integrated temporal variance of VSPatchPos over goal engaged trials, which determines when the VSPatchPosSum has stabilized`, 51: `computed LHb activity level that drives dipping / pausing of DA firing, when VSPatch pos prediction > actual PV reward drive or PVneg > PVpos`, 52: `LHbBurst is computed LHb activity level that drives bursts of DA firing, when actual PV reward drive > VSPatch pos prediction`, 53: `LHbPVDA is GvLHbBurst - GvLHbDip -- the LHb contribution to DA, reflecting PV and VSPatch (PVi), but not the CS (LV) contributions`, 54: `CeMpos is positive valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAposAcqD1 - BLAposExtD2|_+ positively rectified. CeM sets Raw directly. Note that a positive US onset even with no active Drive will be reflected here, enabling learning about unexpected outcomes.`, 55: `CeMneg is negative valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAnegAcqD2 - BLAnegExtD1|_+ positively rectified. CeM sets Raw directly`, 56: `VtaDA is overall dopamine value reflecting all of the different inputs`, 69: ``} +var _GlobalScalarVarsDescMap = map[GlobalScalarVars]string{0: `Rew is the external reward value. Must also set HasRew flag when Rew is set, otherwise it is ignored. This is computed by the Rubicon algorithm from US inputs set by Net.Rubicon methods, and can be directly set in simpler RL cases.`, 1: `HasRew must be set to true (1) when an external reward / US input is present, otherwise Rew is ignored. This is also set when Rubicon BOA model gives up. This drives ACh release in the Rubicon model.`, 2: `RewPred is the reward prediction, computed by a special reward prediction layer, e.g., the VSPatch layer in the Rubicon algorithm.`, 3: `PrevPred is previous time step reward prediction, e.g., for TDPredLayer`, 4: `HadRew is HasRew state from the previous trial, copied from HasRew in NewState. Used for updating Effort, Urgency at start of new trial.`, 5: `DA is phasic dopamine that drives learning moreso than performance, representing reward prediction error, signaled as phasic increases or decreases in activity relative to a tonic baseline, which is represented by a value of 0. Released by the VTA (ventral tegmental area), or SNc (substantia nigra pars compacta).`, 6: `DAtonic is tonic dopamine, which has modulatory instead of learning effects. Increases can drive greater propensity to engage in activities by biasing Go vs No pathways in the basal ganglia, for example as a function of Urgency.`, 7: `ACh is acetylcholine, activated by salient events, particularly at the onset of a reward / punishment outcome (US), or onset of a conditioned stimulus (CS). Driven by BLA -> PPtg that detects changes in BLA activity, via LDTLayer type.`, 8: `NE is norepinepherine -- not yet in use`, 9: `Ser is serotonin -- not yet in use`, 10: `AChRaw is raw ACh value used in updating global ACh value by LDTLayer.`, 11: `GoalMaint is the normalized (0-1) goal maintenance activity, set in ApplyRubicon function at start of trial. 
Drives top-down inhibition of LDT layer / ACh activity.`, 12: `VSMatrixJustGated is VSMatrix just gated (to engage goal maintenance in PFC areas), set at end of plus phase. This excludes any gating happening at time of US.`, 13: `VSMatrixHasGated is VSMatrix has gated since the last time HasRew was set (US outcome received or expected one failed to be received).`, 14: `CuriosityPoolGated is true if VSMatrixJustGated and the first pool representing the curiosity / novelty drive gated. This can change the giving up Effort.Max parameter.`, 15: `Time is the raw time counter, incrementing upward during goal engaged window. This is also copied directly into NegUS[0] which tracks time, but we maintain a separate effort value to make it clearer.`, 16: `Effort is the raw effort counter, incrementing upward for each effort step during goal engaged window. This is also copied directly into NegUS[1] which tracks effort, but we maintain a separate effort value to make it clearer.`, 17: `UrgencyRaw is the raw effort for urgency, incrementing upward from effort increments per step when _not_ goal engaged.`, 18: `Urgency is the overall urgency activity level (normalized 0-1), computed from logistic function of GvUrgencyRaw. This drives DAtonic activity to increasingly bias Go firing.`, 19: `HasPosUS indicates has positive US on this trial, drives goal accomplishment logic and gating.`, 20: `HadPosUS is state from the previous trial (copied from HasPosUS in NewState).`, 21: `NegUSOutcome indicates that a phasic negative US stimulus was experienced, driving phasic ACh, VSMatrix gating to reset current goal engaged plan (if any), and phasic dopamine based on the outcome.`, 22: `HadNegUSOutcome is state from the previous trial (copied from NegUSOutcome in NewState)`, 23: `PVposSum is the total weighted positive valence primary value = sum of Weight * USpos * Drive`, 24: `PVpos is the normalized positive valence primary value = (1 - 1/(1+PVposGain * PVposSum))`, 25: `PVnegSum is the total weighted negative valence primary values including costs = sum of Weight * Cost + Weight * USneg`, 26: `PVpos is the normalized negative valence primary values, including costs = (1 - 1/(1+PVnegGain * PVnegSum))`, 27: `PVposEst is the estimated PVpos final outcome value decoded from the network PVposFinal layer`, 28: `PVposVar is the estimated variance or uncertainty in the PVpos final outcome value decoded from the network PVposFinal layer.`, 29: `PVnegEst is the estimated PVneg final outcome value decoded from the network PVnegFinal layer.`, 30: `PVnegVar is the estimated variance or uncertainty in the PVneg final outcome value decoded from the network PVnegFinal layer.`, 31: `GoalDistEst is the estimate of distance to the goal, in trial step units, decreasing down to 0 as the goal approaches.`, 32: `GoalDistPrev is the previous estimate of distance to the goal, in trial step units, decreasing down to 0 as the goal approaches.`, 33: `ProgressRate is the negative time average change in GoalDistEst, i.e., positive values indicate continued approach to the goal, while negative values represent moving away from the goal.`, 34: `GiveUpUtility is total GiveUp weight as a function of Cost.`, 35: `ContUtility is total Continue weight as a function of expected positive outcome PVposEst.`, 36: `GiveUpTiming is total GiveUp weight as a function of VSPatchPosSum * (1 - VSPatchPosVar).`, 37: `ContTiming is total Continue weight as a function of (1 - VSPatchPosSum) * VSPatchPosVar.`, 38: `GiveUpProgress is total GiveUp weight as a 
function of ProgressRate.`, 39: `ContProgress is total Continue weight as a function of ProgressRate.`, 40: `GiveUpSum is total GiveUp weight: Utility + Timing + Progress.`, 41: `ContSum is total Continue weight: Utility + Timing + Progress.`, 42: `GiveUpProb is the probability of giving up: 1 / (1 + (GvContSum / GvGiveUpSum))`, 43: `GiveUp is true if a reset was triggered probabilistically based on GiveUpProb.`, 44: `GaveUp is copy of GiveUp from previous trial.`, 45: `VSPatchPos is the net shunting input from VSPatch (PosD1, named PVi in original Rubicon) computed as the Max of US-specific VSPatch saved values, subtracting D1 - D2. This is also stored as GvRewPred.`, 46: `VSPatchPosThr is a thresholded version of GvVSPatchPos, applying Rubicon.LHb.VSPatchNonRewThr threshold for non-reward trials. This is the version used for computing DA.`, 47: `VSPatchPosRPE is the reward prediction error for the VSPatchPos reward prediction without any thresholding applied, and only for PV events. This is used to train the VSPatch, assuming a local feedback circuit that does not have the effective thresholding used for the broadcast critic signal that trains the rest of the network.`, 48: `VSPatchPosSum is the sum of VSPatchPos over goal engaged trials, representing the integrated prediction that the US is going to occur`, 49: `VSPatchPosPrev is the previous trial VSPatchPosSum`, 50: `VSPatchPosVar is the integrated temporal variance of VSPatchPos over goal engaged trials, which determines when the VSPatchPosSum has stabilized`, 51: `computed LHb activity level that drives dipping / pausing of DA firing, when VSPatch pos prediction > actual PV reward drive or PVneg > PVpos`, 52: `LHbBurst is computed LHb activity level that drives bursts of DA firing, when actual PV reward drive > VSPatch pos prediction`, 53: `LHbPVDA is GvLHbBurst - GvLHbDip -- the LHb contribution to DA, reflecting PV and VSPatch (PVi), but not the CS (LV) contributions`, 54: `CeMpos is positive valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAposAcqD1 - BLAposExtD2|_+ positively rectified. CeM sets Raw directly. Note that a positive US onset even with no active Drive will be reflected here, enabling learning about unexpected outcomes.`, 55: `CeMneg is negative valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAnegAcqD2 - BLAnegExtD1|_+ positively rectified. 
CeM sets Raw directly`, 56: `VtaDA is overall dopamine value reflecting all of the different inputs`, 78: ``} -var _GlobalScalarVarsMap = map[GlobalScalarVars]string{0: `GvRew`, 1: `GvHasRew`, 2: `GvRewPred`, 3: `GvPrevPred`, 4: `GvHadRew`, 5: `GvDA`, 6: `GvDAtonic`, 7: `GvACh`, 8: `GvNE`, 9: `GvSer`, 10: `GvAChRaw`, 11: `GvGoalMaint`, 12: `GvVSMatrixJustGated`, 13: `GvVSMatrixHasGated`, 14: `GvCuriosityPoolGated`, 15: `GvTime`, 16: `GvEffort`, 17: `GvUrgencyRaw`, 18: `GvUrgency`, 19: `GvHasPosUS`, 20: `GvHadPosUS`, 21: `GvNegUSOutcome`, 22: `GvHadNegUSOutcome`, 23: `GvPVposSum`, 24: `GvPVpos`, 25: `GvPVnegSum`, 26: `GvPVneg`, 27: `GvPVposEst`, 28: `GvPVposVar`, 29: `GvPVnegEst`, 30: `GvPVnegVar`, 31: `GvGoalDistEst`, 32: `GvGoalDistPrev`, 33: `GvProgressRate`, 34: `GvGiveUpUtility`, 35: `GvContUtility`, 36: `GvGiveUpTiming`, 37: `GvContTiming`, 38: `GvGiveUpProgress`, 39: `GvContProgress`, 40: `GvGiveUpSum`, 41: `GvContSum`, 42: `GvGiveUpProb`, 43: `GvGiveUp`, 44: `GvGaveUp`, 45: `GvVSPatchPos`, 46: `GvVSPatchPosThr`, 47: `GvVSPatchPosRPE`, 48: `GvVSPatchPosSum`, 49: `GvVSPatchPosPrev`, 50: `GvVSPatchPosVar`, 51: `GvLHbDip`, 52: `GvLHbBurst`, 53: `GvLHbPVDA`, 54: `GvCeMpos`, 55: `GvCeMneg`, 56: `GvVtaDA`, 69: `GlobalScalarVarsN`} +var _GlobalScalarVarsMap = map[GlobalScalarVars]string{0: `GvRew`, 1: `GvHasRew`, 2: `GvRewPred`, 3: `GvPrevPred`, 4: `GvHadRew`, 5: `GvDA`, 6: `GvDAtonic`, 7: `GvACh`, 8: `GvNE`, 9: `GvSer`, 10: `GvAChRaw`, 11: `GvGoalMaint`, 12: `GvVSMatrixJustGated`, 13: `GvVSMatrixHasGated`, 14: `GvCuriosityPoolGated`, 15: `GvTime`, 16: `GvEffort`, 17: `GvUrgencyRaw`, 18: `GvUrgency`, 19: `GvHasPosUS`, 20: `GvHadPosUS`, 21: `GvNegUSOutcome`, 22: `GvHadNegUSOutcome`, 23: `GvPVposSum`, 24: `GvPVpos`, 25: `GvPVnegSum`, 26: `GvPVneg`, 27: `GvPVposEst`, 28: `GvPVposVar`, 29: `GvPVnegEst`, 30: `GvPVnegVar`, 31: `GvGoalDistEst`, 32: `GvGoalDistPrev`, 33: `GvProgressRate`, 34: `GvGiveUpUtility`, 35: `GvContUtility`, 36: `GvGiveUpTiming`, 37: `GvContTiming`, 38: `GvGiveUpProgress`, 39: `GvContProgress`, 40: `GvGiveUpSum`, 41: `GvContSum`, 42: `GvGiveUpProb`, 43: `GvGiveUp`, 44: `GvGaveUp`, 45: `GvVSPatchPos`, 46: `GvVSPatchPosThr`, 47: `GvVSPatchPosRPE`, 48: `GvVSPatchPosSum`, 49: `GvVSPatchPosPrev`, 50: `GvVSPatchPosVar`, 51: `GvLHbDip`, 52: `GvLHbBurst`, 53: `GvLHbPVDA`, 54: `GvCeMpos`, 55: `GvCeMneg`, 56: `GvVtaDA`, 78: `GlobalScalarVarsN`} // String returns the string representation of this GlobalScalarVars value. func (i GlobalScalarVars) String() string { return enums.String(i, _GlobalScalarVarsMap) } @@ -92,16 +92,16 @@ func (i *GlobalScalarVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "GlobalScalarVars") } -var _GlobalVectorVarsValues = []GlobalVectorVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 22} +var _GlobalVectorVarsValues = []GlobalVectorVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 31} // GlobalVectorVarsN is the highest valid value for type GlobalVectorVars, plus one. 
-const GlobalVectorVarsN GlobalVectorVars = 23 +const GlobalVectorVarsN GlobalVectorVars = 32 -var _GlobalVectorVarsValueMap = map[string]GlobalVectorVars{`GvCost`: 0, `GvCostRaw`: 1, `GvUSneg`: 2, `GvUSnegRaw`: 3, `GvDrives`: 4, `GvUSpos`: 5, `GvVSPatchD1`: 6, `GvVSPatchD2`: 7, `GvOFCposPTMaint`: 8, `GvVSMatrixPoolGated`: 9, `GlobalVectorVarsN`: 22} +var _GlobalVectorVarsValueMap = map[string]GlobalVectorVars{`GvCost`: 0, `GvCostRaw`: 1, `GvUSneg`: 2, `GvUSnegRaw`: 3, `GvDrives`: 4, `GvUSpos`: 5, `GvVSPatchD1`: 6, `GvVSPatchD2`: 7, `GvOFCposPTMaint`: 8, `GvVSMatrixPoolGated`: 9, `GlobalVectorVarsN`: 31} -var _GlobalVectorVarsDescMap = map[GlobalVectorVars]string{0: `Cost are Time, Effort, etc costs, as normalized version of corresponding raw. NCosts of them`, 1: `CostRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`, 2: `USneg are negative valence US outcomes, normalized version of raw. NNegUSs of them`, 3: `USnegRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`, 4: `Drives are current drive state, updated with optional homeostatic exponential return to baseline values.`, 5: `USpos are current positive-valence drive-satisfying input(s) (unconditioned stimuli = US)`, 6: `VSPatch is current reward predicting VSPatch (PosD1) values.`, 7: `VSPatch is current reward predicting VSPatch (PosD2) values.`, 8: `OFCposPTMaint is activity level of given OFCposPT maintenance pool used in anticipating potential USpos outcome value.`, 9: `VSMatrixPoolGated indicates whether given VSMatrix pool gated this is reset after last goal accomplished -- records gating since then.`, 22: ``} +var _GlobalVectorVarsDescMap = map[GlobalVectorVars]string{0: `Cost are Time, Effort, etc costs, as normalized version of corresponding raw. NCosts of them`, 1: `CostRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`, 2: `USneg are negative valence US outcomes, normalized version of raw. NNegUSs of them`, 3: `USnegRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`, 4: `Drives are current drive state, updated with optional homeostatic exponential return to baseline values.`, 5: `USpos are current positive-valence drive-satisfying input(s) (unconditioned stimuli = US)`, 6: `VSPatch is current reward predicting VSPatch (PosD1) values.`, 7: `VSPatch is current reward predicting VSPatch (PosD2) values.`, 8: `OFCposPTMaint is activity level of given OFCposPT maintenance pool used in anticipating potential USpos outcome value.`, 9: `VSMatrixPoolGated indicates whether given VSMatrix pool gated this is reset after last goal accomplished -- records gating since then.`, 31: ``} -var _GlobalVectorVarsMap = map[GlobalVectorVars]string{0: `GvCost`, 1: `GvCostRaw`, 2: `GvUSneg`, 3: `GvUSnegRaw`, 4: `GvDrives`, 5: `GvUSpos`, 6: `GvVSPatchD1`, 7: `GvVSPatchD2`, 8: `GvOFCposPTMaint`, 9: `GvVSMatrixPoolGated`, 22: `GlobalVectorVarsN`} +var _GlobalVectorVarsMap = map[GlobalVectorVars]string{0: `GvCost`, 1: `GvCostRaw`, 2: `GvUSneg`, 3: `GvUSnegRaw`, 4: `GvDrives`, 5: `GvUSpos`, 6: `GvVSPatchD1`, 7: `GvVSPatchD2`, 8: `GvOFCposPTMaint`, 9: `GvVSMatrixPoolGated`, 31: `GlobalVectorVarsN`} // String returns the string representation of this GlobalVectorVars value. 
func (i GlobalVectorVars) String() string { return enums.String(i, _GlobalVectorVarsMap) } @@ -135,16 +135,16 @@ func (i *GlobalVectorVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "GlobalVectorVars") } -var _GPUVarsValues = []GPUVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23} +var _GPUVarsValues = []GPUVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 32} // GPUVarsN is the highest valid value for type GPUVars, plus one. -const GPUVarsN GPUVars = 24 +const GPUVarsN GPUVars = 33 -var _GPUVarsValueMap = map[string]GPUVars{`LayersVar`: 0, `PathsVar`: 1, `NetworkIxsVar`: 2, `NeuronIxsVar`: 3, `SynapseIxsVar`: 4, `PathSendConVar`: 5, `RecvPathIxsVar`: 6, `CtxVar`: 7, `NeuronsVar`: 8, `NeuronAvgsVar`: 9, `PoolsVar`: 10, `LayValuesVar`: 11, `GlobalScalarsVar`: 12, `GlobalVectorsVar`: 13, `ExtsVar`: 14, `PathGBufVar`: 15, `PathGSynsVar`: 16, `SynapsesVar`: 17, `SynapseTracesVar`: 18, `GPUVarsN`: 23} +var _GPUVarsValueMap = map[string]GPUVars{`LayersVar`: 0, `PathsVar`: 1, `NetworkIxsVar`: 2, `NeuronIxsVar`: 3, `SynapseIxsVar`: 4, `PathSendConVar`: 5, `RecvPathIxsVar`: 6, `CtxVar`: 7, `NeuronsVar`: 8, `NeuronAvgsVar`: 9, `PoolsVar`: 10, `LayValuesVar`: 11, `GlobalScalarsVar`: 12, `GlobalVectorsVar`: 13, `ExtsVar`: 14, `PathGBufVar`: 15, `PathGSynsVar`: 16, `SynapsesVar`: 17, `SynapseTracesVar`: 18, `GPUVarsN`: 32} -var _GPUVarsDescMap = map[GPUVars]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``, 5: ``, 6: ``, 7: ``, 8: ``, 9: ``, 10: ``, 11: ``, 12: ``, 13: ``, 14: ``, 15: ``, 16: ``, 17: ``, 18: ``, 23: ``} +var _GPUVarsDescMap = map[GPUVars]string{0: ``, 1: ``, 2: ``, 3: ``, 4: ``, 5: ``, 6: ``, 7: ``, 8: ``, 9: ``, 10: ``, 11: ``, 12: ``, 13: ``, 14: ``, 15: ``, 16: ``, 17: ``, 18: ``, 32: ``} -var _GPUVarsMap = map[GPUVars]string{0: `LayersVar`, 1: `PathsVar`, 2: `NetworkIxsVar`, 3: `NeuronIxsVar`, 4: `SynapseIxsVar`, 5: `PathSendConVar`, 6: `RecvPathIxsVar`, 7: `CtxVar`, 8: `NeuronsVar`, 9: `NeuronAvgsVar`, 10: `PoolsVar`, 11: `LayValuesVar`, 12: `GlobalScalarsVar`, 13: `GlobalVectorsVar`, 14: `ExtsVar`, 15: `PathGBufVar`, 16: `PathGSynsVar`, 17: `SynapsesVar`, 18: `SynapseTracesVar`, 23: `GPUVarsN`} +var _GPUVarsMap = map[GPUVars]string{0: `LayersVar`, 1: `PathsVar`, 2: `NetworkIxsVar`, 3: `NeuronIxsVar`, 4: `SynapseIxsVar`, 5: `PathSendConVar`, 6: `RecvPathIxsVar`, 7: `CtxVar`, 8: `NeuronsVar`, 9: `NeuronAvgsVar`, 10: `PoolsVar`, 11: `LayValuesVar`, 12: `GlobalScalarsVar`, 13: `GlobalVectorsVar`, 14: `ExtsVar`, 15: `PathGBufVar`, 16: `PathGSynsVar`, 17: `SynapsesVar`, 18: `SynapseTracesVar`, 32: `GPUVarsN`} // String returns the string representation of this GPUVars value. func (i GPUVars) String() string { return enums.String(i, _GPUVarsMap) } @@ -176,16 +176,16 @@ func (i GPUVars) MarshalText() ([]byte, error) { return []byte(i.String()), nil // UnmarshalText implements the [encoding.TextUnmarshaler] interface. func (i *GPUVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "GPUVars") } -var _LayerTypesValues = []LayerTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 49} +var _LayerTypesValues = []LayerTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 58} // LayerTypesN is the highest valid value for type LayerTypes, plus one. 
-const LayerTypesN LayerTypes = 50 +const LayerTypesN LayerTypes = 59 -var _LayerTypesValueMap = map[string]LayerTypes{`SuperLayer`: 0, `InputLayer`: 1, `TargetLayer`: 2, `CompareLayer`: 3, `CTLayer`: 4, `PulvinarLayer`: 5, `TRNLayer`: 6, `PTMaintLayer`: 7, `PTPredLayer`: 8, `MatrixLayer`: 9, `STNLayer`: 10, `GPLayer`: 11, `BGThalLayer`: 12, `VSGatedLayer`: 13, `BLALayer`: 14, `CeMLayer`: 15, `VSPatchLayer`: 16, `LHbLayer`: 17, `DrivesLayer`: 18, `UrgencyLayer`: 19, `USLayer`: 20, `PVLayer`: 21, `LDTLayer`: 22, `VTALayer`: 23, `RewLayer`: 24, `RWPredLayer`: 25, `RWDaLayer`: 26, `TDPredLayer`: 27, `TDIntegLayer`: 28, `TDDaLayer`: 29, `LayerTypesN`: 49} +var _LayerTypesValueMap = map[string]LayerTypes{`SuperLayer`: 0, `InputLayer`: 1, `TargetLayer`: 2, `CompareLayer`: 3, `CTLayer`: 4, `PulvinarLayer`: 5, `TRNLayer`: 6, `PTMaintLayer`: 7, `PTPredLayer`: 8, `MatrixLayer`: 9, `STNLayer`: 10, `GPLayer`: 11, `BGThalLayer`: 12, `VSGatedLayer`: 13, `BLALayer`: 14, `CeMLayer`: 15, `VSPatchLayer`: 16, `LHbLayer`: 17, `DrivesLayer`: 18, `UrgencyLayer`: 19, `USLayer`: 20, `PVLayer`: 21, `LDTLayer`: 22, `VTALayer`: 23, `RewLayer`: 24, `RWPredLayer`: 25, `RWDaLayer`: 26, `TDPredLayer`: 27, `TDIntegLayer`: 28, `TDDaLayer`: 29, `LayerTypesN`: 58} -var _LayerTypesDescMap = map[LayerTypes]string{0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in LayerTypes.`, 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`, 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`, 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`, 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`, 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT pathways, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`, 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`, 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. 
The lateral PTSelfMaint pathway uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG pathway from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`, 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamic`, 9: `MatrixLayer represents the matrisome medium spiny neurons (MSNs) that are the main Go / NoGo gating units in BG. These are strongly modulated by phasic dopamine: D1 = Go, D2 = NoGo.`, 10: `STNLayer represents subthalamic nucleus neurons, with two subtypes: STNp are more strongly driven and get over bursting threshold, driving strong, rapid activation of the KCa channels, causing a long pause in firing, which creates a window during which GPe dynamics resolve Go vs. No balance. STNs are more weakly driven and thus more slowly activate KCa, resulting in a longer period of activation, during which the GPi is inhibited to prevent premature gating based only MtxGo inhibition -- gating only occurs when GPePr signal has had a chance to integrate its MtxNo inputs.`, 11: `GPLayer represents a globus pallidus layer in the BG, including: GPeOut, GPePr, GPeAk (arkypallidal), and GPi. Typically just a single unit per Pool representing a given stripe.`, 12: `BGThalLayer represents a BG gated thalamic layer, which receives BG gating in the form of an inhibitory pathway from GPi. Located mainly in the Ventral thalamus: VA / VM / VL, and also parts of MD mediodorsal thalamus.`, 13: `VSGated represents explicit coding of VS gating status: JustGated and HasGated (since last US or failed predicted US), For visualization and / or motor action signaling.`, 14: `BLALayer represents a basolateral amygdala layer which learns to associate arbitrary stimuli (CSs) with behaviorally salient outcomes (USs)`, 15: `CeMLayer represents a central nucleus of the amygdala layer.`, 16: `VSPatchLayer represents a ventral striatum patch layer, which learns to represent the expected amount of dopamine reward and projects both directly with shunting inhibition to the VTA and indirectly via the LHb / RMTg to cancel phasic dopamine firing to expected rewards (i.e., reward prediction error).`, 17: `LHbLayer represents the lateral habenula, which drives dipping in the VTA. It tracks the Global LHb values for visualization purposes -- updated by VTALayer.`, 18: `DrivesLayer represents the Drives in .Rubicon framework. It tracks the Global Drives values for visualization and predictive learning purposes.`, 19: `UrgencyLayer represents the Urgency factor in Rubicon framework. It tracks the Global Urgency.Urge value for visualization and predictive learning purposes.`, 20: `USLayer represents a US unconditioned stimulus layer (USpos or USneg). It tracks the Global USpos or USneg, for visualization and predictive learning purposes. Actual US inputs are set in Rubicon.`, 21: `PVLayer represents a PV primary value layer (PVpos or PVneg) representing the total primary value as a function of US inputs, drives, and effort. 
It tracks the Global VTA.PVpos, PVneg values for visualization and predictive learning purposes.`, 22: `LDTLayer represents the laterodorsal tegmentum layer, which is the primary limbic ACh (acetylcholine) driver to other ACh: BG cholinergic interneurons (CIN) and nucleus basalis ACh areas. The phasic ACh release signals reward salient inputs from CS, US and US omssion, and it drives widespread disinhibition of BG gating and VTA DA firing. It receives excitation from superior colliculus which computes a temporal derivative (stimulus specific adaptation, SSA) of sensory inputs, and inhibitory input from OFC, ACC driving suppression of distracting inputs during goal-engaged states.`, 23: `VTALayer represents the ventral tegmental area, which releases dopamine. It computes final DA value from Rubicon-computed LHb PVDA (primary value DA), updated at start of each trial from updated US, Effort, etc state, and cycle-by-cycle LV learned value state reflecting CS inputs, in the Amygdala (CeM). Its activity reflects this DA level, which is effectively broadcast vial Global state values to all layers.`, 24: `RewLayer represents positive or negative reward values across 2 units, showing spiking rates for each, and Act always represents signed value.`, 25: `RWPredLayer computes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the Rubicon framework). Activity is computed as linear function of excitatory conductance (which can be negative -- there are no constraints). Use with RWPath which does simple delta-rule learning on minus-plus.`, 26: `RWDaLayer computes a dopamine (DA) signal based on a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the Rubicon framework). It computes difference between r(t) and RWPred values. r(t) is accessed directly from a Rew layer -- if no external input then no DA is computed -- critical for effective use of RW only for PV cases. RWPred prediction is also accessed directly from Rew layer to avoid any issues.`, 27: `TDPredLayer is the temporal differences reward prediction layer. It represents estimated value V(t) in the minus phase, and computes estimated V(t+1) based on its learned weights in plus phase, using the TDPredPath pathway type for DA modulated learning.`, 28: `TDIntegLayer is the temporal differences reward integration layer. It represents estimated value V(t) from prior time step in the minus phase, and estimated discount * V(t+1) + r(t) in the plus phase. It gets Rew, PrevPred from Context.NeuroMod, and Special LayerValues from TDPredLayer.`, 29: `TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. These are retrieved from Special LayerValues.`, 49: ``} +var _LayerTypesDescMap = map[LayerTypes]string{0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in LayerTypes.`, 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`, 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. 
Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`, 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`, 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`, 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT pathways, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`, 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`, 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. The lateral PTSelfMaint pathway uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG pathway from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`, 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamic`, 9: `MatrixLayer represents the matrisome medium spiny neurons (MSNs) that are the main Go / NoGo gating units in BG. These are strongly modulated by phasic dopamine: D1 = Go, D2 = NoGo.`, 10: `STNLayer represents subthalamic nucleus neurons, with two subtypes: STNp are more strongly driven and get over bursting threshold, driving strong, rapid activation of the KCa channels, causing a long pause in firing, which creates a window during which GPe dynamics resolve Go vs. No balance. STNs are more weakly driven and thus more slowly activate KCa, resulting in a longer period of activation, during which the GPi is inhibited to prevent premature gating based only MtxGo inhibition -- gating only occurs when GPePr signal has had a chance to integrate its MtxNo inputs.`, 11: `GPLayer represents a globus pallidus layer in the BG, including: GPeOut, GPePr, GPeAk (arkypallidal), and GPi. Typically just a single unit per Pool representing a given stripe.`, 12: `BGThalLayer represents a BG gated thalamic layer, which receives BG gating in the form of an inhibitory pathway from GPi. 
Located mainly in the Ventral thalamus: VA / VM / VL, and also parts of MD mediodorsal thalamus.`, 13: `VSGated represents explicit coding of VS gating status: JustGated and HasGated (since last US or failed predicted US), For visualization and / or motor action signaling.`, 14: `BLALayer represents a basolateral amygdala layer which learns to associate arbitrary stimuli (CSs) with behaviorally salient outcomes (USs)`, 15: `CeMLayer represents a central nucleus of the amygdala layer.`, 16: `VSPatchLayer represents a ventral striatum patch layer, which learns to represent the expected amount of dopamine reward and projects both directly with shunting inhibition to the VTA and indirectly via the LHb / RMTg to cancel phasic dopamine firing to expected rewards (i.e., reward prediction error).`, 17: `LHbLayer represents the lateral habenula, which drives dipping in the VTA. It tracks the Global LHb values for visualization purposes -- updated by VTALayer.`, 18: `DrivesLayer represents the Drives in .Rubicon framework. It tracks the Global Drives values for visualization and predictive learning purposes.`, 19: `UrgencyLayer represents the Urgency factor in Rubicon framework. It tracks the Global Urgency.Urge value for visualization and predictive learning purposes.`, 20: `USLayer represents a US unconditioned stimulus layer (USpos or USneg). It tracks the Global USpos or USneg, for visualization and predictive learning purposes. Actual US inputs are set in Rubicon.`, 21: `PVLayer represents a PV primary value layer (PVpos or PVneg) representing the total primary value as a function of US inputs, drives, and effort. It tracks the Global VTA.PVpos, PVneg values for visualization and predictive learning purposes.`, 22: `LDTLayer represents the laterodorsal tegmentum layer, which is the primary limbic ACh (acetylcholine) driver to other ACh: BG cholinergic interneurons (CIN) and nucleus basalis ACh areas. The phasic ACh release signals reward salient inputs from CS, US and US omssion, and it drives widespread disinhibition of BG gating and VTA DA firing. It receives excitation from superior colliculus which computes a temporal derivative (stimulus specific adaptation, SSA) of sensory inputs, and inhibitory input from OFC, ACC driving suppression of distracting inputs during goal-engaged states.`, 23: `VTALayer represents the ventral tegmental area, which releases dopamine. It computes final DA value from Rubicon-computed LHb PVDA (primary value DA), updated at start of each trial from updated US, Effort, etc state, and cycle-by-cycle LV learned value state reflecting CS inputs, in the Amygdala (CeM). Its activity reflects this DA level, which is effectively broadcast vial Global state values to all layers.`, 24: `RewLayer represents positive or negative reward values across 2 units, showing spiking rates for each, and Act always represents signed value.`, 25: `RWPredLayer computes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the Rubicon framework). Activity is computed as linear function of excitatory conductance (which can be negative -- there are no constraints). Use with RWPath which does simple delta-rule learning on minus-plus.`, 26: `RWDaLayer computes a dopamine (DA) signal based on a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the Rubicon framework). It computes difference between r(t) and RWPred values. 
r(t) is accessed directly from a Rew layer -- if no external input then no DA is computed -- critical for effective use of RW only for PV cases. RWPred prediction is also accessed directly from Rew layer to avoid any issues.`, 27: `TDPredLayer is the temporal differences reward prediction layer. It represents estimated value V(t) in the minus phase, and computes estimated V(t+1) based on its learned weights in plus phase, using the TDPredPath pathway type for DA modulated learning.`, 28: `TDIntegLayer is the temporal differences reward integration layer. It represents estimated value V(t) from prior time step in the minus phase, and estimated discount * V(t+1) + r(t) in the plus phase. It gets Rew, PrevPred from Context.NeuroMod, and Special LayerValues from TDPredLayer.`, 29: `TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. These are retrieved from Special LayerValues.`, 58: ``} -var _LayerTypesMap = map[LayerTypes]string{0: `SuperLayer`, 1: `InputLayer`, 2: `TargetLayer`, 3: `CompareLayer`, 4: `CTLayer`, 5: `PulvinarLayer`, 6: `TRNLayer`, 7: `PTMaintLayer`, 8: `PTPredLayer`, 9: `MatrixLayer`, 10: `STNLayer`, 11: `GPLayer`, 12: `BGThalLayer`, 13: `VSGatedLayer`, 14: `BLALayer`, 15: `CeMLayer`, 16: `VSPatchLayer`, 17: `LHbLayer`, 18: `DrivesLayer`, 19: `UrgencyLayer`, 20: `USLayer`, 21: `PVLayer`, 22: `LDTLayer`, 23: `VTALayer`, 24: `RewLayer`, 25: `RWPredLayer`, 26: `RWDaLayer`, 27: `TDPredLayer`, 28: `TDIntegLayer`, 29: `TDDaLayer`, 49: `LayerTypesN`} +var _LayerTypesMap = map[LayerTypes]string{0: `SuperLayer`, 1: `InputLayer`, 2: `TargetLayer`, 3: `CompareLayer`, 4: `CTLayer`, 5: `PulvinarLayer`, 6: `TRNLayer`, 7: `PTMaintLayer`, 8: `PTPredLayer`, 9: `MatrixLayer`, 10: `STNLayer`, 11: `GPLayer`, 12: `BGThalLayer`, 13: `VSGatedLayer`, 14: `BLALayer`, 15: `CeMLayer`, 16: `VSPatchLayer`, 17: `LHbLayer`, 18: `DrivesLayer`, 19: `UrgencyLayer`, 20: `USLayer`, 21: `PVLayer`, 22: `LDTLayer`, 23: `VTALayer`, 24: `RewLayer`, 25: `RWPredLayer`, 26: `RWDaLayer`, 27: `TDPredLayer`, 28: `TDIntegLayer`, 29: `TDDaLayer`, 58: `LayerTypesN`} // String returns the string representation of this LayerTypes value. func (i LayerTypes) String() string { return enums.String(i, _LayerTypesMap) } @@ -219,16 +219,16 @@ func (i *LayerTypes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "LayerTypes") } -var _DAModTypesValues = []DAModTypes{0, 1, 2, 3, 23} +var _DAModTypesValues = []DAModTypes{0, 1, 2, 3, 32} // DAModTypesN is the highest valid value for type DAModTypes, plus one. -const DAModTypesN DAModTypes = 24 +const DAModTypesN DAModTypes = 33 -var _DAModTypesValueMap = map[string]DAModTypes{`NoDAMod`: 0, `D1Mod`: 1, `D2Mod`: 2, `D1AbsMod`: 3, `DAModTypesN`: 23} +var _DAModTypesValueMap = map[string]DAModTypes{`NoDAMod`: 0, `D1Mod`: 1, `D2Mod`: 2, `D1AbsMod`: 3, `DAModTypesN`: 32} -var _DAModTypesDescMap = map[DAModTypes]string{0: `NoDAMod means there is no effect of dopamine on neural activity`, 1: `D1Mod is for neurons that primarily express dopamine D1 receptors, which are excitatory from DA bursts, inhibitory from dips. Cortical neurons can generally use this type, while subcortical populations are more diverse in having both D1 and D2 subtypes.`, 2: `D2Mod is for neurons that primarily express dopamine D2 receptors, which are excitatory from DA dips, inhibitory from bursts.`, 3: `D1AbsMod is like D1Mod, except the absolute value of DA is used instead of the signed value. 
There are a subset of DA neurons that send increased DA for both negative and positive outcomes, targeting frontal neurons.`, 23: ``} +var _DAModTypesDescMap = map[DAModTypes]string{0: `NoDAMod means there is no effect of dopamine on neural activity`, 1: `D1Mod is for neurons that primarily express dopamine D1 receptors, which are excitatory from DA bursts, inhibitory from dips. Cortical neurons can generally use this type, while subcortical populations are more diverse in having both D1 and D2 subtypes.`, 2: `D2Mod is for neurons that primarily express dopamine D2 receptors, which are excitatory from DA dips, inhibitory from bursts.`, 3: `D1AbsMod is like D1Mod, except the absolute value of DA is used instead of the signed value. There are a subset of DA neurons that send increased DA for both negative and positive outcomes, targeting frontal neurons.`, 32: ``} -var _DAModTypesMap = map[DAModTypes]string{0: `NoDAMod`, 1: `D1Mod`, 2: `D2Mod`, 3: `D1AbsMod`, 23: `DAModTypesN`} +var _DAModTypesMap = map[DAModTypes]string{0: `NoDAMod`, 1: `D1Mod`, 2: `D2Mod`, 3: `D1AbsMod`, 32: `DAModTypesN`} // String returns the string representation of this DAModTypes value. func (i DAModTypes) String() string { return enums.String(i, _DAModTypesMap) } @@ -262,16 +262,16 @@ func (i *DAModTypes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "DAModTypes") } -var _ValenceTypesValues = []ValenceTypes{0, 1, 2, 22} +var _ValenceTypesValues = []ValenceTypes{0, 1, 2, 31} // ValenceTypesN is the highest valid value for type ValenceTypes, plus one. -const ValenceTypesN ValenceTypes = 23 +const ValenceTypesN ValenceTypes = 32 -var _ValenceTypesValueMap = map[string]ValenceTypes{`Positive`: 0, `Negative`: 1, `Cost`: 2, `ValenceTypesN`: 22} +var _ValenceTypesValueMap = map[string]ValenceTypes{`Positive`: 0, `Negative`: 1, `Cost`: 2, `ValenceTypesN`: 31} -var _ValenceTypesDescMap = map[ValenceTypes]string{0: `Positive valence codes for outcomes aligned with drives / goals.`, 1: `Negative valence codes for harmful or aversive outcomes.`, 2: `Cost codes for continous ongoing cost factors such as Time and Effort`, 22: ``} +var _ValenceTypesDescMap = map[ValenceTypes]string{0: `Positive valence codes for outcomes aligned with drives / goals.`, 1: `Negative valence codes for harmful or aversive outcomes.`, 2: `Cost codes for continous ongoing cost factors such as Time and Effort`, 31: ``} -var _ValenceTypesMap = map[ValenceTypes]string{0: `Positive`, 1: `Negative`, 2: `Cost`, 22: `ValenceTypesN`} +var _ValenceTypesMap = map[ValenceTypes]string{0: `Positive`, 1: `Negative`, 2: `Cost`, 31: `ValenceTypesN`} // String returns the string representation of this ValenceTypes value. func (i ValenceTypes) String() string { return enums.String(i, _ValenceTypesMap) } @@ -305,16 +305,16 @@ func (i *ValenceTypes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "ValenceTypes") } -var _NeuronFlagsValues = []NeuronFlags{1, 2, 4, 8, 21} +var _NeuronFlagsValues = []NeuronFlags{1, 2, 4, 8, 30} // NeuronFlagsN is the highest valid value for type NeuronFlags, plus one. 
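> Editor's note: the D1Mod / D2Mod / D1AbsMod descriptions above specify a sign convention for how a signed dopamine value (bursts > 0, dips < 0) modulates different receptor types. The following is a minimal, self-contained sketch of that convention only; the type and function here are illustrative and are not the axon implementation.

```Go
package main

import (
	"fmt"
	"math"
)

// DAModTypes mirrors the values described above, for illustration only.
type DAModTypes int32

const (
	NoDAMod  DAModTypes = iota // no dopamine effect
	D1Mod                      // excited by bursts, inhibited by dips
	D2Mod                      // excited by dips, inhibited by bursts
	D1AbsMod                   // uses the absolute value of DA
)

// daModFactor returns the effective modulation for a signed DA value,
// following the sign conventions in the descriptions above.
func daModFactor(mod DAModTypes, da float32) float32 {
	switch mod {
	case D1Mod:
		return da
	case D2Mod:
		return -da
	case D1AbsMod:
		return float32(math.Abs(float64(da)))
	default:
		return 0
	}
}

func main() {
	fmt.Println(daModFactor(D2Mod, -0.5)) // a dip excites D2-expressing neurons: 0.5
}
```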
-const NeuronFlagsN NeuronFlags = 22 +const NeuronFlagsN NeuronFlags = 31 -var _NeuronFlagsValueMap = map[string]NeuronFlags{`NeuronOff`: 1, `NeuronHasExt`: 2, `NeuronHasTarg`: 4, `NeuronHasCmpr`: 8, `NeuronFlagsN`: 21} +var _NeuronFlagsValueMap = map[string]NeuronFlags{`NeuronOff`: 1, `NeuronHasExt`: 2, `NeuronHasTarg`: 4, `NeuronHasCmpr`: 8, `NeuronFlagsN`: 30} -var _NeuronFlagsDescMap = map[NeuronFlags]string{1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned)`, 2: `NeuronHasExt means the neuron has external input in its Ext field`, 4: `NeuronHasTarg means the neuron has external target input in its Target field`, 8: `NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing comparison statistics but does not drive neural activity ever`, 21: ``} +var _NeuronFlagsDescMap = map[NeuronFlags]string{1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned)`, 2: `NeuronHasExt means the neuron has external input in its Ext field`, 4: `NeuronHasTarg means the neuron has external target input in its Target field`, 8: `NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing comparison statistics but does not drive neural activity ever`, 30: ``} -var _NeuronFlagsMap = map[NeuronFlags]string{1: `NeuronOff`, 2: `NeuronHasExt`, 4: `NeuronHasTarg`, 8: `NeuronHasCmpr`, 21: `NeuronFlagsN`} +var _NeuronFlagsMap = map[NeuronFlags]string{1: `NeuronOff`, 2: `NeuronHasExt`, 4: `NeuronHasTarg`, 8: `NeuronHasCmpr`, 30: `NeuronFlagsN`} // String returns the string representation of this NeuronFlags value. func (i NeuronFlags) String() string { return enums.String(i, _NeuronFlagsMap) } @@ -348,16 +348,16 @@ func (i *NeuronFlags) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "NeuronFlags") } -var _NeuronVarsValues = []NeuronVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 103} +var _NeuronVarsValues = []NeuronVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 112} // NeuronVarsN is the highest valid value for type NeuronVars, plus one. 
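> Editor's note: the NeuronFlags values listed above (NeuronOff = 1, NeuronHasExt = 2, NeuronHasTarg = 4, NeuronHasCmpr = 8) are powers of two, and the NrnFlags variable later in this file notes that they are converted to and from a uint32. A minimal sketch of that bit-packing pattern follows; the helper names are illustrative, not the axon API.

```Go
package main

import "fmt"

type NeuronFlags uint32

const (
	NeuronOff     NeuronFlags = 1 // neuron is lesioned
	NeuronHasExt  NeuronFlags = 2 // has external input in Ext
	NeuronHasTarg NeuronFlags = 4 // has target input in Target
	NeuronHasCmpr NeuronFlags = 8 // has comparison input in Target
)

// setFlag turns the given flag bits on in the packed uint32 word.
func setFlag(word uint32, f NeuronFlags) uint32 { return word | uint32(f) }

// hasFlag reports whether all of the given flag bits are set.
func hasFlag(word uint32, f NeuronFlags) bool { return word&uint32(f) == uint32(f) }

func main() {
	var w uint32
	w = setFlag(w, NeuronHasExt)
	fmt.Println(hasFlag(w, NeuronHasExt), hasFlag(w, NeuronOff)) // true false
}
```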
-const NeuronVarsN NeuronVars = 104 +const NeuronVarsN NeuronVars = 113 -var _NeuronVarsValueMap = map[string]NeuronVars{`Spike`: 0, `Spiked`: 1, `Act`: 2, `ActInt`: 3, `Ge`: 4, `Gi`: 5, `Gk`: 6, `Inet`: 7, `Vm`: 8, `VmDend`: 9, `ISI`: 10, `ISIAvg`: 11, `Ext`: 12, `Target`: 13, `CaSpkM`: 14, `CaSpkP`: 15, `CaSpkD`: 16, `CaSpkPM`: 17, `CaLrn`: 18, `NrnCaM`: 19, `NrnCaP`: 20, `NrnCaD`: 21, `CaDiff`: 22, `RLRate`: 23, `GnmdaSyn`: 24, `Gnmda`: 25, `GnmdaLrn`: 26, `GnmdaMaint`: 27, `NmdaCa`: 28, `Gvgcc`: 29, `VgccM`: 30, `VgccH`: 31, `VgccCa`: 32, `VgccCaInt`: 33, `Burst`: 34, `BurstPrv`: 35, `CtxtGe`: 36, `CtxtGeRaw`: 37, `CtxtGeOrig`: 38, `GgabaB`: 39, `GABAB`: 40, `GABABx`: 41, `Gak`: 42, `SSGi`: 43, `SSGiDend`: 44, `GknaMed`: 45, `GknaSlow`: 46, `Gkir`: 47, `KirM`: 48, `Gsk`: 49, `SKCaIn`: 50, `SKCaR`: 51, `SKCaM`: 52, `Gmahp`: 53, `MahpN`: 54, `Gsahp`: 55, `SahpCa`: 56, `SahpN`: 57, `ActM`: 58, `ActP`: 59, `SpkSt1`: 60, `SpkSt2`: 61, `SpkMax`: 62, `SpkMaxCa`: 63, `SpkBin0`: 64, `SpkBin1`: 65, `SpkBin2`: 66, `SpkBin3`: 67, `SpkBin4`: 68, `SpkBin5`: 69, `SpkBin6`: 70, `SpkBin7`: 71, `SpkPrv`: 72, `GeNoise`: 73, `GeNoiseP`: 74, `GiNoise`: 75, `GiNoiseP`: 76, `GeExt`: 77, `GeRaw`: 78, `GeSyn`: 79, `GiRaw`: 80, `GiSyn`: 81, `GeInt`: 82, `GeIntNorm`: 83, `GiInt`: 84, `GModRaw`: 85, `GModSyn`: 86, `SMaintP`: 87, `GMaintRaw`: 88, `GMaintSyn`: 89, `NrnFlags`: 90, `NeuronVarsN`: 103} +var _NeuronVarsValueMap = map[string]NeuronVars{`Spike`: 0, `Spiked`: 1, `Act`: 2, `ActInt`: 3, `Ge`: 4, `Gi`: 5, `Gk`: 6, `Inet`: 7, `Vm`: 8, `VmDend`: 9, `ISI`: 10, `ISIAvg`: 11, `Ext`: 12, `Target`: 13, `CaSpkM`: 14, `CaSpkP`: 15, `CaSpkD`: 16, `CaSpkPM`: 17, `CaLrn`: 18, `NrnCaM`: 19, `NrnCaP`: 20, `NrnCaD`: 21, `CaDiff`: 22, `RLRate`: 23, `GnmdaSyn`: 24, `Gnmda`: 25, `GnmdaLrn`: 26, `GnmdaMaint`: 27, `NmdaCa`: 28, `Gvgcc`: 29, `VgccM`: 30, `VgccH`: 31, `VgccCa`: 32, `VgccCaInt`: 33, `Burst`: 34, `BurstPrv`: 35, `CtxtGe`: 36, `CtxtGeRaw`: 37, `CtxtGeOrig`: 38, `GgabaB`: 39, `GABAB`: 40, `GABABx`: 41, `Gak`: 42, `SSGi`: 43, `SSGiDend`: 44, `GknaMed`: 45, `GknaSlow`: 46, `Gkir`: 47, `KirM`: 48, `Gsk`: 49, `SKCaIn`: 50, `SKCaR`: 51, `SKCaM`: 52, `Gmahp`: 53, `MahpN`: 54, `Gsahp`: 55, `SahpCa`: 56, `SahpN`: 57, `ActM`: 58, `ActP`: 59, `SpkSt1`: 60, `SpkSt2`: 61, `SpkMax`: 62, `SpkMaxCa`: 63, `SpkBin0`: 64, `SpkBin1`: 65, `SpkBin2`: 66, `SpkBin3`: 67, `SpkBin4`: 68, `SpkBin5`: 69, `SpkBin6`: 70, `SpkBin7`: 71, `SpkPrv`: 72, `GeNoise`: 73, `GeNoiseP`: 74, `GiNoise`: 75, `GiNoiseP`: 76, `GeExt`: 77, `GeRaw`: 78, `GeSyn`: 79, `GiRaw`: 80, `GiSyn`: 81, `GeInt`: 82, `GeIntNorm`: 83, `GiInt`: 84, `GModRaw`: 85, `GModSyn`: 86, `SMaintP`: 87, `GMaintRaw`: 88, `GMaintSyn`: 89, `NrnFlags`: 90, `NeuronVarsN`: 112} -var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1)`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise -- useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. This drives feedback inhibition in the FFFB function (todo: this will change when better inhibition is implemented), and is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. 
Should not be used for learning or other computations.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state -- this is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing performance-level statistics (which are typically based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA) -- does *not* include Gbar.E`, 5: `Gi is total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K`, 7: `Inet is net current produced by all channels -- drives update of Vm`, 8: `Vm is membrane potential -- integrates Inet current over time`, 9: `VmDend is dendritic membrane potential -- has a slower time constant, is not subject to the VmR reset after spiking`, 10: `ISI is current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is average inter-spike-interval -- average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is external input: drives activation of unit from outside influences (e.g., sensory input)`, 13: `Target is the target value: drives learning to produce this activation value`, 14: `CaSpkM is spike-driven calcium trace used as a neuron-level proxy for synpatic credit assignment factor based on continuous time-integrated spiking: exponential integration of SpikeG * Spike at MTau time constant (typically 5). Simulates a calmodulin (CaM) like signal at the most abstract level.`, 15: `CaSpkP is continuous cascaded integration of CaSpkM at PTau time constant (typically 40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`, 16: `CaSpkD is continuous cascaded integration CaSpkP at DTau time constant (typically 40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`, 17: `CaSpkPM is minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.`, 18: `CaLrn is recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (NmdaCa) and spiking-driven VGCC (VgccCaInt) calcium sources (vs. CaSpk* which only reflects spiking component). This is integrated into CaM, CaP, CaD, and temporal derivative is CaP - CaD (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. 
The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially integrated over longer multi-trial timescales.`, 19: `NrnCaM is integrated CaLrn at MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives CaP, CaD for delta signal driving error-driven learning.`, 20: `NrnCaP is cascaded integration of CaM at PTau time constant (typically 40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.`, 21: `NrnCaD is cascaded integratoin of CaP at DTau time constant (typically 40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.`, 22: `CaDiff is difference between CaP - CaD -- this is the error signal that drives error-driven learning.`, 23: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the CaSpkD of recv unit, and the normalized difference CaSpkP - CaSpkD / MAX(CaSpkP - CaSpkD).`, 24: `GnmdaSyn is integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant`, 25: `Gnmda is net postsynaptic (recv) NMDA conductance, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`, 26: `GnmdaLrn is learning version of integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant -- drives NmdaCa that then drives CaM for learning`, 27: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from GMaintSyn and GMaintRaw, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`, 28: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM`, 29: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels`, 30: `VgccM is activation gate of VGCC channels`, 31: `VgccH inactivation gate of VGCC channels`, 32: `VgccCa is instantaneous VGCC calcium flux -- can be driven by spiking or directly from Gvgcc`, 33: `VgccCaInt time-integrated VGCC calcium flux -- this is actually what drives learning`, 34: `Burst is 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers`, 35: `BurstPrv is previous Burst bursting activation from prior time step -- used for context-based learning`, 36: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 37: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 38: `CtxtGeOrig is original CtxtGe value prior to any decay factor -- updates at end of plus phase.`, 39: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase -- applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 40: `GABAB is GABA-B / GIRK activation -- time-integrated value with rise and decay time constants`, 41: `GABABx is GABA-B / GIRK internal drive variable -- gets the raw activation and decays`, 42: `Gak is conductance of A-type K potassium channels`, 43: `SSGi is SST+ somatostatin positive slow spiking inhibition`, 44: `SSGiDend is amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend)`, 45: `GknaMed is conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation of firing`, 46: `GknaSlow is conductance of sodium-gated potassium channel (KNa) slow dynamics 
(Slack), which produces accommodation / adaptation of firing`, 47: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 48: `KirM is the Kir potassium (K) inwardly rectifying gating value`, 49: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 50: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold`, 51: `SKCaR released amount of intracellular calcium, from SKCaIn, as a function of spiking events. this can bind to SKCa channels and drive K currents.`, 52: `SKCaM is Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 53: `Gmahp is medium time scale AHP conductance`, 54: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP`, 55: `Gsahp is slow time scale AHP conductance`, 56: `SahpCa is slowly accumulating calcium value that drives the slow AHP`, 57: `SahpN is the sAHP gating value`, 58: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 59: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 60: `SpkSt1 is the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by SpkSt1() function. Used for example in hippocampus for CA3, CA1 learning`, 61: `SpkSt2 is the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by SpkSt2() function. Used for example in hippocampus for CA3, CA1 learning`, 62: `SpkMax is maximum CaSpkP across one theta cycle time window (max of SpkMaxCa) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`, 63: `SpkMaxCa is Ca integrated like CaSpkP but only starting at MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the PTau time constant otherwise results in significant carryover. This is the input to SpkMax`, 64: `SpkBin has aggregated spikes within 50 msec bins across the theta cycle, for computing synaptic calcium efficiently`, 65: ``, 66: ``, 67: ``, 68: ``, 69: ``, 70: ``, 71: ``, 72: `SpkPrv is final CaSpkD activation state at end of previous theta cycle. 
used for specialized learning mechanisms that operate on delayed sending activations.`, 73: `GeNoise is integrated noise excitatory conductance, added into Ge`, 74: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 75: `GiNoise is integrated noise inhibotyr conductance, added into Gi`, 76: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 77: `GeExt is extra excitatory conductance added to Ge -- from Ext input, GeCtxt etc`, 78: `GeRaw is raw excitatory conductance (net input) received from senders = current raw spiking drive`, 79: `GeSyn is time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways -- does *not* include Gbar.E`, 80: `GiRaw is raw inhibitory conductance (net input) received from senders = current raw spiking drive`, 81: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi`, 82: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive`, 83: `GeIntNorm is normalized GeInt value (divided by the layer maximum) -- this is used for learning in layers that require learning on subthreshold activity`, 84: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive`, 85: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways`, 86: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways`, 87: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor`, 88: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways`, 89: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 90: `NrnFlags are bit flags for binary state variables, which are converted to / from uint32. 
These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`, 103: ``} +var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1)`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise -- useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. This drives feedback inhibition in the FFFB function (todo: this will change when better inhibition is implemented), and is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state -- this is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing performance-level statistics (which are typically based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA) -- does *not* include Gbar.E`, 5: `Gi is total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K`, 7: `Inet is net current produced by all channels -- drives update of Vm`, 8: `Vm is membrane potential -- integrates Inet current over time`, 9: `VmDend is dendritic membrane potential -- has a slower time constant, is not subject to the VmR reset after spiking`, 10: `ISI is current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is average inter-spike-interval -- average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is external input: drives activation of unit from outside influences (e.g., sensory input)`, 13: `Target is the target value: drives learning to produce this activation value`, 14: `CaSpkM is spike-driven calcium trace used as a neuron-level proxy for synpatic credit assignment factor based on continuous time-integrated spiking: exponential integration of SpikeG * Spike at MTau time constant (typically 5). Simulates a calmodulin (CaM) like signal at the most abstract level.`, 15: `CaSpkP is continuous cascaded integration of CaSpkM at PTau time constant (typically 40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. 
Used for specialized learning and computational functions, statistics, instead of Act.`, 16: `CaSpkD is continuous cascaded integration CaSpkP at DTau time constant (typically 40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`, 17: `CaSpkPM is minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.`, 18: `CaLrn is recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (NmdaCa) and spiking-driven VGCC (VgccCaInt) calcium sources (vs. CaSpk* which only reflects spiking component). This is integrated into CaM, CaP, CaD, and temporal derivative is CaP - CaD (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially integrated over longer multi-trial timescales.`, 19: `NrnCaM is integrated CaLrn at MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives CaP, CaD for delta signal driving error-driven learning.`, 20: `NrnCaP is cascaded integration of CaM at PTau time constant (typically 40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.`, 21: `NrnCaD is cascaded integratoin of CaP at DTau time constant (typically 40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.`, 22: `CaDiff is difference between CaP - CaD -- this is the error signal that drives error-driven learning.`, 23: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the CaSpkD of recv unit, and the normalized difference CaSpkP - CaSpkD / MAX(CaSpkP - CaSpkD).`, 24: `GnmdaSyn is integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant`, 25: `Gnmda is net postsynaptic (recv) NMDA conductance, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`, 26: `GnmdaLrn is learning version of integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant -- drives NmdaCa that then drives CaM for learning`, 27: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from GMaintSyn and GMaintRaw, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`, 28: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM`, 29: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels`, 30: `VgccM is activation gate of VGCC channels`, 31: `VgccH inactivation gate of VGCC channels`, 32: `VgccCa is instantaneous VGCC calcium flux -- can be driven by spiking or directly from Gvgcc`, 33: `VgccCaInt time-integrated VGCC calcium flux -- this is actually what drives learning`, 34: `Burst is 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers`, 35: `BurstPrv is previous Burst bursting activation from prior time step -- used for context-based learning`, 36: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT 
layers.`, 37: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 38: `CtxtGeOrig is original CtxtGe value prior to any decay factor -- updates at end of plus phase.`, 39: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase -- applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 40: `GABAB is GABA-B / GIRK activation -- time-integrated value with rise and decay time constants`, 41: `GABABx is GABA-B / GIRK internal drive variable -- gets the raw activation and decays`, 42: `Gak is conductance of A-type K potassium channels`, 43: `SSGi is SST+ somatostatin positive slow spiking inhibition`, 44: `SSGiDend is amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend)`, 45: `GknaMed is conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation of firing`, 46: `GknaSlow is conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation of firing`, 47: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 48: `KirM is the Kir potassium (K) inwardly rectifying gating value`, 49: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 50: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold`, 51: `SKCaR released amount of intracellular calcium, from SKCaIn, as a function of spiking events. this can bind to SKCa channels and drive K currents.`, 52: `SKCaM is Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 53: `Gmahp is medium time scale AHP conductance`, 54: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP`, 55: `Gsahp is slow time scale AHP conductance`, 56: `SahpCa is slowly accumulating calcium value that drives the slow AHP`, 57: `SahpN is the sAHP gating value`, 58: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 59: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 60: `SpkSt1 is the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by SpkSt1() function. Used for example in hippocampus for CA3, CA1 learning`, 61: `SpkSt2 is the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by SpkSt2() function. Used for example in hippocampus for CA3, CA1 learning`, 62: `SpkMax is maximum CaSpkP across one theta cycle time window (max of SpkMaxCa) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. 
Also useful for visualization of peak activity of neurons.`, 63: `SpkMaxCa is Ca integrated like CaSpkP but only starting at MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the PTau time constant otherwise results in significant carryover. This is the input to SpkMax`, 64: `SpkBin has aggregated spikes within 50 msec bins across the theta cycle, for computing synaptic calcium efficiently`, 65: ``, 66: ``, 67: ``, 68: ``, 69: ``, 70: ``, 71: ``, 72: `SpkPrv is final CaSpkD activation state at end of previous theta cycle. used for specialized learning mechanisms that operate on delayed sending activations.`, 73: `GeNoise is integrated noise excitatory conductance, added into Ge`, 74: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 75: `GiNoise is integrated noise inhibotyr conductance, added into Gi`, 76: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 77: `GeExt is extra excitatory conductance added to Ge -- from Ext input, GeCtxt etc`, 78: `GeRaw is raw excitatory conductance (net input) received from senders = current raw spiking drive`, 79: `GeSyn is time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways -- does *not* include Gbar.E`, 80: `GiRaw is raw inhibitory conductance (net input) received from senders = current raw spiking drive`, 81: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi`, 82: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive`, 83: `GeIntNorm is normalized GeInt value (divided by the layer maximum) -- this is used for learning in layers that require learning on subthreshold activity`, 84: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive`, 85: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways`, 86: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways`, 87: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. 
multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor`, 88: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways`, 89: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 90: `NrnFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`, 112: ``} -var _NeuronVarsMap = map[NeuronVars]string{0: `Spike`, 1: `Spiked`, 2: `Act`, 3: `ActInt`, 4: `Ge`, 5: `Gi`, 6: `Gk`, 7: `Inet`, 8: `Vm`, 9: `VmDend`, 10: `ISI`, 11: `ISIAvg`, 12: `Ext`, 13: `Target`, 14: `CaSpkM`, 15: `CaSpkP`, 16: `CaSpkD`, 17: `CaSpkPM`, 18: `CaLrn`, 19: `NrnCaM`, 20: `NrnCaP`, 21: `NrnCaD`, 22: `CaDiff`, 23: `RLRate`, 24: `GnmdaSyn`, 25: `Gnmda`, 26: `GnmdaLrn`, 27: `GnmdaMaint`, 28: `NmdaCa`, 29: `Gvgcc`, 30: `VgccM`, 31: `VgccH`, 32: `VgccCa`, 33: `VgccCaInt`, 34: `Burst`, 35: `BurstPrv`, 36: `CtxtGe`, 37: `CtxtGeRaw`, 38: `CtxtGeOrig`, 39: `GgabaB`, 40: `GABAB`, 41: `GABABx`, 42: `Gak`, 43: `SSGi`, 44: `SSGiDend`, 45: `GknaMed`, 46: `GknaSlow`, 47: `Gkir`, 48: `KirM`, 49: `Gsk`, 50: `SKCaIn`, 51: `SKCaR`, 52: `SKCaM`, 53: `Gmahp`, 54: `MahpN`, 55: `Gsahp`, 56: `SahpCa`, 57: `SahpN`, 58: `ActM`, 59: `ActP`, 60: `SpkSt1`, 61: `SpkSt2`, 62: `SpkMax`, 63: `SpkMaxCa`, 64: `SpkBin0`, 65: `SpkBin1`, 66: `SpkBin2`, 67: `SpkBin3`, 68: `SpkBin4`, 69: `SpkBin5`, 70: `SpkBin6`, 71: `SpkBin7`, 72: `SpkPrv`, 73: `GeNoise`, 74: `GeNoiseP`, 75: `GiNoise`, 76: `GiNoiseP`, 77: `GeExt`, 78: `GeRaw`, 79: `GeSyn`, 80: `GiRaw`, 81: `GiSyn`, 82: `GeInt`, 83: `GeIntNorm`, 84: `GiInt`, 85: `GModRaw`, 86: `GModSyn`, 87: `SMaintP`, 88: `GMaintRaw`, 89: `GMaintSyn`, 90: `NrnFlags`, 103: `NeuronVarsN`} +var _NeuronVarsMap = map[NeuronVars]string{0: `Spike`, 1: `Spiked`, 2: `Act`, 3: `ActInt`, 4: `Ge`, 5: `Gi`, 6: `Gk`, 7: `Inet`, 8: `Vm`, 9: `VmDend`, 10: `ISI`, 11: `ISIAvg`, 12: `Ext`, 13: `Target`, 14: `CaSpkM`, 15: `CaSpkP`, 16: `CaSpkD`, 17: `CaSpkPM`, 18: `CaLrn`, 19: `NrnCaM`, 20: `NrnCaP`, 21: `NrnCaD`, 22: `CaDiff`, 23: `RLRate`, 24: `GnmdaSyn`, 25: `Gnmda`, 26: `GnmdaLrn`, 27: `GnmdaMaint`, 28: `NmdaCa`, 29: `Gvgcc`, 30: `VgccM`, 31: `VgccH`, 32: `VgccCa`, 33: `VgccCaInt`, 34: `Burst`, 35: `BurstPrv`, 36: `CtxtGe`, 37: `CtxtGeRaw`, 38: `CtxtGeOrig`, 39: `GgabaB`, 40: `GABAB`, 41: `GABABx`, 42: `Gak`, 43: `SSGi`, 44: `SSGiDend`, 45: `GknaMed`, 46: `GknaSlow`, 47: `Gkir`, 48: `KirM`, 49: `Gsk`, 50: `SKCaIn`, 51: `SKCaR`, 52: `SKCaM`, 53: `Gmahp`, 54: `MahpN`, 55: `Gsahp`, 56: `SahpCa`, 57: `SahpN`, 58: `ActM`, 59: `ActP`, 60: `SpkSt1`, 61: `SpkSt2`, 62: `SpkMax`, 63: `SpkMaxCa`, 64: `SpkBin0`, 65: `SpkBin1`, 66: `SpkBin2`, 67: `SpkBin3`, 68: `SpkBin4`, 69: `SpkBin5`, 70: `SpkBin6`, 71: `SpkBin7`, 72: `SpkPrv`, 73: `GeNoise`, 74: `GeNoiseP`, 75: `GiNoise`, 76: `GiNoiseP`, 77: `GeExt`, 78: `GeRaw`, 79: `GeSyn`, 80: `GiRaw`, 81: `GiSyn`, 82: `GeInt`, 83: `GeIntNorm`, 84: `GiInt`, 85: `GModRaw`, 86: `GModSyn`, 87: `SMaintP`, 88: `GMaintRaw`, 89: `GMaintSyn`, 90: `NrnFlags`, 112: `NeuronVarsN`} // String returns the string representation of this NeuronVars value. 
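> Editor's note: the CaSpkM, CaSpkP, and CaSpkD entries above describe a cascade of exponential integrations of spiking at the typical time constants noted there (MTau around 5, PTau and DTau around 40), with CaDiff = CaP - CaD as the learning error signal. The sketch below shows only that cascade in isolation; the gain value and variable names are assumptions, and the real axon kernels operate on packed per-neuron state arrays.

```Go
package main

import "fmt"

const (
	spikeG = 8.0  // spike-to-calcium gain (assumed value for illustration)
	mTau   = 5.0  // CaSpkM time constant (cycles)
	pTau   = 40.0 // CaSpkP time constant (cycles)
	dTau   = 40.0 // CaSpkD time constant (cycles)
)

// step advances the three cascaded calcium traces by one cycle,
// given a spike value of 0 or 1 on that cycle.
func step(caM, caP, caD *float64, spike float64) {
	*caM += (spikeG*spike - *caM) / mTau
	*caP += (*caM - *caP) / pTau
	*caD += (*caP - *caD) / dTau
}

func main() {
	var caM, caP, caD float64
	for cyc := 0; cyc < 200; cyc++ {
		spike := 0.0
		if cyc%10 == 0 { // roughly 100 Hz regular spiking
			spike = 1
		}
		step(&caM, &caP, &caD, spike)
	}
	fmt.Printf("CaM=%.3f CaP=%.3f CaD=%.3f diff=%.3f\n", caM, caP, caD, caP-caD)
}
```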
func (i NeuronVars) String() string { return enums.String(i, _NeuronVarsMap) } @@ -391,16 +391,16 @@ func (i *NeuronVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "NeuronVars") } -var _NeuronAvgVarsValues = []NeuronAvgVars{0, 1, 2, 3, 4, 5, 6, 19} +var _NeuronAvgVarsValues = []NeuronAvgVars{0, 1, 2, 3, 4, 5, 6, 28} // NeuronAvgVarsN is the highest valid value for type NeuronAvgVars, plus one. -const NeuronAvgVarsN NeuronAvgVars = 20 +const NeuronAvgVarsN NeuronAvgVars = 29 -var _NeuronAvgVarsValueMap = map[string]NeuronAvgVars{`ActAvg`: 0, `AvgPct`: 1, `TrgAvg`: 2, `DTrgAvg`: 3, `AvgDif`: 4, `GeBase`: 5, `GiBase`: 6, `NeuronAvgVarsN`: 19} +var _NeuronAvgVarsValueMap = map[string]NeuronAvgVars{`ActAvg`: 0, `AvgPct`: 1, `TrgAvg`: 2, `DTrgAvg`: 3, `AvgDif`: 4, `GeBase`: 5, `GiBase`: 6, `NeuronAvgVarsN`: 28} -var _NeuronAvgVarsDescMap = map[NeuronAvgVars]string{0: `ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau). Useful for finding hog units and seeing overall distribution of activation.`, 1: `AvgPct is ActAvg as a proportion of overall layer activation. This is used for synaptic scaling to match TrgAvg activation, updated at SlowInterval intervals.`, 2: `TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct.`, 3: `DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient. Acts like a bias weight. MPI needs to share these across processors.`, 4: `AvgDif is AvgPct - TrgAvg, i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling. Updated at SlowInterval intervals.`, 5: `GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability.`, 6: `GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability.`, 19: ``} +var _NeuronAvgVarsDescMap = map[NeuronAvgVars]string{0: `ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau). Useful for finding hog units and seeing overall distribution of activation.`, 1: `AvgPct is ActAvg as a proportion of overall layer activation. This is used for synaptic scaling to match TrgAvg activation, updated at SlowInterval intervals.`, 2: `TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct.`, 3: `DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient. Acts like a bias weight. MPI needs to share these across processors.`, 4: `AvgDif is AvgPct - TrgAvg, i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling. Updated at SlowInterval intervals.`, 5: `GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability.`, 6: `GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability.`, 28: ``} -var _NeuronAvgVarsMap = map[NeuronAvgVars]string{0: `ActAvg`, 1: `AvgPct`, 2: `TrgAvg`, 3: `DTrgAvg`, 4: `AvgDif`, 5: `GeBase`, 6: `GiBase`, 19: `NeuronAvgVarsN`} +var _NeuronAvgVarsMap = map[NeuronAvgVars]string{0: `ActAvg`, 1: `AvgPct`, 2: `TrgAvg`, 3: `DTrgAvg`, 4: `AvgDif`, 5: `GeBase`, 6: `GiBase`, 28: `NeuronAvgVarsN`} // String returns the string representation of this NeuronAvgVars value. 
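> Editor's note: the NeuronAvgVars descriptions above say that AvgDif = AvgPct - TrgAvg is the error in a neuron's share of layer activity relative to its target, and that this error drives slow synaptic scaling at SlowInterval intervals. The arithmetic below is a hypothetical reading of that relationship for illustration only; the scaling formula and rate are not taken from the axon code.

```Go
package main

import "fmt"

func main() {
	avgPct := 0.030 // neuron's actual proportion of overall layer activation
	trgAvg := 0.020 // target proportion assigned at weight initialization
	avgDif := avgPct - trgAvg

	// A too-active neuron (positive AvgDif) would have its synaptic input
	// scaled down slightly each SlowInterval; a too-quiet one scaled up.
	scaleRate := 0.01 // hypothetical slow scaling rate
	scale := 1.0 - scaleRate*avgDif/trgAvg
	fmt.Printf("AvgDif=%.3f -> multiplicative scale %.4f\n", avgDif, scale)
}
```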
func (i NeuronAvgVars) String() string { return enums.String(i, _NeuronAvgVarsMap) } @@ -434,16 +434,16 @@ func (i *NeuronAvgVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "NeuronAvgVars") } -var _NeuronIndexVarsValues = []NeuronIndexVars{0, 1, 2, 15} +var _NeuronIndexVarsValues = []NeuronIndexVars{0, 1, 2, 24} // NeuronIndexVarsN is the highest valid value for type NeuronIndexVars, plus one. -const NeuronIndexVarsN NeuronIndexVars = 16 +const NeuronIndexVarsN NeuronIndexVars = 25 -var _NeuronIndexVarsValueMap = map[string]NeuronIndexVars{`NrnNeurIndex`: 0, `NrnLayIndex`: 1, `NrnSubPool`: 2, `NeuronIndexVarsN`: 15} +var _NeuronIndexVarsValueMap = map[string]NeuronIndexVars{`NrnNeurIndex`: 0, `NrnLayIndex`: 1, `NrnSubPool`: 2, `NeuronIndexVarsN`: 24} -var _NeuronIndexVarsDescMap = map[NeuronIndexVars]string{0: `NrnNeurIndex is the index of this neuron within its owning layer.`, 1: `NrnLayIndex is the index of the layer that this neuron belongs to, needed for neuron-level parallel code.`, 2: `NrnSubPool is the index of the sub-level inhibitory pool for this neuron (only for 4D shapes, the pool (unit-group / hypercolumn) structure level). Indicies start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).`, 15: ``} +var _NeuronIndexVarsDescMap = map[NeuronIndexVars]string{0: `NrnNeurIndex is the index of this neuron within its owning layer.`, 1: `NrnLayIndex is the index of the layer that this neuron belongs to, needed for neuron-level parallel code.`, 2: `NrnSubPool is the index of the sub-level inhibitory pool for this neuron (only for 4D shapes, the pool (unit-group / hypercolumn) structure level). Indicies start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).`, 24: ``} -var _NeuronIndexVarsMap = map[NeuronIndexVars]string{0: `NrnNeurIndex`, 1: `NrnLayIndex`, 2: `NrnSubPool`, 15: `NeuronIndexVarsN`} +var _NeuronIndexVarsMap = map[NeuronIndexVars]string{0: `NrnNeurIndex`, 1: `NrnLayIndex`, 2: `NrnSubPool`, 24: `NeuronIndexVarsN`} // String returns the string representation of this NeuronIndexVars value. func (i NeuronIndexVars) String() string { return enums.String(i, _NeuronIndexVarsMap) } @@ -477,16 +477,16 @@ func (i *NeuronIndexVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "NeuronIndexVars") } -var _PathTypesValues = []PathTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 31} +var _PathTypesValues = []PathTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 40} // PathTypesN is the highest valid value for type PathTypes, plus one. 
-const PathTypesN PathTypes = 32 +const PathTypesN PathTypes = 41 -var _PathTypesValueMap = map[string]PathTypes{`ForwardPath`: 0, `BackPath`: 1, `LateralPath`: 2, `InhibPath`: 3, `CTCtxtPath`: 4, `RWPath`: 5, `TDPredPath`: 6, `BLAPath`: 7, `HipPath`: 8, `VSPatchPath`: 9, `VSMatrixPath`: 10, `DSMatrixPath`: 11, `PathTypesN`: 31} +var _PathTypesValueMap = map[string]PathTypes{`ForwardPath`: 0, `BackPath`: 1, `LateralPath`: 2, `InhibPath`: 3, `CTCtxtPath`: 4, `RWPath`: 5, `TDPredPath`: 6, `BLAPath`: 7, `HipPath`: 8, `VSPatchPath`: 9, `VSMatrixPath`: 10, `DSMatrixPath`: 11, `PathTypesN`: 40} -var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`, 5: `RWPath does dopamine-modulated learning for reward prediction: Da * Send.CaSpkP (integrated current spiking activity). Uses RLPredPath parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 6: `TDPredPath does dopamine-modulated learning for reward prediction: DWt = Da * Send.SpkPrv (activity on *previous* timestep) Uses RLPredPath parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 7: `BLAPath implements the Rubicon BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`, 8: ``, 9: `VSPatchPath implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`, 10: `VSMatrixPath is for ventral striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 11: `DSMatrixPath is for dorsal striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. 
This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 31: ``} +var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`, 5: `RWPath does dopamine-modulated learning for reward prediction: Da * Send.CaSpkP (integrated current spiking activity). Uses RLPredPath parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 6: `TDPredPath does dopamine-modulated learning for reward prediction: DWt = Da * Send.SpkPrv (activity on *previous* timestep) Uses RLPredPath parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 7: `BLAPath implements the Rubicon BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`, 8: ``, 9: `VSPatchPath implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`, 10: `VSMatrixPath is for ventral striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 11: `DSMatrixPath is for dorsal striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. 
Trace is reset at time of reward based on ACh level (from CINs in biology).`, 40: ``} -var _PathTypesMap = map[PathTypes]string{0: `ForwardPath`, 1: `BackPath`, 2: `LateralPath`, 3: `InhibPath`, 4: `CTCtxtPath`, 5: `RWPath`, 6: `TDPredPath`, 7: `BLAPath`, 8: `HipPath`, 9: `VSPatchPath`, 10: `VSMatrixPath`, 11: `DSMatrixPath`, 31: `PathTypesN`} +var _PathTypesMap = map[PathTypes]string{0: `ForwardPath`, 1: `BackPath`, 2: `LateralPath`, 3: `InhibPath`, 4: `CTCtxtPath`, 5: `RWPath`, 6: `TDPredPath`, 7: `BLAPath`, 8: `HipPath`, 9: `VSPatchPath`, 10: `VSMatrixPath`, 11: `DSMatrixPath`, 40: `PathTypesN`} // String returns the string representation of this PathTypes value. func (i PathTypes) String() string { return enums.String(i, _PathTypesMap) } @@ -520,16 +520,16 @@ func (i *PathTypes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "PathTypes") } -var _GPLayerTypesValues = []GPLayerTypes{0, 1, 2, 22} +var _GPLayerTypesValues = []GPLayerTypes{0, 1, 2, 31} // GPLayerTypesN is the highest valid value for type GPLayerTypes, plus one. -const GPLayerTypesN GPLayerTypes = 23 +const GPLayerTypesN GPLayerTypes = 32 -var _GPLayerTypesValueMap = map[string]GPLayerTypes{`GPePr`: 0, `GPeAk`: 1, `GPi`: 2, `GPLayerTypesN`: 22} +var _GPLayerTypesValueMap = map[string]GPLayerTypes{`GPePr`: 0, `GPeAk`: 1, `GPi`: 2, `GPLayerTypesN`: 31} -var _GPLayerTypesDescMap = map[GPLayerTypes]string{0: `GPePr is the set of prototypical GPe neurons, mediating classical NoGo`, 1: `GPeAk is arkypallidal layer of GPe neurons, receiving inhibition from GPePr and projecting inhibition to Mtx`, 2: `GPi is the inner globus pallidus, functionally equivalent to SNr, receiving from MtxGo and GPePr, and sending inhibition to VThal`, 22: ``} +var _GPLayerTypesDescMap = map[GPLayerTypes]string{0: `GPePr is the set of prototypical GPe neurons, mediating classical NoGo`, 1: `GPeAk is arkypallidal layer of GPe neurons, receiving inhibition from GPePr and projecting inhibition to Mtx`, 2: `GPi is the inner globus pallidus, functionally equivalent to SNr, receiving from MtxGo and GPePr, and sending inhibition to VThal`, 31: ``} -var _GPLayerTypesMap = map[GPLayerTypes]string{0: `GPePr`, 1: `GPeAk`, 2: `GPi`, 22: `GPLayerTypesN`} +var _GPLayerTypesMap = map[GPLayerTypes]string{0: `GPePr`, 1: `GPeAk`, 2: `GPi`, 31: `GPLayerTypesN`} // String returns the string representation of this GPLayerTypes value. func (i GPLayerTypes) String() string { return enums.String(i, _GPLayerTypesMap) } @@ -563,16 +563,16 @@ func (i *GPLayerTypes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "GPLayerTypes") } -var _SynapseVarsValues = []SynapseVars{0, 1, 2, 3, 4, 14} +var _SynapseVarsValues = []SynapseVars{0, 1, 2, 3, 4, 23} // SynapseVarsN is the highest valid value for type SynapseVars, plus one. -const SynapseVarsN SynapseVars = 15 +const SynapseVarsN SynapseVars = 24 -var _SynapseVarsValueMap = map[string]SynapseVars{`Wt`: 0, `LWt`: 1, `SWt`: 2, `DWt`: 3, `DSWt`: 4, `SynapseVarsN`: 14} +var _SynapseVarsValueMap = map[string]SynapseVars{`Wt`: 0, `LWt`: 1, `SWt`: 2, `DWt`: 3, `DSWt`: 4, `SynapseVarsN`: 23} -var _SynapseVarsDescMap = map[SynapseVars]string{0: `Wt is effective synaptic weight value, determining how much conductance one spike drives on the receiving neuron, representing the actual number of effective AMPA receptors in the synapse. 
Wt = SWt * WtSig(LWt), where WtSig produces values between 0-2 based on LWt, centered on 1.`, 1: `LWt is rapidly learning, linear weight value -- learns according to the lrate specified in the connection spec. Biologically, this represents the internal biochemical processes that drive the trafficking of AMPA receptors in the synaptic density. Initially all LWt are .5, which gives 1 from WtSig function.`, 2: `SWt is slowly adapting structural weight value, which acts as a multiplicative scaling factor on synaptic efficacy: biologically represents the physical size and efficacy of the dendritic spine. SWt values adapt in an outer loop along with synaptic scaling, with constraints to prevent runaway positive feedback loops and maintain variance and further capacity to learn. Initial variance is all in SWt, with LWt set to .5, and scaling absorbs some of LWt into SWt.`, 3: `DWt is delta (change in) synaptic weight, from learning -- updates LWt which then updates Wt.`, 4: `DSWt is change in SWt slow synaptic weight -- accumulates DWt`, 14: ``} +var _SynapseVarsDescMap = map[SynapseVars]string{0: `Wt is effective synaptic weight value, determining how much conductance one spike drives on the receiving neuron, representing the actual number of effective AMPA receptors in the synapse. Wt = SWt * WtSig(LWt), where WtSig produces values between 0-2 based on LWt, centered on 1.`, 1: `LWt is rapidly learning, linear weight value -- learns according to the lrate specified in the connection spec. Biologically, this represents the internal biochemical processes that drive the trafficking of AMPA receptors in the synaptic density. Initially all LWt are .5, which gives 1 from WtSig function.`, 2: `SWt is slowly adapting structural weight value, which acts as a multiplicative scaling factor on synaptic efficacy: biologically represents the physical size and efficacy of the dendritic spine. SWt values adapt in an outer loop along with synaptic scaling, with constraints to prevent runaway positive feedback loops and maintain variance and further capacity to learn. Initial variance is all in SWt, with LWt set to .5, and scaling absorbs some of LWt into SWt.`, 3: `DWt is delta (change in) synaptic weight, from learning -- updates LWt which then updates Wt.`, 4: `DSWt is change in SWt slow synaptic weight -- accumulates DWt`, 23: ``} -var _SynapseVarsMap = map[SynapseVars]string{0: `Wt`, 1: `LWt`, 2: `SWt`, 3: `DWt`, 4: `DSWt`, 14: `SynapseVarsN`} +var _SynapseVarsMap = map[SynapseVars]string{0: `Wt`, 1: `LWt`, 2: `SWt`, 3: `DWt`, 4: `DSWt`, 23: `SynapseVarsN`} // String returns the string representation of this SynapseVars value. func (i SynapseVars) String() string { return enums.String(i, _SynapseVarsMap) } @@ -606,16 +606,16 @@ func (i *SynapseVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "SynapseVars") } -var _SynapseTraceVarsValues = []SynapseTraceVars{0, 1, 2, 12} +var _SynapseTraceVarsValues = []SynapseTraceVars{0, 1, 2, 21} // SynapseTraceVarsN is the highest valid value for type SynapseTraceVars, plus one. 
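> Editor's note: the SynapseVars descriptions above state that the effective weight is Wt = SWt * WtSig(LWt), where WtSig maps LWt into the 0-2 range with WtSig(0.5) = 1, so the initial LWt of .5 leaves Wt = SWt. The sketch below uses a generic gain-based sigmoid to satisfy those constraints; the specific gain and functional form are assumptions, not the axon defaults.

```Go
package main

import (
	"fmt"
	"math"
)

const gain = 6.0 // assumed contrast-enhancement gain

// wtSig maps a linear weight in [0,1] to a contrast-enhanced value in [0,2],
// centered so that wtSig(0.5) == 1.
func wtSig(lwt float64) float64 {
	num := math.Pow(lwt, gain)
	return 2 * num / (num + math.Pow(1-lwt, gain))
}

// effWt computes the effective weight from the structural (SWt) and
// linear learning (LWt) weights, per the Wt = SWt * WtSig(LWt) relation.
func effWt(swt, lwt float64) float64 { return swt * wtSig(lwt) }

func main() {
	fmt.Printf("Wt at init:     %.3f\n", effWt(0.4, 0.5)) // equals SWt = 0.4
	fmt.Printf("Wt after LTP:   %.3f\n", effWt(0.4, 0.7)) // pushed above SWt
}
```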
-const SynapseTraceVarsN SynapseTraceVars = 13 +const SynapseTraceVarsN SynapseTraceVars = 22 -var _SynapseTraceVarsValueMap = map[string]SynapseTraceVars{`Tr`: 0, `DTr`: 1, `DiDWt`: 2, `SynapseTraceVarsN`: 12} +var _SynapseTraceVarsValueMap = map[string]SynapseTraceVars{`Tr`: 0, `DTr`: 1, `DiDWt`: 2, `SynapseTraceVarsN`: 21} -var _SynapseTraceVarsDescMap = map[SynapseTraceVars]string{0: `Tr is trace of synaptic activity over time, which is used for credit assignment in learning. In MatrixPath this is a tag that is then updated later when US occurs.`, 1: `DTr is delta (change in) Tr trace of synaptic activity over time.`, 2: `DiDWt is delta weight for each data parallel index (Di). This is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values.`, 12: ``} +var _SynapseTraceVarsDescMap = map[SynapseTraceVars]string{0: `Tr is trace of synaptic activity over time, which is used for credit assignment in learning. In MatrixPath this is a tag that is then updated later when US occurs.`, 1: `DTr is delta (change in) Tr trace of synaptic activity over time.`, 2: `DiDWt is delta weight for each data parallel index (Di). This is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values.`, 21: ``} -var _SynapseTraceVarsMap = map[SynapseTraceVars]string{0: `Tr`, 1: `DTr`, 2: `DiDWt`, 12: `SynapseTraceVarsN`} +var _SynapseTraceVarsMap = map[SynapseTraceVars]string{0: `Tr`, 1: `DTr`, 2: `DiDWt`, 21: `SynapseTraceVarsN`} // String returns the string representation of this SynapseTraceVars value. func (i SynapseTraceVars) String() string { return enums.String(i, _SynapseTraceVarsMap) } @@ -649,16 +649,16 @@ func (i *SynapseTraceVars) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "SynapseTraceVars") } -var _SynapseIndexVarsValues = []SynapseIndexVars{0, 1, 2, 12} +var _SynapseIndexVarsValues = []SynapseIndexVars{0, 1, 2, 21} // SynapseIndexVarsN is the highest valid value for type SynapseIndexVars, plus one. 
-const SynapseIndexVarsN SynapseIndexVars = 13 +const SynapseIndexVarsN SynapseIndexVars = 22 -var _SynapseIndexVarsValueMap = map[string]SynapseIndexVars{`SynRecvIndex`: 0, `SynSendIndex`: 1, `SynPathIndex`: 2, `SynapseIndexVarsN`: 12} +var _SynapseIndexVarsValueMap = map[string]SynapseIndexVars{`SynRecvIndex`: 0, `SynSendIndex`: 1, `SynPathIndex`: 2, `SynapseIndexVarsN`: 21} -var _SynapseIndexVarsDescMap = map[SynapseIndexVars]string{0: `SynRecvIndex is receiving neuron index in network's global list of neurons`, 1: `SynSendIndex is sending neuron index in network's global list of neurons`, 2: `SynPathIndex is pathway index in global list of pathways organized as [Layers][RecvPaths]`, 12: ``} +var _SynapseIndexVarsDescMap = map[SynapseIndexVars]string{0: `SynRecvIndex is receiving neuron index in network's global list of neurons`, 1: `SynSendIndex is sending neuron index in network's global list of neurons`, 2: `SynPathIndex is pathway index in global list of pathways organized as [Layers][RecvPaths]`, 21: ``} -var _SynapseIndexVarsMap = map[SynapseIndexVars]string{0: `SynRecvIndex`, 1: `SynSendIndex`, 2: `SynPathIndex`, 12: `SynapseIndexVarsN`} +var _SynapseIndexVarsMap = map[SynapseIndexVars]string{0: `SynRecvIndex`, 1: `SynSendIndex`, 2: `SynPathIndex`, 21: `SynapseIndexVarsN`} // String returns the string representation of this SynapseIndexVars value. func (i SynapseIndexVars) String() string { return enums.String(i, _SynapseIndexVarsMap) } diff --git a/axon/layer-algo.go b/axon/layer-algo.go index daa7d72a..298d57ff 100644 --- a/axon/layer-algo.go +++ b/axon/layer-algo.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line layer-algo.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -20,8 +20,6 @@ import ( // layer-algo.go has the core algorithm methods. -//gosl:start - // LDTSrcLayAct returns the overall activity level for given source layer // for purposes of computing ACh salience value. // Typically the input is a superior colliculus (SC) layer that rapidly @@ -743,5 +741,3 @@ func (ly *Layer) SetSubMean(trgAvg, path float32) { pj.Params.Learn.Trace.SubMean = path } } - -//gosl:end diff --git a/axon/layer-algo.goal b/axon/layer-algo.goal index 99e025bd..10440662 100644 --- a/axon/layer-algo.goal +++ b/axon/layer-algo.goal @@ -18,8 +18,6 @@ import ( // layer-algo.go has the core algorithm methods. -//gosl:start - // LDTSrcLayAct returns the overall activity level for given source layer // for purposes of computing ACh salience value. // Typically the input is a superior colliculus (SC) layer that rapidly @@ -739,6 +737,4 @@ func (ly *Layer) SetSubMean(trgAvg, path float32) { } } -//gosl:end - diff --git a/axon/layer.go b/axon/layer.go index 6719e834..935fa307 100644 --- a/axon/layer.go +++ b/axon/layer.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line layer.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/layerbase.go b/axon/layerbase.go index b268a26a..862965c8 100644 --- a/axon/layerbase.go +++ b/axon/layerbase.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line layerbase.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/layerparams.go b/axon/layerparams.go index 9e987467..dbe481b5 100644 --- a/axon/layerparams.go +++ b/axon/layerparams.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line layerparams.goal:1 // Copyright (c) 2023, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/learn.go b/axon/learn.go index c0b26db6..7a85c4a3 100644 --- a/axon/learn.go +++ b/axon/learn.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line learn.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/network.go b/axon/network.go index 9103dfb3..0b78d604 100644 --- a/axon/network.go +++ b/axon/network.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line network.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -30,7 +30,6 @@ import ( // all data parallel values. The current Context.NData should be set // properly prior to calling this and subsequent Cycle methods. func (nt *Network) NewState(ctx *Context) { - // nt.Context().NetIndexes.NData = ctx.NetIndexes.NData // if nt.GPU.On { // todo: this has a bug in neuron-level access in updating SpkPrv // // nt.GPU.RunNewState() @@ -497,7 +496,8 @@ func (nt *Network) UpdateExtFlags(ctx *Context) { // SynFail updates synaptic failure func (nt *Network) SynFail(ctx *Context) { - nt.PathMapSeq(func(pj *Path) { pj.SynFail(ctx) }, "SynFail") + // todo: + // nt.PathMapSeq(func(pj *Path) { pj.SynFail(ctx) }, "SynFail") } // LRateMod sets the LRate modulation parameter for Paths, which is @@ -683,7 +683,8 @@ func (nt *Network) SizeReport(detail bool) string { varBytes := 4 synVarBytes := 4 - maxData := int(nt.MaxData) + nix := nt.NetIxs() + maxData := int(nix.MaxData) memNeuron := int(NeuronVarsN)*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes @@ -722,7 +723,7 @@ func (nt *Network) SizeReport(detail bool) string { synCaMem := nt.SynapseTraces.Len() * synVarBytes fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynCa: %v\n", - nt.Name, nt.NNeurons, (datasize.Size)(nrnMem).String(), nt.NSyns, + nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns, (datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String()) return b.String() } diff --git a/axon/network.goal b/axon/network.goal index ac805ae2..e2e40d10 100644 --- a/axon/network.goal +++ b/axon/network.goal @@ -28,7 +28,6 @@ import ( // all data parallel values. The current Context.NData should be set // properly prior to calling this and subsequent Cycle methods. 
func (nt *Network) NewState(ctx *Context) { - // nt.Context().NetIndexes.NData = ctx.NetIndexes.NData // if nt.GPU.On { // todo: this has a bug in neuron-level access in updating SpkPrv // nt.GPU.RunNewState() // return @@ -486,7 +485,8 @@ func (nt *Network) UpdateExtFlags(ctx *Context) { // SynFail updates synaptic failure func (nt *Network) SynFail(ctx *Context) { - nt.PathMapSeq(func(pj *Path) { pj.SynFail(ctx) }, "SynFail") + // todo: + // nt.PathMapSeq(func(pj *Path) { pj.SynFail(ctx) }, "SynFail") } // LRateMod sets the LRate modulation parameter for Paths, which is @@ -668,7 +668,8 @@ func (nt *Network) SizeReport(detail bool) string { varBytes := 4 synVarBytes := 4 - maxData := int(nt.MaxData) + nix := nt.NetIxs() + maxData := int(nix.MaxData) memNeuron := int(NeuronVarsN)*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes @@ -706,7 +707,7 @@ func (nt *Network) SizeReport(detail bool) string { synCaMem := nt.SynapseTraces.Len() * synVarBytes fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynCa: %v\n", - nt.Name, nt.NNeurons, (datasize.Size)(nrnMem).String(), nt.NSyns, + nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns, (datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String()) return b.String() } diff --git a/axon/networkbase.go b/axon/networkbase.go index f200a331..643aee83 100644 --- a/axon/networkbase.go +++ b/axon/networkbase.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line networkbase.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -41,12 +41,16 @@ import ( // NetworkIndexes are indexes and sizes for processing network. type NetworkIndexes struct { - // maximum number of data inputs that can be processed + // MaxData is the maximum number of data inputs that can be processed // in parallel in one pass of the network. // Neuron storage is allocated to hold this amount during // Build process, and this value reflects that. MaxData uint32 `edit:"-"` + // MaxDelay is the maximum synaptic delay across all pathways at the time of + // [Network.Build]. This determines the size of the spike sending delay buffers. 
+ MaxDelay uint32 `edit:-"-"` + // number of layers in the network NLayers uint32 `edit:"-"` @@ -92,11 +96,6 @@ func (ix *NetworkIndexes) DataIndex(idx uint32) uint32 { return idx % ix.MaxData } -// DataIndexIsValid returns true if the data index is valid (< NData) -func (ix *NetworkIndexes) DataIndexIsValid(li uint32) bool { - return (li < ix.NData) -} - // LayerIndexIsValid returns true if the layer index is valid (< NLayers) func (ix *NetworkIndexes) LayerIndexIsValid(li uint32) bool { return (li < ix.NLayers) @@ -273,10 +272,10 @@ func (nt *Network) NetIxs() *NetworkIndexes { return &nt.NetworkIxs[0] } func (nt *Network) NumLayers() int { return len(nt.Layers) } func (nt *Network) EmerLayer(idx int) emer.Layer { return nt.Layers[idx] } func (nt *Network) MaxParallelData() int { return int(nt.NetIxs().MaxData) } -func (nt *Network) NParallelData() int { return int(nt.NetIxs().NData) } +func (nt *Network) NParallelData() int { return int(nt.Context().NData) } func (nt *Network) Init() { - nt.MaxData = 1 + nt.NetIxs().MaxData = 1 } // NewNetwork returns a new axon Network @@ -323,7 +322,8 @@ func (nt *Network) LayersByType(layType ...LayerTypes) []string { // LayerValues returns LayerValues for given layer and data parallel indexes func (nt *Network) LayerValues(li, di uint32) *LayerValues { - return &nt.LayValues[li*nt.MaxData+di] + md := nt.NetIxs().MaxData + return &nt.LayValues[li*md+di] } // UnitVarNames returns a list of variable names available on the units in this network. @@ -499,9 +499,9 @@ func (nt *Network) SaveAllPathScales(filename core.Filename) error { // AllGlobals returns a listing of all Global variables and values. func (nt *Network) AllGlobals() string { - ctx := nt.Context() + md := nt.NetIxs().MaxData str := "" - for di := uint32(0); di < nt.MaxData; di++ { + for di := uint32(0); di < md; di++ { str += fmt.Sprintf("\n###############################\nData Index: %02d\n\n", di) for vv := GvRew; vv < GlobalScalarVarsN; vv++ { str += fmt.Sprintf("%20s:\t%7.4f\n", vv.String(), GlobalScalars.Value(int(vv), int(di))) @@ -540,8 +540,8 @@ func (nt *Network) ShowAllGlobals() { //types:add // AllGlobalValues adds to map of all Global variables and values. // ctrKey is a key of counters to contextualize values. func (nt *Network) AllGlobalValues(ctrKey string, vals map[string]float32) { - ctx := nt.Context() - for di := uint32(0); di < nt.MaxData; di++ { + md := nt.NetIxs().MaxData + for di := uint32(0); di < md; di++ { for vv := GvRew; vv < GlobalScalarVarsN; vv++ { key := fmt.Sprintf("%s Di: %d\t%s", ctrKey, di, vv.String()) vals[key] = GlobalScalars.Value(int(vv), int(di)) @@ -680,25 +680,14 @@ func (nt *Network) LateralConnectLayerPath(lay *Layer, pat paths.Pattern, pt *Pa return pt } -// SetCtxStrides sets the given simulation context strides for accessing -// variables on this network -- these must be set properly before calling -// any compute methods with the context. -func (nt *Network) SetCtxStrides(simCtx *Context) { - simCtx.CopyNetStridesFrom(nt.Context()) -} - // SetMaxData sets the MaxData and current NData for both the Network and the Context func (nt *Network) SetMaxData(simCtx *Context, maxData int) { - nt.MaxData = uint32(maxData) - simCtx.NetIndexes.NData = uint32(maxData) - simCtx.NetIndexes.MaxData = uint32(maxData) + nt.NetIxs().MaxData = uint32(maxData) + simCtx.NData = uint32(maxData) } // Build constructs the layer and pathway state based on the layer shapes -// and patterns of interconnectivity. 
Configures threading using heuristics based -// on final network size. Must set UseGPUOrder properly prior to calling. -// Configures the given Context object used in the simulation with the memory -// access strides for this network -- must be set properly -- see SetCtxStrides. +// and patterns of interconnectivity. func (nt *Network) Build(simCtx *Context) error { //types:add nt.MakeLayerMaps() if nt.Rubicon.NPosUSs == 0 { @@ -708,7 +697,7 @@ func (nt *Network) Build(simCtx *Context) error { //types:add ctx := nt.Context() *ctx = *simCtx nt.FunTimes = make(map[string]*timer.Time) - maxData := int(nt.MaxData) + maxData := int(nt.NetIxs().MaxData) var errs []error totNeurons := 0 totPaths := 0 @@ -737,15 +726,15 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nt.LayParams = make([]LayerParams, nLayers) nt.LayValues = make([]LayerValues, nLayers*maxData) nt.Pools = make([]Pool, totPools*maxData) - sltensor.SetShapeSizes(&nt.Neurons, int(NeuronVarsN), totNeurons, int(nt.MaxData)) + sltensor.SetShapeSizes(&nt.Neurons, int(NeuronVarsN), totNeurons, maxData) sltensor.SetShapeSizes(&nt.NeuronAvgs, int(NeuronAvgVarsN), totNeurons) sltensor.SetShapeSizes(&nt.NeuronIxs, int(NeuronIndexVarsN), totNeurons) nt.Paths = make([]*Path, totPaths) nt.PathParams = make([]PathParams, totPaths) sltensor.SetShapeSizes(&nt.Exts, totExts, maxData) - sltensor.SetShapeSizes(&nt.GlobalScalars, int(GlobalScalarVarsN), int(nt.MaxData)) - sltensor.SetShapeSizes(&nt.GlobalVectors, int(GlobalVectorVarsN), int(MaxGlobalVecN), int(nt.MaxData)) + sltensor.SetShapeSizes(&nt.GlobalScalars, int(GlobalScalarVarsN), maxData) + sltensor.SetShapeSizes(&nt.GlobalVectors, int(GlobalVectorVarsN), int(MaxGlobalVecN), maxData) totSynapses := 0 totRecvCon := 0 @@ -766,13 +755,13 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nn := shp.Len() ly.NNeurons = uint32(nn) ly.NeurStIndex = uint32(neurIndex) - ly.MaxData = nt.MaxData + ly.MaxData = uint32(maxData) np := ly.NumPools() + 1 npd := np * maxData ly.NPools = uint32(np) ly.Pools = nt.Pools[poolIndex : poolIndex+npd] ly.Params.Indexes.LayIndex = uint32(li) - ly.Params.Indexes.MaxData = nt.MaxData + ly.Params.Indexes.MaxData = uint32(maxData) ly.Params.Indexes.PoolSt = uint32(poolIndex) ly.Params.Indexes.NeurSt = uint32(neurIndex) ly.Params.Indexes.NeurN = uint32(nn) @@ -841,7 +830,7 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nt.NetworkIxs[0].NSyns = uint32(totSynapses) sltensor.SetShapeSizes(&nt.Synapses, int(SynapseVarsN), totSynapses) - sltensor.SetShapeSizes(&nt.SynapseTraces, int(SynapseTraceVarsN), totSynapses, int(nt.MaxData)) + sltensor.SetShapeSizes(&nt.SynapseTraces, int(SynapseTraceVarsN), totSynapses, maxData) sltensor.SetShapeSizes(&nt.SynapseIxs, int(SynapseIndexVarsN), totSynapses) sltensor.SetShapeSizes(&nt.PathSendCon, totSendCon, 2) sltensor.SetShapeSizes(&nt.PathRecvCon, totRecvCon, 2) @@ -913,9 +902,7 @@ func (nt *Network) Build(simCtx *Context) error { //types:add rpathIndex++ } } - nix.NSyns = nt.NSyns - - nt.SetCtxStrides(simCtx) + nix.NSyns = uint32(syIndex) nt.LayoutLayers() return errors.Join(errs...) @@ -926,42 +913,44 @@ func (nt *Network) Build(simCtx *Context) error { //types:add // which should have been configured by this point. 
// Called by default in InitWeights() func (nt *Network) BuildPathGBuf() { - nt.MaxDelay = 0 + nix := nt.NetIxs() + maxDel := uint32(0) npjneur := uint32(0) for _, ly := range nt.Layers { nneur := uint32(ly.NNeurons) for _, pt := range ly.RecvPaths { - if pt.Params.Com.MaxDelay > nt.MaxDelay { - nt.MaxDelay = pt.Params.Com.MaxDelay + if pt.Params.Com.MaxDelay > maxDel { + maxDel = pt.Params.Com.MaxDelay } npjneur += nneur } } - mxlen := nt.MaxDelay + 1 - sltensor.SetShapeSizes(&nt.PathGBuf, int(npjneur), int(mxlen), int(nt.MaxData)) - sltensor.SetShapeSizes(&nt.PathGSyns, int(npjneur), int(nt.MaxData)) - - // gbi := uint32(0) - // gsi := uint32(0) - // - // for _, ly := range nt.Layers { - // nneur := uint32(ly.NNeurons) - // for _, pt := range ly.RecvPaths { - // gbs := nneur * mxlen * nt.MaxData - // pt.Params.Indexes.GBufSt = gbi - // pt.GBuf = nt.PathGBuf[gbi : gbi+gbs] - // gbi += gbs - // pt.Params.Indexes.GSynSt = gsi - // pt.GSyns = nt.PathGSyns[gsi : gsi+nneur*nt.MaxData] - // gsi += nneur * nt.MaxData - // } - // } + nix.MaxDelay = maxDel + maxData := nt.NetIxs().MaxData + mxlen := maxDel + 1 + sltensor.SetShapeSizes(&nt.PathGBuf, int(npjneur), int(mxlen), int(maxData)) + sltensor.SetShapeSizes(&nt.PathGSyns, int(npjneur), int(maxData)) + + gbi := uint32(0) + gsi := uint32(0) + for _, ly := range nt.Layers { + nneur := uint32(ly.NNeurons) + for _, pt := range ly.RecvPaths { + gbs := nneur * mxlen * maxData + pt.Params.Indexes.GBufSt = gbi + // pt.GBuf = nt.PathGBuf[gbi : gbi+gbs] + gbi += gbs + pt.Params.Indexes.GSynSt = gsi + // pt.GSyns = nt.PathGSyns[gsi : gsi+nneur*nt.MaxData] + gsi += nneur * maxData + } + } } // SetAsCurrent sets this network's values as the current global variables, // that are then processed in the code. func (nt *Network) SetAsCurrent() { - LayParams = nt.LayParams + Layers = nt.LayParams Paths = nt.PathParams NeuronIxs = &nt.NeuronIxs SynapseIxs = &nt.SynapseIxs @@ -1027,7 +1016,8 @@ func (nt *Network) ReadWeightsJSON(r io.Reader) error { // SynsSlice returns a slice of synaptic values, in natural sending order, // using given synaptic variable, resizing as needed. func (nt *Network) SynsSlice(vals *[]float32, synvar SynapseVars) { - *vals = slicesx.SetLength(*vals, int(nt.NSyns)) + nix := nt.NetIxs() + *vals = slicesx.SetLength(*vals, int(nix.NSyns)) i := 0 for _, ly := range nt.Layers { for _, pt := range ly.SendPaths { @@ -1046,7 +1036,8 @@ func (nt *Network) SynsSlice(vals *[]float32, synvar SynapseVars) { // NeuronsSlice returns a slice of neuron values // using given neuron variable, resizing as needed. func (nt *Network) NeuronsSlice(vals *[]float32, nrnVar string, di int) { - *vals = slicesx.SetLength(*vals, int(nt.NNeurons)) + nix := nt.NetIxs() + *vals = slicesx.SetLength(*vals, int(nix.NNeurons)) i := 0 for _, ly := range nt.Layers { varIndex, _ := ly.UnitVarIndex(nrnVar) @@ -1081,16 +1072,18 @@ func HashEncodeSlice(slice []float32) string { // CheckSameSize checks if this network is the same size as given other, // in terms of NNeurons, MaxData, and NSyns. Returns error message if not. 
func (nt *Network) CheckSameSize(on *Network) error { - if nt.NNeurons != on.NNeurons { - err := fmt.Errorf("CheckSameSize: dest NNeurons: %d != src NNeurons: %d", nt.NNeurons, on.NNeurons) + nix := nt.NetIxs() + nox := on.NetIxs() + if nix.NNeurons != nox.NNeurons { + err := fmt.Errorf("CheckSameSize: dest NNeurons: %d != src NNeurons: %d", nix.NNeurons, nox.NNeurons) return err } - if nt.MaxData != on.MaxData { - err := fmt.Errorf("CheckSameSize: dest MaxData: %d != src MaxData: %d", nt.MaxData, on.MaxData) + if nix.MaxData != nox.MaxData { + err := fmt.Errorf("CheckSameSize: dest MaxData: %d != src MaxData: %d", nix.MaxData, nox.MaxData) return err } - if nt.NSyns != on.NSyns { - err := fmt.Errorf("CheckSameSize: dest NSyns: %d != src NSyns: %d", nt.NSyns, on.NSyns) + if nix.NSyns != nox.NSyns { + err := fmt.Errorf("CheckSameSize: dest NSyns: %d != src NSyns: %d", nix.NSyns, nox.NSyns) return err } return nil @@ -1119,10 +1112,10 @@ func (nt *Network) CopyStateFrom(on *Network) error { // and given other, up to given max number of differences (0 = all), // for each state value. func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string { + nix := nt.NetIxs() diffs := "" ndif := 0 - nix := nt.NetIxs() - for di := uint32(0); di < nix.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { for ni := uint32(0); ni < nix.NNeurons; ni++ { for nvar := Spike; nvar < NeuronVarsN; nvar++ { nv := nt.Neurons.Value(int(nvar), int(ni), int(di)) @@ -1163,8 +1156,8 @@ func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string { } } } - for di := uint32(0); di < nix.NData; di++ { - for si := uint32(0); si < nt.NSyns; si++ { + for di := uint32(0); di < ctx.NData; di++ { + for si := uint32(0); si < nix.NSyns; si++ { for svar := Tr; svar < SynapseTraceVarsN; svar++ { sv := nt.SynapseTraces.Value(int(svar), int(si), int(di)) ov := on.SynapseTraces.Value(int(svar), int(si), int(di)) diff --git a/axon/networkbase.goal b/axon/networkbase.goal index 8614c119..00d6edd1 100644 --- a/axon/networkbase.goal +++ b/axon/networkbase.goal @@ -39,12 +39,16 @@ import ( // NetworkIndexes are indexes and sizes for processing network. type NetworkIndexes struct { - // maximum number of data inputs that can be processed + // MaxData is the maximum number of data inputs that can be processed // in parallel in one pass of the network. // Neuron storage is allocated to hold this amount during // Build process, and this value reflects that. MaxData uint32 `edit:"-"` + // MaxDelay is the maximum synaptic delay across all pathways at the time of + // [Network.Build]. This determines the size of the spike sending delay buffers. 
+ MaxDelay uint32 `edit:-"-"` + // number of layers in the network NLayers uint32 `edit:"-"` @@ -90,11 +94,6 @@ func (ix *NetworkIndexes) DataIndex(idx uint32) uint32 { return idx % ix.MaxData } -// DataIndexIsValid returns true if the data index is valid (< NData) -func (ix *NetworkIndexes) DataIndexIsValid(li uint32) bool { - return (li < ix.NData) -} - // LayerIndexIsValid returns true if the layer index is valid (< NLayers) func (ix *NetworkIndexes) LayerIndexIsValid(li uint32) bool { return (li < ix.NLayers) @@ -270,10 +269,10 @@ func (nt *Network) NetIxs() *NetworkIndexes { return &nt.NetworkIxs[0] } func (nt *Network) NumLayers() int { return len(nt.Layers) } func (nt *Network) EmerLayer(idx int) emer.Layer { return nt.Layers[idx] } func (nt *Network) MaxParallelData() int { return int(nt.NetIxs().MaxData) } -func (nt *Network) NParallelData() int { return int(nt.NetIxs().NData) } +func (nt *Network) NParallelData() int { return int(nt.Context().NData) } func (nt *Network) Init() { - nt.MaxData = 1 + nt.NetIxs().MaxData = 1 } // NewNetwork returns a new axon Network @@ -320,7 +319,8 @@ func (nt *Network) LayersByType(layType ...LayerTypes) []string { // LayerValues returns LayerValues for given layer and data parallel indexes func (nt *Network) LayerValues(li, di uint32) *LayerValues { - return &nt.LayValues[li*nt.MaxData+di] + md := nt.NetIxs().MaxData + return &nt.LayValues[li*md+di] } // UnitVarNames returns a list of variable names available on the units in this network. @@ -496,9 +496,9 @@ func (nt *Network) SaveAllPathScales(filename core.Filename) error { // AllGlobals returns a listing of all Global variables and values. func (nt *Network) AllGlobals() string { - ctx := nt.Context() + md := nt.NetIxs().MaxData str := "" - for di := uint32(0); di < nt.MaxData; di++ { + for di := uint32(0); di < md; di++ { str += fmt.Sprintf("\n###############################\nData Index: %02d\n\n", di) for vv := GvRew; vv < GlobalScalarVarsN; vv++ { str += fmt.Sprintf("%20s:\t%7.4f\n", vv.String(), GlobalScalars[vv, di]) @@ -537,8 +537,8 @@ func (nt *Network) ShowAllGlobals() { //types:add // AllGlobalValues adds to map of all Global variables and values. // ctrKey is a key of counters to contextualize values. func (nt *Network) AllGlobalValues(ctrKey string, vals map[string]float32) { - ctx := nt.Context() - for di := uint32(0); di < nt.MaxData; di++ { + md := nt.NetIxs().MaxData + for di := uint32(0); di < md; di++ { for vv := GvRew; vv < GlobalScalarVarsN; vv++ { key := fmt.Sprintf("%s Di: %d\t%s", ctrKey, di, vv.String()) vals[key] = GlobalScalars[vv, di] @@ -677,25 +677,14 @@ func (nt *Network) LateralConnectLayerPath(lay *Layer, pat paths.Pattern, pt *Pa return pt } -// SetCtxStrides sets the given simulation context strides for accessing -// variables on this network -- these must be set properly before calling -// any compute methods with the context. -func (nt *Network) SetCtxStrides(simCtx *Context) { - simCtx.CopyNetStridesFrom(nt.Context()) -} - // SetMaxData sets the MaxData and current NData for both the Network and the Context func (nt *Network) SetMaxData(simCtx *Context, maxData int) { - nt.MaxData = uint32(maxData) - simCtx.NetIndexes.NData = uint32(maxData) - simCtx.NetIndexes.MaxData = uint32(maxData) + nt.NetIxs().MaxData = uint32(maxData) + simCtx.NData = uint32(maxData) } // Build constructs the layer and pathway state based on the layer shapes -// and patterns of interconnectivity. Configures threading using heuristics based -// on final network size. 
Must set UseGPUOrder properly prior to calling. -// Configures the given Context object used in the simulation with the memory -// access strides for this network -- must be set properly -- see SetCtxStrides. +// and patterns of interconnectivity. func (nt *Network) Build(simCtx *Context) error { //types:add nt.MakeLayerMaps() if nt.Rubicon.NPosUSs == 0 { @@ -705,7 +694,7 @@ func (nt *Network) Build(simCtx *Context) error { //types:add ctx := nt.Context() *ctx = *simCtx nt.FunTimes = make(map[string]*timer.Time) - maxData := int(nt.MaxData) + maxData := int(nt.NetIxs().MaxData) var errs []error totNeurons := 0 totPaths := 0 @@ -734,15 +723,15 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nt.LayParams = make([]LayerParams, nLayers) nt.LayValues = make([]LayerValues, nLayers*maxData) nt.Pools = make([]Pool, totPools*maxData) - sltensor.SetShapeSizes(&nt.Neurons, int(NeuronVarsN), totNeurons, int(nt.MaxData)) + sltensor.SetShapeSizes(&nt.Neurons, int(NeuronVarsN), totNeurons, maxData) sltensor.SetShapeSizes(&nt.NeuronAvgs, int(NeuronAvgVarsN), totNeurons) sltensor.SetShapeSizes(&nt.NeuronIxs, int(NeuronIndexVarsN), totNeurons) nt.Paths = make([]*Path, totPaths) nt.PathParams = make([]PathParams, totPaths) sltensor.SetShapeSizes(&nt.Exts, totExts, maxData) - sltensor.SetShapeSizes(&nt.GlobalScalars, int(GlobalScalarVarsN), int(nt.MaxData)) - sltensor.SetShapeSizes(&nt.GlobalVectors, int(GlobalVectorVarsN), int(MaxGlobalVecN), int(nt.MaxData)) + sltensor.SetShapeSizes(&nt.GlobalScalars, int(GlobalScalarVarsN), maxData) + sltensor.SetShapeSizes(&nt.GlobalVectors, int(GlobalVectorVarsN), int(MaxGlobalVecN), maxData) totSynapses := 0 totRecvCon := 0 @@ -763,13 +752,13 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nn := shp.Len() ly.NNeurons = uint32(nn) ly.NeurStIndex = uint32(neurIndex) - ly.MaxData = nt.MaxData + ly.MaxData = uint32(maxData) np := ly.NumPools() + 1 npd := np * maxData ly.NPools = uint32(np) ly.Pools = nt.Pools[poolIndex : poolIndex+npd] ly.Params.Indexes.LayIndex = uint32(li) - ly.Params.Indexes.MaxData = nt.MaxData + ly.Params.Indexes.MaxData = uint32(maxData) ly.Params.Indexes.PoolSt = uint32(poolIndex) ly.Params.Indexes.NeurSt = uint32(neurIndex) ly.Params.Indexes.NeurN = uint32(nn) @@ -838,7 +827,7 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nt.NetworkIxs[0].NSyns = uint32(totSynapses) sltensor.SetShapeSizes(&nt.Synapses, int(SynapseVarsN), totSynapses) - sltensor.SetShapeSizes(&nt.SynapseTraces, int(SynapseTraceVarsN), totSynapses, int(nt.MaxData)) + sltensor.SetShapeSizes(&nt.SynapseTraces, int(SynapseTraceVarsN), totSynapses, maxData) sltensor.SetShapeSizes(&nt.SynapseIxs, int(SynapseIndexVarsN), totSynapses) sltensor.SetShapeSizes(&nt.PathSendCon, totSendCon, 2) sltensor.SetShapeSizes(&nt.PathRecvCon, totRecvCon, 2) @@ -910,9 +899,7 @@ func (nt *Network) Build(simCtx *Context) error { //types:add rpathIndex++ } } - nix.NSyns = nt.NSyns - - nt.SetCtxStrides(simCtx) + nix.NSyns = uint32(syIndex) nt.LayoutLayers() return errors.Join(errs...) @@ -923,41 +910,44 @@ func (nt *Network) Build(simCtx *Context) error { //types:add // which should have been configured by this point. 
// Called by default in InitWeights() func (nt *Network) BuildPathGBuf() { - nt.MaxDelay = 0 + nix := nt.NetIxs() + maxDel := uint32(0) npjneur := uint32(0) for _, ly := range nt.Layers { nneur := uint32(ly.NNeurons) for _, pt := range ly.RecvPaths { - if pt.Params.Com.MaxDelay > nt.MaxDelay { - nt.MaxDelay = pt.Params.Com.MaxDelay + if pt.Params.Com.MaxDelay > maxDel { + maxDel = pt.Params.Com.MaxDelay } npjneur += nneur } } - mxlen := nt.MaxDelay + 1 - sltensor.SetShapeSizes(&nt.PathGBuf, int(npjneur), int(mxlen), int(nt.MaxData)) - sltensor.SetShapeSizes(&nt.PathGSyns, int(npjneur), int(nt.MaxData)) - - // gbi := uint32(0) - // gsi := uint32(0) - // for _, ly := range nt.Layers { - // nneur := uint32(ly.NNeurons) - // for _, pt := range ly.RecvPaths { - // gbs := nneur * mxlen * nt.MaxData - // pt.Params.Indexes.GBufSt = gbi - // pt.GBuf = nt.PathGBuf[gbi : gbi+gbs] - // gbi += gbs - // pt.Params.Indexes.GSynSt = gsi - // pt.GSyns = nt.PathGSyns[gsi : gsi+nneur*nt.MaxData] - // gsi += nneur * nt.MaxData - // } - // } + nix.MaxDelay = maxDel + maxData := nt.NetIxs().MaxData + mxlen := maxDel + 1 + sltensor.SetShapeSizes(&nt.PathGBuf, int(npjneur), int(mxlen), int(maxData)) + sltensor.SetShapeSizes(&nt.PathGSyns, int(npjneur), int(maxData)) + + gbi := uint32(0) + gsi := uint32(0) + for _, ly := range nt.Layers { + nneur := uint32(ly.NNeurons) + for _, pt := range ly.RecvPaths { + gbs := nneur * mxlen * maxData + pt.Params.Indexes.GBufSt = gbi + // pt.GBuf = nt.PathGBuf[gbi : gbi+gbs] + gbi += gbs + pt.Params.Indexes.GSynSt = gsi + // pt.GSyns = nt.PathGSyns[gsi : gsi+nneur*nt.MaxData] + gsi += nneur * maxData + } + } } // SetAsCurrent sets this network's values as the current global variables, // that are then processed in the code. func (nt *Network) SetAsCurrent() { - LayParams = nt.LayParams + Layers = nt.LayParams Paths = nt.PathParams NeuronIxs = &nt.NeuronIxs SynapseIxs = &nt.SynapseIxs @@ -1023,7 +1013,8 @@ func (nt *Network) ReadWeightsJSON(r io.Reader) error { // SynsSlice returns a slice of synaptic values, in natural sending order, // using given synaptic variable, resizing as needed. func (nt *Network) SynsSlice(vals *[]float32, synvar SynapseVars) { - *vals = slicesx.SetLength(*vals, int(nt.NSyns)) + nix := nt.NetIxs() + *vals = slicesx.SetLength(*vals, int(nix.NSyns)) i := 0 for _, ly := range nt.Layers { for _, pt := range ly.SendPaths { @@ -1042,7 +1033,8 @@ func (nt *Network) SynsSlice(vals *[]float32, synvar SynapseVars) { // NeuronsSlice returns a slice of neuron values // using given neuron variable, resizing as needed. func (nt *Network) NeuronsSlice(vals *[]float32, nrnVar string, di int) { - *vals = slicesx.SetLength(*vals, int(nt.NNeurons)) + nix := nt.NetIxs() + *vals = slicesx.SetLength(*vals, int(nix.NNeurons)) i := 0 for _, ly := range nt.Layers { varIndex, _ := ly.UnitVarIndex(nrnVar) @@ -1077,16 +1069,18 @@ func HashEncodeSlice(slice []float32) string { // CheckSameSize checks if this network is the same size as given other, // in terms of NNeurons, MaxData, and NSyns. Returns error message if not. 
func (nt *Network) CheckSameSize(on *Network) error { - if nt.NNeurons != on.NNeurons { - err := fmt.Errorf("CheckSameSize: dest NNeurons: %d != src NNeurons: %d", nt.NNeurons, on.NNeurons) + nix := nt.NetIxs() + nox := on.NetIxs() + if nix.NNeurons != nox.NNeurons { + err := fmt.Errorf("CheckSameSize: dest NNeurons: %d != src NNeurons: %d", nix.NNeurons, nox.NNeurons) return err } - if nt.MaxData != on.MaxData { - err := fmt.Errorf("CheckSameSize: dest MaxData: %d != src MaxData: %d", nt.MaxData, on.MaxData) + if nix.MaxData != nox.MaxData { + err := fmt.Errorf("CheckSameSize: dest MaxData: %d != src MaxData: %d", nix.MaxData, nox.MaxData) return err } - if nt.NSyns != on.NSyns { - err := fmt.Errorf("CheckSameSize: dest NSyns: %d != src NSyns: %d", nt.NSyns, on.NSyns) + if nix.NSyns != nox.NSyns { + err := fmt.Errorf("CheckSameSize: dest NSyns: %d != src NSyns: %d", nix.NSyns, nox.NSyns) return err } return nil @@ -1115,10 +1109,10 @@ func (nt *Network) CopyStateFrom(on *Network) error { // and given other, up to given max number of differences (0 = all), // for each state value. func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string { + nix := nt.NetIxs() diffs := "" ndif := 0 - nix := nt.NetIxs() - for di := uint32(0); di < nix.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { for ni := uint32(0); ni < nix.NNeurons; ni++ { for nvar := Spike; nvar < NeuronVarsN; nvar++ { nv := nt.Neurons[nvar, ni, di] @@ -1159,8 +1153,8 @@ func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string { } } } - for di := uint32(0); di < nix.NData; di++ { - for si := uint32(0); si < nt.NSyns; si++ { + for di := uint32(0); di < ctx.NData; di++ { + for si := uint32(0); si < nix.NSyns; si++ { for svar := Tr; svar < SynapseTraceVarsN; svar++ { sv := nt.SynapseTraces[svar, si, di] ov := on.SynapseTraces[svar, si, di] diff --git a/axon/neuron.go b/axon/neuron.go index 478aed74..1084bb4b 100644 --- a/axon/neuron.go +++ b/axon/neuron.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line neuron.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/path-algo.go b/axon/path-algo.go index 869a1577..53924aef 100644 --- a/axon/path-algo.go +++ b/axon/path-algo.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line path-algo.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,9 +8,6 @@ package axon // path-algo.go has the core computational methods. 
-////////////////////////////////////////////////////////////////////// -// Act methods - ////////////////////////////////////////////////////////////////////////////////////// // Learn methods @@ -21,6 +18,7 @@ func (pj *Path) DWt(ctx *Context, si uint32) { if pj.Params.Learn.Learn.IsFalse() { return } + scon := pj.SendCon[si-pj.Send.NeurStIndex] rlay := pj.Recv isTarget := rlay.Params.Acts.Clamp.IsTarget.IsTrue() @@ -28,7 +26,7 @@ func (pj *Path) DWt(ctx *Context, si uint32) { syni := pj.SynStIndex + syi ri := SynapseIxs.Value(int(SynRecvIndex), int(syni)) dwt := float32(0) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { layPool := rlay.Pool(0, di) subPool := rlay.SubPool(ctx, ri, di) pj.Params.DWtSyn(ctx, syni, si, ri, di, layPool, subPool, isTarget) diff --git a/axon/path-algo.goal b/axon/path-algo.goal index effe8680..f382c2e9 100644 --- a/axon/path-algo.goal +++ b/axon/path-algo.goal @@ -6,9 +6,6 @@ package axon // path-algo.go has the core computational methods. -////////////////////////////////////////////////////////////////////// -// Act methods - ////////////////////////////////////////////////////////////////////////////////////// // Learn methods @@ -19,6 +16,7 @@ func (pj *Path) DWt(ctx *Context, si uint32) { if pj.Params.Learn.Learn.IsFalse() { return } + scon := pj.SendCon[si-pj.Send.NeurStIndex] rlay := pj.Recv isTarget := rlay.Params.Acts.Clamp.IsTarget.IsTrue() @@ -26,7 +24,7 @@ func (pj *Path) DWt(ctx *Context, si uint32) { syni := pj.SynStIndex + syi ri := SynapseIxs[SynRecvIndex, syni] dwt := float32(0) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { layPool := rlay.Pool(0, di) subPool := rlay.SubPool(ctx, ri, di) pj.Params.DWtSyn(ctx, syni, si, ri, di, layPool, subPool, isTarget) diff --git a/axon/path.go b/axon/path.go index 4fc8600f..6db8ca78 100644 --- a/axon/path.go +++ b/axon/path.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line path.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/pathbase.go b/axon/pathbase.go index 0565bf77..bab53e34 100644 --- a/axon/pathbase.go +++ b/axon/pathbase.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line pathbase.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/pathparams.go b/axon/pathparams.go index a39cca2c..979e89a5 100644 --- a/axon/pathparams.go +++ b/axon/pathparams.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line pathparams.goal:1 // Copyright (c) 2023, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
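The Path.DWt hunks above now loop over ctx.NData (the current number of data-parallel inputs) rather than ctx.NetIndexes.NData, accumulating one delta weight across all data indexes. A minimal, self-contained sketch of that accumulate-across-data pattern, with a plain slice standing in for the axon tensors (the names diDWt and accumulateDWt are illustrative, not library API):

```Go
package main

import "fmt"

// accumulateDWt sums per-data-index delta weights across the first nData
// entries, mirroring the loop: for di := uint32(0); di < ctx.NData; di++.
// This is a sketch only; in axon the values come from synapse trace tensors.
func accumulateDWt(diDWt []float32, nData uint32) float32 {
	dwt := float32(0)
	for di := uint32(0); di < nData; di++ {
		dwt += diDWt[di]
	}
	return dwt
}

func main() {
	// Four data-parallel inputs, each with its own per-data delta weight.
	diDWt := []float32{0.01, -0.02, 0.005, 0.0}
	fmt.Println(accumulateDWt(diDWt, 4)) // -> approximately -0.005
}
```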
@@ -251,10 +251,12 @@ func (pt *PathParams) SynSendLayerIndex(syni uint32) uint32 { func (pt *PathParams) GatherSpikes(ctx *Context, ly *LayerParams, ni, di, lni uint32) { maxd := NetIxs().MaxData bi := pt.Indexes.GBufSt + pt.Com.ReadIndex(lni, di, ctx.CyclesTotal, pt.Indexes.RecvNeurN, maxd) - gRaw := pt.Com.FloatFromGBuf(PathGBuf[bi]) - PathGBuf[bi] = 0 + gRaw := pt.Com.FloatFromGBuf(PathGBuf.Value1D(int(bi))) + PathGBuf.Set1D(0, int(bi)) gsi := lni*maxd + di - pt.GatherSpikesGSyn(ctx, ly, ni, di, gRaw, &PathGSyns[pt.Indexes.GSynSt+gsi]) + gsyn := PathGSyns.Value1D(int(pt.Indexes.GSynSt + gsi)) + pt.GatherSpikesGSyn(ctx, ly, ni, di, gRaw, &gsyn) + PathGSyns.Set1D(gsyn, int(pt.Indexes.GSynSt+gsi)) } // GatherSpikes integrates G*Raw and G*Syn values for given neuron @@ -290,7 +292,7 @@ func (pt *PathParams) GatherSpikesGSyn(ctx *Context, ly *LayerParams, ni, di uin func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) { sendVal := pt.GScale.Scale * pt.Com.FloatToIntFactor() // pre-bake in conversion to uint factor if pt.PathType == CTCtxtPath { - if ctx.Cycle != ctx.ThetaCycles-1-int(pt.Com.DelLen) { + if uint32(ctx.Cycle) != uint32(ctx.ThetaCycles)-1-pt.Com.DelLen { return } sendVal *= Neurons.Value(int(Burst), int(ni), int(di)) // Burst is regular CaSpkP for all non-SuperLayer neurons @@ -662,7 +664,7 @@ func (pt *PathParams) DWtSynVSPatch(ctx *Context, syni, si, ri, di uint32, layPo // DWtFromDiDWtSyn updates DWt from data parallel DiDWt values func (pt *PathParams) DWtFromDiDWtSyn(ctx *Context, syni uint32) { dwt := float32(0) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { dwt += SynapseTraces.Value(int(DiDWt), int(syni), int(di)) } Synapses.SetAdd(dwt, int(DWt), int(syni)) diff --git a/axon/pathparams.goal b/axon/pathparams.goal index 6a7b7a8a..b4257973 100644 --- a/axon/pathparams.goal +++ b/axon/pathparams.goal @@ -249,10 +249,12 @@ func (pt *PathParams) SynSendLayerIndex(syni uint32) uint32 { func (pt *PathParams) GatherSpikes(ctx *Context, ly *LayerParams, ni, di, lni uint32) { maxd := NetIxs().MaxData bi := pt.Indexes.GBufSt + pt.Com.ReadIndex(lni, di, ctx.CyclesTotal, pt.Indexes.RecvNeurN, maxd) - gRaw := pt.Com.FloatFromGBuf(PathGBuf[bi]) - PathGBuf[bi] = 0 + gRaw := pt.Com.FloatFromGBuf(PathGBuf.Value1D(int(bi))) + PathGBuf.Set1D(0, int(bi)) gsi := lni*maxd + di - pt.GatherSpikesGSyn(ctx, ly, ni, di, gRaw, &PathGSyns[pt.Indexes.GSynSt+gsi]) + gsyn := PathGSyns.Value1D(int(pt.Indexes.GSynSt + gsi)) + pt.GatherSpikesGSyn(ctx, ly, ni, di, gRaw, &gsyn) + PathGSyns.Set1D(gsyn, int(pt.Indexes.GSynSt+gsi)) } // GatherSpikes integrates G*Raw and G*Syn values for given neuron @@ -288,7 +290,7 @@ func (pt *PathParams) GatherSpikesGSyn(ctx *Context, ly *LayerParams, ni, di uin func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) { sendVal := pt.GScale.Scale * pt.Com.FloatToIntFactor() // pre-bake in conversion to uint factor if pt.PathType == CTCtxtPath { - if ctx.Cycle != ctx.ThetaCycles-1-int(pt.Com.DelLen) { + if uint32(ctx.Cycle) != uint32(ctx.ThetaCycles)-1-pt.Com.DelLen { return } sendVal *= Neurons[Burst, ni, di] // Burst is regular CaSpkP for all non-SuperLayer neurons @@ -660,7 +662,7 @@ func (pt *PathParams) DWtSynVSPatch(ctx *Context, syni, si, ri, di uint32, layPo // DWtFromDiDWtSyn updates DWt from data parallel DiDWt values func (pt *PathParams) DWtFromDiDWtSyn(ctx *Context, syni uint32) { dwt := float32(0) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < 
ctx.NData; di++ { dwt += SynapseTraces[DiDWt, syni, di] } Synapses[DWt, syni] += dwt diff --git a/axon/pcore_layers.go b/axon/pcore_layers.go index e75e167c..cbf67919 100644 --- a/axon/pcore_layers.go +++ b/axon/pcore_layers.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line pcore_layers.goal:1 // Copyright (c) 2022, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,7 +14,7 @@ import ( "cogentcore.org/core/goal/gosl/slbool" ) -//gosl:start pcore_layers +//gosl:start // MatrixParams has parameters for BG Striatum Matrix MSN layers // These are the main Go / NoGo gating units in BG. @@ -95,7 +95,7 @@ func (gp *GPParams) Defaults() { func (gp *GPParams) Update() { } -//gosl:end pcore_layers +//gosl:end // MatrixGated is called after std PlusPhase, on CPU, has Pool info // downloaded from GPU, to set Gated flag based on SpkMax activity @@ -103,14 +103,14 @@ func (ly *Layer) MatrixGated(ctx *Context) { if ly.Params.Learn.NeuroMod.DAMod != D1Mod { oly := ly.Network.Layers[int(ly.Params.Matrix.OtherMatrixIndex)] // note: NoGo layers don't track gating at the sub-pool level! - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ly.Pool(0, di).Gated = oly.Pool(0, di).Gated } return } // todo: Context requires data parallel state! - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { mtxGated, poolIndex := ly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr) thalGated := false diff --git a/axon/pcore_layers.goal b/axon/pcore_layers.goal index 24f19f19..d87b50a2 100644 --- a/axon/pcore_layers.goal +++ b/axon/pcore_layers.goal @@ -12,7 +12,7 @@ import ( "cogentcore.org/core/goal/gosl/slbool" ) -//gosl:start pcore_layers +//gosl:start // MatrixParams has parameters for BG Striatum Matrix MSN layers // These are the main Go / NoGo gating units in BG. @@ -93,7 +93,7 @@ func (gp *GPParams) Defaults() { func (gp *GPParams) Update() { } -//gosl:end pcore_layers +//gosl:end // MatrixGated is called after std PlusPhase, on CPU, has Pool info // downloaded from GPU, to set Gated flag based on SpkMax activity @@ -101,14 +101,14 @@ func (ly *Layer) MatrixGated(ctx *Context) { if ly.Params.Learn.NeuroMod.DAMod != D1Mod { oly := ly.Network.Layers[int(ly.Params.Matrix.OtherMatrixIndex)] // note: NoGo layers don't track gating at the sub-pool level! - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ly.Pool(0, di).Gated = oly.Pool(0, di).Gated } return } // todo: Context requires data parallel state! - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { mtxGated, poolIndex := ly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr) thalGated := false diff --git a/axon/pool.go b/axon/pool.go index c1db8ffa..b0174755 100644 --- a/axon/pool.go +++ b/axon/pool.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line pool.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
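The GatherSpikes changes above move PathGBuf and PathGSyns access onto the tensor Value1D / Set1D accessors, but the underlying step is unchanged: read the delayed raw conductance at the ring-buffer index, zero that slot, and fold the value into the per-neuron GSyn accumulator. A hedged sketch of that read-and-clear step using plain slices; the conversion function and all names here are stand-ins, not the axon API:

```Go
package main

import "fmt"

// gatherOne consumes one delay-buffer slot: convert the buffered integer
// spike value to a float conductance, clear the slot for reuse on the next
// delay cycle, and add it into the GSyn accumulator. A real pathway applies
// exponential integration here rather than a bare addition.
func gatherOne(gBuf []int32, gSyn []float32, bi, gsi int, fromGBuf func(int32) float32) {
	gRaw := fromGBuf(gBuf[bi]) // stand-in for FloatFromGBuf
	gBuf[bi] = 0               // mirrors: PathGBuf.Set1D(0, int(bi))
	gSyn[gsi] += gRaw
}

func main() {
	gBuf := []int32{0, 42, 0}
	gSyn := make([]float32, 1)
	toFloat := func(v int32) float32 { return float32(v) / 100 } // illustrative scaling only
	gatherOne(gBuf, gSyn, 1, 0, toFloat)
	fmt.Println(gBuf[1], gSyn[0]) // -> 0 0.42
}
```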
@@ -208,14 +208,14 @@ func (pl *Pool) AvgMaxUpdate(ctx *Context, ni, di uint32) { func (pl *Pool) PoolGi(ctx *Context, di uint32) { nix := NetIxs() li := pl.LayIndex - giMult := LayerValues[nix.ValuesIndex(li, di)].ActAvg.GiMult + giMult := LayValues[nix.ValuesIndex(li, di)].ActAvg.GiMult if pl.IsLayPool > 0 { return } - pl.AvgMax.Calc(li) + pl.AvgMax.Calc(int32(li)) pl.Inhib.IntToRaw() lyIsOn := (Layers[li].Inhib.Layer.On == 1) - Layers[li].SubPoolGiFromSpikes(ctx, di, pl, &Pools[ly.Indexes.PoolIndex(0, di)], lyIsOn, giMult) + Layers[li].SubPoolGiFromSpikes(ctx, di, pl, &Pools[Layers[li].Indexes.PoolIndex(0, di)], lyIsOn, giMult) } //gosl:end diff --git a/axon/pool.goal b/axon/pool.goal index c5daefb9..2dc2c575 100644 --- a/axon/pool.goal +++ b/axon/pool.goal @@ -206,14 +206,14 @@ func (pl *Pool) AvgMaxUpdate(ctx *Context, ni, di uint32) { func (pl *Pool) PoolGi(ctx *Context, di uint32) { nix := NetIxs() li := pl.LayIndex - giMult := LayerValues[nix.ValuesIndex(li, di)].ActAvg.GiMult + giMult := LayValues[nix.ValuesIndex(li, di)].ActAvg.GiMult if pl.IsLayPool > 0 { return } - pl.AvgMax.Calc(li) + pl.AvgMax.Calc(int32(li)) pl.Inhib.IntToRaw() lyIsOn := (Layers[li].Inhib.Layer.On == 1) - Layers[li].SubPoolGiFromSpikes(ctx, di, pl, &Pools[ly.Indexes.PoolIndex(0, di)], lyIsOn, giMult) + Layers[li].SubPoolGiFromSpikes(ctx, di, pl, &Pools[Layers[li].Indexes.PoolIndex(0, di)], lyIsOn, giMult) } //gosl:end diff --git a/axon/pool_test.go b/axon/pool_test.go index beb33c39..0ddb97fe 100644 --- a/axon/pool_test.go +++ b/axon/pool_test.go @@ -114,7 +114,7 @@ func newPoolTestNet(ctx *Context, nData int) *Network { testNet.ConnectLayers(outLay, hidLay, paths.NewOneToOne(), BackPath) testNet.Build(ctx) - ctx.NetIndexes.NData = uint32(nData) + ctx.NData = uint32(nData) testNet.Defaults() testNet.ApplyParams(PoolParamSets["Base"], false) // false) // true) // no msg testNet.InitWeights(ctx) // get GScale here diff --git a/axon/rubicon.go b/axon/rubicon.go index 9a63d2bf..a26461f7 100644 --- a/axon/rubicon.go +++ b/axon/rubicon.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line rubicon.goal:1 // Copyright (c) 2022, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -697,14 +697,15 @@ func (rp *Rubicon) USnegIndex(simUsIndex int) int { // The USs specified here need to be managed by the simulation via the SetUS method. // Positive USs each have corresponding Drives. func (rp *Rubicon) SetNUSs(ctx *Context, nPos, nNeg int) { + nix := NetIxs() nPos = rp.USposIndex(max(nPos, 1)) nNeg = rp.USnegIndex(max(nNeg, 1)) // ensure at least 1 rp.NPosUSs = uint32(nPos) rp.NNegUSs = uint32(nNeg) rp.NCosts = 2 // default - ctx.NetIndexes.RubiconNPosUSs = rp.NPosUSs - ctx.NetIndexes.RubiconNNegUSs = rp.NNegUSs - ctx.NetIndexes.RubiconNCosts = rp.NCosts + nix.RubiconNPosUSs = rp.NPosUSs + nix.RubiconNNegUSs = rp.NNegUSs + nix.RubiconNCosts = rp.NCosts rp.Drive.Alloc(nPos) rp.USs.Alloc(nPos, nNeg, int(rp.NCosts)) } @@ -1201,18 +1202,19 @@ func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) { // RubiconUSStimVal returns stimulus value for US at given index // and valence (includes Cost). If US > 0.01, a full 1 US activation is returned. 
func RubiconUSStimValue(ctx *Context, di uint32, usIndex uint32, valence ValenceTypes) float32 { + nix := NetIxs() us := float32(0) switch valence { case Positive: - if usIndex < ctx.NetIndexes.RubiconNPosUSs { + if usIndex < nix.RubiconNPosUSs { us = GlobalVectors.Value(int(GvUSpos), int(usIndex), int(di)) } case Negative: - if usIndex < ctx.NetIndexes.RubiconNNegUSs { + if usIndex < nix.RubiconNNegUSs { us = GlobalVectors.Value(int(GvUSneg), int(usIndex), int(di)) } case Cost: - if usIndex < ctx.NetIndexes.RubiconNCosts { + if usIndex < nix.RubiconNCosts { us = GlobalVectors.Value(int(GvCost), int(usIndex), int(di)) } default: diff --git a/axon/rubicon.goal b/axon/rubicon.goal index 9f3ee56a..182e4e68 100644 --- a/axon/rubicon.goal +++ b/axon/rubicon.goal @@ -693,14 +693,15 @@ func (rp *Rubicon) USnegIndex(simUsIndex int) int { // The USs specified here need to be managed by the simulation via the SetUS method. // Positive USs each have corresponding Drives. func (rp *Rubicon) SetNUSs(ctx *Context, nPos, nNeg int) { + nix := NetIxs() nPos = rp.USposIndex(max(nPos, 1)) nNeg = rp.USnegIndex(max(nNeg, 1)) // ensure at least 1 rp.NPosUSs = uint32(nPos) rp.NNegUSs = uint32(nNeg) rp.NCosts = 2 // default - ctx.NetIndexes.RubiconNPosUSs = rp.NPosUSs - ctx.NetIndexes.RubiconNNegUSs = rp.NNegUSs - ctx.NetIndexes.RubiconNCosts = rp.NCosts + nix.RubiconNPosUSs = rp.NPosUSs + nix.RubiconNNegUSs = rp.NNegUSs + nix.RubiconNCosts = rp.NCosts rp.Drive.Alloc(nPos) rp.USs.Alloc(nPos, nNeg, int(rp.NCosts)) } @@ -1197,18 +1198,19 @@ func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) { // RubiconUSStimVal returns stimulus value for US at given index // and valence (includes Cost). If US > 0.01, a full 1 US activation is returned. func RubiconUSStimValue(ctx *Context, di uint32, usIndex uint32, valence ValenceTypes) float32 { + nix := NetIxs() us := float32(0) switch valence { case Positive: - if usIndex < ctx.NetIndexes.RubiconNPosUSs { + if usIndex < nix.RubiconNPosUSs { us = GlobalVectors[GvUSpos, usIndex, di] } case Negative: - if usIndex < ctx.NetIndexes.RubiconNNegUSs { + if usIndex < nix.RubiconNNegUSs { us = GlobalVectors[GvUSneg, usIndex, di] } case Cost: - if usIndex < ctx.NetIndexes.RubiconNCosts { + if usIndex < nix.RubiconNCosts { us = GlobalVectors[GvCost, usIndex, di] } default: diff --git a/axon/rubicon_layers.go b/axon/rubicon_layers.go index 50da39ca..b20b7e22 100644 --- a/axon/rubicon_layers.go +++ b/axon/rubicon_layers.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line rubicon_layers.goal:1 // Copyright (c) 2022, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
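The rubicon hunks above move the US and cost counts (RubiconNPosUSs, RubiconNNegUSs, RubiconNCosts) from Context.NetIndexes onto the NetworkIndexes returned by NetIxs(), and RubiconUSStimValue bounds-checks an index against those counts before reading GlobalVectors. A small sketch of that guarded lookup, with a plain slice standing in for GlobalVectors and the >0.01 full-activation readout taken from the doc comment (all names here are illustrative):

```Go
package main

import "fmt"

// usStim returns the stimulus value for a positive US index: out-of-range
// indexes yield 0 (the guard mirrors: if usIndex < nix.RubiconNPosUSs), and
// any US above 0.01 reads out as a full 1 activation, per the doc comment.
func usStim(usPos []float32, nPosUSs, usIndex uint32) float32 {
	us := float32(0)
	if usIndex < nPosUSs {
		us = usPos[usIndex]
	}
	if us > 0.01 {
		return 1
	}
	return us
}

func main() {
	usPos := []float32{0.2, 0.0}
	fmt.Println(usStim(usPos, 2, 0), usStim(usPos, 2, 1), usStim(usPos, 2, 5)) // -> 1 0 0
}
```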
diff --git a/axon/rubicon_net.go b/axon/rubicon_net.go index dce84680..1148270a 100644 --- a/axon/rubicon_net.go +++ b/axon/rubicon_net.go @@ -11,28 +11,28 @@ import ( ) // AddLDTLayer adds a LDTLayer -func (net *Network) AddLDTLayer(prefix string) *Layer { - ldt := net.AddLayer2D(prefix+"LDT", LDTLayer, 1, 1) +func (nt *Network) AddLDTLayer(prefix string) *Layer { + ldt := nt.AddLayer2D(prefix+"LDT", LDTLayer, 1, 1) return ldt } // AddBLALayers adds two BLA layers, acquisition / extinction / D1 / D2, // for positive or negative valence -func (net *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX int, rel relpos.Relations, space float32) (acq, ext *Layer) { +func (nt *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX int, rel relpos.Relations, space float32) (acq, ext *Layer) { if pos { - d1 := net.AddLayer4D(prefix+"BLAposAcqD1", BLALayer, 1, nUs, nNeurY, nNeurX) + d1 := nt.AddLayer4D(prefix+"BLAposAcqD1", BLALayer, 1, nUs, nNeurY, nNeurX) d1.SetBuildConfig("DAMod", "D1Mod") d1.SetBuildConfig("Valence", "Positive") - d2 := net.AddLayer4D(prefix+"BLAposExtD2", BLALayer, 1, nUs, nNeurY, nNeurX) + d2 := nt.AddLayer4D(prefix+"BLAposExtD2", BLALayer, 1, nUs, nNeurY, nNeurX) d2.SetBuildConfig("DAMod", "D2Mod") d2.SetBuildConfig("Valence", "Positive") acq = d1 ext = d2 } else { - d1 := net.AddLayer4D(prefix+"BLAnegExtD1", BLALayer, 1, nUs, nNeurY, nNeurX) + d1 := nt.AddLayer4D(prefix+"BLAnegExtD1", BLALayer, 1, nUs, nNeurY, nNeurX) d1.SetBuildConfig("DAMod", "D1Mod") d1.SetBuildConfig("Valence", "Negative") - d2 := net.AddLayer4D(prefix+"BLAnegAcqD2", BLALayer, 1, nUs, nNeurY, nNeurX) + d2 := nt.AddLayer4D(prefix+"BLAnegAcqD2", BLALayer, 1, nUs, nNeurY, nNeurX) d2.SetBuildConfig("DAMod", "D2Mod") d2.SetBuildConfig("Valence", "Negative") d2.DefaultParams = params.Params{ @@ -42,7 +42,7 @@ func (net *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX in ext = d1 } - pj := net.ConnectLayers(ext, acq, paths.NewPoolOneToOne(), InhibPath) + pj := nt.ConnectLayers(ext, acq, paths.NewPoolOneToOne(), InhibPath) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "0.5", // key param for efficacy of inhibition -- may need to tweak } @@ -55,13 +55,13 @@ func (net *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX in "Path.SWts.Init.Var": "0.0", } - pj = net.ConnectLayers(acq, ext, paths.NewOneToOne(), CTCtxtPath) + pj = nt.ConnectLayers(acq, ext, paths.NewOneToOne(), CTCtxtPath) pj.AddClass("BLAAcqToExt") pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "2", } - pj = net.ConnectLayers(acq, acq, NewBLANovelPath(), InhibPath) + pj = nt.ConnectLayers(acq, acq, NewBLANovelPath(), InhibPath) pj.AddClass("BLANovelInhib") pj.DefaultParams = params.Params{ "Path.Learn.Learn": "false", @@ -71,7 +71,7 @@ func (net *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX in "Path.SWts.Init.Var": "0.0", } - pj = net.ConnectLayers(ext, acq, NewBLANovelPath(), InhibPath) + pj = nt.ConnectLayers(ext, acq, NewBLANovelPath(), InhibPath) pj.AddClass("BLANovelInhib") pj.DefaultParams = params.Params{ "Path.Learn.Learn": "false", @@ -96,27 +96,27 @@ func (net *Network) AddBLALayers(prefix string, pos bool, nUs, nNeurY, nNeurX in // arg -- neg* layers are nil if not included. // Uses the network Rubicon.NPosUSs and NNegUSs for number of pools -- // must be configured prior to calling this. 
-func (net *Network) AddAmygdala(prefix string, neg bool, nNeurY, nNeurX int, space float32) (blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov *Layer) { - nUSpos := int(net.Rubicon.NPosUSs) - nUSneg := int(net.Rubicon.NNegUSs) +func (nt *Network) AddAmygdala(prefix string, neg bool, nNeurY, nNeurX int, space float32) (blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov *Layer) { + nUSpos := int(nt.Rubicon.NPosUSs) + nUSneg := int(nt.Rubicon.NNegUSs) - blaPosAcq, blaPosExt = net.AddBLALayers(prefix, true, nUSpos, nNeurY, nNeurX, relpos.Behind, space) + blaPosAcq, blaPosExt = nt.AddBLALayers(prefix, true, nUSpos, nNeurY, nNeurX, relpos.Behind, space) if neg { - blaNegAcq, blaNegExt = net.AddBLALayers(prefix, false, nUSneg, nNeurY, nNeurX, relpos.Behind, space) + blaNegAcq, blaNegExt = nt.AddBLALayers(prefix, false, nUSneg, nNeurY, nNeurX, relpos.Behind, space) blaPosAcq.SetBuildConfig("LayInhib1Name", blaNegAcq.Name) blaNegAcq.SetBuildConfig("LayInhib1Name", blaPosAcq.Name) } - cemPos = net.AddLayer4D(prefix+"CeMPos", CeMLayer, 1, nUSpos, 1, nNeurX) + cemPos = nt.AddLayer4D(prefix+"CeMPos", CeMLayer, 1, nUSpos, 1, nNeurX) cemPos.SetBuildConfig("DAMod", "D1Mod") // not relevant but avoids warning cemPos.SetBuildConfig("Valence", "Positive") if neg { - cemNeg = net.AddLayer4D(prefix+"CeMNeg", CeMLayer, 1, nUSneg, 1, nNeurX) + cemNeg = nt.AddLayer4D(prefix+"CeMNeg", CeMLayer, 1, nUSneg, 1, nNeurX) cemNeg.SetBuildConfig("DAMod", "D2Mod") // not relevant but avoids warning cemNeg.SetBuildConfig("Valence", "Negative") } - blaNov = net.AddLayer4D(prefix+"BLANovelCS", BLALayer, 1, 1, 4, 4) + blaNov = nt.AddLayer4D(prefix+"BLANovelCS", BLALayer, 1, 1, 4, 4) blaNov.SetBuildConfig("DAMod", "D1Mod") blaNov.SetBuildConfig("Valence", "Positive") blaNov.DefaultParams = params.Params{ @@ -129,16 +129,16 @@ func (net *Network) AddAmygdala(prefix string, neg bool, nNeurY, nNeurX int, spa p1to1 := paths.NewPoolOneToOne() - net.ConnectLayers(blaPosAcq, cemPos, p1to1, ForwardPath).AddClass("BLAToCeM_Excite") - net.ConnectLayers(blaPosExt, cemPos, p1to1, InhibPath).AddClass("BLAToCeM_Inhib") + nt.ConnectLayers(blaPosAcq, cemPos, p1to1, ForwardPath).AddClass("BLAToCeM_Excite") + nt.ConnectLayers(blaPosExt, cemPos, p1to1, InhibPath).AddClass("BLAToCeM_Inhib") // default Abs = 1 works for both of these if neg { - net.ConnectLayers(blaNegAcq, cemNeg, p1to1, ForwardPath).AddClass("BLAToCeM_Excite") - net.ConnectLayers(blaNegExt, cemNeg, p1to1, InhibPath).AddClass("BLAToCeM_Inhib") + nt.ConnectLayers(blaNegAcq, cemNeg, p1to1, ForwardPath).AddClass("BLAToCeM_Excite") + nt.ConnectLayers(blaNegExt, cemNeg, p1to1, InhibPath).AddClass("BLAToCeM_Inhib") } - pj := net.ConnectLayers(blaNov, blaPosAcq, p1to1, ForwardPath) + pj := nt.ConnectLayers(blaNov, blaPosAcq, p1to1, ForwardPath) pj.DefaultParams = params.Params{ // dilutes everyone else, so make it weaker Rel, compensate with Abs "Path.Learn.Learn": "false", "Path.SWts.Adapt.On": "false", @@ -166,8 +166,8 @@ func (net *Network) AddAmygdala(prefix string, neg bool, nNeurY, nNeurX int, spa // ConnectToBLAAcq adds a BLAPath from given sending layer to a BLA layer, // and configures it for acquisition parameters. Sets class to BLAAcqPath. // This is for any CS or contextual inputs that drive acquisition. 
-func (net *Network) ConnectToBLAAcq(send, recv *Layer, pat paths.Pattern) *Path { - pj := net.ConnectLayers(send, recv, pat, BLAPath) +func (nt *Network) ConnectToBLAAcq(send, recv *Layer, pat paths.Pattern) *Path { + pj := nt.ConnectLayers(send, recv, pat, BLAPath) pj.DefaultParams = params.Params{ "Path.Learn.LRate.Base": "0.02", "Path.Learn.Trace.Tau": "1", // increase for second order conditioning @@ -181,8 +181,8 @@ func (net *Network) ConnectToBLAAcq(send, recv *Layer, pat paths.Pattern) *Path // and configures it for extinctrion parameters. Sets class to BLAExtPath. // This is for any CS or contextual inputs that drive extinction neurons to fire // and override the acquisition ones. -func (net *Network) ConnectToBLAExt(send, recv *Layer, pat paths.Pattern) *Path { - pj := net.ConnectLayers(send, recv, pat, BLAPath) +func (nt *Network) ConnectToBLAExt(send, recv *Layer, pat paths.Pattern) *Path { + pj := nt.ConnectLayers(send, recv, pat, BLAPath) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "4", "Path.Learn.LRate.Base": "0.05", // 0.02 for pvlv CS 50% balance @@ -196,8 +196,8 @@ func (net *Network) ConnectToBLAExt(send, recv *Layer, pat paths.Pattern) *Path // ConnectCSToBLApos connects the CS input to BLAposAcqD1, BLANovelCS layers // using fixed, higher-variance weights, full pathway. // Sets classes to: CSToBLApos, CSToBLANovel with default params -func (net *Network) ConnectCSToBLApos(cs, blaAcq, blaNov *Layer) (toAcq, toNov, novInhib *Path) { - toAcq = net.ConnectLayers(cs, blaAcq, paths.NewFull(), BLAPath) +func (nt *Network) ConnectCSToBLApos(cs, blaAcq, blaNov *Layer) (toAcq, toNov, novInhib *Path) { + toAcq = nt.ConnectLayers(cs, blaAcq, paths.NewFull(), BLAPath) toAcq.DefaultParams = params.Params{ // stronger.. "Path.PathScale.Abs": "1.5", "Path.Learn.LRate.Base": "0.1", // faster learning @@ -206,7 +206,7 @@ func (net *Network) ConnectCSToBLApos(cs, blaAcq, blaNov *Layer) (toAcq, toNov, } toAcq.AddClass("CSToBLApos") - toNov = net.ConnectLayers(cs, blaNov, paths.NewFull(), BLAPath) + toNov = nt.ConnectLayers(cs, blaNov, paths.NewFull(), BLAPath) toNov.DefaultParams = params.Params{ // dilutes everyone else, so make it weaker Rel, compensate with Abs "Path.SWts.Init.SPct": "0", "Path.SWts.Init.Mean": "0.75", @@ -216,7 +216,7 @@ func (net *Network) ConnectCSToBLApos(cs, blaAcq, blaNov *Layer) (toAcq, toNov, } toNov.AddClass("CSToBLANovel") - novInhib = net.ConnectLayers(cs, blaNov, paths.NewFull(), InhibPath) + novInhib = nt.ConnectLayers(cs, blaNov, paths.NewFull(), InhibPath) novInhib.DefaultParams = params.Params{ "Path.SWts.Init.SPct": "0", "Path.SWts.Init.Mean": "0.1", @@ -234,8 +234,8 @@ func (net *Network) ConnectCSToBLApos(cs, blaAcq, blaNov *Layer) (toAcq, toNov, // BLApos(Neg)ExtD2(D1) layers, // using fixed, higher-variance weights, full pathway. 
// Sets classes to: USToBLAAcq and USToBLAExt -func (net *Network) ConnectUSToBLA(us, blaAcq, blaExt *Layer) (toAcq, toExt *Path) { - toAcq = net.ConnectLayers(us, blaAcq, paths.NewPoolOneToOne(), BLAPath) +func (nt *Network) ConnectUSToBLA(us, blaAcq, blaExt *Layer) (toAcq, toExt *Path) { + toAcq = nt.ConnectLayers(us, blaAcq, paths.NewPoolOneToOne(), BLAPath) toAcq.DefaultParams = params.Params{ "Path.PathScale.Rel": "0.5", "Path.PathScale.Abs": "6", @@ -248,7 +248,7 @@ func (net *Network) ConnectUSToBLA(us, blaAcq, blaExt *Layer) (toAcq, toExt *Pat } toAcq.AddClass("USToBLAAcq") - toExt = net.ConnectLayers(us, blaExt, paths.NewPoolOneToOne(), InhibPath) + toExt = nt.ConnectLayers(us, blaExt, paths.NewPoolOneToOne(), InhibPath) toExt.DefaultParams = params.Params{ // actual US inhibits exinction -- must be strong enough to block ACh enh Ge "Path.PathScale.Abs": "0.5", // note: key param "Path.SWts.Init.SPct": "0", @@ -267,20 +267,20 @@ func (net *Network) ConnectUSToBLA(us, blaAcq, blaExt *Layer) (toAcq, toExt *Pat // Actual US inputs are set in Rubicon. // Uses the network Rubicon.NPosUSs, NNegUSs, and NCosts for number of pools -- // must be configured prior to calling this. -func (net *Network) AddUSLayers(popY, popX int, rel relpos.Relations, space float32) (usPos, usNeg, cost, costFinal *Layer) { - nUSpos := int(net.Rubicon.NPosUSs) - nUSneg := int(net.Rubicon.NNegUSs) - nCost := int(net.Rubicon.NCosts) - usPos = net.AddLayer4D("USpos", USLayer, 1, nUSpos, popY, popX) +func (nt *Network) AddUSLayers(popY, popX int, rel relpos.Relations, space float32) (usPos, usNeg, cost, costFinal *Layer) { + nUSpos := int(nt.Rubicon.NPosUSs) + nUSneg := int(nt.Rubicon.NNegUSs) + nCost := int(nt.Rubicon.NCosts) + usPos = nt.AddLayer4D("USpos", USLayer, 1, nUSpos, popY, popX) usPos.SetBuildConfig("DAMod", "D1Mod") // not relevant but avoids warning usPos.SetBuildConfig("Valence", "Positive") - usNeg = net.AddLayer4D("USneg", USLayer, 1, nUSneg, popY, popX) + usNeg = nt.AddLayer4D("USneg", USLayer, 1, nUSneg, popY, popX) usNeg.SetBuildConfig("DAMod", "D2Mod") // not relevant but avoids warning usNeg.SetBuildConfig("Valence", "Negative") - cost = net.AddLayer4D("Cost", USLayer, 1, nCost, popY, popX) + cost = nt.AddLayer4D("Cost", USLayer, 1, nCost, popY, popX) cost.SetBuildConfig("DAMod", "D1Mod") // d1mod = incremental current cost.SetBuildConfig("Valence", "Cost") - costFinal = net.AddLayer4D("CostFin", USLayer, 1, nCost, popY, popX) + costFinal = nt.AddLayer4D("CostFin", USLayer, 1, nCost, popY, popX) costFinal.SetBuildConfig("DAMod", "D2Mod") // d2mod = final costFinal.SetBuildConfig("Valence", "Cost") @@ -299,13 +299,13 @@ func (net *Network) AddUSLayers(popY, popX int, rel relpos.Relations, space floa // These track the Global USpos, USneg, Cost, for visualization and predictive learning. // Actual US inputs are set in Rubicon. // Adds Pulvinar predictive layers for each. 
-func (net *Network) AddUSPulvLayers(popY, popX int, rel relpos.Relations, space float32) (usPos, usNeg, cost, costFinal, usPosP, usNegP, costP *Layer) { - usPos, usNeg, cost, costFinal = net.AddUSLayers(popY, popX, rel, space) - usPosP = net.AddPulvForLayer(usPos, space) +func (nt *Network) AddUSPulvLayers(popY, popX int, rel relpos.Relations, space float32) (usPos, usNeg, cost, costFinal, usPosP, usNegP, costP *Layer) { + usPos, usNeg, cost, costFinal = nt.AddUSLayers(popY, popX, rel, space) + usPosP = nt.AddPulvForLayer(usPos, space) usPosP.SetBuildConfig("Valence", "Positive") - usNegP = net.AddPulvForLayer(usNeg, space) + usNegP = nt.AddPulvForLayer(usNeg, space) usNegP.SetBuildConfig("Valence", "Negative") - costP = net.AddPulvForLayer(cost, space) + costP = nt.AddPulvForLayer(cost, space) costP.SetBuildConfig("Valence", "Cost") if rel == relpos.Behind { costFinal.PlaceBehind(costP, space) @@ -335,11 +335,11 @@ func (net *Network) AddUSPulvLayers(popY, popX int, rel relpos.Relations, space // USpos outcome, or total USneg outcome. // Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over // given numbers of neurons in the X and Y dimensions. -func (net *Network) AddPVLayers(nNeurY, nNeurX int, rel relpos.Relations, space float32) (pvPos, pvNeg *Layer) { - pvPos = net.AddLayer2D("PVpos", PVLayer, nNeurY, nNeurX) +func (nt *Network) AddPVLayers(nNeurY, nNeurX int, rel relpos.Relations, space float32) (pvPos, pvNeg *Layer) { + pvPos = nt.AddLayer2D("PVpos", PVLayer, nNeurY, nNeurX) pvPos.SetBuildConfig("DAMod", "D1Mod") // not relevant but avoids warning pvPos.SetBuildConfig("Valence", "Positive") - pvNeg = net.AddLayer2D("PVneg", PVLayer, nNeurY, nNeurX) + pvNeg = nt.AddLayer2D("PVneg", PVLayer, nNeurY, nNeurX) pvNeg.SetBuildConfig("DAMod", "D2Mod") // not relevant but avoids warning pvNeg.SetBuildConfig("Valence", "Negative") if rel == relpos.Behind { @@ -356,10 +356,10 @@ func (net *Network) AddPVLayers(nNeurY, nNeurX int, rel relpos.Relations, space // Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over // given numbers of neurons in the X and Y dimensions. // Adds Pulvinar predictive layers for each. 
-func (net *Network) AddPVPulvLayers(nNeurY, nNeurX int, rel relpos.Relations, space float32) (pvPos, pvNeg, pvPosP, pvNegP *Layer) { - pvPos, pvNeg = net.AddPVLayers(nNeurX, nNeurY, rel, space) - pvPosP = net.AddPulvForLayer(pvPos, space) - pvNegP = net.AddPulvForLayer(pvNeg, space) +func (nt *Network) AddPVPulvLayers(nNeurY, nNeurX int, rel relpos.Relations, space float32) (pvPos, pvNeg, pvPosP, pvNegP *Layer) { + pvPos, pvNeg = nt.AddPVLayers(nNeurX, nNeurY, rel, space) + pvPosP = nt.AddPulvForLayer(pvPos, space) + pvNegP = nt.AddPulvForLayer(pvNeg, space) if rel == relpos.Behind { pvNeg.PlaceBehind(pvPosP, space) } @@ -375,11 +375,11 @@ func (net *Network) AddPVPulvLayers(nNeurY, nNeurX int, rel relpos.Relations, sp } // AddVSPatchLayers adds VSPatch (Pos, D1, D2) -func (net *Network) AddVSPatchLayers(prefix string, nUs, nNeurY, nNeurX int, space float32) (d1, d2 *Layer) { - d1 = net.AddLayer4D(prefix+"VsPatchD1", VSPatchLayer, 1, nUs, nNeurY, nNeurX) +func (nt *Network) AddVSPatchLayers(prefix string, nUs, nNeurY, nNeurX int, space float32) (d1, d2 *Layer) { + d1 = nt.AddLayer4D(prefix+"VsPatchD1", VSPatchLayer, 1, nUs, nNeurY, nNeurX) d1.SetBuildConfig("DAMod", "D1Mod") d1.SetBuildConfig("Valence", "Positive") - d2 = net.AddLayer4D(prefix+"VsPatchD2", VSPatchLayer, 1, nUs, nNeurY, nNeurX) + d2 = nt.AddLayer4D(prefix+"VsPatchD2", VSPatchLayer, 1, nUs, nNeurY, nNeurX) d2.SetBuildConfig("DAMod", "D2Mod") d2.SetBuildConfig("Valence", "Positive") d2.PlaceBehind(d1, space) @@ -387,18 +387,18 @@ func (net *Network) AddVSPatchLayers(prefix string, nUs, nNeurY, nNeurX int, spa } // ConnectToVSPatch adds a VSPatchPath from given sending layer to VSPatchD1, D2 layers -func (net *Network) ConnectToVSPatch(send, vspD1, vspD2 *Layer, pat paths.Pattern) (*Path, *Path) { - d1 := net.ConnectLayers(send, vspD1, pat, VSPatchPath) - d2 := net.ConnectLayers(send, vspD2, pat, VSPatchPath) +func (nt *Network) ConnectToVSPatch(send, vspD1, vspD2 *Layer, pat paths.Pattern) (*Path, *Path) { + d1 := nt.ConnectLayers(send, vspD1, pat, VSPatchPath) + d2 := nt.ConnectLayers(send, vspD2, pat, VSPatchPath) return d1, d2 } // AddVTALHbLDTLayers adds VTA dopamine, LHb DA dipping, and LDT ACh layers // which are driven by corresponding values in Global -func (net *Network) AddVTALHbLDTLayers(rel relpos.Relations, space float32) (vta, lhb, ldt *Layer) { - vta = net.AddLayer2D("VTA", VTALayer, 1, 1) - lhb = net.AddLayer2D("LHb", LHbLayer, 1, 2) - ldt = net.AddLDTLayer("") +func (nt *Network) AddVTALHbLDTLayers(rel relpos.Relations, space float32) (vta, lhb, ldt *Layer) { + vta = nt.AddLayer2D("VTA", VTALayer, 1, 1) + lhb = nt.AddLayer2D("LHb", LHbLayer, 1, 2) + ldt = nt.AddLDTLayer("") if rel == relpos.Behind { lhb.PlaceBehind(vta, space) ldt.PlaceBehind(lhb, space) @@ -414,8 +414,8 @@ func (net *Network) AddVTALHbLDTLayers(rel relpos.Relations, space float32) (vta // (Inhib.FFPrv) -- connect with fixed random input from sensory // input layers. Sets base name and class name to SC. 
// Must set Inhib.FFPrv > 0 and Act.Decay.* = 0 -func (net *Network) AddSCLayer2D(prefix string, nNeurY, nNeurX int) *Layer { - sc := net.AddLayer2D(prefix+"SC", SuperLayer, nNeurY, nNeurX) +func (nt *Network) AddSCLayer2D(prefix string, nNeurY, nNeurX int) *Layer { + sc := nt.AddLayer2D(prefix+"SC", SuperLayer, nNeurY, nNeurX) sc.DefaultParams = params.Params{ "Layer.Inhib.ActAvg.Nominal": "0.1", "Layer.Inhib.Layer.On": "true", @@ -437,8 +437,8 @@ func (net *Network) AddSCLayer2D(prefix string, nNeurY, nNeurX int) *Layer { // (Inhib.FFPrv) -- connect with fixed random input from sensory // input layers. Sets base name and class name to SC. // Must set Inhib.FFPrv > 0 and Act.Decay.* = 0 -func (net *Network) AddSCLayer4D(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer { - sc := net.AddLayer4D(prefix+"SC", SuperLayer, nPoolsY, nPoolsX, nNeurY, nNeurX) +func (nt *Network) AddSCLayer4D(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer { + sc := nt.AddLayer4D(prefix+"SC", SuperLayer, nPoolsY, nPoolsX, nNeurY, nNeurX) sc.DefaultParams = params.Params{ "Layer.Inhib.ActAvg.Nominal": "0.1", "Layer.Inhib.Layer.On": "true", @@ -459,8 +459,8 @@ func (net *Network) AddSCLayer4D(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX // ConnectToSC adds a ForwardPath from given sending layer to // a SC layer, setting class as ToSC -- should set params // as fixed random with more variance than usual. -func (net *Network) ConnectToSC(send, recv *Layer, pat paths.Pattern) *Path { - pj := net.ConnectLayers(send, recv, pat, ForwardPath) +func (nt *Network) ConnectToSC(send, recv *Layer, pat paths.Pattern) *Path { + pj := nt.ConnectLayers(send, recv, pat, ForwardPath) pj.AddClass("ToSC") return pj } @@ -468,9 +468,9 @@ func (net *Network) ConnectToSC(send, recv *Layer, pat paths.Pattern) *Path { // ConnectToSC1to1 adds a 1to1 ForwardPath from given sending layer to // a SC layer, copying the geometry of the sending layer, // setting class as ToSC. The conection weights are set to uniform. -func (net *Network) ConnectToSC1to1(send, recv *Layer) *Path { +func (nt *Network) ConnectToSC1to1(send, recv *Layer) *Path { recv.Shape.CopyFrom(&send.Shape) - pj := net.ConnectLayers(send, recv, paths.NewOneToOne(), ForwardPath) + pj := nt.ConnectLayers(send, recv, paths.NewOneToOne(), ForwardPath) pj.DefaultParams = params.Params{ "Path.Learn.Learn": "false", "Path.SWts.Init.SPct": "0", @@ -486,8 +486,9 @@ func (net *Network) ConnectToSC1to1(send, recv *Layer) *Path { // from Global Drive.Drives. // Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over // given numbers of neurons in the X and Y dimensions, per drive pool. -func (net *Network) AddDrivesLayer(ctx *Context, nNeurY, nNeurX int) *Layer { - drv := net.AddLayer4D("Drives", DrivesLayer, 1, int(ctx.NetIndexes.RubiconNPosUSs), nNeurY, nNeurX) +func (nt *Network) AddDrivesLayer(ctx *Context, nNeurY, nNeurX int) *Layer { + nix := nt.NetIxs() + drv := nt.AddLayer4D("Drives", DrivesLayer, 1, int(nix.RubiconNPosUSs), nNeurY, nNeurX) return drv } @@ -496,9 +497,9 @@ func (net *Network) AddDrivesLayer(ctx *Context, nNeurY, nNeurX int) *Layer { // Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over // given numbers of neurons in the X and Y dimensions, per drive pool. // Adds Pulvinar predictive layers for Drives. 
-func (net *Network) AddDrivesPulvLayer(ctx *Context, nNeurY, nNeurX int, space float32) (drv, drvP *Layer) { - drv = net.AddDrivesLayer(ctx, nNeurY, nNeurX) - drvP = net.AddPulvForLayer(drv, space) +func (nt *Network) AddDrivesPulvLayer(ctx *Context, nNeurY, nNeurX int, space float32) (drv, drvP *Layer) { + drv = nt.AddDrivesLayer(ctx, nNeurY, nNeurX) + drvP = nt.AddPulvForLayer(drv, space) drvP.DefaultParams = params.Params{ "Layer.Inhib.ActAvg.Nominal": "0.01", "Layer.Inhib.Layer.On": "false", @@ -513,8 +514,8 @@ func (net *Network) AddDrivesPulvLayer(ctx *Context, nNeurY, nNeurX int, space f // from Global Urgency.Urge // Uses a PopCode representation based on LayerParams.Act.PopCode, distributed over // given numbers of neurons in the X and Y dimensions. -func (net *Network) AddUrgencyLayer(nNeurY, nNeurX int) *Layer { - urge := net.AddLayer2D("Urgency", UrgencyLayer, nNeurY, nNeurX) +func (nt *Network) AddUrgencyLayer(nNeurY, nNeurX int) *Layer { + urge := nt.AddLayer2D("Urgency", UrgencyLayer, nNeurY, nNeurX) return urge } @@ -531,13 +532,13 @@ func (net *Network) AddUrgencyLayer(nNeurY, nNeurX int) *Layer { // valences -- this is what the dopamine value ends up conding (pos - neg). // Layers are organized in depth per type: USs in one column, PVs in the next, // with Drives in the back; urgency behind that. -func (net *Network) AddRubiconPulvLayers(ctx *Context, nYneur, popY, popX int, space float32) (drives, drivesP, urgency, usPos, usNeg, cost, costFinal, usPosP, usNegP, costP, pvPos, pvNeg, pvPosP, pvNegP *Layer) { +func (nt *Network) AddRubiconPulvLayers(ctx *Context, nYneur, popY, popX int, space float32) (drives, drivesP, urgency, usPos, usNeg, cost, costFinal, usPosP, usNegP, costP, pvPos, pvNeg, pvPosP, pvNegP *Layer) { rel := relpos.Behind - usPos, usNeg, cost, costFinal, usPosP, usNegP, costP = net.AddUSPulvLayers(popY, popX, rel, space) - pvPos, pvNeg, pvPosP, pvNegP = net.AddPVPulvLayers(popY, popX, rel, space) - drives, drivesP = net.AddDrivesPulvLayer(ctx, popY, popX, space) - urgency = net.AddUrgencyLayer(popY, popX) + usPos, usNeg, cost, costFinal, usPosP, usNegP, costP = nt.AddUSPulvLayers(popY, popX, rel, space) + pvPos, pvNeg, pvPosP, pvNegP = nt.AddPVPulvLayers(popY, popX, rel, space) + drives, drivesP = nt.AddDrivesPulvLayer(ctx, popY, popX, space) + urgency = nt.AddUrgencyLayer(popY, popX) pvPos.PlaceRightOf(usPos, space) drives.PlaceBehind(usNegP, space) @@ -548,8 +549,8 @@ func (net *Network) AddRubiconPulvLayers(ctx *Context, nYneur, popY, popX int, s // AddOFCpos adds orbital frontal cortex positive US-coding layers, // for given number of pos US pools (first is novelty / curiosity pool), // with given number of units per pool. 
-func (net *Network) AddOFCpos(ctx *Context, nUSs, nY, ofcY, ofcX int, space float32) (ofc, ofcCT, ofcPT, ofcPTp, ofcMD *Layer) { - ofc, ofcCT, ofcPT, ofcPTp, ofcMD = net.AddPFC4D("OFCpos", "MD", 1, nUSs, ofcY, ofcX, true, true, space) +func (nt *Network) AddOFCpos(ctx *Context, nUSs, nY, ofcY, ofcX int, space float32) (ofc, ofcCT, ofcPT, ofcPTp, ofcMD *Layer) { + ofc, ofcCT, ofcPT, ofcPTp, ofcMD = nt.AddPFC4D("OFCpos", "MD", 1, nUSs, ofcY, ofcX, true, true, space) ofc.DefaultParams["Layer.Inhib.Pool.Gi"] = "1" ofcPT.DefaultParams["Layer.Inhib.ActAvg.Nominal"] = "0.02" ofcPT.DefaultParams["Layer.Inhib.Pool.On"] = "true" @@ -563,8 +564,8 @@ func (net *Network) AddOFCpos(ctx *Context, nUSs, nY, ofcY, ofcX int, space floa // AddOFCneg adds orbital frontal cortex negative US-coding layers, // for given number of neg US pools with given number of units per pool. -func (net *Network) AddOFCneg(ctx *Context, nUSs, ofcY, ofcX int, space float32) (ofc, ofcCT, ofcPT, ofcPTp, ofcMD *Layer) { - ofc, ofcCT, ofcPT, ofcPTp, ofcMD = net.AddPFC4D("OFCneg", "MD", 1, nUSs, ofcY, ofcX, true, true, space) +func (nt *Network) AddOFCneg(ctx *Context, nUSs, ofcY, ofcX int, space float32) (ofc, ofcCT, ofcPT, ofcPTp, ofcMD *Layer) { + ofc, ofcCT, ofcPT, ofcPTp, ofcMD = nt.AddPFC4D("OFCneg", "MD", 1, nUSs, ofcY, ofcX, true, true, space) ofc.DefaultParams["Layer.Inhib.Pool.Gi"] = "1" ofc.DefaultParams["Layer.Inhib.ActAvg.Nominal"] = "0.1" @@ -581,8 +582,8 @@ func (net *Network) AddOFCneg(ctx *Context, nUSs, ofcY, ofcX int, space float32) // AddACCost adds anterior cingulate cost coding layers, // for given number of cost pools (typically 2: time, effort), // with given number of units per pool. -func (net *Network) AddACCost(ctx *Context, nCosts, accY, accX int, space float32) (acc, accCT, accPT, accPTp, accMD *Layer) { - acc, accCT, accPT, accPTp, accMD = net.AddPFC4D("ACCcost", "MD", 1, nCosts, accY, accX, true, true, space) +func (nt *Network) AddACCost(ctx *Context, nCosts, accY, accX int, space float32) (acc, accCT, accPT, accPTp, accMD *Layer) { + acc, accCT, accPT, accPTp, accMD = nt.AddPFC4D("ACCcost", "MD", 1, nCosts, accY, accX, true, true, space) acc.DefaultParams["Layer.Inhib.Layer.On"] = "false" // no match acc.DefaultParams["Layer.Inhib.Pool.Gi"] = "1" @@ -619,47 +620,47 @@ func (net *Network) AddACCost(ctx *Context, nCosts, accY, accX int, space float3 // Makes all appropriate interconnections and sets default parameters. // Needs CS -> BLA, OFC connections to be made. // Returns layers most likely to be used for remaining connections and positions. 
-func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, ofcY, ofcX int, space float32) (vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc *Layer) { - nUSpos := int(net.Rubicon.NPosUSs) - nUSneg := int(net.Rubicon.NNegUSs) - nCosts := int(net.Rubicon.NCosts) +func (nt *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, ofcY, ofcX int, space float32) (vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc *Layer) { + nUSpos := int(nt.Rubicon.NPosUSs) + nUSneg := int(nt.Rubicon.NNegUSs) + nCosts := int(nt.Rubicon.NCosts) - vta, lhb, ldt := net.AddVTALHbLDTLayers(relpos.Behind, space) + vta, lhb, ldt := nt.AddVTALHbLDTLayers(relpos.Behind, space) _ = lhb _ = ldt - drives, drivesP, urgency, usPos, usNeg, cost, costFinal, usPosP, usNegP, costP, pvPos, pvNeg, pvPosP, pvNegP := net.AddRubiconPulvLayers(ctx, nYneur, popY, popX, space) + drives, drivesP, urgency, usPos, usNeg, cost, costFinal, usPosP, usNegP, costP, pvPos, pvNeg, pvPosP, pvNegP := nt.AddRubiconPulvLayers(ctx, nYneur, popY, popX, space) _ = urgency - vSmtxGo, vSmtxNo, vSgpePr, vSgpeAk, vSstn, vSgpi := net.AddVBG("", 1, nUSpos, bgY, bgX, bgY, bgX, space) + vSmtxGo, vSmtxNo, vSgpePr, vSgpeAk, vSstn, vSgpi := nt.AddVBG("", 1, nUSpos, bgY, bgX, bgY, bgX, space) _, _ = vSgpeAk, vSgpePr - vSgated := net.AddVSGatedLayer("", nYneur) - vSpatchD1, vSpatchD2 = net.AddVSPatchLayers("", nUSpos, bgY, bgX, space) + vSgated := nt.AddVSGatedLayer("", nYneur) + vSpatchD1, vSpatchD2 = nt.AddVSPatchLayers("", nUSpos, bgY, bgX, space) vSpatchD1.PlaceRightOf(vSstn, space) - sc = net.AddSCLayer2D("", ofcY, ofcX) + sc = nt.AddSCLayer2D("", ofcY, ofcX) vSgated.PlaceRightOf(sc, space) ldt.SetBuildConfig("SrcLay1Name", sc.Name) - blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov := net.AddAmygdala("", true, ofcY, ofcX, space) + blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov := nt.AddAmygdala("", true, ofcY, ofcX, space) _, _, _, _, _ = blaNegAcq, blaNegExt, cemPos, cemNeg, blaNov - ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ofcPosMD := net.AddOFCpos(ctx, nUSpos, nYneur, ofcY, ofcX, space) + ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ofcPosMD := nt.AddOFCpos(ctx, nUSpos, nYneur, ofcY, ofcX, space) _ = ofcPosPT - ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, ofcNegMD := net.AddOFCneg(ctx, nUSneg, ofcY, ofcX, space) + ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, ofcNegMD := nt.AddOFCneg(ctx, nUSneg, ofcY, ofcX, space) _ = ofcNegPT - ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD = net.AddPFC2D("ILpos", "MD", ofcY, ofcX, true, true, space) + ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD = nt.AddPFC2D("ILpos", "MD", ofcY, ofcX, true, true, space) _ = ilPosPT - ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD = net.AddPFC2D("ILneg", "MD", ofcY, ofcX, true, true, space) + ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD = nt.AddPFC2D("ILneg", "MD", ofcY, ofcX, true, true, space) _ = ilNegPT 
ilPosPT.DefaultParams["Layer.Acts.Dend.ModACh"] = "true" ilNegPT.DefaultParams["Layer.Acts.Dend.ModACh"] = "true" - accCost, accCostCT, accCostPT, accCostPTp, accCostMD = net.AddACCost(ctx, nCosts, ofcY, ofcX, space) + accCost, accCostCT, accCostPT, accCostPTp, accCostMD = nt.AddACCost(ctx, nCosts, ofcY, ofcX, space) _ = accCostPT p1to1 := paths.NewPoolOneToOne() @@ -671,42 +672,42 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, vSmtxGo.SetBuildConfig("ThalLay1Name", ofcPosMD.Name) vSmtxNo.SetBuildConfig("ThalLay1Name", ofcPosMD.Name) - net.ConnectLayers(vSgpi, ofcPosMD, full, InhibPath) // BGThal sets defaults for this + nt.ConnectLayers(vSgpi, ofcPosMD, full, InhibPath) // BGThal sets defaults for this vSmtxGo.SetBuildConfig("ThalLay2Name", ofcNegMD.Name) vSmtxNo.SetBuildConfig("ThalLay2Name", ofcNegMD.Name) - net.ConnectLayers(vSgpi, ofcNegMD, full, InhibPath) + nt.ConnectLayers(vSgpi, ofcNegMD, full, InhibPath) vSmtxGo.SetBuildConfig("ThalLay3Name", ilPosMD.Name) vSmtxNo.SetBuildConfig("ThalLay3Name", ilPosMD.Name) - net.ConnectLayers(vSgpi, ilPosMD, full, InhibPath) + nt.ConnectLayers(vSgpi, ilPosMD, full, InhibPath) vSmtxGo.SetBuildConfig("ThalLay4Name", ilNegMD.Name) vSmtxNo.SetBuildConfig("ThalLay4Name", ilNegMD.Name) - net.ConnectLayers(vSgpi, ilNegMD, full, InhibPath) // BGThal configs + nt.ConnectLayers(vSgpi, ilNegMD, full, InhibPath) // BGThal configs vSmtxGo.SetBuildConfig("ThalLay5Name", accCostMD.Name) vSmtxNo.SetBuildConfig("ThalLay5Name", accCostMD.Name) - net.ConnectLayers(vSgpi, accCostMD, full, InhibPath) // BGThal configs + nt.ConnectLayers(vSgpi, accCostMD, full, InhibPath) // BGThal configs pfc2m := params.Params{ // contextual, not driving -- weaker "Path.PathScale.Rel": "1", // 0.1", todo was } // neg val goes to nogo - pj = net.ConnectToVSMatrix(ilNeg, vSmtxNo, full) + pj = nt.ConnectToVSMatrix(ilNeg, vSmtxNo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - net.ConnectToVSPatch(ilNegPTp, vSpatchD1, vSpatchD2, full) + nt.ConnectToVSPatch(ilNegPTp, vSpatchD1, vSpatchD2, full) /////////////////////////////////////////// // BLA - net.ConnectUSToBLA(usPos, blaPosAcq, blaPosExt) - net.ConnectUSToBLA(usNeg, blaNegAcq, blaNegExt) + nt.ConnectUSToBLA(usPos, blaPosAcq, blaPosExt) + nt.ConnectUSToBLA(usNeg, blaNegAcq, blaNegExt) - pj = net.ConnectLayers(blaPosAcq, ofcPos, p1to1, ForwardPath) // main driver strong input + pj = nt.ConnectLayers(blaPosAcq, ofcPos, p1to1, ForwardPath) // main driver strong input pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "2", "Path.SWts.Init.Mean": "0.5", @@ -714,7 +715,7 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, } pj.AddClass("BLAToOFC", pathClass) - pj = net.ConnectLayers(blaNegAcq, ofcNeg, p1to1, ForwardPath) + pj = nt.ConnectLayers(blaNegAcq, ofcNeg, p1to1, ForwardPath) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "2", "Path.SWts.Init.Mean": "0.5", @@ -722,7 +723,7 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, } pj.AddClass("BLAToOFC", pathClass) - pj = net.ConnectLayers(ofcPosPTp, blaPosExt, p1to1, BLAPath) + pj = nt.ConnectLayers(ofcPosPTp, blaPosExt, p1to1, BLAPath) pj.DefaultParams = params.Params{ "Path.Com.GType": "ModulatoryG", "Path.PathScale.Abs": "1", @@ -734,7 +735,7 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, /////////////////////////////////////////// // VS - d1, d2 := net.ConnectToVSPatch(drives, vSpatchD1, vSpatchD2, p1to1) + d1, d2 := 
nt.ConnectToVSPatch(drives, vSpatchD1, vSpatchD2, p1to1) // modulatory -- critical that it drives full GeModSyn=1 in Matrix at max drive act driveToVsp := params.Params{ "Path.Learn.Learn": "false", @@ -750,21 +751,21 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, d1.AddClass("DrivesToVSPatch") d2.AddClass("DrivesToVSPatch") - net.ConnectToVSPatch(ofcPosPTp, vSpatchD1, vSpatchD2, p1to1) - net.ConnectToVSPatch(ilPosPTp, vSpatchD1, vSpatchD2, full) - net.ConnectToVSPatch(ofcNegPTp, vSpatchD1, vSpatchD2, full) - net.ConnectToVSPatch(ilNegPTp, vSpatchD1, vSpatchD2, full) - net.ConnectToVSPatch(pvPosP, vSpatchD1, vSpatchD2, full) + nt.ConnectToVSPatch(ofcPosPTp, vSpatchD1, vSpatchD2, p1to1) + nt.ConnectToVSPatch(ilPosPTp, vSpatchD1, vSpatchD2, full) + nt.ConnectToVSPatch(ofcNegPTp, vSpatchD1, vSpatchD2, full) + nt.ConnectToVSPatch(ilNegPTp, vSpatchD1, vSpatchD2, full) + nt.ConnectToVSPatch(pvPosP, vSpatchD1, vSpatchD2, full) // same paths to stn as mtxgo - net.ConnectToVSMatrix(usPos, vSmtxGo, p1to1) + nt.ConnectToVSMatrix(usPos, vSmtxGo, p1to1) // net.ConnectToVSMatrix(usPos, vSmtxNo, p1to1) // pj.DefaultParams = params.Params{ // "Path.PathScale.Abs": "2", // strong // "Path.PathScale.Rel": ".2", // } - net.ConnectToVSMatrix(blaPosAcq, vSmtxNo, p1to1) - pj = net.ConnectToVSMatrix(blaPosAcq, vSmtxGo, p1to1) + nt.ConnectToVSMatrix(blaPosAcq, vSmtxNo, p1to1) + pj = nt.ConnectToVSMatrix(blaPosAcq, vSmtxGo, p1to1) pj.AddClass("BLAAcqToGo") pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "2", // key strength driver @@ -778,18 +779,18 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // "Path.PathScale.Abs": "2", // strong // "Path.PathScale.Rel": ".2", // } - net.ConnectToVSMatrix(blaNegAcq, vSmtxNo, full).AddClass("BLAAcqToGo") // neg -> nogo + nt.ConnectToVSMatrix(blaNegAcq, vSmtxNo, full).AddClass("BLAAcqToGo") // neg -> nogo // pj.DefaultParams = params.Params{ // "Path.PathScale.Abs": "2", // "Path.PathScale.Rel": "1", // } - net.ConnectLayers(blaPosAcq, vSstn, full, ForwardPath) - net.ConnectLayers(blaNegAcq, vSstn, full, ForwardPath) + nt.ConnectLayers(blaPosAcq, vSstn, full, ForwardPath) + nt.ConnectLayers(blaNegAcq, vSstn, full, ForwardPath) // todo: ofc -> STN? 
- pj = net.ConnectToVSMatrix(blaPosExt, vSmtxNo, p1to1) + pj = nt.ConnectToVSMatrix(blaPosExt, vSmtxNo, p1to1) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "0.1", // extinction is mostly within BLA "Path.PathScale.Rel": "1", @@ -813,47 +814,47 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, "Path.SWts.Init.Var": "0.0", "Path.Com.GType": "ModulatoryG", } - pj = net.ConnectToVSMatrix(drives, vSmtxGo, p1to1) + pj = nt.ConnectToVSMatrix(drives, vSmtxGo, p1to1) pj.DefaultParams = d2m pj.AddClass("DrivesToMtx") - pj = net.ConnectToVSMatrix(drives, vSmtxNo, p1to1) + pj = nt.ConnectToVSMatrix(drives, vSmtxNo, p1to1) pj.DefaultParams = d2m pj.AddClass("DrivesToMtx") - pj = net.ConnectToVSMatrix(ofcPos, vSmtxGo, p1to1) + pj = nt.ConnectToVSMatrix(ofcPos, vSmtxGo, p1to1) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(ofcPos, vSmtxNo, p1to1) + pj = nt.ConnectToVSMatrix(ofcPos, vSmtxNo, p1to1) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - net.ConnectLayers(ofcPos, vSstn, full, ForwardPath) + nt.ConnectLayers(ofcPos, vSstn, full, ForwardPath) - pj = net.ConnectToVSMatrix(ilPos, vSmtxGo, full) + pj = nt.ConnectToVSMatrix(ilPos, vSmtxGo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(ilPos, vSmtxNo, full) + pj = nt.ConnectToVSMatrix(ilPos, vSmtxNo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - net.ConnectLayers(ilPos, vSstn, full, ForwardPath) + nt.ConnectLayers(ilPos, vSstn, full, ForwardPath) - pj = net.ConnectToVSMatrix(ofcNeg, vSmtxGo, full) + pj = nt.ConnectToVSMatrix(ofcNeg, vSmtxGo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(ofcNeg, vSmtxNo, full) + pj = nt.ConnectToVSMatrix(ofcNeg, vSmtxNo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(ilNeg, vSmtxGo, full) + pj = nt.ConnectToVSMatrix(ilNeg, vSmtxGo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(ilNeg, vSmtxNo, full) + pj = nt.ConnectToVSMatrix(ilNeg, vSmtxNo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(accCost, vSmtxGo, full) + pj = nt.ConnectToVSMatrix(accCost, vSmtxGo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(accCost, vSmtxNo, full) + pj = nt.ConnectToVSMatrix(accCost, vSmtxNo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") @@ -873,7 +874,7 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // Drives -> ofcPos then activates ofcPos -> VS -- ofcPos needs to be strongly BLA dependent // to reflect either current CS or maintained CS but not just echoing drive state. 
// and not adding drives -> deep layers - pj = net.ConnectLayers(drives, ofcPos, p1to1, ForwardPath) + pj = nt.ConnectLayers(drives, ofcPos, p1to1, ForwardPath) pj.DefaultParams = params.Params{ "Path.PathScale.Rel": "0.2", // weaker to not drive in absence of BLA } @@ -881,28 +882,28 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // net.ConnectCTSelf(ilPosCT, full, pathClass) // todo: test - net.ConnectLayers(pvPos, ofcPos, full, BackPath).AddClass("OFCPath", pathClass) - net.ConnectLayers(usPos, ofcPos, p1to1, BackPath).AddClass("OFCPath", pathClass) + nt.ConnectLayers(pvPos, ofcPos, full, BackPath).AddClass("OFCPath", pathClass) + nt.ConnectLayers(usPos, ofcPos, p1to1, BackPath).AddClass("OFCPath", pathClass) // note: these are all very static, lead to static PT reps: // need a more dynamic US / value representation to predict. - net.ConnectToPulv(ofcPos, ofcPosCT, drivesP, p1to1, p1to1, "OFCPath") - net.ConnectToPulv(ofcPos, ofcPosCT, usPosP, p1to1, p1to1, "OFCPath") - net.ConnectToPulv(ofcPos, ofcPosCT, pvPosP, full, full, "OFCPath") + nt.ConnectToPulv(ofcPos, ofcPosCT, drivesP, p1to1, p1to1, "OFCPath") + nt.ConnectToPulv(ofcPos, ofcPosCT, usPosP, p1to1, p1to1, "OFCPath") + nt.ConnectToPulv(ofcPos, ofcPosCT, pvPosP, full, full, "OFCPath") - net.ConnectPTpToPulv(ofcPosPTp, drivesP, p1to1, p1to1, "OFCPath") - net.ConnectPTToPulv(ofcPosPT, ofcPosPTp, usPosP, p1to1, p1to1, "OFCPath") - net.ConnectPTpToPulv(ofcPosPTp, pvPosP, p1to1, p1to1, "OFCPath") + nt.ConnectPTpToPulv(ofcPosPTp, drivesP, p1to1, p1to1, "OFCPath") + nt.ConnectPTToPulv(ofcPosPT, ofcPosPTp, usPosP, p1to1, p1to1, "OFCPath") + nt.ConnectPTpToPulv(ofcPosPTp, pvPosP, p1to1, p1to1, "OFCPath") - net.ConnectLayers(ofcPosPT, pvPos, full, ForwardPath) + nt.ConnectLayers(ofcPosPT, pvPos, full, ForwardPath) /////////////////////////////////////////// // ILpos // net.ConnectCTSelf(ilPosCT, full, pathClass) // todo: test - pj, bpj = net.BidirConnectLayers(ofcPos, ilPos, full) + pj, bpj = nt.BidirConnectLayers(ofcPos, ilPos, full) pj.AddClass("ILPath", pathClass) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "3", // val needs stronger input @@ -911,9 +912,9 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // note: do *not* bidirectionally connect PTp layers -- too much sustained activity - net.ConnectToPFC(pvPos, pvPosP, ilPos, ilPosCT, nil, ilPosPTp, full, "ILPath") - net.ConnectPTpToPulv(ilPosPTp, pvPosP, full, full, "ILPath") - net.BidirConnectLayers(ilPosPT, pvPos, full) + nt.ConnectToPFC(pvPos, pvPosP, ilPos, ilPosCT, nil, ilPosPTp, full, "ILPath") + nt.ConnectPTpToPulv(ilPosPTp, pvPosP, full, full, "ILPath") + nt.BidirConnectLayers(ilPosPT, pvPos, full) // note: not connecting deeper CT and PT layers to vSmtxGo at this point // could explore that later @@ -923,28 +924,28 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // net.ConnectCTSelf(ofcNegValCT, full, pathClass) // todo: test - net.ConnectLayers(pvNeg, ofcNeg, full, BackPath).AddClass("OFCPath", pathClass) - net.ConnectLayers(usNeg, ofcNeg, p1to1, BackPath).AddClass("OFCPath", pathClass) + nt.ConnectLayers(pvNeg, ofcNeg, full, BackPath).AddClass("OFCPath", pathClass) + nt.ConnectLayers(usNeg, ofcNeg, p1to1, BackPath).AddClass("OFCPath", pathClass) // note: these are all very static, lead to static PT reps: // need a more dynamic US / value representation to predict. 
- net.ConnectToPulv(ofcNeg, ofcNegCT, usNegP, p1to1, p1to1, "OFCPath") - net.ConnectToPulv(ofcNeg, ofcNegCT, pvNegP, full, full, "OFCPath") - net.ConnectPTToPulv(ofcNegPT, ofcNegPTp, usNegP, p1to1, p1to1, "OFCPath") - net.ConnectPTpToPulv(ofcNegPTp, pvNegP, full, full, "OFCPath") + nt.ConnectToPulv(ofcNeg, ofcNegCT, usNegP, p1to1, p1to1, "OFCPath") + nt.ConnectToPulv(ofcNeg, ofcNegCT, pvNegP, full, full, "OFCPath") + nt.ConnectPTToPulv(ofcNegPT, ofcNegPTp, usNegP, p1to1, p1to1, "OFCPath") + nt.ConnectPTpToPulv(ofcNegPTp, pvNegP, full, full, "OFCPath") - net.ConnectLayers(ofcNegPT, pvNeg, full, ForwardPath) + nt.ConnectLayers(ofcNegPT, pvNeg, full, ForwardPath) /////////////////////////////////////////// // Costs - net.ConnectLayers(pvNeg, accCost, full, BackPath).AddClass("ACCPath", pathClass) - net.ConnectLayers(cost, accCost, p1to1, BackPath).AddClass("ACCPath", pathClass) + nt.ConnectLayers(pvNeg, accCost, full, BackPath).AddClass("ACCPath", pathClass) + nt.ConnectLayers(cost, accCost, p1to1, BackPath).AddClass("ACCPath", pathClass) - net.ConnectToPulv(accCost, accCostCT, costP, p1to1, p1to1, "ACCPath") - net.ConnectPTpToPulv(accCostPTp, costP, p1to1, p1to1, "ACCPath") - pj = net.ConnectLayers(accCostPT, costFinal, p1to1, ForwardPath) + nt.ConnectToPulv(accCost, accCostCT, costP, p1to1, p1to1, "ACCPath") + nt.ConnectPTpToPulv(accCostPTp, costP, p1to1, p1to1, "ACCPath") + pj = nt.ConnectLayers(accCostPT, costFinal, p1to1, ForwardPath) // pj, _ = net.BidirConnectLayers(accCostPT, costFinal, p1to1) pj.AddClass("ACCCostToFinal") pj.DefaultParams = params.Params{ @@ -956,14 +957,14 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // net.ConnectCTSelf(ilNegCT, full, "ILPath") // todo: test - pj, bpj = net.BidirConnectLayers(ofcNeg, ilNeg, full) + pj, bpj = nt.BidirConnectLayers(ofcNeg, ilNeg, full) pj.AddClass("ILPath", pathClass) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "3", // val needs stronger input } bpj.AddClass("ILPath", pathClass) - pj, bpj = net.BidirConnectLayers(accCost, ilNeg, full) + pj, bpj = nt.BidirConnectLayers(accCost, ilNeg, full) pj.AddClass("ACCPath", pathClass) pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "3", // val needs stronger input @@ -972,9 +973,9 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // note: do *not* bidirectionally connect PTp layers -- too much sustained activity - net.ConnectToPFC(pvNeg, pvNegP, ilNeg, ilNegCT, nil, ilNegPTp, full, "ILPath") - net.ConnectPTpToPulv(ilNegPTp, pvNegP, full, full, "ILPath") - net.BidirConnectLayers(ilNegPT, pvNeg, full) + nt.ConnectToPFC(pvNeg, pvNegP, ilNeg, ilNegCT, nil, ilNegPTp, full, "ILPath") + nt.ConnectPTpToPulv(ilNegPTp, pvNegP, full, full, "ILPath") + nt.BidirConnectLayers(ilNegPT, pvNeg, full) // note: not connecting deeper CT and PT layers to vSmtxGo at this point // could explore that later @@ -1005,46 +1006,46 @@ func (net *Network) AddRubiconOFCus(ctx *Context, nYneur, popY, popX, bgY, bgX, // Makes all appropriate interconnections and sets default parameters. // Needs CS -> BLA, OFC connections to be made. // Returns layers most likely to be used for remaining connections and positions. 
-func (net *Network) AddRubicon(ctx *Context, nYneur, popY, popX, bgY, bgX, pfcY, pfcX int, space float32) (vSgpi, vSmtxGo, vSmtxNo, urgency, pvPos, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, ilNeg, ilNegCT, ilNegPT, ilNegPTp, accCost, plUtil, sc *Layer) { +func (nt *Network) AddRubicon(ctx *Context, nYneur, popY, popX, bgY, bgX, pfcY, pfcX int, space float32) (vSgpi, vSmtxGo, vSmtxNo, urgency, pvPos, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, ilNeg, ilNegCT, ilNegPT, ilNegPTp, accCost, plUtil, sc *Layer) { full := paths.NewFull() var pj *Path - vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc := net.AddRubiconOFCus(ctx, nYneur, popY, popX, bgY, bgX, pfcY, pfcX, space) + vSgpi, vSmtxGo, vSmtxNo, vSpatchD1, vSpatchD2, urgency, usPos, pvPos, usNeg, usNegP, pvNeg, pvNegP, blaPosAcq, blaPosExt, blaNegAcq, blaNegExt, blaNov, ofcPos, ofcPosCT, ofcPosPT, ofcPosPTp, ilPos, ilPosCT, ilPosPT, ilPosPTp, ilPosMD, ofcNeg, ofcNegCT, ofcNegPT, ofcNegPTp, accCost, accCostCT, accCostPT, accCostPTp, accCostMD, ilNeg, ilNegCT, ilNegPT, ilNegPTp, ilNegMD, sc := nt.AddRubiconOFCus(ctx, nYneur, popY, popX, bgY, bgX, pfcY, pfcX, space) _, _, _, _, _, _, _ = usPos, usNeg, usNegP, pvNeg, pvNegP, ilPosCT, ilNegMD _, _, _ = accCost, accCostCT, accCostPTp _, _ = blaNegAcq, blaNegExt _, _, _, _, _ = ofcPosPT, ofcNegPT, ilPosPT, ilNegPT, accCostPT // ILposP is what PLutil predicts, in order to learn about value (reward) - ilPosP := net.AddPulvForSuper(ilPos, space) + ilPosP := nt.AddPulvForSuper(ilPos, space) // ILnegP is what PLutil predicts, in order to learn about negative US - ilNegP := net.AddPulvForSuper(ilNeg, space) + ilNegP := nt.AddPulvForSuper(ilNeg, space) // ACCcostP is what PLutil predicts, in order to learn about cost - accCostP := net.AddPulvForSuper(accCost, space) + accCostP := nt.AddPulvForSuper(accCost, space) pfc2m := params.Params{ // contextual, not driving -- weaker "Path.PathScale.Rel": "0.1", } - plUtil, plUtilCT, plUtilPT, plUtilPTp, plUtilMD := net.AddPFC2D("PLutil", "MD", pfcY, pfcX, true, true, space) + plUtil, plUtilCT, plUtilPT, plUtilPTp, plUtilMD := nt.AddPFC2D("PLutil", "MD", pfcY, pfcX, true, true, space) vSmtxGo.SetBuildConfig("ThalLay5Name", plUtilMD.Name) vSmtxNo.SetBuildConfig("ThalLay5Name", plUtilMD.Name) - net.ConnectLayers(vSgpi, plUtilMD, full, InhibPath) + nt.ConnectLayers(vSgpi, plUtilMD, full, InhibPath) plUtilPT.DefaultParams["Layer.Acts.Dend.ModACh"] = "true" - pj = net.ConnectToVSMatrix(plUtil, vSmtxGo, full) + pj = nt.ConnectToVSMatrix(plUtil, vSmtxGo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - pj = net.ConnectToVSMatrix(plUtil, vSmtxNo, full) + pj = nt.ConnectToVSMatrix(plUtil, vSmtxNo, full) pj.DefaultParams = pfc2m pj.AddClass("PFCToVSMtx") - net.ConnectToVSPatch(plUtilPTp, vSpatchD1, vSpatchD2, full) + nt.ConnectToVSPatch(plUtilPTp, vSpatchD1, vSpatchD2, full) /////////////////////////////////////////// // ILneg @@ -1061,15 +1062,15 @@ func (net *Network) AddRubicon(ctx *Context, nYneur, 
popY, popX, bgY, bgX, pfcY, // net.ConnectCTSelf(plUtilCT, full) // todo: test // util predicts OFCval and ILneg - pj, _ = net.ConnectToPFCBidir(ilPos, ilPosP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ILToPL") + pj, _ = nt.ConnectToPFCBidir(ilPos, ilPosP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ILToPL") pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "1", // not good to make this stronger actually } - pj, _ = net.ConnectToPFCBidir(ilNeg, ilNegP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ILToPL") + pj, _ = nt.ConnectToPFCBidir(ilNeg, ilNegP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ILToPL") pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "3", // drive pl stronger -- only this one works well } - pj, _ = net.ConnectToPFCBidir(accCost, accCostP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ACCToPL") + pj, _ = nt.ConnectToPFCBidir(accCost, accCostP, plUtil, plUtilCT, plUtilPT, plUtilPTp, full, "ACCToPL") pj.DefaultParams = params.Params{ "Path.PathScale.Abs": "3", // drive pl stronger? } diff --git a/axon/synapse.go b/axon/synapse.go index c5ca3245..242824d9 100644 --- a/axon/synapse.go +++ b/axon/synapse.go @@ -1,5 +1,5 @@ // Code generated by "goal build"; DO NOT EDIT. - +//line synapse.goal:1 // Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/axon/threads.go b/axon/threads.go index ebe158a4..55678523 100644 --- a/axon/threads.go +++ b/axon/threads.go @@ -18,10 +18,11 @@ import ( // pass 0 to use a default heuristic number based on current GOMAXPROCS // processors and the number of neurons in the network (call after building) func (nt *Network) SetNThreads(nthr int) { + md := nt.NetIxs().MaxData maxProcs := runtime.GOMAXPROCS(0) // query GOMAXPROCS if nthr <= 0 { nneur := nt.Neurons.Len() - nthr = int(math.Ceil(float64(nneur) / (float64(10000) / float64(nt.MaxData)))) + nthr = int(math.Ceil(float64(nneur) / (float64(10000) / float64(md)))) if nthr < 1 { // shouldn't happen but justin.. 
nthr = 1 } diff --git a/axon/vars.go b/axon/vars.go index 81d03093..bd39cd11 100644 --- a/axon/vars.go +++ b/axon/vars.go @@ -6,7 +6,7 @@ package axon import "cogentcore.org/core/tensor" -//go:generate gosl -exclude=Update,UpdateParams,Defaults,AllParams,ShouldDisplay +//go:generate gosl -keep -exclude=Update,UpdateParams,Defaults,AllParams,ShouldDisplay //gosl:start diff --git a/examples/bench_lvis/bench_lvis_test.go b/examples/bench_lvis/bench_lvis_test.go index 742b11c8..1bd0248c 100644 --- a/examples/bench_lvis/bench_lvis_test.go +++ b/examples/bench_lvis/bench_lvis_test.go @@ -84,7 +84,7 @@ func TestGPUSynCa(t *testing.T) { net.ConfigGPUnoGUI(ctx) // on mac, only works up to ndata = 6 -- 7 fails - fmt.Printf("ndata: %d floats per: %X banks: %d\n", ctx.NetIndexes.NData, ctx.NetIndexes.GPUMaxBuffFloats, ctx.NetIndexes.GPUSynCaBanks) + fmt.Printf("ndata: %d floats per: %X banks: %d\n", ctx.NData, ctx.NetIndexes.GPUMaxBuffFloats, ctx.NetIndexes.GPUSynCaBanks) passed := net.GPU.TestSynCa() if !passed { diff --git a/examples/choose/choose.go b/examples/choose/choose.go index e2e20bb5..d5d9846f 100644 --- a/examples/choose/choose.go +++ b/examples/choose/choose.go @@ -527,7 +527,7 @@ func (ss *Sim) TakeAction(net *axon.Network) { mtxLy := ss.Net.LayerByName("VMtxGo") vlly := ss.Net.LayerByName("VL") threshold := float32(0.1) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { diu := uint32(di) ev := ss.Envs.ByModeDi(ctx.Mode, di).(*armaze.Env) justGated := mtxLy.AnyGated(diu) // not updated until plus phase: rp.VSMatrix.JustGated.IsTrue() @@ -626,7 +626,7 @@ func (ss *Sim) ApplyInputs() { lays := []string{"Dist", "CS"} ss.Net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev := ss.Envs.ByModeDi(ctx.Mode, int(di)).(*armaze.Env) giveUp := axon.GlobalScalars[axon.GvGiveUp, di] > 0 if giveUp { @@ -667,7 +667,7 @@ func (ss *Sim) ApplyRubicon(ctx *axon.Context, ev *armaze.Env, di uint32) { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) } ctx.Reset() @@ -1246,7 +1246,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { // row = ss.Stats.Int("Cycle") case time == etime.Trial: if mode == etime.Train { - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { diu := uint32(di) ss.TrialStats(di) ss.StatCounters(di) diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go index 8cdf8d15..f36f6101 100644 --- a/examples/deep_fsa/deep_fsa.go +++ b/examples/deep_fsa/deep_fsa.go @@ -354,7 +354,7 @@ func (ss *Sim) ApplyInputs() { clrmsk, setmsk, _ := in.ApplyExtFlags() net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { fsenv := ss.Envs.ByModeDi(ctx.Mode, int(di)).(*FSAEnv) fsenv.Step() ns := fsenv.NNext.Values[0] @@ -382,7 +382,7 @@ func (ss *Sim) ApplyInputs() { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) ss.Envs.ByModeDi(etime.Test, di).Init(0) } diff --git a/examples/deep_move/deep_move.go b/examples/deep_move/deep_move.go index 0bc42e3e..b506fe00 
100644 --- a/examples/deep_move/deep_move.go +++ b/examples/deep_move/deep_move.go @@ -396,7 +396,7 @@ func (ss *Sim) ApplyInputs() { lays := net.LayersByClass("InputLayer", "TargetLayer") net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev := ss.Envs.ByModeDi(ctx.Mode, int(di)).(*MoveEnv) ev.Step() for _, lnm := range lays { @@ -415,7 +415,7 @@ func (ss *Sim) ApplyInputs() { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) ss.Envs.ByModeDi(etime.Test, di).Init(0) } @@ -431,7 +431,7 @@ func (ss *Sim) NewRun() { // TestAll runs through the full set of testing items func (ss *Sim) TestAll() { ctx := &ss.Context - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Test, di).Init(0) } ss.Loops.ResetAndRun(etime.Test) diff --git a/examples/deep_music/deep_music.go b/examples/deep_music/deep_music.go index 4787e43f..9dbec6cb 100644 --- a/examples/deep_music/deep_music.go +++ b/examples/deep_music/deep_music.go @@ -379,7 +379,7 @@ func (ss *Sim) ApplyInputs() { net.InitExt(ctx) lays := net.LayersByType(axon.InputLayer, axon.TargetLayer) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev.StepDi(int(di)) if ctx.Mode == etime.Test && !ss.Config.Env.TestClamp { lastnote := ss.Stats.IntDi("OutNote", int(di)) + ev.NoteRange.Min diff --git a/examples/dls/dls.go b/examples/dls/dls.go index c65a66e9..3aec9633 100644 --- a/examples/dls/dls.go +++ b/examples/dls/dls.go @@ -481,7 +481,7 @@ func (ss *Sim) TakeAction(net *axon.Network) { ctx := &ss.Context pv := &ss.Net.Rubicon // vlly := ss.Net.LayerByName("VL") - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { diu := uint32(di) ev := ss.Envs.ByModeDi(ctx.Mode, di).(*armaze.Env) netAct := ss.DecodeAct(ev, di) @@ -569,7 +569,7 @@ func (ss *Sim) ApplyInputs() { lays := []string{"Pos", "Arm", "CS", "VSgpi", "OFC"} ss.Net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev := ss.Envs.ByModeDi(ctx.Mode, int(di)).(*armaze.Env) giveUp := axon.GlobalScalars[axon.GvGiveUp, di] > 0 if giveUp { @@ -607,7 +607,7 @@ func (ss *Sim) ApplyRubicon(ctx *axon.Context, ev *armaze.Env, di uint32) { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) } ctx.Reset() @@ -897,7 +897,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { // row = ss.Stats.Int("Cycle") case time == etime.Trial: if mode == etime.Train { - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { diu := uint32(di) ss.TrialStats(di) ss.StatCounters(di) diff --git a/examples/hip/hip.go b/examples/hip/hip.go index f81cd11e..95fa1250 100644 --- a/examples/hip/hip.go +++ b/examples/hip/hip.go @@ -364,7 +364,7 @@ func (ss *Sim) ApplyInputs() { ev := ss.Envs.ByMode(ctx.Mode).(*env.FixedTable) lays := net.LayersByType(axon.InputLayer, axon.TargetLayer) net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { 
ev.Step() // note: must save env state for logging / stats due to data parallel re-use of same env ss.Stats.SetStringDi("TrialName", int(di), ev.TrialName.Cur) @@ -760,7 +760,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { case time == etime.Cycle: return case time == etime.Trial: - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.TrialStats(di) ss.StatCounters(di) ss.Logs.LogRowDi(mode, time, row, di) diff --git a/examples/mpi/ra25.go b/examples/mpi/ra25.go index 7e4b2703..1957a2e5 100644 --- a/examples/mpi/ra25.go +++ b/examples/mpi/ra25.go @@ -502,7 +502,7 @@ func (ss *Sim) ApplyInputs() { ev := ss.Envs.ByMode(ctx.Mode).(*env.FixedTable) lays := net.LayersByType(axon.InputLayer, axon.TargetLayer) net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev.Step() // note: must save env state for logging / stats due to data parallel re-use of same env ss.Stats.SetStringDi("TrialName", int(di), ev.TrialName.Cur) @@ -679,7 +679,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { case time == etime.Cycle: return case time == etime.Trial: - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.TrialStats(di) ss.StatCounters(di) ss.Logs.LogRowDi(mode, time, row, di) diff --git a/examples/objrec/objrec.go b/examples/objrec/objrec.go index 216df324..27851635 100644 --- a/examples/objrec/objrec.go +++ b/examples/objrec/objrec.go @@ -376,7 +376,7 @@ func (ss *Sim) ApplyInputs() { } net.InitExt(ctx) lays := net.LayersByType(axon.InputLayer, axon.TargetLayer) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev.Step() ss.Stats.SetIntDi("Cat", int(di), ev.CurLED) // note: must save relevant state for stats later ss.Stats.SetStringDi("TrialName", int(di), ev.String()) diff --git a/examples/pcore_ds/pcore_ds.go b/examples/pcore_ds/pcore_ds.go index 00b56c82..44858dd7 100644 --- a/examples/pcore_ds/pcore_ds.go +++ b/examples/pcore_ds/pcore_ds.go @@ -615,7 +615,7 @@ func (ss *Sim) ApplyAction(di int) { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) ss.Envs.ByModeDi(etime.Test, di).Init(0) } diff --git a/examples/pcore_vs/pcore_vs.go b/examples/pcore_vs/pcore_vs.go index e729a1ca..0326b7d8 100644 --- a/examples/pcore_vs/pcore_vs.go +++ b/examples/pcore_vs/pcore_vs.go @@ -462,7 +462,7 @@ func (ss *Sim) GatedAction() { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) ss.Envs.ByModeDi(etime.Test, di).Init(0) } diff --git a/examples/pfcmaint/pfcmaint.go b/examples/pfcmaint/pfcmaint.go index ce0431d2..a154d53a 100644 --- a/examples/pfcmaint/pfcmaint.go +++ b/examples/pfcmaint/pfcmaint.go @@ -352,7 +352,7 @@ func (ss *Sim) ApplyRubicon(ctx *axon.Context, mode etime.Modes) { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) ss.Envs.ByModeDi(etime.Test, di).Init(0) } diff --git 
a/examples/ra25/ra25.go b/examples/ra25/ra25.go index 9c0dd21d..b7b33742 100644 --- a/examples/ra25/ra25.go +++ b/examples/ra25/ra25.go @@ -458,7 +458,7 @@ func (ss *Sim) ApplyInputs() { ev := ss.Envs.ByMode(ctx.Mode).(*env.FixedTable) lays := net.LayersByType(axon.InputLayer, axon.TargetLayer) net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev.Step() // note: must save env state for logging / stats due to data parallel re-use of same env ss.Stats.SetStringDi("TrialName", int(di), ev.TrialName.Cur) @@ -632,7 +632,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { case time == etime.Cycle: return case time == etime.Trial: - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.TrialStats(di) ss.StatCounters(di) ss.Logs.LogRowDi(mode, time, row, di) diff --git a/examples/ra25x/ra25x.go b/examples/ra25x/ra25x.go index 722090f2..38f49525 100644 --- a/examples/ra25x/ra25x.go +++ b/examples/ra25x/ra25x.go @@ -347,7 +347,7 @@ func (ss *Sim) ApplyInputs() { ev := ss.Envs.ByMode(ctx.Mode).(*env.FixedTable) lays := net.LayersByType(axon.InputLayer, axon.TargetLayer) net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev.Step() // note: must save env state for logging / stats due to data parallel re-use of same env ss.Stats.SetStringDi("TrialName", int(di), ev.TrialName.Cur) @@ -599,7 +599,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { case time == etime.Cycle: return case time == etime.Trial: - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.TrialStats(di) ss.StatCounters(di) ss.Logs.LogRowDi(mode, time, row, di) diff --git a/examples/rl/rl.go b/examples/rl/rl.go index ef62c9b9..d5ab6e6a 100644 --- a/examples/rl/rl.go +++ b/examples/rl/rl.go @@ -266,7 +266,7 @@ func (ss *Sim) ApplyInputs() { ev := ss.Envs.ByMode(ctx.Mode).(*CondEnv) lays := []string{"Input"} ss.Net.InitExt(ctx) - for di := uint32(0); di < ctx.NetIndexes.NData; di++ { + for di := uint32(0); di < ctx.NData; di++ { ev.Step() for _, lnm := range lays { ly := ss.Net.LayerByName(lnm) @@ -387,7 +387,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { case time == etime.Cycle: return case time == etime.Trial: - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.TrialStats(di) ss.StatCounters(di) ss.Logs.LogRowDi(mode, time, dt.Rows, di) diff --git a/examples/vspatch/vspatch.go b/examples/vspatch/vspatch.go index bbd47a12..bdcddeea 100644 --- a/examples/vspatch/vspatch.go +++ b/examples/vspatch/vspatch.go @@ -381,7 +381,7 @@ func (ss *Sim) ApplyRew(di uint32, rew float32) { func (ss *Sim) NewRun() { ctx := &ss.Context ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur) - for di := 0; di < int(ctx.NetIndexes.NData); di++ { + for di := 0; di < int(ctx.NData); di++ { ss.Envs.ByModeDi(etime.Train, di).Init(0) ss.Envs.ByModeDi(etime.Test, di).Init(0) }
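Note: a minimal sketch (not part of the patch) of the call-site pattern these hunks converge on, assuming the `axon` package is imported and a built `*axon.Network` (`net`) plus its `*axon.Context` (`ctx`) are in hand; the loop body is a hypothetical placeholder for whatever per-index work a given sim does.

```Go
// Data-parallel counts are read from ctx.NData and net.NetIxs(),
// instead of ctx.NetIndexes.NData or fields on the network struct.
nix := net.NetIxs() // network-level indexes, e.g. nix.MaxData, nix.RubiconNPosUSs
_ = nix.MaxData     // what SetNThreads uses for its per-thread heuristic
for di := uint32(0); di < ctx.NData; di++ {
	// per data-parallel-index work goes here (sim-specific; omitted)
	_ = di
}
```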