diff --git a/models/eprop_iaf.cpp b/models/eprop_iaf.cpp index 47f8e71568..2ea24569ed 100644 --- a/models/eprop_iaf.cpp +++ b/models/eprop_iaf.cpp @@ -83,6 +83,9 @@ eprop_iaf::Parameters_::Parameters_() , V_th_( -55.0 - E_L_ ) , kappa_( 0.97 ) , eprop_isi_trace_cutoff_( std::numeric_limits< long >::max() ) + , delay_rec_out_( 1 ) + , delay_out_rec_( 1 ) + , delay_total_( 1 ) { } @@ -129,6 +132,11 @@ eprop_iaf::Parameters_::get( DictionaryDatum& d ) const def< double >( d, names::V_th, V_th_ + E_L_ ); def< double >( d, names::kappa, kappa_ ); def< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_ ); + + double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms(); + def< double >( d, names::delay_rec_out, delay_rec_out_ms ); + double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms(); + def< double >( d, names::delay_out_rec, delay_out_rec_ms ); } double @@ -160,6 +168,14 @@ eprop_iaf::Parameters_::set( const DictionaryDatum& d, Node* node ) updateValueParam< double >( d, names::kappa, kappa_, node ); updateValueParam< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_, node ); + double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms(); + updateValueParam< double >( d, names::delay_rec_out, delay_rec_out_ms, node ); + delay_rec_out_ = Time( Time::ms( delay_rec_out_ms ) ).get_steps(); + + double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms(); + updateValueParam< double >( d, names::delay_out_rec, delay_out_rec_ms, node ); + delay_out_rec_ = Time( Time::ms( delay_out_rec_ms ) ).get_steps(); + if ( C_m_ <= 0 ) { throw BadProperty( "Membrane capacitance C_m > 0 required." ); @@ -200,6 +216,18 @@ eprop_iaf::Parameters_::set( const DictionaryDatum& d, Node* node ) throw BadProperty( "Cutoff of integration of eprop trace between spikes eprop_isi_trace_cutoff ≥ 0 required." ); } + if ( delay_rec_out_ < 1 ) + { + throw BadProperty( "Connection delay from recurrent to output neuron ≥ 1 required." ); + } + + if ( delay_out_rec_ < 1 ) + { + throw BadProperty( "Broadcast delay of learning signals ≥ 1 required." ); + } + + delay_total_ = delay_rec_out_ + ( delay_out_rec_ - 1 ); + return delta_EL; } @@ -266,6 +294,14 @@ eprop_iaf::pre_run_hook() V_.P_v_m_ = std::exp( -dt / P_.tau_m_ ); V_.P_i_in_ = P_.tau_m_ / P_.C_m_ * ( 1.0 - V_.P_v_m_ ); V_.P_z_in_ = P_.regular_spike_arrival_ ? 
1.0 : 1.0 - V_.P_v_m_; + + if ( eprop_history_.empty() ) + { + for ( long t = -P_.delay_total_; t < 0; ++t ) + { + emplace_new_eprop_history_entry( t ); + } + } } long @@ -379,7 +415,8 @@ eprop_iaf::handle( DataLoggingRequest& e ) void eprop_iaf::compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, @@ -387,25 +424,30 @@ eprop_iaf::compute_gradient( const long t_spike, const CommonSynapseProperties& cp, WeightOptimizer* optimizer ) { - double e = 0.0; // eligibility trace - double z = 0.0; // spiking variable - double z_current_buffer = 1.0; // buffer containing the spike that triggered the current integration - double psi = 0.0; // surrogate gradient - double L = 0.0; // learning signal - double grad = 0.0; // gradient + double e = 0.0; // eligibility trace + double z = 0.0; // spiking variable + double z_current = 1.0; // buffer containing the spike that triggered the current integration + double psi = 0.0; // surrogate gradient + double L = 0.0; // learning signal + double grad = 0.0; // gradient const EpropSynapseCommonProperties& ecp = static_cast< const EpropSynapseCommonProperties& >( cp ); const auto optimize_each_step = ( *ecp.optimizer_cp_ ).optimize_each_step_; - auto eprop_hist_it = get_eprop_history( t_spike_previous - 1 ); + auto eprop_hist_it = get_eprop_history( t_spike_previous - P_.delay_total_ ); const long t_compute_until = std::min( t_spike_previous + P_.eprop_isi_trace_cutoff_, t_spike ); for ( long t = t_spike_previous; t < t_compute_until; ++t, ++eprop_hist_it ) { - z = z_previous_buffer; - z_previous_buffer = z_current_buffer; - z_current_buffer = 0.0; + if ( P_.delay_total_ > 1 ) + { + update_pre_syn_buffer_multiple_entries( z, z_current, z_previous, z_previous_buffer, t_spike, t ); + } + else + { + update_pre_syn_buffer_one_entry( z, z_current, z_previous, z_previous_buffer, t_spike, t ); + } psi = eprop_hist_it->surrogate_gradient_; L = eprop_hist_it->learning_signal_; diff --git a/models/eprop_iaf.h b/models/eprop_iaf.h index 674626099c..461f325350 100644 --- a/models/eprop_iaf.h +++ b/models/eprop_iaf.h @@ -330,7 +330,8 @@ class eprop_iaf : public EpropArchivingNodeRecurrent void compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, @@ -341,6 +342,9 @@ class eprop_iaf : public EpropArchivingNodeRecurrent long get_shift() const override; bool is_eprop_recurrent_node() const override; long get_eprop_isi_trace_cutoff() override; + long get_delay_total() const override; + long get_delay_recurrent_to_readout() const override; + long get_delay_readout_to_recurrent() const override; //! Compute the surrogate gradient. double ( eprop_iaf::*compute_surrogate_gradient )( double, double, double, double, double, double ); @@ -401,6 +405,15 @@ class eprop_iaf : public EpropArchivingNodeRecurrent //! eprop_isi_trace_cutoff_ and the inter-spike distance. long eprop_isi_trace_cutoff_; + //! Connection delay from recurrent to output neurons. + long delay_rec_out_; + + //! Broadcast delay of learning signals. + long delay_out_rec_; + + //! Sum of broadcast delay of learning signals and connection delay from recurrent to output neurons. + long delay_total_; + //! Default constructor. 
  Parameters_();

@@ -526,10 +539,35 @@ eprop_iaf::get_eprop_isi_trace_cutoff()
   return P_.eprop_isi_trace_cutoff_;
 }

+inline long
+eprop_iaf::get_delay_total() const
+{
+  return P_.delay_total_;
+}
+
+inline long
+eprop_iaf::get_delay_recurrent_to_readout() const
+{
+  return P_.delay_rec_out_;
+}
+
+inline long
+eprop_iaf::get_delay_readout_to_recurrent() const
+{
+  return P_.delay_out_rec_;
+}
+
 inline size_t
 eprop_iaf::send_test_event( Node& target, size_t receptor_type, synindex, bool )
 {
   SpikeEvent e;
+
+  // To perform a consistency check on the delay parameter d_rec_out between recurrent
+  // neurons and output neurons, the recurrent neurons send a test event with a delay
+  // specified by d_rec_out. Upon receiving the test event from the recurrent neuron,
+  // the output neuron checks if the delay with which the event was received matches
+  // its own specified delay parameter d_rec_out.
+  e.set_delay_steps( P_.delay_rec_out_ );
   e.set_sender( *this );
   return target.handles_test_event( e, receptor_type );
 }
diff --git a/models/eprop_iaf_adapt.cpp b/models/eprop_iaf_adapt.cpp
index b5883cf62e..61dffc255c 100644
--- a/models/eprop_iaf_adapt.cpp
+++ b/models/eprop_iaf_adapt.cpp
@@ -87,6 +87,9 @@ eprop_iaf_adapt::Parameters_::Parameters_()
   , V_th_( -55.0 - E_L_ )
   , kappa_( 0.97 )
   , eprop_isi_trace_cutoff_( std::numeric_limits< long >::max() )
+  , delay_rec_out_( 1 )
+  , delay_out_rec_( 1 )
+  , delay_total_( 1 )
 {
 }

@@ -137,6 +140,11 @@ eprop_iaf_adapt::Parameters_::get( DictionaryDatum& d ) const
   def< double >( d, names::V_th, V_th_ + E_L_ );
   def< double >( d, names::kappa, kappa_ );
   def< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_ );
+
+  double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms();
+  def< double >( d, names::delay_rec_out, delay_rec_out_ms );
+  double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms();
+  def< double >( d, names::delay_out_rec, delay_out_rec_ms );
 }

 double
@@ -170,6 +178,14 @@ eprop_iaf_adapt::Parameters_::set( const DictionaryDatum& d, Node* node )
   updateValueParam< double >( d, names::kappa, kappa_, node );
   updateValueParam< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_, node );

+  double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms();
+  updateValueParam< double >( d, names::delay_rec_out, delay_rec_out_ms, node );
+  delay_rec_out_ = Time( Time::ms( delay_rec_out_ms ) ).get_steps();
+
+  double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms();
+  updateValueParam< double >( d, names::delay_out_rec, delay_out_rec_ms, node );
+  delay_out_rec_ = Time( Time::ms( delay_out_rec_ms ) ).get_steps();
+
   if ( adapt_beta_ < 0 )
   {
     throw BadProperty( "Threshold adaptation prefactor adapt_beta ≥ 0 required." );
@@ -220,6 +236,18 @@ eprop_iaf_adapt::Parameters_::set( const DictionaryDatum& d, Node* node )
     throw BadProperty( "Cutoff of integration of eprop trace between spikes eprop_isi_trace_cutoff ≥ 0 required." );
   }

+  if ( delay_rec_out_ < 1 )
+  {
+    throw BadProperty( "Connection delay from recurrent to output neuron ≥ 1 required." );
+  }
+
+  if ( delay_out_rec_ < 1 )
+  {
+    throw BadProperty( "Broadcast delay of learning signals ≥ 1 required." );
+  }
+
+  delay_total_ = delay_rec_out_ + ( delay_out_rec_ - 1 );
+
   return delta_EL;
 }

@@ -301,6 +329,14 @@ eprop_iaf_adapt::pre_run_hook()
   V_.P_i_in_ = P_.tau_m_ / P_.C_m_ * ( 1.0 - V_.P_v_m_ );
   V_.P_z_in_ = P_.regular_spike_arrival_ ?
1.0 : 1.0 - V_.P_v_m_; V_.P_adapt_ = std::exp( -dt / P_.adapt_tau_ ); + + if ( eprop_history_.empty() ) + { + for ( long t = -P_.delay_total_; t < 0; ++t ) + { + emplace_new_eprop_history_entry( t ); + } + } } long @@ -417,7 +453,8 @@ eprop_iaf_adapt::handle( DataLoggingRequest& e ) void eprop_iaf_adapt::compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, @@ -425,25 +462,30 @@ eprop_iaf_adapt::compute_gradient( const long t_spike, const CommonSynapseProperties& cp, WeightOptimizer* optimizer ) { - double e = 0.0; // eligibility trace - double z = 0.0; // spiking variable - double z_current_buffer = 1.0; // buffer containing the spike that triggered the current integration - double psi = 0.0; // surrogate gradient - double L = 0.0; // learning signal - double grad = 0.0; // gradient + double e = 0.0; // eligibility trace + double z = 0.0; // spiking variable + double z_current = 1.0; // buffer containing the spike that triggered the current integration + double psi = 0.0; // surrogate gradient + double L = 0.0; // learning signal + double grad = 0.0; // gradient const EpropSynapseCommonProperties& ecp = static_cast< const EpropSynapseCommonProperties& >( cp ); const auto optimize_each_step = ( *ecp.optimizer_cp_ ).optimize_each_step_; - auto eprop_hist_it = get_eprop_history( t_spike_previous - 1 ); + auto eprop_hist_it = get_eprop_history( t_spike_previous - P_.delay_total_ ); const long t_compute_until = std::min( t_spike_previous + P_.eprop_isi_trace_cutoff_, t_spike ); for ( long t = t_spike_previous; t < t_compute_until; ++t, ++eprop_hist_it ) { - z = z_previous_buffer; - z_previous_buffer = z_current_buffer; - z_current_buffer = 0.0; + if ( P_.delay_total_ > 1 ) + { + update_pre_syn_buffer_multiple_entries( z, z_current, z_previous, z_previous_buffer, t_spike, t ); + } + else + { + update_pre_syn_buffer_one_entry( z, z_current, z_previous, z_previous_buffer, t_spike, t ); + } psi = eprop_hist_it->surrogate_gradient_; L = eprop_hist_it->learning_signal_; diff --git a/models/eprop_iaf_adapt.h b/models/eprop_iaf_adapt.h index b3c8881123..23d93e42c2 100644 --- a/models/eprop_iaf_adapt.h +++ b/models/eprop_iaf_adapt.h @@ -346,7 +346,8 @@ class eprop_iaf_adapt : public EpropArchivingNodeRecurrent void compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, @@ -357,6 +358,9 @@ class eprop_iaf_adapt : public EpropArchivingNodeRecurrent long get_shift() const override; bool is_eprop_recurrent_node() const override; long get_eprop_isi_trace_cutoff() override; + long get_delay_total() const override; + long get_delay_recurrent_to_readout() const override; + long get_delay_readout_to_recurrent() const override; //! Compute the surrogate gradient. double ( eprop_iaf_adapt::*compute_surrogate_gradient )( double, double, double, double, double, double ); @@ -423,6 +427,15 @@ class eprop_iaf_adapt : public EpropArchivingNodeRecurrent //! eprop_isi_trace_cutoff_ and the inter-spike distance. long eprop_isi_trace_cutoff_; + //! Connection delay from recurrent to output neurons. + long delay_rec_out_; + + //! Broadcast delay of learning signals. + long delay_out_rec_; + + //! Sum of broadcast delay of learning signals and connection delay from recurrent to output neurons. + long delay_total_; + //! 
Default constructor.
   Parameters_();

@@ -571,10 +584,35 @@ eprop_iaf_adapt::get_eprop_isi_trace_cutoff()
   return P_.eprop_isi_trace_cutoff_;
 }

+inline long
+eprop_iaf_adapt::get_delay_total() const
+{
+  return P_.delay_total_;
+}
+
+inline long
+eprop_iaf_adapt::get_delay_recurrent_to_readout() const
+{
+  return P_.delay_rec_out_;
+}
+
+inline long
+eprop_iaf_adapt::get_delay_readout_to_recurrent() const
+{
+  return P_.delay_out_rec_;
+}
+
 inline size_t
 eprop_iaf_adapt::send_test_event( Node& target, size_t receptor_type, synindex, bool )
 {
   SpikeEvent e;
+
+  // To perform a consistency check on the delay parameter d_rec_out between recurrent
+  // neurons and output neurons, the recurrent neurons send a test event with a delay
+  // specified by d_rec_out. Upon receiving the test event from the recurrent neuron,
+  // the output neuron checks if the delay with which the event was received matches
+  // its own specified delay parameter d_rec_out.
+  e.set_delay_steps( P_.delay_rec_out_ );
   e.set_sender( *this );
   return target.handles_test_event( e, receptor_type );
 }
diff --git a/models/eprop_iaf_psc_delta.cpp b/models/eprop_iaf_psc_delta.cpp
index dc57f5a071..e4ec08a3c1 100644
--- a/models/eprop_iaf_psc_delta.cpp
+++ b/models/eprop_iaf_psc_delta.cpp
@@ -88,6 +88,9 @@ nest::eprop_iaf_psc_delta::Parameters_::Parameters_()
   , surrogate_gradient_function_( "piecewise_linear" )
   , kappa_( 0.97 )
   , eprop_isi_trace_cutoff_( std::numeric_limits< long >::max() )
+  , delay_rec_out_( 1 )
+  , delay_out_rec_( 1 )
+  , delay_total_( 1 )
 {
 }

@@ -125,6 +128,11 @@ nest::eprop_iaf_psc_delta::Parameters_::get( DictionaryDatum& d ) const
   def< std::string >( d, names::surrogate_gradient_function, surrogate_gradient_function_ );
   def< double >( d, names::kappa, kappa_ );
   def< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_ );
+
+  double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms();
+  def< double >( d, names::delay_rec_out, delay_rec_out_ms );
+  double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms();
+  def< double >( d, names::delay_out_rec, delay_out_rec_ms );
 }

 double
@@ -179,6 +187,14 @@ nest::eprop_iaf_psc_delta::Parameters_::set( const DictionaryDatum& d, Node* nod
   updateValueParam< double >( d, names::kappa, kappa_, node );
   updateValueParam< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_, node );

+  double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms();
+  updateValueParam< double >( d, names::delay_rec_out, delay_rec_out_ms, node );
+  delay_rec_out_ = Time( Time::ms( delay_rec_out_ms ) ).get_steps();
+
+  double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms();
+  updateValueParam< double >( d, names::delay_out_rec, delay_out_rec_ms, node );
+  delay_out_rec_ = Time( Time::ms( delay_out_rec_ms ) ).get_steps();
+
   if ( V_reset_ >= V_th_ )
   {
     throw BadProperty( "Reset potential must be smaller than threshold." );
@@ -217,6 +233,19 @@ nest::eprop_iaf_psc_delta::Parameters_::set( const DictionaryDatum& d, Node* nod
     throw BadProperty( "Cutoff of integration of eprop trace between spikes eprop_isi_trace_cutoff ≥ 0 required." );
   }

+  if ( delay_rec_out_ < 1 )
+  {
+    throw BadProperty( "Connection delay from recurrent to output neuron ≥ 1 required." );
+  }
+
+  if ( delay_out_rec_ < 1 )
+  {
+    throw BadProperty( "Broadcast delay of learning signals ≥ 1 required."
); + } + + delay_total_ = delay_rec_out_ + ( delay_out_rec_ - 1 ); + + return delta_EL; } @@ -319,6 +348,14 @@ nest::eprop_iaf_psc_delta::pre_run_hook() V_.RefractoryCounts_ = Time( Time::ms( P_.t_ref_ ) ).get_steps(); // since t_ref_ >= 0, this can only fail in error assert( V_.RefractoryCounts_ >= 0 ); + + if ( eprop_history_.empty() ) + { + for ( long t = -P_.delay_total_; t < 0; ++t ) + { + emplace_new_eprop_history_entry( t ); + } + } } long @@ -458,7 +495,8 @@ nest::eprop_iaf_psc_delta::handle( DataLoggingRequest& e ) void eprop_iaf_psc_delta::compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, @@ -466,25 +504,30 @@ eprop_iaf_psc_delta::compute_gradient( const long t_spike, const CommonSynapseProperties& cp, WeightOptimizer* optimizer ) { - double e = 0.0; // eligibility trace - double z = 0.0; // spiking variable - double z_current_buffer = 1.0; // buffer containing the spike that triggered the current integration - double psi = 0.0; // surrogate gradient - double L = 0.0; // learning signal - double grad = 0.0; // gradient + double e = 0.0; // eligibility trace + double z = 0.0; // spiking variable + double z_current = 1.0; // buffer containing the spike that triggered the current integration + double psi = 0.0; // surrogate gradient + double L = 0.0; // learning signal + double grad = 0.0; // gradient const EpropSynapseCommonProperties& ecp = static_cast< const EpropSynapseCommonProperties& >( cp ); const auto optimize_each_step = ( *ecp.optimizer_cp_ ).optimize_each_step_; - auto eprop_hist_it = get_eprop_history( t_spike_previous - 1 ); + auto eprop_hist_it = get_eprop_history( t_spike_previous - P_.delay_total_ ); const long t_compute_until = std::min( t_spike_previous + P_.eprop_isi_trace_cutoff_, t_spike ); for ( long t = t_spike_previous; t < t_compute_until; ++t, ++eprop_hist_it ) { - z = z_previous_buffer; - z_previous_buffer = z_current_buffer; - z_current_buffer = 0.0; + if ( P_.delay_total_ > 1 ) + { + update_pre_syn_buffer_multiple_entries( z, z_current, z_previous, z_previous_buffer, t_spike, t ); + } + else + { + update_pre_syn_buffer_one_entry( z, z_current, z_previous, z_previous_buffer, t_spike, t ); + } psi = eprop_hist_it->surrogate_gradient_; L = eprop_hist_it->learning_signal_; diff --git a/models/eprop_iaf_psc_delta.h b/models/eprop_iaf_psc_delta.h index ef88d62b4a..07dfe00233 100644 --- a/models/eprop_iaf_psc_delta.h +++ b/models/eprop_iaf_psc_delta.h @@ -264,17 +264,21 @@ class eprop_iaf_psc_delta : public EpropArchivingNodeRecurrent void compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, double& weight, const CommonSynapseProperties& cp, - WeightOptimizer* optimizer ) override; + WeightOptimizer* optimizer ) override; long get_shift() const override; bool is_eprop_recurrent_node() const override; long get_eprop_isi_trace_cutoff() override; + long get_delay_total() const override; + long get_delay_recurrent_to_readout() const override; + long get_delay_readout_to_recurrent() const override; //! Compute the surrogate gradient. double ( eprop_iaf_psc_delta::*compute_surrogate_gradient )( double, double, double, double, double, double ); @@ -343,6 +347,15 @@ class eprop_iaf_psc_delta : public EpropArchivingNodeRecurrent //! 
eprop_isi_trace_cutoff_ and the inter-spike distance.
   long eprop_isi_trace_cutoff_;

+  //! Connection delay from recurrent to output neurons.
+  long delay_rec_out_;
+
+  //! Broadcast delay of learning signals.
+  long delay_out_rec_;
+
+  //! Sum of broadcast delay of learning signals and connection delay from recurrent to output neurons.
+  long delay_total_;
+
   Parameters_(); //!< Sets default parameter values

   void get( DictionaryDatum& ) const; //!< Store current values in dictionary
@@ -474,10 +487,35 @@ eprop_iaf_psc_delta::get_eprop_isi_trace_cutoff()
   return P_.eprop_isi_trace_cutoff_;
 }

+inline long
+eprop_iaf_psc_delta::get_delay_total() const
+{
+  return P_.delay_total_;
+}
+
+inline long
+eprop_iaf_psc_delta::get_delay_recurrent_to_readout() const
+{
+  return P_.delay_rec_out_;
+}
+
+inline long
+eprop_iaf_psc_delta::get_delay_readout_to_recurrent() const
+{
+  return P_.delay_out_rec_;
+}
+
 inline size_t
 nest::eprop_iaf_psc_delta::send_test_event( Node& target, size_t receptor_type, synindex, bool )
 {
   SpikeEvent e;
+
+  // To perform a consistency check on the delay parameter d_rec_out between recurrent
+  // neurons and output neurons, the recurrent neurons send a test event with a delay
+  // specified by d_rec_out. Upon receiving the test event from the recurrent neuron,
+  // the output neuron checks if the delay with which the event was received matches
+  // its own specified delay parameter d_rec_out.
+  e.set_delay_steps( P_.delay_rec_out_ );
   e.set_sender( *this );
   return target.handles_test_event( e, receptor_type );
 }
diff --git a/models/eprop_learning_signal_connection.h b/models/eprop_learning_signal_connection.h
index 928f150871..a8205db73b 100644
--- a/models/eprop_learning_signal_connection.h
+++ b/models/eprop_learning_signal_connection.h
@@ -161,6 +161,12 @@ class eprop_learning_signal_connection : public Connection< targetidentifierT >
   {
     LearningSignalConnectionEvent ge;

+    const long delay_out_rec = t.get_delay_readout_to_recurrent();
+    if ( delay_out_rec != get_delay_steps() )
+    {
+      throw IllegalConnection( "delay == delay_out_rec from target neuron required."
); + } + s.sends_secondary_event( ge ); ge.set_sender( s ); Connection< targetidentifierT >::target_.set_rport( t.handles_test_event( ge, receptor_type ) ); diff --git a/models/eprop_readout.cpp b/models/eprop_readout.cpp index 2a1fe18633..f80ec57eac 100644 --- a/models/eprop_readout.cpp +++ b/models/eprop_readout.cpp @@ -76,6 +76,8 @@ eprop_readout::Parameters_::Parameters_() , tau_m_( 10.0 ) , V_min_( -std::numeric_limits< double >::max() ) , eprop_isi_trace_cutoff_( std::numeric_limits< long >::max() ) + , delay_rec_out_( 1 ) + , delay_out_rec_( 1 ) { } @@ -113,6 +115,11 @@ eprop_readout::Parameters_::get( DictionaryDatum& d ) const def< double >( d, names::tau_m, tau_m_ ); def< double >( d, names::V_min, V_min_ + E_L_ ); def< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_ ); + + double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms(); + def< double >( d, names::delay_rec_out, delay_rec_out_ms ); + double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms(); + def< double >( d, names::delay_out_rec, delay_out_rec_ms ); } double @@ -131,6 +138,14 @@ eprop_readout::Parameters_::set( const DictionaryDatum& d, Node* node ) updateValueParam< double >( d, names::tau_m, tau_m_, node ); updateValueParam< long >( d, names::eprop_isi_trace_cutoff, eprop_isi_trace_cutoff_, node ); + double delay_rec_out_ms = Time( Time::step( delay_rec_out_ ) ).get_ms(); + updateValueParam< double >( d, names::delay_rec_out, delay_rec_out_ms, node ); + delay_rec_out_ = Time( Time::ms( delay_rec_out_ms ) ).get_steps(); + + double delay_out_rec_ms = Time( Time::step( delay_out_rec_ ) ).get_ms(); + updateValueParam< double >( d, names::delay_out_rec, delay_out_rec_ms, node ); + delay_out_rec_ = Time( Time::ms( delay_out_rec_ms ) ).get_steps(); + if ( C_m_ <= 0 ) { throw BadProperty( "Membrane capacitance C_m > 0 required." ); @@ -146,6 +161,16 @@ eprop_readout::Parameters_::set( const DictionaryDatum& d, Node* node ) throw BadProperty( "Cutoff of integration of eprop trace between spikes eprop_isi_trace_cutoff ≥ 0 required." ); } + if ( delay_rec_out_ < 1 ) + { + throw BadProperty( "Connection delay from recurrent to output neuron ≥ 1 required." ); + } + + if ( delay_out_rec_ < 1 ) + { + throw BadProperty( "Broadcast delay of learning signals ≥ 1 required." ); + } + return delta_EL; } @@ -209,6 +234,19 @@ eprop_readout::pre_run_hook() V_.P_v_m_ = std::exp( -dt / P_.tau_m_ ); V_.P_i_in_ = P_.tau_m_ / P_.C_m_ * ( 1.0 - V_.P_v_m_ ); V_.P_z_in_ = P_.regular_spike_arrival_ ? 
1.0 : 1.0 - V_.P_v_m_;
+
+  if ( eprop_history_.empty() )
+  {
+    for ( long t = -P_.delay_rec_out_; t < 0; ++t )
+    {
+      emplace_new_eprop_history_entry( t );
+    }
+
+    for ( int i = 0; i < P_.delay_out_rec_ - 1; i++ )
+    {
+      S_.error_signal_deque_.push_back( 0.0 );
+    }
+  }
 }

 long
@@ -249,7 +287,10 @@ eprop_readout::update( Time const& origin, const long from, const long to )
     S_.readout_signal_ *= S_.learning_window_signal_;
     S_.error_signal_ *= S_.learning_window_signal_;

-    error_signal_buffer[ lag ] = S_.error_signal_;
+    S_.error_signal_deque_.push_back( S_.error_signal_ );
+    double err_sig = S_.error_signal_deque_.front(); // get delay_out_rec-th value
+    S_.error_signal_deque_.pop_front();
+    error_signal_buffer[ lag ] = err_sig;

     emplace_new_eprop_history_entry( t, false );

@@ -331,7 +372,8 @@ eprop_readout::handle( DataLoggingRequest& e )
 void
 eprop_readout::compute_gradient( const long t_spike,
   const long t_spike_previous,
-  double& z_previous_buffer,
+  std::queue< double >& z_previous_buffer,
+  double& z_previous,
   double& z_bar,
   double& e_bar,
   double& epsilon,
@@ -339,10 +381,10 @@ eprop_readout::compute_gradient( const long t_spike,
   const CommonSynapseProperties& cp,
   WeightOptimizer* optimizer )
 {
-  double z = 0.0;                // spiking variable
-  double z_current_buffer = 1.0; // buffer containing the spike that triggered the current integration
-  double L = 0.0;                // error signal
-  double grad = 0.0;             // gradient
+  double z = 0.0;         // spiking variable
+  double z_current = 1.0; // buffer containing the spike that triggered the current integration
+  double L = 0.0;         // error signal
+  double grad = 0.0;      // gradient

   const EpropSynapseCommonProperties& ecp = static_cast< const EpropSynapseCommonProperties& >( cp );
   const auto optimize_each_step = ( *ecp.optimizer_cp_ ).optimize_each_step_;

@@ -353,9 +395,14 @@ eprop_readout::compute_gradient( const long t_spike,
   for ( long t = t_spike_previous; t < t_compute_until; ++t, ++eprop_hist_it )
   {
-    z = z_previous_buffer;
-    z_previous_buffer = z_current_buffer;
-    z_current_buffer = 0.0;
+    if ( P_.delay_rec_out_ > 1 )
+    {
+      update_pre_syn_buffer_multiple_entries( z, z_current, z_previous, z_previous_buffer, t_spike, t );
+    }
+    else
+    {
+      update_pre_syn_buffer_one_entry( z, z_current, z_previous, z_previous_buffer, t_spike, t );
+    }

     L = eprop_hist_it->error_signal_;
diff --git a/models/eprop_readout.h b/models/eprop_readout.h
index 075f7a2553..23ef107652 100644
--- a/models/eprop_readout.h
+++ b/models/eprop_readout.h
@@ -302,7 +302,8 @@ class eprop_readout : public EpropArchivingNodeReadout
   void compute_gradient( const long t_spike,
     const long t_spike_previous,
-    double& z_previous_buffer,
+    std::queue< double >& z_previous_buffer,
+    double& z_previous,
     double& z_bar,
     double& e_bar,
     double& epsilon,
@@ -313,6 +314,7 @@ class eprop_readout : public EpropArchivingNodeReadout
   long get_shift() const override;
   bool is_eprop_recurrent_node() const override;
   long get_eprop_isi_trace_cutoff() override;
+  long get_delay_total() const override;

   //! Compute the error signal based on the mean-squared error loss.
   void compute_error_signal_mean_squared_error( const long lag );
@@ -351,6 +353,12 @@ class eprop_readout : public EpropArchivingNodeReadout
   //! eprop_isi_trace_cutoff_ and the inter-spike distance.
   long eprop_isi_trace_cutoff_;

+  //! Connection delay from recurrent to output neurons.
+  long delay_rec_out_;
+
+  //! Broadcast delay of learning signals.
+  long delay_out_rec_;
+
   //! Default constructor.
  Parameters_();

@@ -393,6 +401,9 @@ class eprop_readout : public EpropArchivingNodeReadout
   //! Set the state variables.
   void set( const DictionaryDatum&, const Parameters_&, double, Node* );
+
+  //! Queue to hold last delay_out_rec error signals.
+  std::deque< double > error_signal_deque_;
 };

 //! Structure of buffers.
@@ -492,14 +503,35 @@ eprop_readout::get_eprop_isi_trace_cutoff()
   return P_.eprop_isi_trace_cutoff_;
 }

+inline long
+eprop_readout::get_delay_total() const
+{
+  return P_.delay_rec_out_;
+}
+
 inline size_t
-eprop_readout::handles_test_event( SpikeEvent&, size_t receptor_type )
+eprop_readout::handles_test_event( SpikeEvent& e, size_t receptor_type )
 {
   if ( receptor_type != 0 )
   {
     throw UnknownReceptorType( receptor_type, get_name() );
   }

+  // To perform a consistency check on the delay parameter d_rec_out between recurrent
+  // neurons and output neurons, the recurrent neurons send a test event with a delay
+  // specified by d_rec_out. Upon receiving the test event from the recurrent neuron,
+  // the output neuron checks if the delay with which the event was received matches
+  // its own specified delay parameter d_rec_out.
+
+  // Ensure that the spike event was not sent by a proxy node.
+  if ( e.get_sender().get_node_id() != 0 )
+  {
+    if ( e.get_delay_steps() != P_.delay_rec_out_ )
+    {
+      throw IllegalConnection(
+        "delay_rec_out from recurrent neuron equal to delay_rec_out from readout neuron required." );
+    }
+  }
   return 0;
 }
diff --git a/models/eprop_synapse.h b/models/eprop_synapse.h
index 16bf25992b..5a1e542831 100644
--- a/models/eprop_synapse.h
+++ b/models/eprop_synapse.h
@@ -267,6 +267,9 @@ class eprop_synapse : public Connection< targetidentifierT >
   //! Update values in parameter dictionary.
   void set_status( const DictionaryDatum& d, ConnectorModel& cm );

+  //! Initialize the presynaptic buffer.
+  void initialize_z_previous_buffer( const long delay_total );
+
   //! Send the spike event.
   bool send( Event& e, size_t thread, const EpropSynapseCommonProperties& cp );

@@ -326,7 +329,13 @@
   double epsilon_ = 0.0;

   //! Value of spiking variable one time step before t_previous_spike_.
-  double z_previous_buffer_ = 0.0;
+  double z_previous_ = 0.0;
+
+  //! Queue of length delay_total_ + 1 to hold previous spiking variables.
+  std::queue< double > z_previous_buffer_;
+
+  //! Sum of broadcast delay of learning signals and connection delay from recurrent to output neurons.
+  long delay_total_ = 0;

   /**
    * Optimizer
@@ -396,7 +405,7 @@ eprop_synapse< targetidentifierT >::operator=( const eprop_synapse& es )
   z_bar_ = es.z_bar_;
   e_bar_ = es.e_bar_;
   epsilon_ = es.epsilon_;
-  z_previous_buffer_ = es.z_previous_buffer_;
+  z_previous_ = es.z_previous_;
   optimizer_ = es.optimizer_;

   return *this;
@@ -434,7 +443,7 @@ eprop_synapse< targetidentifierT >::operator=( eprop_synapse&& es )
   z_bar_ = es.z_bar_;
   e_bar_ = es.e_bar_;
   epsilon_ = es.epsilon_;
-  z_previous_buffer_ = es.z_previous_buffer_;
+  z_previous_ = es.z_previous_;
   optimizer_ = es.optimizer_;
   es.optimizer_ = nullptr;

@@ -450,11 +459,22 @@ eprop_synapse< targetidentifierT >::check_connection( Node& s,
   const CommonPropertiesType& cp )
 {
   // When we get here, delay has been set so we can check it.
-  if ( get_delay_steps() != 1 )
+  if ( get_delay_steps() < 1 )
   {
-    throw IllegalConnection( "eprop synapses currently require a delay of one simulation step" );
+    throw IllegalConnection( "eprop synapses require a delay of at least one simulation step" );
   }

+  bool is_recurrent_node = t.is_eprop_recurrent_node();
+
+  if ( not is_recurrent_node )
+  {
+    const long delay_rec_out = t.get_delay_total();
+    if ( delay_rec_out != get_delay_steps() )
+    {
+      throw IllegalConnection( "delay == delay_rec_out from target neuron required." );
+    }
+  }
+
   ConnTestDummyNode dummy_target;
   ConnectionBase::check_connection_( dummy_target, s, t, receptor_type );

@@ -471,6 +491,17 @@ eprop_synapse< targetidentifierT >::delete_optimizer()
   // do not set to nullptr to allow detection of double deletion
 }

+template < typename targetidentifierT >
+void
+eprop_synapse< targetidentifierT >::initialize_z_previous_buffer( const long delay_total )
+{
+  for ( int i = 0; i < delay_total; i++ )
+  {
+    z_previous_buffer_.push( 0.0 );
+  }
+  z_previous_buffer_.push( 1.0 );
+}
+
 template < typename targetidentifierT >
 bool
 eprop_synapse< targetidentifierT >::send( Event& e, size_t thread, const EpropSynapseCommonProperties& cp )
@@ -479,11 +510,19 @@ eprop_synapse< targetidentifierT >::send( Event& e, size_t thread, const EpropSy
   assert( target );

   const long t_spike = e.get_stamp().get_steps();
+  const long delay_total = target->get_delay_total();

   if ( t_spike_previous_ != 0 )
   {
     target->compute_gradient(
-      t_spike, t_spike_previous_, z_previous_buffer_, z_bar_, e_bar_, epsilon_, weight_, cp, optimizer_ );
+      t_spike, t_spike_previous_, z_previous_buffer_, z_previous_, z_bar_, e_bar_, epsilon_, weight_, cp, optimizer_ );
+  }
+  else
+  {
+    if ( delay_total > 1 )
+    {
+      initialize_z_previous_buffer( delay_total );
+    }
   }

   const long eprop_isi_trace_cutoff = target->get_eprop_isi_trace_cutoff();
diff --git a/nestkernel/eprop_archiving_node.cpp b/nestkernel/eprop_archiving_node.cpp
index 5099acd322..324d4407f9 100644
--- a/nestkernel/eprop_archiving_node.cpp
+++ b/nestkernel/eprop_archiving_node.cpp
@@ -151,11 +151,15 @@ EpropArchivingNodeRecurrent::write_learning_signal_to_history( const long time_s
     return;
   }

-  long shift = delay_rec_out_ + delay_out_rec_;
+  long shift = delay_out_rec_;

   if ( has_norm_step )
   {
-    shift += delay_out_norm_;
+    shift += delay_rec_out_ + delay_out_norm_;
+  }
+  else
+  {
+    shift += get_delay_total();
   }
diff --git a/nestkernel/eprop_archiving_node.h b/nestkernel/eprop_archiving_node.h
index c692c96024..87c7546431 100644
--- a/nestkernel/eprop_archiving_node.h
+++ b/nestkernel/eprop_archiving_node.h
@@ -157,6 +157,24 @@ class EpropArchivingNode : public Node
   //! the first update.
   void erase_used_eprop_history( const long eprop_isi_trace_cutoff );

+  //! Update multiple entries in the presynaptic buffer. This function is used when the total synaptic delay
+  //! is greater than one.
+  void update_pre_syn_buffer_multiple_entries( double& z,
+    double& z_current,
+    double& z_previous,
+    std::queue< double >& z_previous_buffer,
+    const long t_spike,
+    const long t );
+
+  //! Update one entry in the presynaptic buffer. This function is used when the total synaptic delay
+  //! is equal to one.
+  void update_pre_syn_buffer_one_entry( double& z,
+    double& z_current,
+    double& z_previous,
+    std::queue< double >& z_previous_buffer,
+    const long t_spike,
+    const long t );
+
 protected:
  //! Number of incoming eprop synapses
  size_t eprop_indegree_;
diff --git a/nestkernel/eprop_archiving_node_impl.h b/nestkernel/eprop_archiving_node_impl.h
index 3f66a8bf57..445d699dc3 100644
--- a/nestkernel/eprop_archiving_node_impl.h
+++ b/nestkernel/eprop_archiving_node_impl.h
@@ -54,7 +54,7 @@ EpropArchivingNode< HistEntryT >::register_eprop_connection( const bool is_bsshs
 {
   ++eprop_indegree_;

-  const long t_first_entry = is_bsshslm_2020_model ? get_shift() : -delay_rec_out_;
+  const long t_first_entry = is_bsshslm_2020_model ? get_shift() : -get_delay_total();

   const auto it_hist = get_update_history( t_first_entry );

@@ -80,7 +80,7 @@ EpropArchivingNode< HistEntryT >::write_update_to_history( const long t_previous
     return;
   }

-  const long shift = is_bsshslm_2020_model ? get_shift() : -delay_rec_out_;
+  const long shift = is_bsshslm_2020_model ? get_shift() : -get_delay_total();

   const auto it_hist_curr = get_update_history( t_current_update + shift );

@@ -205,6 +205,47 @@ EpropArchivingNode< HistEntryT >::erase_used_update_history()
   }
 }

+
+template < typename HistEntryT >
+void
+EpropArchivingNode< HistEntryT >::update_pre_syn_buffer_multiple_entries( double& z,
+  double& z_current,
+  double& z_previous,
+  std::queue< double >& z_previous_buffer,
+  const long t_spike,
+  const long t )
+{
+  if ( not z_previous_buffer.empty() )
+  {
+    z = z_previous_buffer.front();
+    z_previous_buffer.pop();
+  }
+
+  if ( t_spike - t > 1 )
+  {
+    z_previous_buffer.push( 0.0 );
+  }
+  else
+  {
+    z_previous_buffer.push( 1.0 );
+  }
+}
+
+template < typename HistEntryT >
+void
+EpropArchivingNode< HistEntryT >::update_pre_syn_buffer_one_entry( double& z,
+  double& z_current,
+  double& z_previous,
+  std::queue< double >& z_previous_buffer,
+  const long t_spike,
+  const long t )
+{
+  z = z_previous;
+  z_previous = z_current;
+  z_current = 0.0;
+}
+
+
 } // namespace nest

 #endif // EPROP_ARCHIVING_NODE_IMPL_H
diff --git a/nestkernel/nest_names.cpp b/nestkernel/nest_names.cpp
index 453fba42d5..5efddaa0a1 100644
--- a/nestkernel/nest_names.cpp
+++ b/nestkernel/nest_names.cpp
@@ -127,6 +127,8 @@ const Name dead_time( "dead_time" );
 const Name dead_time_random( "dead_time_random" );
 const Name dead_time_shape( "dead_time_shape" );
 const Name delay( "delay" );
+const Name delay_out_rec( "delay_out_rec" );
+const Name delay_rec_out( "delay_rec_out" );
 const Name delay_u_bars( "delay_u_bars" );
 const Name deliver_interval( "deliver_interval" );
 const Name delta( "delta" );
diff --git a/nestkernel/nest_names.h b/nestkernel/nest_names.h
index 7eebf1167a..eb967613dc 100644
--- a/nestkernel/nest_names.h
+++ b/nestkernel/nest_names.h
@@ -154,6 +154,8 @@ extern const Name dead_time;
 extern const Name dead_time_random;
 extern const Name dead_time_shape;
 extern const Name delay;
+extern const Name delay_out_rec;
+extern const Name delay_rec_out;
 extern const Name delay_u_bars;
 extern const Name deliver_interval;
 extern const Name delta;
diff --git a/nestkernel/node.cpp b/nestkernel/node.cpp
index e6d994f163..0783b8a715 100644
--- a/nestkernel/node.cpp
+++ b/nestkernel/node.cpp
@@ -228,6 +228,24 @@ Node::get_shift() const
   throw IllegalConnection( "The target node is not an e-prop neuron." );
 }

+long
+Node::get_delay_total() const
+{
+  throw IllegalConnection( "The target node is not an e-prop neuron." );
+}
+
+long
+Node::get_delay_recurrent_to_readout() const
+{
+  throw IllegalConnection( "The target node is not an e-prop neuron." );
+}
); +} + +long +Node::get_delay_readout_to_recurrent() const +{ + throw IllegalConnection( "The target node is not an e-prop neuron." ); +} + void Node::write_update_to_history( const long t_previous_update, const long t_current_update, @@ -553,6 +571,7 @@ nest::Node::get_tau_syn_in( int ) void nest::Node::compute_gradient( const long, const long, + std::queue< double >&, double&, double&, double&, diff --git a/nestkernel/node.h b/nestkernel/node.h index 067de6c548..b405047e69 100644 --- a/nestkernel/node.h +++ b/nestkernel/node.h @@ -26,6 +26,7 @@ // C++ includes: #include #include +#include #include #include #include @@ -521,6 +522,30 @@ class Node virtual long get_eprop_isi_trace_cutoff(); + /** + * Get sum of broadcast delay of learning signals and connection delay from recurrent to output neurons. + * + * @throws IllegalConnection + */ + + virtual long get_delay_total() const; + + /** + * Get connection delay from recurrent to output neurons. + * + * @throws IllegalConnection + */ + + virtual long get_delay_recurrent_to_readout() const; + + /** + * Get connection delay of learning signals and connection delay from recurrent to output neurons. + * + * @throws IllegalConnection + */ + + virtual long get_delay_readout_to_recurrent() const; + /** * Return if the node is part of the recurrent network (and thus not a readout neuron). * @@ -821,9 +846,11 @@ class Node * * @params presyn_isis is cleared during call */ + virtual void compute_gradient( const long t_spike, const long t_spike_previous, - double& z_previous_buffer, + std::queue< double >& z_previous_buffer, + double& z_previous, double& z_bar, double& e_bar, double& epsilon, diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation.py b/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation.py index 16ffae807f..8232b4d97f 100644 --- a/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation.py +++ b/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation.py @@ -131,6 +131,8 @@ "spacing": 50, # time steps of break between two cues "bg_noise": 1050, # time steps of background noise "recall": 150, # time steps of recall + "delay_rec_out": 1, # time steps of connection delay from recurrent to output neurons + "delay_out_rec": 1, # time steps of broadcast delay of learning signals } steps["cues"] = n_cues * (steps["cue"] + steps["spacing"]) # time steps of all cues @@ -195,6 +197,8 @@ "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning "tau_m": 20.0, # ms, membrane time constant "V_m": 0.0, # mV, initial value of the membrane voltage + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } params_nrn_reg = { @@ -213,6 +217,8 @@ "V_m": 0.0, "V_th": 0.6, # mV, spike threshold membrane voltage "kappa": 0.97, # low-pass filter of the eligibility trace + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } params_nrn_ad = { @@ -233,6 +239,8 @@ "V_m": 0.0, "V_th": 0.6, "kappa": 0.97, + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } 
params_nrn_ad["adapt_beta"] = 1.7 * ( @@ -388,10 +396,11 @@ def calculate_glorot_dist(fan_in, fan_out): params_syn_out = params_syn_base.copy() params_syn_out["weight"] = weights_rec_out +params_syn_out["delay"] = duration["delay_rec_out"] params_syn_feedback = { "synapse_model": "eprop_learning_signal_connection", - "delay": duration["step"], + "delay": duration["delay_out_rec"], "weight": weights_out_rec, } diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_classification_neuromorphic_mnist.py b/pynest/examples/eprop_plasticity/eprop_supervised_classification_neuromorphic_mnist.py index 0165d1770c..c39e6fc534 100644 --- a/pynest/examples/eprop_plasticity/eprop_supervised_classification_neuromorphic_mnist.py +++ b/pynest/examples/eprop_plasticity/eprop_supervised_classification_neuromorphic_mnist.py @@ -123,7 +123,10 @@ n_iter = 200 # number of iterations test_every = 10 # cyclical number of training iterations after which to test the performance -steps = {} +steps = { + "delay_rec_out": 1, # time steps of connection delay from recurrent to output neurons + "delay_out_rec": 1, # time steps of broadcast delay of learning signals +} steps["sequence"] = 300 # time steps of one full sequence steps["learning_window"] = 10 # time steps of window with non-zero learning signals @@ -190,6 +193,8 @@ "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning "tau_m": 100.0, # ms, membrane time constant "V_m": 0.0, # mV, initial value of the membrane voltage + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } params_nrn_rec = { @@ -208,6 +213,8 @@ "V_m": 0.0, "V_th": 0.6, # mV, spike threshold membrane voltage "kappa": 0.99, # low-pass filter of the eligibility trace + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } #################### @@ -348,10 +355,11 @@ def create_mask(weights, sparsity_level): params_syn_in = params_syn_base.copy() params_syn_rec = params_syn_base.copy() params_syn_out = params_syn_base.copy() +params_syn_out["delay"] = duration["delay_rec_out"] params_syn_feedback = { "synapse_model": "eprop_learning_signal_connection", - "delay": duration["step"], + "delay": duration["delay_out_rec"], "weight": weights_out_rec, } diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves.py b/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves.py index e4e94f17da..d4ea953325 100644 --- a/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves.py +++ b/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves.py @@ -119,6 +119,8 @@ steps = { "sequence": 1000, # time steps of one full sequence + "delay_rec_out": 1, # time steps of connection delay from recurrent to output neurons + "delay_out_rec": 1, # time steps of broadcast delay of learning signals } steps["learning_window"] = steps["sequence"] # time steps of window with non-zero learning signals @@ -180,6 +182,8 @@ "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning "tau_m": 30.0, # ms, membrane time constant "V_m": 0.0, # mV, initial value of the membrane voltage + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": 
duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } params_nrn_rec = { @@ -198,6 +202,8 @@ "V_m": 0.0, "V_th": 0.03, # mV, spike threshold membrane voltage "kappa": 0.97, # low-pass filter of the eligibility trace + "delay_out_rec": duration["delay_out_rec"], # ms, broadcast delay of learning signals + "delay_rec_out": duration["delay_rec_out"], # ms, connection delay from recurrent to output neurons } if model_nrn_rec == "eprop_iaf_psc_delta": @@ -323,10 +329,11 @@ params_syn_out = params_syn_base.copy() params_syn_out["weight"] = weights_rec_out +params_syn_out["delay"] = duration["delay_rec_out"] params_syn_feedback = { "synapse_model": "eprop_learning_signal_connection", - "delay": duration["step"], + "delay": duration["delay_out_rec"], "weight": weights_out_rec, }