diff --git a/doc/htmldoc/examples/index.rst b/doc/htmldoc/examples/index.rst
index dba7539b99..5ed1aae50c 100644
--- a/doc/htmldoc/examples/index.rst
+++ b/doc/htmldoc/examples/index.rst
@@ -212,10 +212,6 @@ PyNEST examples
.. grid-item-card:: :doc:`../auto_examples/eprop_plasticity/index`
:img-top: ../static/img/pynest/eprop_supervised_classification_infrastructure.png
- * :doc:`/auto_examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020`
- * :doc:`/auto_examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020`
- * :doc:`/auto_examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020`
- * :doc:`/auto_examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020`
* :doc:`/auto_examples/eprop_plasticity/eprop_supervised_regression_sine-waves`
* :doc:`/auto_examples/eprop_plasticity/eprop_supervised_classification_neuromorphic_mnist`
diff --git a/doc/htmldoc/static/img/eprop_model_diagram.svg b/doc/htmldoc/static/img/eprop_model_diagram.svg
index 1b9a6d0fdb..d8654b81d7 100644
--- a/doc/htmldoc/static/img/eprop_model_diagram.svg
+++ b/doc/htmldoc/static/img/eprop_model_diagram.svg
@@ -1000,7 +1000,7 @@
id="tspan5255-4-9-4"
x="121.03685"
y="137.2178"
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_iaf_adapt_bsshslm_2020
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_iaf_adapt
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_readout_bsshslm_2020
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_readout
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_synapse_bsshslm_2020
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_synapse
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_learning_signal_connection_bsshslm_2020
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_learning_signal_connection
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_iaf_bsshslm_2020
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:8px;font-family:'Liberation Mono';-inkscape-font-specification:'Liberation Mono';fill:#099268;fill-opacity:1">eprop_iaf
diff --git a/models/eprop_iaf_adapt_bsshslm_2020.cpp b/models/eprop_iaf_adapt_bsshslm_2020.cpp
deleted file mode 100644
--- a/models/eprop_iaf_adapt_bsshslm_2020.cpp
+++ /dev/null
-/*
- * eprop_iaf_adapt_bsshslm_2020.cpp
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-// nest models
-#include "eprop_iaf_adapt_bsshslm_2020.h"
-
-// C++
-#include
-
-// libnestutil
-#include "dict_util.h"
-#include "numerics.h"
-
-// nestkernel
-#include "eprop_archiving_node_recurrent_impl.h"
-#include "exceptions.h"
-#include "kernel_manager.h"
-#include "nest_impl.h"
-#include "universal_data_logger_impl.h"
-
-// sli
-#include "dictutils.h"
-
-namespace nest
-{
-
-void
-register_eprop_iaf_adapt_bsshslm_2020( const std::string& name )
-{
- register_node_model< eprop_iaf_adapt_bsshslm_2020 >( name );
-}
-
-/* ----------------------------------------------------------------
- * Recordables map
- * ---------------------------------------------------------------- */
-
-RecordablesMap< eprop_iaf_adapt_bsshslm_2020 > eprop_iaf_adapt_bsshslm_2020::recordablesMap_;
-
-template <>
-void
-RecordablesMap< eprop_iaf_adapt_bsshslm_2020 >::create()
-{
- insert_( names::eprop_history_duration, &eprop_iaf_adapt_bsshslm_2020::get_eprop_history_duration );
- insert_( names::adaptation, &eprop_iaf_adapt_bsshslm_2020::get_adaptation_ );
- insert_( names::V_th_adapt, &eprop_iaf_adapt_bsshslm_2020::get_v_th_adapt_ );
- insert_( names::learning_signal, &eprop_iaf_adapt_bsshslm_2020::get_learning_signal_ );
- insert_( names::surrogate_gradient, &eprop_iaf_adapt_bsshslm_2020::get_surrogate_gradient_ );
- insert_( names::V_m, &eprop_iaf_adapt_bsshslm_2020::get_v_m_ );
-}
-
-/* ----------------------------------------------------------------
- * Default constructors for parameters, state, and buffers
- * ---------------------------------------------------------------- */
-
-eprop_iaf_adapt_bsshslm_2020::Parameters_::Parameters_()
- : adapt_beta_( 1.0 )
- , adapt_tau_( 10.0 )
- , C_m_( 250.0 )
- , c_reg_( 0.0 )
- , E_L_( -70.0 )
- , f_target_( 0.01 )
- , beta_( 1.0 )
- , gamma_( 0.3 )
- , I_e_( 0.0 )
- , regular_spike_arrival_( true )
- , surrogate_gradient_function_( "piecewise_linear" )
- , t_ref_( 2.0 )
- , tau_m_( 10.0 )
- , V_min_( -std::numeric_limits< double >::max() )
- , V_th_( -55.0 - E_L_ )
-{
-}
-
-eprop_iaf_adapt_bsshslm_2020::State_::State_()
- : adapt_( 0.0 )
- , v_th_adapt_( 15.0 )
- , learning_signal_( 0.0 )
- , r_( 0 )
- , surrogate_gradient_( 0.0 )
- , i_in_( 0.0 )
- , v_m_( 0.0 )
- , z_( 0.0 )
- , z_in_( 0.0 )
-{
-}
-
-eprop_iaf_adapt_bsshslm_2020::Buffers_::Buffers_( eprop_iaf_adapt_bsshslm_2020& n )
- : logger_( n )
-{
-}
-
-eprop_iaf_adapt_bsshslm_2020::Buffers_::Buffers_( const Buffers_&, eprop_iaf_adapt_bsshslm_2020& n )
- : logger_( n )
-{
-}
-
-/* ----------------------------------------------------------------
- * Getter and setter functions for parameters and state
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_adapt_bsshslm_2020::Parameters_::get( DictionaryDatum& d ) const
-{
- def< double >( d, names::adapt_beta, adapt_beta_ );
- def< double >( d, names::adapt_tau, adapt_tau_ );
- def< double >( d, names::C_m, C_m_ );
- def< double >( d, names::c_reg, c_reg_ );
- def< double >( d, names::E_L, E_L_ );
- def< double >( d, names::f_target, f_target_ );
- def< double >( d, names::beta, beta_ );
- def< double >( d, names::gamma, gamma_ );
- def< double >( d, names::I_e, I_e_ );
- def< bool >( d, names::regular_spike_arrival, regular_spike_arrival_ );
- def< std::string >( d, names::surrogate_gradient_function, surrogate_gradient_function_ );
- def< double >( d, names::t_ref, t_ref_ );
- def< double >( d, names::tau_m, tau_m_ );
- def< double >( d, names::V_min, V_min_ + E_L_ );
- def< double >( d, names::V_th, V_th_ + E_L_ );
-}
-
-double
-eprop_iaf_adapt_bsshslm_2020::Parameters_::set( const DictionaryDatum& d, Node* node )
-{
- // if leak potential is changed, adjust all variables defined relative to it
- const double ELold = E_L_;
- updateValueParam< double >( d, names::E_L, E_L_, node );
- const double delta_EL = E_L_ - ELold;
-
- V_th_ -= updateValueParam< double >( d, names::V_th, V_th_, node ) ? E_L_ : delta_EL;
- V_min_ -= updateValueParam< double >( d, names::V_min, V_min_, node ) ? E_L_ : delta_EL;
-
- updateValueParam< double >( d, names::adapt_beta, adapt_beta_, node );
- updateValueParam< double >( d, names::adapt_tau, adapt_tau_, node );
- updateValueParam< double >( d, names::C_m, C_m_, node );
- updateValueParam< double >( d, names::c_reg, c_reg_, node );
-
- if ( updateValueParam< double >( d, names::f_target, f_target_, node ) )
- {
- f_target_ /= 1000.0; // convert from spikes/s to spikes/ms
- }
-
- updateValueParam< double >( d, names::beta, beta_, node );
- updateValueParam< double >( d, names::gamma, gamma_, node );
- updateValueParam< double >( d, names::I_e, I_e_, node );
- updateValueParam< bool >( d, names::regular_spike_arrival, regular_spike_arrival_, node );
-
- if ( updateValueParam< std::string >( d, names::surrogate_gradient_function, surrogate_gradient_function_, node ) )
- {
- eprop_iaf_adapt_bsshslm_2020* nrn = dynamic_cast< eprop_iaf_adapt_bsshslm_2020* >( node );
- assert( nrn );
- nrn->compute_surrogate_gradient_ = nrn->find_surrogate_gradient( surrogate_gradient_function_ );
- }
-
- updateValueParam< double >( d, names::t_ref, t_ref_, node );
- updateValueParam< double >( d, names::tau_m, tau_m_, node );
-
- if ( adapt_beta_ < 0 )
- {
- throw BadProperty( "Threshold adaptation prefactor adapt_beta ≥ 0 required." );
- }
-
- if ( adapt_tau_ <= 0 )
- {
- throw BadProperty( "Threshold adaptation time constant adapt_tau > 0 required." );
- }
-
- if ( C_m_ <= 0 )
- {
- throw BadProperty( "Membrane capacitance C_m > 0 required." );
- }
-
- if ( c_reg_ < 0 )
- {
- throw BadProperty( "Firing rate regularization coefficient c_reg ≥ 0 required." );
- }
-
- if ( f_target_ < 0 )
- {
- throw BadProperty( "Firing rate regularization target rate f_target ≥ 0 required." );
- }
-
- if ( tau_m_ <= 0 )
- {
- throw BadProperty( "Membrane time constant tau_m > 0 required." );
- }
-
- if ( t_ref_ < 0 )
- {
- throw BadProperty( "Refractory time t_ref ≥ 0 required." );
- }
-
- if ( V_th_ < V_min_ )
- {
- throw BadProperty( "Spike threshold voltage V_th ≥ minimal voltage V_min required." );
- }
-
- return delta_EL;
-}
-
-void
-eprop_iaf_adapt_bsshslm_2020::State_::get( DictionaryDatum& d, const Parameters_& p ) const
-{
- def< double >( d, names::adaptation, adapt_ );
- def< double >( d, names::V_m, v_m_ + p.E_L_ );
- def< double >( d, names::V_th_adapt, v_th_adapt_ + p.E_L_ );
- def< double >( d, names::surrogate_gradient, surrogate_gradient_ );
- def< double >( d, names::learning_signal, learning_signal_ );
-}
-
-void
-eprop_iaf_adapt_bsshslm_2020::State_::set( const DictionaryDatum& d, const Parameters_& p, double delta_EL, Node* node )
-{
- v_m_ -= updateValueParam< double >( d, names::V_m, v_m_, node ) ? p.E_L_ : delta_EL;
-
- // adaptive threshold can only be set indirectly via the adaptation variable
- if ( updateValueParam< double >( d, names::adaptation, adapt_, node ) )
- {
- // if E_L changed in this SetStatus call, p.V_th_ has been adjusted and no further action is needed
- v_th_adapt_ = p.V_th_ + p.adapt_beta_ * adapt_;
- }
- else
- {
- // adjust voltage to change in E_L
- v_th_adapt_ -= delta_EL;
- }
-}
-
-/* ----------------------------------------------------------------
- * Default and copy constructor for node
- * ---------------------------------------------------------------- */
-
-eprop_iaf_adapt_bsshslm_2020::eprop_iaf_adapt_bsshslm_2020()
- : EpropArchivingNodeRecurrent()
- , P_()
- , S_()
- , B_( *this )
-{
- recordablesMap_.create();
-}
-
-eprop_iaf_adapt_bsshslm_2020::eprop_iaf_adapt_bsshslm_2020( const eprop_iaf_adapt_bsshslm_2020& n )
- : EpropArchivingNodeRecurrent( n )
- , P_( n.P_ )
- , S_( n.S_ )
- , B_( n.B_, *this )
-{
-}
-
-/* ----------------------------------------------------------------
- * Node initialization functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_adapt_bsshslm_2020::init_buffers_()
-{
- B_.spikes_.clear(); // includes resize
- B_.currents_.clear(); // includes resize
- B_.logger_.reset(); // includes resize
-}
-
-void
-eprop_iaf_adapt_bsshslm_2020::pre_run_hook()
-{
- B_.logger_.init(); // ensures initialization in case multimeter connected after Simulate
-
- V_.RefractoryCounts_ = Time( Time::ms( P_.t_ref_ ) ).get_steps();
-
- // calculate the entries of the propagator matrix for the evolution of the state vector
-
- const double dt = Time::get_resolution().get_ms();
-
- V_.P_v_m_ = std::exp( -dt / P_.tau_m_ );
- V_.P_i_in_ = P_.tau_m_ / P_.C_m_ * ( 1.0 - V_.P_v_m_ );
- V_.P_z_in_ = P_.regular_spike_arrival_ ? 1.0 : 1.0 - V_.P_v_m_;
- V_.P_adapt_ = std::exp( -dt / P_.adapt_tau_ );
-}
-
-
-/* ----------------------------------------------------------------
- * Update function
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_adapt_bsshslm_2020::update( Time const& origin, const long from, const long to )
-{
- const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps();
- const bool with_reset = kernel().simulation_manager.get_eprop_reset_neurons_on_update();
- const long shift = get_shift();
-
- for ( long lag = from; lag < to; ++lag )
- {
- const long t = origin.get_steps() + lag;
- const long interval_step = ( t - shift ) % update_interval;
-
- if ( interval_step == 0 )
- {
- erase_used_firing_rate_reg_history();
- erase_used_eprop_history();
-
- if ( with_reset )
- {
- S_.v_m_ = 0.0;
- S_.adapt_ = 0.0;
- S_.r_ = 0;
- S_.z_ = 0.0;
- }
- }
-
- if ( S_.r_ > 0 )
- {
- --S_.r_;
- }
-
- S_.z_in_ = B_.spikes_.get_value( lag );
-
- S_.v_m_ = V_.P_i_in_ * S_.i_in_ + V_.P_z_in_ * S_.z_in_ + V_.P_v_m_ * S_.v_m_;
- S_.v_m_ -= P_.V_th_ * S_.z_;
- S_.v_m_ = std::max( S_.v_m_, P_.V_min_ );
-
- S_.adapt_ = V_.P_adapt_ * S_.adapt_ + S_.z_;
- S_.v_th_adapt_ = P_.V_th_ + P_.adapt_beta_ * S_.adapt_;
-
- S_.z_ = 0.0;
-
- S_.surrogate_gradient_ =
- ( this->*compute_surrogate_gradient_ )( S_.r_, S_.v_m_, S_.v_th_adapt_, P_.beta_, P_.gamma_ );
-
- if ( S_.v_m_ >= S_.v_th_adapt_ and S_.r_ == 0 )
- {
- count_spike();
-
- SpikeEvent se;
- kernel().event_delivery_manager.send( *this, se, lag );
-
- S_.z_ = 1.0;
- S_.r_ = V_.RefractoryCounts_;
- }
-
- append_new_eprop_history_entry( t );
- write_surrogate_gradient_to_history( t, S_.surrogate_gradient_ );
-
- if ( interval_step == update_interval - 1 )
- {
- write_firing_rate_reg_to_history( t, P_.f_target_, P_.c_reg_ );
- reset_spike_count();
- }
-
- S_.learning_signal_ = get_learning_signal_from_history( t );
-
- S_.i_in_ = B_.currents_.get_value( lag ) + P_.I_e_;
-
- B_.logger_.record_data( t );
- }
-}
-
-/* ----------------------------------------------------------------
- * Event handling functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_adapt_bsshslm_2020::handle( SpikeEvent& e )
-{
- assert( e.get_delay_steps() > 0 );
-
- B_.spikes_.add_value(
- e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() );
-}
-
-void
-eprop_iaf_adapt_bsshslm_2020::handle( CurrentEvent& e )
-{
- assert( e.get_delay_steps() > 0 );
-
- B_.currents_.add_value(
- e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() );
-}
-
-void
-eprop_iaf_adapt_bsshslm_2020::handle( LearningSignalConnectionEvent& e )
-{
- for ( auto it_event = e.begin(); it_event != e.end(); )
- {
- const long time_step = e.get_stamp().get_steps();
- const double weight = e.get_weight();
- const double error_signal = e.get_coeffvalue( it_event ); // get_coeffvalue advances iterator
- const double learning_signal = weight * error_signal;
-
- write_learning_signal_to_history( time_step, learning_signal );
- }
-}
-
-void
-eprop_iaf_adapt_bsshslm_2020::handle( DataLoggingRequest& e )
-{
- B_.logger_.handle( e );
-}
-
-double
-eprop_iaf_adapt_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis,
- const long t_previous_update,
- const long t_previous_trigger_spike,
- const double kappa,
- const bool average_gradient )
-{
- auto eprop_hist_it = get_eprop_history( t_previous_trigger_spike );
-
- double e = 0.0; // eligibility trace
- double e_bar = 0.0; // low-pass filtered eligibility trace
- double epsilon = 0.0; // adaptive component of eligibility vector
- double grad = 0.0; // gradient value to be calculated
- double L = 0.0; // learning signal
- double psi = 0.0; // surrogate gradient
- double sum_e = 0.0; // sum of eligibility traces
- double z = 0.0; // spiking variable
- double z_bar = 0.0; // low-pass filtered spiking variable
-
- for ( long presyn_isi : presyn_isis )
- {
- z = 1.0; // set spiking variable to 1 for each incoming spike
-
- for ( long t = 0; t < presyn_isi; ++t )
- {
- assert( eprop_hist_it != eprop_history_.end() );
-
- psi = eprop_hist_it->surrogate_gradient_;
- L = eprop_hist_it->learning_signal_;
-
- z_bar = V_.P_v_m_ * z_bar + V_.P_z_in_ * z;
- e = psi * ( z_bar - P_.adapt_beta_ * epsilon );
- epsilon = V_.P_adapt_ * epsilon + e;
- e_bar = kappa * e_bar + ( 1.0 - kappa ) * e;
- grad += L * e_bar;
- sum_e += e;
- z = 0.0; // set spiking variable to 0 between spikes
-
- ++eprop_hist_it;
- }
- }
- presyn_isis.clear();
-
- const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps();
- const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps();
- const auto firing_rate_reg = get_firing_rate_reg_history( t_previous_update + get_shift() + update_interval );
-
- grad += firing_rate_reg * sum_e;
-
- if ( average_gradient )
- {
- grad /= learning_window;
- }
-
- return grad;
-}
-
-} // namespace nest
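
For reference, the per-time-step recursions that the removed
``eprop_iaf_adapt_bsshslm_2020::compute_gradient()`` evaluates can be condensed into a
minimal stand-alone sketch. This is plain C++ without NEST dependencies; the function and
parameter names are illustrative only and mirror the symbols of the user documentation
(``alpha``, ``zeta``, ``rho``, ``kappa``, ``adapt_beta``), not NEST API names. Dropping the
``adapt_beta * epsilon`` term recovers the recursion of the non-adaptive
``eprop_iaf_bsshslm_2020`` model.

.. code-block:: cpp

    #include <cstddef>
    #include <vector>

    // Minimal sketch (not NEST API): adaptive e-prop gradient recursions for one synapse.
    // alpha = exp(-dt/tau_m), zeta = 1 or 1 - alpha, rho = exp(-dt/adapt_tau),
    // kappa = exp(-dt/tau_m_out); psi and L hold the recorded surrogate gradients and
    // learning signals per time step, z the presynaptic spike train (0/1).
    double
    eprop_adapt_gradient_sketch( const std::vector< double >& z,
      const std::vector< double >& psi,
      const std::vector< double >& L,
      const double alpha,
      const double zeta,
      const double rho,
      const double kappa,
      const double adapt_beta,
      const double firing_rate_reg )
    {
      double z_bar = 0.0;   // low-pass filtered presynaptic spike train
      double epsilon = 0.0; // adaptive component of the eligibility vector
      double e_bar = 0.0;   // low-pass filtered eligibility trace
      double sum_e = 0.0;   // sum of eligibility traces (for regularization)
      double grad = 0.0;    // gradient dE/dW_ji

      for ( std::size_t t = 0; t < z.size(); ++t )
      {
        z_bar = alpha * z_bar + zeta * z[ t ];
        const double e = psi[ t ] * ( z_bar - adapt_beta * epsilon ); // eligibility trace
        epsilon = rho * epsilon + e;
        e_bar = kappa * e_bar + ( 1.0 - kappa ) * e;
        grad += L[ t ] * e_bar;
        sum_e += e;
      }

      // the removed code additionally divides by the learning window if average_gradient is set
      return grad + firing_rate_reg * sum_e; // add firing-rate regularization contribution
    }
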
diff --git a/models/eprop_iaf_adapt_bsshslm_2020.h b/models/eprop_iaf_adapt_bsshslm_2020.h
deleted file mode 100644
index 29bdbcd65e..0000000000
--- a/models/eprop_iaf_adapt_bsshslm_2020.h
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * eprop_iaf_adapt_bsshslm_2020.h
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef EPROP_IAF_ADAPT_BSSHSLM_2020_H
-#define EPROP_IAF_ADAPT_BSSHSLM_2020_H
-
-// nestkernel
-#include "connection.h"
-#include "eprop_archiving_node_impl.h"
-#include "eprop_archiving_node_recurrent.h"
-#include "event.h"
-#include "nest_types.h"
-#include "ring_buffer.h"
-#include "universal_data_logger.h"
-
-namespace nest
-{
-
-/* BeginUserDocs: neuron, e-prop plasticity, current-based, integrate-and-fire, adaptation
-
-Short description
-+++++++++++++++++
-
-Current-based leaky integrate-and-fire neuron model with delta-shaped or exponentially filtered
-postsynaptic currents and threshold adaptation for e-prop plasticity
-
-Description
-+++++++++++
-
-``eprop_iaf_adapt_bsshslm_2020`` is an implementation of a leaky integrate-and-fire
-neuron model with delta-shaped postsynaptic currents and threshold adaptation
-used for eligibility propagation (e-prop) plasticity.
-
-E-prop plasticity was originally introduced and implemented in TensorFlow in [1]_.
-
-The suffix ``_bsshslm_2020`` follows the NEST convention to indicate in the
-model name the paper that introduced it by the first letter of the authors' last
-names and the publication year.
-
- .. note::
- The neuron dynamics of the ``eprop_iaf_adapt_bsshslm_2020`` model (excluding
- e-prop plasticity and the threshold adaptation) are similar to the neuron
- dynamics of the ``iaf_psc_delta`` model, with minor differences, such as the
- propagator of the post-synaptic current and the voltage reset upon a spike.
-
-The membrane voltage time course :math:`v_j^t` of the neuron :math:`j` is given by:
-
-.. math::
- v_j^t &= \alpha v_j^{t-1} + \zeta \sum_{i \neq j} W_{ji}^\text{rec} z_i^{t-1}
- + \zeta \sum_i W_{ji}^\text{in} x_i^t - z_j^{t-1} v_\text{th} \,, \\
- \alpha &= e^{ -\frac{ \Delta t }{ \tau_\text{m} } } \,, \\
- \zeta &=
- \begin{cases}
- 1 \\
- 1 - \alpha
- \end{cases} \,, \\
-
-where :math:`W_{ji}^\text{rec}` and :math:`W_{ji}^\text{in}` are the recurrent and
-input synaptic weight matrices, and :math:`z_i^{t-1}` is the recurrent presynaptic
-state variable, while :math:`x_i^t` represents the input at time :math:`t`.
-
-Descriptions of further parameters and variables can be found in the table below.
-
-The threshold adaptation is given by:
-
-.. math::
- A_j^t &= v_\text{th} + \beta a_j^t \,, \\
- a_j^t &= \rho a_j^{t-1} + z_j^{t-1} \,, \\
- \rho &= e^{-\frac{ \Delta t }{ \tau_\text{a} }} \,. \\
-
-The spike state variable is expressed by a Heaviside function:
-
-.. math::
- z_j^t = H \left( v_j^t - A_j^t \right) \,. \\
-
-If the membrane voltage crosses the adaptive threshold voltage :math:`A_j^t`, a spike is
-emitted and the membrane voltage is reduced by :math:`v_\text{th}` in the next
-time step. After the time step of the spike emission, the neuron is not
-able to spike for an absolute refractory period :math:`t_\text{ref}`.
-
-An additional state variable and the corresponding differential equation
-represent a piecewise constant external current.
-
-See the documentation on the :doc:`iaf_psc_delta<../models/iaf_psc_delta/>` neuron model
-for more information on the integration of the subthreshold dynamics.
-
-The change of the synaptic weight is calculated from the gradient :math:`g` of
-the loss :math:`E` with respect to the synaptic weight :math:`W_{ji}`:
-:math:`\frac{ \text{d}E }{ \text{d} W_{ji} }`
-which depends on the presynaptic
-spikes :math:`z_i^{t-1}`, the surrogate gradient or pseudo-derivative
-of the spike state variable with respect to the postsynaptic membrane
-voltage :math:`\psi_j^t` (the product of which forms the eligibility
-trace :math:`e_{ji}^t`), and the learning signal :math:`L_j^t` emitted
-by the readout neurons.
-
-.. math::
- \frac{ \text{d} E }{ \text{d} W_{ji} } &= \sum_t L_j^t \bar{e}_{ji}^t \,, \\
- e_{ji}^t &= \psi_j^t \left( \bar{z}_i^{t-1} - \beta \epsilon_{ji,a}^{t-1} \right) \,, \\
- \epsilon^{t-1}_{ji,\text{a}} &= \psi_j^{t-1} \bar{z}_i^{t-2} + \left( \rho - \psi_j^{t-1} \beta \right)
- \epsilon^{t-2}_{ji,a} \,. \\
-
-.. include:: ../models/eprop_iaf.rst
- :start-after: .. start_surrogate-gradient-functions
- :end-before: .. end_surrogate-gradient-functions
-
-The eligibility trace and the presynaptic spike trains are low-pass filtered
-with the following exponential kernels:
-
-.. math::
- \bar{e}_{ji}^t &= \mathcal{F}_\kappa \left( e_{ji}^t \right) \,, \\
- \kappa &= e^{ -\frac{\Delta t }{ \tau_\text{m,out} }} \,, \\
- \bar{z}_i^t &= \mathcal{F}_\alpha(z_i^t) \,, \\
- \mathcal{F}_\alpha \left( z_i^t \right) &= \alpha \mathcal{F}_\alpha \left( z_i^{t-1} \right) + z_i^t \,, \\
- \mathcal{F}_\alpha \left( z_i^0 \right) &= z_i^0 \,, \\
-
-where :math:`\tau_\text{m,out}` is the membrane time constant of the readout neuron.
-
-Furthermore, a firing rate regularization mechanism keeps the average firing
-rate :math:`f^\text{av}_j` of the postsynaptic neuron close to a target firing rate
-:math:`f^\text{target}`. The gradient :math:`g_\text{reg}` of the regularization loss :math:`E_\text{reg}`
-with respect to the synaptic weight :math:`W_{ji}` is given by:
-
-.. math::
- \frac{ \text{d} E_\text{reg} }{ \text{d} W_{ji} }
- = c_\text{reg} \sum_t \frac{ 1 }{ T n_\text{trial} }
- \left( f^\text{target} - f^\text{av}_j \right) e_{ji}^t \,, \\
-
-where :math:`c_\text{reg}` is a constant scaling factor and the average
-is taken over the time that passed since the previous update, that is, the number of
-trials :math:`n_\text{trial}` times the duration of an update interval :math:`T`.
-
-The overall gradient is given by the addition of the two gradients.
-
-For more information on e-prop plasticity, see the documentation on the other e-prop models:
-
- * :doc:`eprop_iaf_bsshslm_2020<../models/eprop_iaf_bsshslm_2020/>`
- * :doc:`eprop_readout_bsshslm_2020<../models/eprop_readout_bsshslm_2020/>`
- * :doc:`eprop_synapse_bsshslm_2020<../models/eprop_synapse_bsshslm_2020/>`
- * :doc:`eprop_learning_signal_connection_bsshslm_2020<../models/eprop_learning_signal_connection_bsshslm_2020/>`
-
-Details on the event-based NEST implementation of e-prop can be found in [2]_.
-
-Parameters
-++++++++++
-
-The following parameters can be set in the status dictionary.
-
-=========================== ======= ======================= ================ ===================================
-**Neuron parameters**
-----------------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-=========================== ======= ======================= ================ ===================================
-``adapt_beta`` :math:`\beta` 1.0 Prefactor of the threshold
- adaptation
-``adapt_tau`` ms :math:`\tau_\text{a}` 10.0 Time constant of the threshold
- adaptation
-``C_m`` pF :math:`C_\text{m}` 250.0 Capacitance of the membrane
-``E_L`` mV :math:`E_\text{L}` -70.0 Leak / resting membrane potential
-``I_e`` pA :math:`I_\text{e}` 0.0 Constant external input current
-``regular_spike_arrival`` Boolean ``True`` If ``True``, the input spikes
- arrive at the end of the time step,
- if ``False`` at the beginning
- (determines PSC scale)
-``t_ref`` ms :math:`t_\text{ref}` 2.0 Duration of the refractory period
-``tau_m`` ms :math:`\tau_\text{m}` 10.0 Time constant of the membrane
-``V_min`` mV :math:`v_\text{min}` negative maximum Absolute lower bound of the
- value membrane voltage
- representable by
- a ``double``
- type in C++
-``V_th`` mV :math:`v_\text{th}` -55.0 Spike threshold voltage
-=========================== ======= ======================= ================ ===================================
-
-=============================== ======= ======================= ================== =============================
-**E-prop parameters**
-----------------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-=============================== ======= ======================= ================== =============================
-``c_reg`` :math:`c_\text{reg}` 0.0 Coefficient of firing rate
- regularization
-``f_target`` Hz :math:`f^\text{target}` 10.0 Target firing rate of rate
- regularization
-``beta`` :math:`\beta` 1.0 Width scaling of surrogate
- gradient / pseudo-derivative
- of membrane voltage
-``gamma`` :math:`\gamma` 0.3 Height scaling of surrogate
- gradient / pseudo-derivative
- of membrane voltage
-``surrogate_gradient_function`` :math:`\psi` "piecewise_linear" Surrogate gradient /
- pseudo-derivative function
- ["piecewise_linear",
- "exponential",
- "fast_sigmoid_derivative",
- "arctan"]
-=============================== ======= ======================= ================== =============================
-
-Recordables
-+++++++++++
-
-The following state variables evolve during simulation and can be recorded.
-
-================== ==== =============== ============= ========================
-**Neuron state variables and recordables**
-------------------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-================== ==== =============== ============= ========================
-``adaptation`` :math:`a_j` 0.0 Adaptation variable
-``V_m`` mV :math:`v_j` -70.0 Membrane voltage
-``V_th_adapt`` mV :math:`A_j` -55.0 Adapting spike threshold
-================== ==== =============== ============= ========================
-
-====================== ==== =============== ============= =========================================
-**E-prop state variables and recordables**
----------------------------------------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-====================== ==== =============== ============= =========================================
-``learning_signal`` pA :math:`L_j` 0.0 Learning signal
-``surrogate_gradient`` :math:`\psi_j` 0.0 Surrogate gradient / pseudo-derivative of
- membrane voltage
-====================== ==== =============== ============= =========================================
-
-Usage
-+++++
-
-This model can only be used in combination with the other e-prop models,
-and the network architecture requires specific wiring, input, and output.
-Its usage is demonstrated in several
-:doc:`supervised regression and classification tasks <../auto_examples/eprop_plasticity/index>`,
-reproducing, among others, the original proof-of-concept tasks in [1]_.
-
-References
-++++++++++
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R,
- Maass W (2020). A solution to the learning dilemma for recurrent
- networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-.. include:: ../models/eprop_iaf.rst
- :start-after: .. start_surrogate-gradient-references
- :end-before: .. end_surrogate-gradient-references
-
-Sends
-+++++
-
-SpikeEvent
-
-Receives
-++++++++
-
-SpikeEvent, CurrentEvent, LearningSignalConnectionEvent, DataLoggingRequest
-
-See also
-++++++++
-
-Examples using this model
-+++++++++++++++++++++++++
-
-.. listexamples:: eprop_iaf_adapt_bsshslm_2020
-
-EndUserDocs */
-
-void register_eprop_iaf_adapt_bsshslm_2020( const std::string& name );
-
-/**
- * @brief Class implementing an adaptive LIF neuron model for e-prop plasticity.
- *
- * Class implementing a current-based leaky integrate-and-fire neuron model with delta-shaped postsynaptic currents and
- * threshold adaptation for e-prop plasticity according to Bellec et al. (2020).
- */
-class eprop_iaf_adapt_bsshslm_2020 : public EpropArchivingNodeRecurrent< true >
-{
-
-public:
- //! Default constructor.
- eprop_iaf_adapt_bsshslm_2020();
-
- //! Copy constructor.
- eprop_iaf_adapt_bsshslm_2020( const eprop_iaf_adapt_bsshslm_2020& );
-
- using Node::handle;
- using Node::handles_test_event;
-
- size_t send_test_event( Node&, size_t, synindex, bool ) override;
-
- void handle( SpikeEvent& ) override;
- void handle( CurrentEvent& ) override;
- void handle( LearningSignalConnectionEvent& ) override;
- void handle( DataLoggingRequest& ) override;
-
- size_t handles_test_event( SpikeEvent&, size_t ) override;
- size_t handles_test_event( CurrentEvent&, size_t ) override;
- size_t handles_test_event( LearningSignalConnectionEvent&, size_t ) override;
- size_t handles_test_event( DataLoggingRequest&, size_t ) override;
-
- void get_status( DictionaryDatum& ) const override;
- void set_status( const DictionaryDatum& ) override;
-
-private:
- void init_buffers_() override;
- void pre_run_hook() override;
-
- void update( Time const&, const long, const long ) override;
-
- double compute_gradient( std::vector< long >&, const long, const long, const double, const bool ) override;
-
- long get_shift() const override;
- bool is_eprop_recurrent_node() const override;
-
- //! Map for storing a static set of recordables.
- friend class RecordablesMap< eprop_iaf_adapt_bsshslm_2020 >;
-
- //! Logger for universal data supporting the data logging request / reply mechanism. Populated with a recordables map.
- friend class UniversalDataLogger< eprop_iaf_adapt_bsshslm_2020 >;
-
- //! Structure of parameters.
- struct Parameters_
- {
- //! Prefactor of the threshold adaptation.
- double adapt_beta_;
-
- //! Time constant of the threshold adaptation (ms).
- double adapt_tau_;
-
- //! Capacitance of the membrane (pF).
- double C_m_;
-
- //! Coefficient of firing rate regularization.
- double c_reg_;
-
- //! Leak / resting membrane potential (mV).
- double E_L_;
-
- //! Target firing rate of rate regularization (spikes/s).
- double f_target_;
-
- //! Width scaling of surrogate gradient / pseudo-derivative of membrane voltage.
- double beta_;
-
- //! Height scaling of surrogate gradient / pseudo-derivative of membrane voltage.
- double gamma_;
-
- //! Constant external input current (pA).
- double I_e_;
-
- //! If True, the input spikes arrive at the end of the time step, if False at the beginning (determines PSC scale).
- bool regular_spike_arrival_;
-
- //! Surrogate gradient / pseudo-derivative function of the membrane voltage ["piecewise_linear", "exponential",
- //! "fast_sigmoid_derivative", "arctan"]
- std::string surrogate_gradient_function_;
-
- //! Duration of the refractory period (ms).
- double t_ref_;
-
- //! Time constant of the membrane (ms).
- double tau_m_;
-
- //! Absolute lower bound of the membrane voltage relative to the leak membrane potential (mV).
- double V_min_;
-
- //! Spike threshold voltage relative to the leak membrane potential (mV).
- double V_th_;
-
- //! Default constructor.
- Parameters_();
-
- //! Get the parameters and their values.
- void get( DictionaryDatum& ) const;
-
- //! Set the parameters and throw errors in case of invalid values.
- double set( const DictionaryDatum&, Node* );
- };
-
- //! Structure of state variables.
- struct State_
- {
- //! Adaptation variable.
- double adapt_;
-
- //! Adapting spike threshold voltage.
- double v_th_adapt_;
-
- //! Learning signal. Sum of weighted error signals coming from the readout neurons.
- double learning_signal_;
-
- //! Number of remaining refractory steps.
- int r_;
-
- //! Surrogate gradient / pseudo-derivative of the membrane voltage.
- double surrogate_gradient_;
-
- //! Input current (pA).
- double i_in_;
-
- //! Membrane voltage relative to the leak membrane potential (mV).
- double v_m_;
-
- //! Binary spike state variable - 1.0 if the neuron has spiked in the previous time step and 0.0 otherwise.
- double z_;
-
- //! Binary input spike state variable - 1.0 if the neuron has spiked in the previous time step and 0.0 otherwise.
- double z_in_;
-
- //! Default constructor.
- State_();
-
- //! Get the state variables and their values.
- void get( DictionaryDatum&, const Parameters_& ) const;
-
- //! Set the state variables.
- void set( const DictionaryDatum&, const Parameters_&, double, Node* );
- };
-
- //! Structure of buffers.
- struct Buffers_
- {
- //! Default constructor.
- Buffers_( eprop_iaf_adapt_bsshslm_2020& );
-
- //! Copy constructor.
- Buffers_( const Buffers_&, eprop_iaf_adapt_bsshslm_2020& );
-
- //! Buffer for incoming spikes.
- RingBuffer spikes_;
-
- //! Buffer for incoming currents.
- RingBuffer currents_;
-
- //! Logger for universal data.
- UniversalDataLogger< eprop_iaf_adapt_bsshslm_2020 > logger_;
- };
-
- //! Structure of internal variables.
- struct Variables_
- {
- //! Propagator matrix entry for evolving the membrane voltage (mathematical symbol "alpha" in user documentation).
- double P_v_m_;
-
- //! Propagator matrix entry for evolving the incoming spike state variables (mathematical symbol "zeta" in user
- //! documentation).
- double P_z_in_;
-
- //! Propagator matrix entry for evolving the incoming currents.
- double P_i_in_;
-
- //! Propagator matrix entry for evolving the adaptation (mathematical symbol "rho" in user documentation).
- double P_adapt_;
-
- //! Total refractory steps.
- int RefractoryCounts_;
- };
-
- //! Get the current value of the membrane voltage.
- double
- get_v_m_() const
- {
- return S_.v_m_ + P_.E_L_;
- }
-
- //! Get the current value of the surrogate gradient.
- double
- get_surrogate_gradient_() const
- {
- return S_.surrogate_gradient_;
- }
-
- //! Get the current value of the learning signal.
- double
- get_learning_signal_() const
- {
- return S_.learning_signal_;
- }
-
- //! Get the current value of the adapting threshold.
- double
- get_v_th_adapt_() const
- {
- return S_.v_th_adapt_ + P_.E_L_;
- }
-
- //! Get the current value of the adaptation.
- double
- get_adaptation_() const
- {
- return S_.adapt_;
- }
-
- // the order in which the structure instances are defined is important for speed
-
- //! Structure of parameters.
- Parameters_ P_;
-
- //! Structure of state variables.
- State_ S_;
-
- //! Structure of internal variables.
- Variables_ V_;
-
- //! Structure of buffers.
- Buffers_ B_;
-
- //! Map storing a static set of recordables.
- static RecordablesMap< eprop_iaf_adapt_bsshslm_2020 > recordablesMap_;
-};
-
-inline long
-eprop_iaf_adapt_bsshslm_2020::get_shift() const
-{
- return offset_gen_ + delay_in_rec_;
-}
-
-inline bool
-eprop_iaf_adapt_bsshslm_2020::is_eprop_recurrent_node() const
-{
- return true;
-}
-
-inline size_t
-eprop_iaf_adapt_bsshslm_2020::send_test_event( Node& target, size_t receptor_type, synindex, bool )
-{
- SpikeEvent e;
- e.set_sender( *this );
- return target.handles_test_event( e, receptor_type );
-}
-
-inline size_t
-eprop_iaf_adapt_bsshslm_2020::handles_test_event( SpikeEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_iaf_adapt_bsshslm_2020::handles_test_event( CurrentEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_iaf_adapt_bsshslm_2020::handles_test_event( LearningSignalConnectionEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_iaf_adapt_bsshslm_2020::handles_test_event( DataLoggingRequest& dlr, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return B_.logger_.connect_logging_device( dlr, recordablesMap_ );
-}
-
-inline void
-eprop_iaf_adapt_bsshslm_2020::get_status( DictionaryDatum& d ) const
-{
- P_.get( d );
- S_.get( d, P_ );
- ( *d )[ names::recordables ] = recordablesMap_.get_list();
-}
-
-inline void
-eprop_iaf_adapt_bsshslm_2020::set_status( const DictionaryDatum& d )
-{
- // temporary copies in case of errors
- Parameters_ ptmp = P_;
- State_ stmp = S_;
-
- // make sure that ptmp and stmp consistent - throw BadProperty if not
- const double delta_EL = ptmp.set( d, this );
- stmp.set( d, ptmp, delta_EL, this );
-
- P_ = ptmp;
- S_ = stmp;
-}
-
-} // namespace nest
-
-#endif // EPROP_IAF_ADAPT_BSSHSLM_2020_H
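
The forward dynamics documented in the removed header above (membrane voltage, threshold
adaptation, spike emission, refractoriness) can likewise be summarized in a short
stand-alone sketch of a single update step. The struct and parameter names are illustrative
and do not correspond to NEST classes; the surrogate-gradient evaluation is omitted, and
voltages are expressed relative to the leak potential, as in the removed model.

.. code-block:: cpp

    #include <algorithm>

    // Minimal sketch (not NEST API): one step of the adaptive LIF dynamics.
    // alpha = exp(-dt/tau_m), zeta = 1 or 1 - alpha, P_i_in = tau_m / C_m * (1 - alpha),
    // rho = exp(-dt/adapt_tau); all voltages are relative to the leak potential E_L.
    struct AdaptiveLifState
    {
      double v_m = 0.0;   // membrane voltage v_j
      double adapt = 0.0; // adaptation variable a_j
      double z = 0.0;     // spike state variable of the previous step
      int r = 0;          // remaining refractory steps
    };

    void
    adaptive_lif_step( AdaptiveLifState& s,
      const double weighted_spikes, // summed weighted presynaptic spikes of this step
      const double current,         // total input current of this step
      const double alpha,
      const double zeta,
      const double P_i_in,
      const double v_th,
      const double adapt_beta,
      const double rho,
      const double v_min,
      const int refractory_steps )
    {
      if ( s.r > 0 )
      {
        --s.r;
      }

      // membrane voltage update, including the reset by v_th after a spike
      s.v_m = P_i_in * current + zeta * weighted_spikes + alpha * s.v_m - v_th * s.z;
      s.v_m = std::max( s.v_m, v_min );

      // threshold adaptation: a_j^t = rho a_j^{t-1} + z_j^{t-1}, A_j^t = v_th + beta a_j^t
      s.adapt = rho * s.adapt + s.z;
      const double v_th_adapt = v_th + adapt_beta * s.adapt;

      // spike condition outside the refractory period
      s.z = 0.0;
      if ( s.v_m >= v_th_adapt and s.r == 0 )
      {
        s.z = 1.0;
        s.r = refractory_steps;
      }
    }
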
diff --git a/models/eprop_iaf_bsshslm_2020.cpp b/models/eprop_iaf_bsshslm_2020.cpp
deleted file mode 100644
index a4c52557ff..0000000000
--- a/models/eprop_iaf_bsshslm_2020.cpp
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * eprop_iaf_bsshslm_2020.cpp
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-// nest models
-#include "eprop_iaf_bsshslm_2020.h"
-
-// C++
-#include
-
-// libnestutil
-#include "dict_util.h"
-#include "numerics.h"
-
-// nestkernel
-#include "eprop_archiving_node_recurrent_impl.h"
-#include "exceptions.h"
-#include "kernel_manager.h"
-#include "nest_impl.h"
-#include "universal_data_logger_impl.h"
-
-// sli
-#include "dictutils.h"
-
-namespace nest
-{
-
-void
-register_eprop_iaf_bsshslm_2020( const std::string& name )
-{
- register_node_model< eprop_iaf_bsshslm_2020 >( name );
-}
-
-/* ----------------------------------------------------------------
- * Recordables map
- * ---------------------------------------------------------------- */
-
-RecordablesMap< eprop_iaf_bsshslm_2020 > eprop_iaf_bsshslm_2020::recordablesMap_;
-
-template <>
-void
-RecordablesMap< eprop_iaf_bsshslm_2020 >::create()
-{
- insert_( names::eprop_history_duration, &eprop_iaf_bsshslm_2020::get_eprop_history_duration );
- insert_( names::learning_signal, &eprop_iaf_bsshslm_2020::get_learning_signal_ );
- insert_( names::surrogate_gradient, &eprop_iaf_bsshslm_2020::get_surrogate_gradient_ );
- insert_( names::V_m, &eprop_iaf_bsshslm_2020::get_v_m_ );
-}
-
-/* ----------------------------------------------------------------
- * Default constructors for parameters, state, and buffers
- * ---------------------------------------------------------------- */
-
-eprop_iaf_bsshslm_2020::Parameters_::Parameters_()
- : C_m_( 250.0 )
- , c_reg_( 0.0 )
- , E_L_( -70.0 )
- , f_target_( 0.01 )
- , beta_( 1.0 )
- , gamma_( 0.3 )
- , I_e_( 0.0 )
- , regular_spike_arrival_( true )
- , surrogate_gradient_function_( "piecewise_linear" )
- , t_ref_( 2.0 )
- , tau_m_( 10.0 )
- , V_min_( -std::numeric_limits< double >::max() )
- , V_th_( -55.0 - E_L_ )
-{
-}
-
-eprop_iaf_bsshslm_2020::State_::State_()
- : learning_signal_( 0.0 )
- , r_( 0 )
- , surrogate_gradient_( 0.0 )
- , i_in_( 0.0 )
- , v_m_( 0.0 )
- , z_( 0.0 )
- , z_in_( 0.0 )
-{
-}
-
-eprop_iaf_bsshslm_2020::Buffers_::Buffers_( eprop_iaf_bsshslm_2020& n )
- : logger_( n )
-{
-}
-
-eprop_iaf_bsshslm_2020::Buffers_::Buffers_( const Buffers_&, eprop_iaf_bsshslm_2020& n )
- : logger_( n )
-{
-}
-
-/* ----------------------------------------------------------------
- * Getter and setter functions for parameters and state
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_bsshslm_2020::Parameters_::get( DictionaryDatum& d ) const
-{
- def< double >( d, names::C_m, C_m_ );
- def< double >( d, names::c_reg, c_reg_ );
- def< double >( d, names::E_L, E_L_ );
- def< double >( d, names::f_target, f_target_ );
- def< double >( d, names::beta, beta_ );
- def< double >( d, names::gamma, gamma_ );
- def< double >( d, names::I_e, I_e_ );
- def< bool >( d, names::regular_spike_arrival, regular_spike_arrival_ );
- def< std::string >( d, names::surrogate_gradient_function, surrogate_gradient_function_ );
- def< double >( d, names::t_ref, t_ref_ );
- def< double >( d, names::tau_m, tau_m_ );
- def< double >( d, names::V_min, V_min_ + E_L_ );
- def< double >( d, names::V_th, V_th_ + E_L_ );
-}
-
-double
-eprop_iaf_bsshslm_2020::Parameters_::set( const DictionaryDatum& d, Node* node )
-{
- // if leak potential is changed, adjust all variables defined relative to it
- const double ELold = E_L_;
- updateValueParam< double >( d, names::E_L, E_L_, node );
- const double delta_EL = E_L_ - ELold;
-
- V_th_ -= updateValueParam< double >( d, names::V_th, V_th_, node ) ? E_L_ : delta_EL;
- V_min_ -= updateValueParam< double >( d, names::V_min, V_min_, node ) ? E_L_ : delta_EL;
-
- updateValueParam< double >( d, names::C_m, C_m_, node );
- updateValueParam< double >( d, names::c_reg, c_reg_, node );
-
- if ( updateValueParam< double >( d, names::f_target, f_target_, node ) )
- {
- f_target_ /= 1000.0; // convert from spikes/s to spikes/ms
- }
-
- updateValueParam< double >( d, names::beta, beta_, node );
- updateValueParam< double >( d, names::gamma, gamma_, node );
- updateValueParam< double >( d, names::I_e, I_e_, node );
- updateValueParam< bool >( d, names::regular_spike_arrival, regular_spike_arrival_, node );
-
- if ( updateValueParam< std::string >( d, names::surrogate_gradient_function, surrogate_gradient_function_, node ) )
- {
- eprop_iaf_bsshslm_2020* nrn = dynamic_cast< eprop_iaf_bsshslm_2020* >( node );
- assert( nrn );
- nrn->compute_surrogate_gradient_ = nrn->find_surrogate_gradient( surrogate_gradient_function_ );
- }
-
- updateValueParam< double >( d, names::t_ref, t_ref_, node );
- updateValueParam< double >( d, names::tau_m, tau_m_, node );
-
- if ( C_m_ <= 0 )
- {
- throw BadProperty( "Membrane capacitance C_m > 0 required." );
- }
-
- if ( c_reg_ < 0 )
- {
- throw BadProperty( "Firing rate regularization coefficient c_reg ≥ 0 required." );
- }
-
- if ( f_target_ < 0 )
- {
- throw BadProperty( "Firing rate regularization target rate f_target ≥ 0 required." );
- }
-
- if ( tau_m_ <= 0 )
- {
- throw BadProperty( "Membrane time constant tau_m > 0 required." );
- }
-
- if ( t_ref_ < 0 )
- {
- throw BadProperty( "Refractory time t_ref ≥ 0 required." );
- }
-
- if ( V_th_ < V_min_ )
- {
- throw BadProperty( "Spike threshold voltage V_th ≥ minimal voltage V_min required." );
- }
-
- return delta_EL;
-}
-
-void
-eprop_iaf_bsshslm_2020::State_::get( DictionaryDatum& d, const Parameters_& p ) const
-{
- def< double >( d, names::V_m, v_m_ + p.E_L_ );
- def< double >( d, names::surrogate_gradient, surrogate_gradient_ );
- def< double >( d, names::learning_signal, learning_signal_ );
-}
-
-void
-eprop_iaf_bsshslm_2020::State_::set( const DictionaryDatum& d, const Parameters_& p, double delta_EL, Node* node )
-{
- v_m_ -= updateValueParam< double >( d, names::V_m, v_m_, node ) ? p.E_L_ : delta_EL;
-}
-
-/* ----------------------------------------------------------------
- * Default and copy constructor for node
- * ---------------------------------------------------------------- */
-
-eprop_iaf_bsshslm_2020::eprop_iaf_bsshslm_2020()
- : EpropArchivingNodeRecurrent()
- , P_()
- , S_()
- , B_( *this )
-{
- recordablesMap_.create();
-}
-
-eprop_iaf_bsshslm_2020::eprop_iaf_bsshslm_2020( const eprop_iaf_bsshslm_2020& n )
- : EpropArchivingNodeRecurrent( n )
- , P_( n.P_ )
- , S_( n.S_ )
- , B_( n.B_, *this )
-{
-}
-
-/* ----------------------------------------------------------------
- * Node initialization functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_bsshslm_2020::init_buffers_()
-{
- B_.spikes_.clear(); // includes resize
- B_.currents_.clear(); // includes resize
- B_.logger_.reset(); // includes resize
-}
-
-void
-eprop_iaf_bsshslm_2020::pre_run_hook()
-{
- B_.logger_.init(); // ensures initialization in case multimeter connected after Simulate
-
- V_.RefractoryCounts_ = Time( Time::ms( P_.t_ref_ ) ).get_steps();
-
- // calculate the entries of the propagator matrix for the evolution of the state vector
-
- const double dt = Time::get_resolution().get_ms();
-
- V_.P_v_m_ = std::exp( -dt / P_.tau_m_ );
- V_.P_i_in_ = P_.tau_m_ / P_.C_m_ * ( 1.0 - V_.P_v_m_ );
- V_.P_z_in_ = P_.regular_spike_arrival_ ? 1.0 : 1.0 - V_.P_v_m_;
-}
-
-
-/* ----------------------------------------------------------------
- * Update function
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_bsshslm_2020::update( Time const& origin, const long from, const long to )
-{
- const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps();
- const bool with_reset = kernel().simulation_manager.get_eprop_reset_neurons_on_update();
- const long shift = get_shift();
-
- for ( long lag = from; lag < to; ++lag )
- {
- const long t = origin.get_steps() + lag;
- const long interval_step = ( t - shift ) % update_interval;
-
- if ( interval_step == 0 )
- {
- erase_used_firing_rate_reg_history();
- erase_used_eprop_history();
-
- if ( with_reset )
- {
- S_.v_m_ = 0.0;
- S_.r_ = 0;
- S_.z_ = 0.0;
- }
- }
-
- if ( S_.r_ > 0 )
- {
- --S_.r_;
- }
-
- S_.z_in_ = B_.spikes_.get_value( lag );
-
- S_.v_m_ = V_.P_i_in_ * S_.i_in_ + V_.P_z_in_ * S_.z_in_ + V_.P_v_m_ * S_.v_m_;
- S_.v_m_ -= P_.V_th_ * S_.z_;
- S_.v_m_ = std::max( S_.v_m_, P_.V_min_ );
-
- S_.z_ = 0.0;
-
- S_.surrogate_gradient_ = ( this->*compute_surrogate_gradient_ )( S_.r_, S_.v_m_, P_.V_th_, P_.beta_, P_.gamma_ );
-
- if ( S_.v_m_ >= P_.V_th_ and S_.r_ == 0 )
- {
- count_spike();
-
- SpikeEvent se;
- kernel().event_delivery_manager.send( *this, se, lag );
-
- S_.z_ = 1.0;
- S_.r_ = V_.RefractoryCounts_;
- }
-
- append_new_eprop_history_entry( t );
- write_surrogate_gradient_to_history( t, S_.surrogate_gradient_ );
-
- if ( interval_step == update_interval - 1 )
- {
- write_firing_rate_reg_to_history( t, P_.f_target_, P_.c_reg_ );
- reset_spike_count();
- }
-
- S_.learning_signal_ = get_learning_signal_from_history( t );
-
- S_.i_in_ = B_.currents_.get_value( lag ) + P_.I_e_;
-
- B_.logger_.record_data( t );
- }
-}
-
-/* ----------------------------------------------------------------
- * Event handling functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_iaf_bsshslm_2020::handle( SpikeEvent& e )
-{
- assert( e.get_delay_steps() > 0 );
-
- B_.spikes_.add_value(
- e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() );
-}
-
-void
-eprop_iaf_bsshslm_2020::handle( CurrentEvent& e )
-{
- assert( e.get_delay_steps() > 0 );
-
- B_.currents_.add_value(
- e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() );
-}
-
-void
-eprop_iaf_bsshslm_2020::handle( LearningSignalConnectionEvent& e )
-{
- for ( auto it_event = e.begin(); it_event != e.end(); )
- {
- const long time_step = e.get_stamp().get_steps();
- const double weight = e.get_weight();
- const double error_signal = e.get_coeffvalue( it_event ); // get_coeffvalue advances iterator
- const double learning_signal = weight * error_signal;
-
- write_learning_signal_to_history( time_step, learning_signal );
- }
-}
-
-void
-eprop_iaf_bsshslm_2020::handle( DataLoggingRequest& e )
-{
- B_.logger_.handle( e );
-}
-
-double
-eprop_iaf_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis,
- const long t_previous_update,
- const long t_previous_trigger_spike,
- const double kappa,
- const bool average_gradient )
-{
- auto eprop_hist_it = get_eprop_history( t_previous_trigger_spike );
-
- double e = 0.0; // eligibility trace
- double e_bar = 0.0; // low-pass filtered eligibility trace
- double grad = 0.0; // gradient value to be calculated
- double L = 0.0; // learning signal
- double psi = 0.0; // surrogate gradient
- double sum_e = 0.0; // sum of eligibility traces
- double z = 0.0; // spiking variable
- double z_bar = 0.0; // low-pass filtered spiking variable
-
- for ( long presyn_isi : presyn_isis )
- {
- z = 1.0; // set spiking variable to 1 for each incoming spike
-
- for ( long t = 0; t < presyn_isi; ++t )
- {
- assert( eprop_hist_it != eprop_history_.end() );
-
- psi = eprop_hist_it->surrogate_gradient_;
- L = eprop_hist_it->learning_signal_;
-
- z_bar = V_.P_v_m_ * z_bar + V_.P_z_in_ * z;
- e = psi * z_bar;
- e_bar = kappa * e_bar + ( 1.0 - kappa ) * e;
- grad += L * e_bar;
- sum_e += e;
- z = 0.0; // set spiking variable to 0 between spikes
-
- ++eprop_hist_it;
- }
- }
- presyn_isis.clear();
-
- const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps();
- const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps();
- const auto firing_rate_reg = get_firing_rate_reg_history( t_previous_update + get_shift() + update_interval );
-
- grad += firing_rate_reg * sum_e;
-
- if ( average_gradient )
- {
- grad /= learning_window;
- }
-
- return grad;
-}
-
-} // namespace nest
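
The propagator entries computed in ``pre_run_hook()`` of the two removed neuron models
follow from exact integration of the leaky membrane equation over one resolution step
:math:`\Delta t`. Mapping the internal variables ``P_v_m_``, ``P_i_in_``, ``P_z_in_``, and
``P_adapt_`` onto the symbols of the user documentation (:math:`\rho` only exists for the
adaptive model), they read:

.. math::
    \alpha &= P_{v_\text{m}} = e^{ -\frac{ \Delta t }{ \tau_\text{m} } } \,, \\
    P_{i_\text{in}} &= \frac{ \tau_\text{m} }{ C_\text{m} } \left( 1 - \alpha \right) \,, \\
    \zeta &= P_{z_\text{in}} =
    \begin{cases}
       1 & \text{if spikes arrive at the end of the step} \\
       1 - \alpha & \text{if spikes arrive at the beginning}
    \end{cases} \,, \\
    \rho &= P_\text{adapt} = e^{ -\frac{ \Delta t }{ \tau_\text{a} } } \,. \\
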
diff --git a/models/eprop_iaf_bsshslm_2020.h b/models/eprop_iaf_bsshslm_2020.h
deleted file mode 100644
index ea1bc3f1fe..0000000000
--- a/models/eprop_iaf_bsshslm_2020.h
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * eprop_iaf_bsshslm_2020.h
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef EPROP_IAF_BSSHSLM_2020_H
-#define EPROP_IAF_BSSHSLM_2020_H
-
-// nestkernel
-#include "connection.h"
-#include "eprop_archiving_node_impl.h"
-#include "eprop_archiving_node_recurrent.h"
-#include "event.h"
-#include "nest_types.h"
-#include "ring_buffer.h"
-#include "universal_data_logger.h"
-
-namespace nest
-{
-
-/* BeginUserDocs: neuron, e-prop plasticity, current-based, integrate-and-fire
-
-Short description
-+++++++++++++++++
-
-Current-based leaky integrate-and-fire neuron model with delta-shaped or exponentially filtered
-postsynaptic currents for e-prop plasticity
-
-Description
-+++++++++++
-
-``eprop_iaf_bsshslm_2020`` is an implementation of a leaky integrate-and-fire
-neuron model with delta-shaped postsynaptic currents used for eligibility
-propagation (e-prop) plasticity.
-
-E-prop plasticity was originally introduced and implemented in TensorFlow in [1]_.
-
-The suffix ``_bsshslm_2020`` follows the NEST convention to indicate in the
-model name the paper that introduced it by the first letter of the authors' last
-names and the publication year.
-
-.. note::
- The neuron dynamics of the ``eprop_iaf_bsshslm_2020`` model (excluding e-prop
- plasticity) are similar to the neuron dynamics of the ``iaf_psc_delta`` model,
- with minor differences, such as the propagator of the post-synaptic current
- and the voltage reset upon a spike.
-
-The membrane voltage time course :math:`v_j^t` of the neuron :math:`j` is given by:
-
-.. math::
- v_j^t &= \alpha v_j^{t-1} + \zeta \sum_{i \neq j} W_{ji}^\text{rec} z_i^{t-1}
- + \zeta \sum_i W_{ji}^\text{in} x_i^t - z_j^{t-1} v_\text{th} \,, \\
- \alpha &= e^{ -\frac{ \Delta t }{ \tau_\text{m} } } \,, \\
- \zeta &=
- \begin{cases}
- 1 \\
- 1 - \alpha
- \end{cases} \,, \\
-
-where :math:`W_{ji}^\text{rec}` and :math:`W_{ji}^\text{in}` are the recurrent and
-input synaptic weight matrices, and :math:`z_i^{t-1}` is the recurrent presynaptic
-state variable, while :math:`x_i^t` represents the input at time :math:`t`.
-
-Descriptions of further parameters and variables can be found in the table below.
-
-The spike state variable is expressed by a Heaviside function:
-
-.. math::
- z_j^t = H \left( v_j^t - v_\text{th} \right) \,. \\
-
-If the membrane voltage crosses the threshold voltage :math:`v_\text{th}`, a spike is
-emitted and the membrane voltage is reduced by :math:`v_\text{th}` in the next
-time step. After the time step of the spike emission, the neuron is not
-able to spike for an absolute refractory period :math:`t_\text{ref}`.
-
-An additional state variable and the corresponding differential equation
-represent a piecewise constant external current.
-
-See the documentation on the :doc:`iaf_psc_delta<../models/iaf_psc_delta/>` neuron model
-for more information on the integration of the subthreshold dynamics.
-
-The change of the synaptic weight is calculated from the gradient :math:`g` of
-the loss :math:`E` with respect to the synaptic weight :math:`W_{ji}`:
-:math:`\frac{ \text{d}E }{ \text{d} W_{ji} }`
-which depends on the presynaptic
-spikes :math:`z_i^{t-1}`, the surrogate gradient or pseudo-derivative
-of the spike state variable with respect to the postsynaptic membrane
-voltage :math:`\psi_j^t` (the product of which forms the eligibility
-trace :math:`e_{ji}^t`), and the learning signal :math:`L_j^t` emitted
-by the readout neurons.
-
-.. math::
- \frac{ \text{d} E }{ \text{d} W_{ji} } &= \sum_t L_j^t \bar{e}_{ji}^t \,, \\
- e_{ji}^t &= \psi^t_j \bar{z}_i^{t-1} \,, \\
-
-.. include:: ../models/eprop_iaf.rst
- :start-after: .. start_surrogate-gradient-functions
- :end-before: .. end_surrogate-gradient-functions
-
-The eligibility trace and the presynaptic spike trains are low-pass filtered
-with the following exponential kernels:
-
-.. math::
- \bar{e}_{ji}^t &= \mathcal{F}_\kappa \left( e_{ji}^t \right) \,, \\
- \kappa &= e^{ -\frac{\Delta t }{ \tau_\text{m,out} }} \,, \\
- \bar{z}_i^t &= \mathcal{F}_\alpha(z_i^t) \,, \\
- \mathcal{F}_\alpha \left( z_i^t \right) &= \alpha \mathcal{F}_\alpha \left( z_i^{t-1} \right) + z_i^t \,, \\
- \mathcal{F}_\alpha \left( z_i^0 \right) &= z_i^0 \,, \\
-
-where :math:`\tau_\text{m,out}` is the membrane time constant of the readout neuron.
-
-Furthermore, a firing rate regularization mechanism keeps the average firing
-rate :math:`f^\text{av}_j` of the postsynaptic neuron close to a target firing rate
-:math:`f^\text{target}`. The gradient :math:`g_\text{reg}` of the regularization loss :math:`E_\text{reg}`
-with respect to the synaptic weight :math:`W_{ji}` is given by:
-
-.. math::
- \frac{ \text{d} E_\text{reg} }{ \text{d} W_{ji} }
- = c_\text{reg} \sum_t \frac{ 1 }{ T n_\text{trial} }
- \left( f^\text{target} - f^\text{av}_j \right) e_{ji}^t \,, \\
-
-where :math:`c_\text{reg}` is a constant scaling factor and the average
-is taken over the time that passed since the previous update, that is, the number of
-trials :math:`n_\text{trial}` times the duration of an update interval :math:`T`.
-
-The overall gradient is given by the addition of the two gradients.
-
-For more information on e-prop plasticity, see the documentation on the other e-prop models:
-
- * :doc:`eprop_iaf_adapt_bsshslm_2020<../models/eprop_iaf_adapt_bsshslm_2020/>`
- * :doc:`eprop_readout_bsshslm_2020<../models/eprop_readout_bsshslm_2020/>`
- * :doc:`eprop_synapse_bsshslm_2020<../models/eprop_synapse_bsshslm_2020/>`
- * :doc:`eprop_learning_signal_connection_bsshslm_2020<../models/eprop_learning_signal_connection_bsshslm_2020/>`
-
-Details on the event-based NEST implementation of e-prop can be found in [2]_.
-
-Parameters
-++++++++++
-
-The following parameters can be set in the status dictionary.
-
-=========================== ======= ======================= ================ ===================================
-**Neuron parameters**
-----------------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-=========================== ======= ======================= ================ ===================================
-``C_m`` pF :math:`C_\text{m}` 250.0 Capacitance of the membrane
-``E_L`` mV :math:`E_\text{L}` -70.0 Leak / resting membrane potential
-``I_e`` pA :math:`I_\text{e}` 0.0 Constant external input current
-``regular_spike_arrival`` Boolean ``True`` If ``True``, the input spikes
- arrive at the end of the time step,
- if ``False`` at the beginning
- (determines PSC scale)
-``t_ref`` ms :math:`t_\text{ref}` 2.0 Duration of the refractory period
-``tau_m`` ms :math:`\tau_\text{m}` 10.0 Time constant of the membrane
-``V_min`` mV :math:`v_\text{min}` negative maximum Absolute lower bound of the
- value membrane voltage
- representable by
- a ``double``
- type in C++
-``V_th`` mV :math:`v_\text{th}` -55.0 Spike threshold voltage
-=========================== ======= ======================= ================ ===================================
-
-=============================== ==== ======================= ================== ================================
-**E-prop parameters**
-----------------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-=============================== ==== ======================= ================== ================================
-``c_reg`` :math:`c_\text{reg}` 0.0 Coefficient of firing rate
- regularization
-``f_target`` Hz :math:`f^\text{target}` 10.0 Target firing rate of rate
- regularization
-``beta`` :math:`\beta` 1.0 Width scaling of surrogate
- gradient / pseudo-derivative of
- membrane voltage
-``gamma`` :math:`\gamma` 0.3 Height scaling of surrogate
- gradient / pseudo-derivative of
- membrane voltage
-``surrogate_gradient_function`` :math:`\psi` "piecewise_linear" Surrogate gradient /
- pseudo-derivative function
- ["piecewise_linear",
- "exponential",
- "fast_sigmoid_derivative",
- "arctan"]
-=============================== ==== ======================= ================== ================================
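-
-As a sketch, the parameters above can be passed at creation time; the values
-below are arbitrary examples, not recommendations, and assume a NEST build
-that still provides this model:
-
-.. code-block:: python
-
-    import nest
-
-    nrn = nest.Create(
-        "eprop_iaf_bsshslm_2020",
-        params={
-            "C_m": 250.0,
-            "E_L": -70.0,
-            "V_th": -55.0,
-            "t_ref": 2.0,
-            "tau_m": 10.0,
-            "c_reg": 300.0,
-            "f_target": 10.0,
-            "surrogate_gradient_function": "piecewise_linear",
-        },
-    )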
-
-Recordables
-+++++++++++
-
-The following state variables evolve during simulation and can be recorded.
-
-================== ==== =============== ============= ================
-**Neuron state variables and recordables**
-----------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-================== ==== =============== ============= ================
-``V_m`` mV :math:`v_j` -70.0 Membrane voltage
-================== ==== =============== ============= ================
-
-====================== ==== =============== ============= =========================================
-**E-prop state variables and recordables**
----------------------------------------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-====================== ==== =============== ============= =========================================
-``learning_signal`` pA :math:`L_j` 0.0 Learning signal
-``surrogate_gradient`` :math:`\psi_j` 0.0 Surrogate gradient / pseudo-derivative of
- membrane voltage
-====================== ==== =============== ============= =========================================
-
-Usage
-+++++
-
-This model can only be used in combination with the other e-prop models
-and the network architecture requires specific wiring, input, and output.
-The usage is demonstrated in several
-:doc:`supervised regression and classification tasks <../auto_examples/eprop_plasticity/index>`
-reproducing, among others, the original proof-of-concept tasks in [1]_.
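-
-A minimal wiring sketch (illustrative weights and delays; the full examples
-linked above show the complete setup including input and recording):
-
-.. code-block:: python
-
-    import nest
-
-    nrns_rec = nest.Create("eprop_iaf_bsshslm_2020", 5)
-    nrns_out = nest.Create("eprop_readout_bsshslm_2020", 1)
-
-    # recurrent and output connections use the plastic e-prop synapse
-    syn_eprop = {"synapse_model": "eprop_synapse_bsshslm_2020", "weight": 1.0, "delay": 1.0}
-    nest.Connect(nrns_rec, nrns_rec, {"rule": "all_to_all", "allow_autapses": False}, syn_eprop)
-    nest.Connect(nrns_rec, nrns_out, "all_to_all", syn_eprop)
-
-    # feedback of learning signals from readout to recurrent neurons
-    syn_feedback = {
-        "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
-        "weight": 1.0,
-        "delay": 1.0,
-    }
-    nest.Connect(nrns_out, nrns_rec, "all_to_all", syn_feedback)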
-
-References
-++++++++++
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R,
- Maass W (2020). A solution to the learning dilemma for recurrent
- networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-.. include:: ../models/eprop_iaf.rst
- :start-after: .. start_surrogate-gradient-references
- :end-before: .. end_surrogate-gradient-references
-
-Sends
-+++++
-
-SpikeEvent
-
-Receives
-++++++++
-
-SpikeEvent, CurrentEvent, LearningSignalConnectionEvent, DataLoggingRequest
-
-See also
-++++++++
-
-Examples using this model
-+++++++++++++++++++++++++
-
-.. listexamples:: eprop_iaf_bsshslm_2020
-
-EndUserDocs */
-
-void register_eprop_iaf_bsshslm_2020( const std::string& name );
-
-/**
- * @brief Class implementing a LIF neuron model for e-prop plasticity.
- *
- * Class implementing a current-based leaky integrate-and-fire neuron model with delta-shaped postsynaptic currents for
- * e-prop plasticity according to Bellec et al. (2020).
- */
-class eprop_iaf_bsshslm_2020 : public EpropArchivingNodeRecurrent< true >
-{
-
-public:
- //! Default constructor.
- eprop_iaf_bsshslm_2020();
-
- //! Copy constructor.
- eprop_iaf_bsshslm_2020( const eprop_iaf_bsshslm_2020& );
-
- using Node::handle;
- using Node::handles_test_event;
-
- size_t send_test_event( Node&, size_t, synindex, bool ) override;
-
- void handle( SpikeEvent& ) override;
- void handle( CurrentEvent& ) override;
- void handle( LearningSignalConnectionEvent& ) override;
- void handle( DataLoggingRequest& ) override;
-
- size_t handles_test_event( SpikeEvent&, size_t ) override;
- size_t handles_test_event( CurrentEvent&, size_t ) override;
- size_t handles_test_event( LearningSignalConnectionEvent&, size_t ) override;
- size_t handles_test_event( DataLoggingRequest&, size_t ) override;
-
- void get_status( DictionaryDatum& ) const override;
- void set_status( const DictionaryDatum& ) override;
-
-private:
- void init_buffers_() override;
- void pre_run_hook() override;
-
- void update( Time const&, const long, const long ) override;
-
- double compute_gradient( std::vector< long >&, const long, const long, const double, const bool ) override;
-
- long get_shift() const override;
- bool is_eprop_recurrent_node() const override;
-
- //! Map for storing a static set of recordables.
- friend class RecordablesMap< eprop_iaf_bsshslm_2020 >;
-
- //! Logger for universal data supporting the data logging request / reply mechanism. Populated with a recordables map.
- friend class UniversalDataLogger< eprop_iaf_bsshslm_2020 >;
-
- //! Structure of parameters.
- struct Parameters_
- {
- //! Capacitance of the membrane (pF).
- double C_m_;
-
- //! Coefficient of firing rate regularization.
- double c_reg_;
-
- //! Leak / resting membrane potential (mV).
- double E_L_;
-
- //! Target firing rate of rate regularization (spikes/s).
- double f_target_;
-
- //! Width scaling of surrogate gradient / pseudo-derivative of membrane voltage.
- double beta_;
-
- //! Height scaling of surrogate gradient / pseudo-derivative of membrane voltage.
- double gamma_;
-
- //! Constant external input current (pA).
- double I_e_;
-
- //! If True, the input spikes arrive at the end of the time step, if False at the beginning (determines PSC scale).
- bool regular_spike_arrival_;
-
- //! Surrogate gradient / pseudo-derivative function of the membrane voltage ["piecewise_linear", "exponential",
- //! "fast_sigmoid_derivative", "arctan"]
- std::string surrogate_gradient_function_;
-
- //! Duration of the refractory period (ms).
- double t_ref_;
-
- //! Time constant of the membrane (ms).
- double tau_m_;
-
- //! Absolute lower bound of the membrane voltage relative to the leak membrane potential (mV).
- double V_min_;
-
- //! Spike threshold voltage relative to the leak membrane potential (mV).
- double V_th_;
-
- //! Default constructor.
- Parameters_();
-
- //! Get the parameters and their values.
- void get( DictionaryDatum& ) const;
-
- //! Set the parameters and throw errors in case of invalid values.
- double set( const DictionaryDatum&, Node* );
- };
-
- //! Structure of state variables.
- struct State_
- {
- //! Learning signal. Sum of weighted error signals coming from the readout neurons.
- double learning_signal_;
-
- //! Number of remaining refractory steps.
- int r_;
-
- //! Surrogate gradient / pseudo-derivative of the membrane voltage.
- double surrogate_gradient_;
-
- //! Input current (pA).
- double i_in_;
-
- //! Membrane voltage relative to the leak membrane potential (mV).
- double v_m_;
-
- //! Binary spike state variable - 1.0 if the neuron has spiked in the previous time step and 0.0 otherwise.
- double z_;
-
- //! Binary input spike state variable - 1.0 if the neuron has spiked in the previous time step and 0.0 otherwise.
- double z_in_;
-
- //! Default constructor.
- State_();
-
- //! Get the state variables and their values.
- void get( DictionaryDatum&, const Parameters_& ) const;
-
- //! Set the state variables.
- void set( const DictionaryDatum&, const Parameters_&, double, Node* );
- };
-
- //! Structure of buffers.
- struct Buffers_
- {
- //! Default constructor.
- Buffers_( eprop_iaf_bsshslm_2020& );
-
- //! Copy constructor.
- Buffers_( const Buffers_&, eprop_iaf_bsshslm_2020& );
-
- //! Buffer for incoming spikes.
- RingBuffer spikes_;
-
- //! Buffer for incoming currents.
- RingBuffer currents_;
-
- //! Logger for universal data.
- UniversalDataLogger< eprop_iaf_bsshslm_2020 > logger_;
- };
-
- //! Structure of internal variables.
- struct Variables_
- {
- //! Propagator matrix entry for evolving the membrane voltage (mathematical symbol "alpha" in user documentation).
- double P_v_m_;
-
- //! Propagator matrix entry for evolving the incoming spike state variables (mathematical symbol "zeta" in user
- //! documentation).
- double P_z_in_;
-
- //! Propagator matrix entry for evolving the incoming currents.
- double P_i_in_;
-
- //! Total refractory steps.
- int RefractoryCounts_;
- };
-
- //! Get the current value of the membrane voltage.
- double
- get_v_m_() const
- {
- return S_.v_m_ + P_.E_L_;
- }
-
- //! Get the current value of the surrogate gradient.
- double
- get_surrogate_gradient_() const
- {
- return S_.surrogate_gradient_;
- }
-
- //! Get the current value of the learning signal.
- double
- get_learning_signal_() const
- {
- return S_.learning_signal_;
- }
-
- // the order in which the structure instances are defined is important for speed
-
- //! Structure of parameters.
- Parameters_ P_;
-
- //! Structure of state variables.
- State_ S_;
-
- //! Structure of internal variables.
- Variables_ V_;
-
- //! Structure of buffers.
- Buffers_ B_;
-
- //! Map storing a static set of recordables.
- static RecordablesMap< eprop_iaf_bsshslm_2020 > recordablesMap_;
-};
-
-inline long
-eprop_iaf_bsshslm_2020::get_shift() const
-{
- return offset_gen_ + delay_in_rec_;
-}
-
-inline bool
-eprop_iaf_bsshslm_2020::is_eprop_recurrent_node() const
-{
- return true;
-}
-
-inline size_t
-eprop_iaf_bsshslm_2020::send_test_event( Node& target, size_t receptor_type, synindex, bool )
-{
- SpikeEvent e;
- e.set_sender( *this );
- return target.handles_test_event( e, receptor_type );
-}
-
-inline size_t
-eprop_iaf_bsshslm_2020::handles_test_event( SpikeEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_iaf_bsshslm_2020::handles_test_event( CurrentEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_iaf_bsshslm_2020::handles_test_event( LearningSignalConnectionEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_iaf_bsshslm_2020::handles_test_event( DataLoggingRequest& dlr, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return B_.logger_.connect_logging_device( dlr, recordablesMap_ );
-}
-
-inline void
-eprop_iaf_bsshslm_2020::get_status( DictionaryDatum& d ) const
-{
- P_.get( d );
- S_.get( d, P_ );
- ( *d )[ names::recordables ] = recordablesMap_.get_list();
-}
-
-inline void
-eprop_iaf_bsshslm_2020::set_status( const DictionaryDatum& d )
-{
- // temporary copies in case of errors
- Parameters_ ptmp = P_;
- State_ stmp = S_;
-
- // make sure that ptmp and stmp are consistent - throw BadProperty if not
- const double delta_EL = ptmp.set( d, this );
- stmp.set( d, ptmp, delta_EL, this );
-
- P_ = ptmp;
- S_ = stmp;
-}
-
-} // namespace nest
-
-#endif // EPROP_IAF_BSSHSLM_2020_H
diff --git a/models/eprop_learning_signal_connection_bsshslm_2020.cpp b/models/eprop_learning_signal_connection_bsshslm_2020.cpp
deleted file mode 100644
index fe29c2a84c..0000000000
--- a/models/eprop_learning_signal_connection_bsshslm_2020.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * eprop_learning_signal_connection_bsshslm_2020.cpp
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "eprop_learning_signal_connection_bsshslm_2020.h"
-
-// nestkernel
-#include "nest_impl.h"
-
-void
-nest::register_eprop_learning_signal_connection_bsshslm_2020( const std::string& name )
-{
- register_connection_model< eprop_learning_signal_connection_bsshslm_2020 >( name );
-}
diff --git a/models/eprop_learning_signal_connection_bsshslm_2020.h b/models/eprop_learning_signal_connection_bsshslm_2020.h
deleted file mode 100644
index 5a6f5d5e5d..0000000000
--- a/models/eprop_learning_signal_connection_bsshslm_2020.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * eprop_learning_signal_connection_bsshslm_2020.h
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-
-#ifndef EPROP_LEARNING_SIGNAL_CONNECTION_BSSHSLM_2020_H
-#define EPROP_LEARNING_SIGNAL_CONNECTION_BSSHSLM_2020_H
-
-// nestkernel
-#include "connection.h"
-
-namespace nest
-{
-
-/* BeginUserDocs: synapse, e-prop plasticity
-
-Short description
-+++++++++++++++++
-
-Synapse model transmitting feedback learning signals for e-prop plasticity
-
-Description
-+++++++++++
-
-``eprop_learning_signal_connection_bsshslm_2020`` is an implementation of a feedback connector from
-``eprop_readout_bsshslm_2020`` readout neurons to ``eprop_iaf_bsshslm_2020`` or ``eprop_iaf_adapt_bsshslm_2020``
-recurrent neurons that transmits the learning signals :math:`L_j^t` for eligibility propagation (e-prop) plasticity and
-has a static weight :math:`B_{jk}`.
-
-E-prop plasticity was originally introduced and implemented in TensorFlow in [1]_.
-
-The suffix ``_bsshslm_2020`` follows the NEST convention to indicate in the
-model name the paper that introduced it by the first letter of the authors' last
-names and the publication year.
-
-For more information on e-prop plasticity, see the documentation on the other e-prop models:
-
- * :doc:`eprop_iaf_bsshslm_2020<../models/eprop_iaf_bsshslm_2020/>`
- * :doc:`eprop_iaf_adapt_bsshslm_2020<../models/eprop_iaf_adapt_bsshslm_2020/>`
- * :doc:`eprop_readout_bsshslm_2020<../models/eprop_readout_bsshslm_2020/>`
- * :doc:`eprop_synapse_bsshslm_2020<../models/eprop_synapse_bsshslm_2020/>`
-
-Details on the event-based NEST implementation of e-prop can be found in [2]_.
-
-Parameters
-++++++++++
-
-The following parameters can be set in the status dictionary.
-
-========== ===== ================ ======= ===============
-**Individual synapse parameters**
----------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-========== ===== ================ ======= ===============
-``delay`` ms :math:`d_{jk}` 1.0 Dendritic delay
-``weight`` pA :math:`B_{jk}` 1.0 Synaptic weight
-========== ===== ================ ======= ===============
-
-Recordables
-+++++++++++
-
-The following variables can be recorded. Note that since this connection lacks
-a plasticity mechanism, the weight does not evolve over time.
-
-============== ==== =============== ============= ===============
-**Synapse recordables**
------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-============== ==== =============== ============= ===============
-``weight`` pA :math:`B_{jk}` 1.0 Synaptic weight
-============== ==== =============== ============= ===============
-
-Usage
-+++++
-
-This model can only be used in combination with the other e-prop models
-and the network architecture requires specific wiring, input, and output.
-The usage is demonstrated in several
-:doc:`supervised regression and classification tasks <../auto_examples/eprop_plasticity/index>`
-reproducing, among others, the original proof-of-concept tasks in [1]_.
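-
-A sketch of creating such feedback connections and reading back their static
-weights (weight and delay values are illustrative):
-
-.. code-block:: python
-
-    import nest
-
-    nrns_out = nest.Create("eprop_readout_bsshslm_2020", 1)
-    nrns_rec = nest.Create("eprop_iaf_bsshslm_2020", 2)
-
-    syn_feedback = {
-        "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
-        "weight": 0.5,
-        "delay": 1.0,
-    }
-    nest.Connect(nrns_out, nrns_rec, "all_to_all", syn_feedback)
-
-    conns = nest.GetConnections(synapse_model="eprop_learning_signal_connection_bsshslm_2020")
-    print(conns.get("weight"))  # static feedback weights B_jk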
-
-Transmits
-+++++++++
-
-LearningSignalConnectionEvent
-
-References
-++++++++++
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R,
- Maass W (2020). A solution to the learning dilemma for recurrent
- networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-See also
-++++++++
-
-Examples using this model
-+++++++++++++++++++++++++
-
-.. listexamples:: eprop_learning_signal_connection_bsshslm_2020
-
-EndUserDocs */
-
-void register_eprop_learning_signal_connection_bsshslm_2020( const std::string& name );
-
-/**
- * @brief Class implementing a feedback connection model for e-prop plasticity.
- *
- * Class implementing a synapse model transmitting secondary feedback learning signals for e-prop plasticity
- * according to Bellec et al. (2020).
- */
-template < typename targetidentifierT >
-class eprop_learning_signal_connection_bsshslm_2020 : public Connection< targetidentifierT >
-{
-
-public:
- //! Type of the common synapse properties.
- typedef CommonSynapseProperties CommonPropertiesType;
-
- //! Type of the connection base.
- typedef Connection< targetidentifierT > ConnectionBase;
-
- //! Properties of the connection model.
- static constexpr ConnectionModelProperties properties = ConnectionModelProperties::HAS_DELAY;
-
- //! Default constructor.
- eprop_learning_signal_connection_bsshslm_2020()
- : ConnectionBase()
- , weight_( 1.0 )
- {
- }
-
- //! Get the secondary learning signal event.
- SecondaryEvent* get_secondary_event();
-
- using ConnectionBase::get_delay_steps;
- using ConnectionBase::get_rport;
- using ConnectionBase::get_target;
-
- //! Check if the target accepts the event and receptor type requested by the sender.
- void
- check_connection( Node& s, Node& t, size_t receptor_type, const CommonPropertiesType& )
- {
- LearningSignalConnectionEvent ge;
-
- s.sends_secondary_event( ge );
- ge.set_sender( s );
- Connection< targetidentifierT >::target_.set_rport( t.handles_test_event( ge, receptor_type ) );
- Connection< targetidentifierT >::target_.set_target( &t );
- }
-
- //! Send the learning signal event.
- bool
- send( Event& e, size_t t, const CommonSynapseProperties& )
- {
- e.set_weight( weight_ );
- e.set_delay_steps( get_delay_steps() );
- e.set_receiver( *get_target( t ) );
- e.set_rport( get_rport() );
- e();
- return true;
- }
-
- //! Get the model attributes and their values.
- void get_status( DictionaryDatum& d ) const;
-
- //! Set the values of the model attributes.
- void set_status( const DictionaryDatum& d, ConnectorModel& cm );
-
- //! Set the synaptic weight to the provided value.
- void
- set_weight( const double w )
- {
- weight_ = w;
- }
-
-private:
- //! Synaptic weight.
- double weight_;
-};
-
-template < typename targetidentifierT >
-constexpr ConnectionModelProperties eprop_learning_signal_connection_bsshslm_2020< targetidentifierT >::properties;
-
-template < typename targetidentifierT >
-void
-eprop_learning_signal_connection_bsshslm_2020< targetidentifierT >::get_status( DictionaryDatum& d ) const
-{
- ConnectionBase::get_status( d );
- def< double >( d, names::weight, weight_ );
- def< long >( d, names::size_of, sizeof( *this ) );
-}
-
-template < typename targetidentifierT >
-void
-eprop_learning_signal_connection_bsshslm_2020< targetidentifierT >::set_status( const DictionaryDatum& d,
- ConnectorModel& cm )
-{
- ConnectionBase::set_status( d, cm );
- updateValue< double >( d, names::weight, weight_ );
-}
-
-template < typename targetidentifierT >
-SecondaryEvent*
-eprop_learning_signal_connection_bsshslm_2020< targetidentifierT >::get_secondary_event()
-{
- return new LearningSignalConnectionEvent();
-}
-
-} // namespace nest
-
-#endif // EPROP_LEARNING_SIGNAL_CONNECTION_BSSHSLM_2020_H
diff --git a/models/eprop_readout_bsshslm_2020.cpp b/models/eprop_readout_bsshslm_2020.cpp
deleted file mode 100644
index 79f059fb59..0000000000
--- a/models/eprop_readout_bsshslm_2020.cpp
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * eprop_readout_bsshslm_2020.cpp
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-// nest models
-#include "eprop_readout_bsshslm_2020.h"
-
-// C++
-#include
-
-// libnestutil
-#include "dict_util.h"
-#include "numerics.h"
-
-// nestkernel
-#include "exceptions.h"
-#include "kernel_manager.h"
-#include "nest_impl.h"
-#include "universal_data_logger_impl.h"
-
-// sli
-#include "dictutils.h"
-
-namespace nest
-{
-
-void
-register_eprop_readout_bsshslm_2020( const std::string& name )
-{
- register_node_model< eprop_readout_bsshslm_2020 >( name );
-}
-
-/* ----------------------------------------------------------------
- * Recordables map
- * ---------------------------------------------------------------- */
-
-RecordablesMap< eprop_readout_bsshslm_2020 > eprop_readout_bsshslm_2020::recordablesMap_;
-
-template <>
-void
-RecordablesMap< eprop_readout_bsshslm_2020 >::create()
-{
- insert_( names::eprop_history_duration, &eprop_readout_bsshslm_2020::get_eprop_history_duration );
- insert_( names::error_signal, &eprop_readout_bsshslm_2020::get_error_signal_ );
- insert_( names::readout_signal, &eprop_readout_bsshslm_2020::get_readout_signal_ );
- insert_( names::readout_signal_unnorm, &eprop_readout_bsshslm_2020::get_readout_signal_unnorm_ );
- insert_( names::target_signal, &eprop_readout_bsshslm_2020::get_target_signal_ );
- insert_( names::V_m, &eprop_readout_bsshslm_2020::get_v_m_ );
-}
-
-/* ----------------------------------------------------------------
- * Default constructors for parameters, state, and buffers
- * ---------------------------------------------------------------- */
-
-eprop_readout_bsshslm_2020::Parameters_::Parameters_()
- : C_m_( 250.0 )
- , E_L_( 0.0 )
- , I_e_( 0.0 )
- , loss_( "mean_squared_error" )
- , regular_spike_arrival_( true )
- , tau_m_( 10.0 )
- , V_min_( -std::numeric_limits< double >::max() )
-{
-}
-
-eprop_readout_bsshslm_2020::State_::State_()
- : error_signal_( 0.0 )
- , readout_signal_( 0.0 )
- , readout_signal_unnorm_( 0.0 )
- , target_signal_( 0.0 )
- , i_in_( 0.0 )
- , v_m_( 0.0 )
- , z_in_( 0.0 )
-{
-}
-
-eprop_readout_bsshslm_2020::Buffers_::Buffers_( eprop_readout_bsshslm_2020& n )
- : logger_( n )
-{
-}
-
-eprop_readout_bsshslm_2020::Buffers_::Buffers_( const Buffers_&, eprop_readout_bsshslm_2020& n )
- : logger_( n )
-{
-}
-
-/* ----------------------------------------------------------------
- * Getter and setter functions for parameters and state
- * ---------------------------------------------------------------- */
-
-void
-eprop_readout_bsshslm_2020::Parameters_::get( DictionaryDatum& d ) const
-{
- def< double >( d, names::C_m, C_m_ );
- def< double >( d, names::E_L, E_L_ );
- def< double >( d, names::I_e, I_e_ );
- def< std::string >( d, names::loss, loss_ );
- def< bool >( d, names::regular_spike_arrival, regular_spike_arrival_ );
- def< double >( d, names::tau_m, tau_m_ );
- def< double >( d, names::V_min, V_min_ + E_L_ );
-}
-
-double
-eprop_readout_bsshslm_2020::Parameters_::set( const DictionaryDatum& d, Node* node )
-{
- // if leak potential is changed, adjust all variables defined relative to it
- const double ELold = E_L_;
- updateValueParam< double >( d, names::E_L, E_L_, node );
- const double delta_EL = E_L_ - ELold;
-
- V_min_ -= updateValueParam< double >( d, names::V_min, V_min_, node ) ? E_L_ : delta_EL;
-
- updateValueParam< double >( d, names::C_m, C_m_, node );
- updateValueParam< double >( d, names::I_e, I_e_, node );
- updateValueParam< std::string >( d, names::loss, loss_, node );
- updateValueParam< bool >( d, names::regular_spike_arrival, regular_spike_arrival_, node );
- updateValueParam< double >( d, names::tau_m, tau_m_, node );
-
- if ( C_m_ <= 0 )
- {
- throw BadProperty( "Membrane capacitance C_m > 0 required." );
- }
-
- if ( loss_ != "mean_squared_error" and loss_ != "cross_entropy" )
- {
- throw BadProperty( "Loss function loss from [\"mean_squared_error\", \"cross_entropy\"] required." );
- }
-
- if ( tau_m_ <= 0 )
- {
- throw BadProperty( "Membrane time constant tau_m > 0 required." );
- }
-
- return delta_EL;
-}
-
-void
-eprop_readout_bsshslm_2020::State_::get( DictionaryDatum& d, const Parameters_& p ) const
-{
- def< double >( d, names::V_m, v_m_ + p.E_L_ );
- def< double >( d, names::error_signal, error_signal_ );
- def< double >( d, names::readout_signal, readout_signal_ );
- def< double >( d, names::readout_signal_unnorm, readout_signal_unnorm_ );
- def< double >( d, names::target_signal, target_signal_ );
-}
-
-void
-eprop_readout_bsshslm_2020::State_::set( const DictionaryDatum& d, const Parameters_& p, double delta_EL, Node* node )
-{
- v_m_ -= updateValueParam< double >( d, names::V_m, v_m_, node ) ? p.E_L_ : delta_EL;
-}
-
-/* ----------------------------------------------------------------
- * Default and copy constructor for node
- * ---------------------------------------------------------------- */
-
-eprop_readout_bsshslm_2020::eprop_readout_bsshslm_2020()
- : EpropArchivingNodeReadout()
- , P_()
- , S_()
- , B_( *this )
-{
- recordablesMap_.create();
-}
-
-eprop_readout_bsshslm_2020::eprop_readout_bsshslm_2020( const eprop_readout_bsshslm_2020& n )
- : EpropArchivingNodeReadout( n )
- , P_( n.P_ )
- , S_( n.S_ )
- , B_( n.B_, *this )
-{
-}
-
-/* ----------------------------------------------------------------
- * Node initialization functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_readout_bsshslm_2020::init_buffers_()
-{
- B_.normalization_rate_ = 0;
- B_.spikes_.clear(); // includes resize
- B_.currents_.clear(); // includes resize
- B_.logger_.reset(); // includes resize
-}
-
-void
-eprop_readout_bsshslm_2020::pre_run_hook()
-{
- B_.logger_.init(); // ensures initialization in case multimeter connected after Simulate
-
- if ( P_.loss_ == "mean_squared_error" )
- {
- compute_error_signal = &eprop_readout_bsshslm_2020::compute_error_signal_mean_squared_error;
- V_.signal_to_other_readouts_ = false;
- }
- else if ( P_.loss_ == "cross_entropy" )
- {
- compute_error_signal = &eprop_readout_bsshslm_2020::compute_error_signal_cross_entropy;
- V_.signal_to_other_readouts_ = true;
- }
-
- const double dt = Time::get_resolution().get_ms();
-
- V_.P_v_m_ = std::exp( -dt / P_.tau_m_ );
- V_.P_i_in_ = P_.tau_m_ / P_.C_m_ * ( 1.0 - V_.P_v_m_ );
- V_.P_z_in_ = P_.regular_spike_arrival_ ? 1.0 : 1.0 - V_.P_v_m_;
-}
-
-
-/* ----------------------------------------------------------------
- * Update function
- * ---------------------------------------------------------------- */
-
-void
-eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const long to )
-{
- const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps();
- const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps();
- const bool with_reset = kernel().simulation_manager.get_eprop_reset_neurons_on_update();
- const long shift = get_shift();
-
- const size_t buffer_size = kernel().connection_manager.get_min_delay();
-
- std::vector< double > error_signal_buffer( buffer_size, 0.0 );
- std::vector< double > readout_signal_unnorm_buffer( buffer_size, 0.0 );
-
- for ( long lag = from; lag < to; ++lag )
- {
- const long t = origin.get_steps() + lag;
- const long interval_step = ( t - shift ) % update_interval;
- const long interval_step_signals = ( t - shift - delay_out_norm_ ) % update_interval;
-
- if ( interval_step == 0 )
- {
- erase_used_eprop_history();
-
- if ( with_reset )
- {
- S_.v_m_ = 0.0;
- }
- }
-
- S_.z_in_ = B_.spikes_.get_value( lag );
-
- S_.v_m_ = V_.P_i_in_ * S_.i_in_ + V_.P_z_in_ * S_.z_in_ + V_.P_v_m_ * S_.v_m_;
- S_.v_m_ = std::max( S_.v_m_, P_.V_min_ );
-
- ( this->*compute_error_signal )( lag );
-
- if ( interval_step_signals < update_interval - learning_window )
- {
- S_.target_signal_ = 0.0;
- S_.readout_signal_ = 0.0;
- S_.error_signal_ = 0.0;
- }
-
- B_.normalization_rate_ = 0.0;
-
- if ( V_.signal_to_other_readouts_ )
- {
- readout_signal_unnorm_buffer[ lag ] = S_.readout_signal_unnorm_;
- }
-
- error_signal_buffer[ lag ] = S_.error_signal_;
-
- append_new_eprop_history_entry( t );
- write_error_signal_to_history( t, S_.error_signal_ );
-
- S_.i_in_ = B_.currents_.get_value( lag ) + P_.I_e_;
-
- B_.logger_.record_data( t );
- }
-
- LearningSignalConnectionEvent error_signal_event;
- error_signal_event.set_coeffarray( error_signal_buffer );
- kernel().event_delivery_manager.send_secondary( *this, error_signal_event );
-
- if ( V_.signal_to_other_readouts_ )
- {
- // time is one time step longer than the final interval_step to enable sending the
- // unnormalized readout signal one time step in advance so that it is available
- // in the next time step for computing the normalized readout signal
- DelayedRateConnectionEvent readout_signal_unnorm_event;
- readout_signal_unnorm_event.set_coeffarray( readout_signal_unnorm_buffer );
- kernel().event_delivery_manager.send_secondary( *this, readout_signal_unnorm_event );
- }
- return;
-}
-
-/* ----------------------------------------------------------------
- * Error signal functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_readout_bsshslm_2020::compute_error_signal_mean_squared_error( const long lag )
-{
- S_.readout_signal_ = S_.readout_signal_unnorm_;
- S_.readout_signal_unnorm_ = S_.v_m_ + P_.E_L_;
- S_.error_signal_ = S_.readout_signal_ - S_.target_signal_;
-}
-
-void
-eprop_readout_bsshslm_2020::compute_error_signal_cross_entropy( const long lag )
-{
- const double norm_rate = B_.normalization_rate_ + S_.readout_signal_unnorm_;
- S_.readout_signal_ = S_.readout_signal_unnorm_ / norm_rate;
- S_.readout_signal_unnorm_ = std::exp( S_.v_m_ + P_.E_L_ );
- S_.error_signal_ = S_.readout_signal_ - S_.target_signal_;
-}
-
-/* ----------------------------------------------------------------
- * Event handling functions
- * ---------------------------------------------------------------- */
-
-void
-eprop_readout_bsshslm_2020::handle( DelayedRateConnectionEvent& e )
-{
- const size_t rport = e.get_rport();
- assert( rport < SUP_RATE_RECEPTOR );
-
- auto it = e.begin();
- assert( it != e.end() );
-
- const double signal = e.get_weight() * e.get_coeffvalue( it );
- if ( rport == READOUT_SIG )
- {
- B_.normalization_rate_ += signal;
- }
- else if ( rport == TARGET_SIG )
- {
- S_.target_signal_ = signal;
- }
-
- assert( it == e.end() );
-}
-
-void
-eprop_readout_bsshslm_2020::handle( SpikeEvent& e )
-{
- assert( e.get_delay_steps() > 0 );
-
- B_.spikes_.add_value(
- e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() );
-}
-
-void
-eprop_readout_bsshslm_2020::handle( CurrentEvent& e )
-{
- assert( e.get_delay_steps() > 0 );
-
- B_.currents_.add_value(
- e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() );
-}
-
-void
-eprop_readout_bsshslm_2020::handle( DataLoggingRequest& e )
-{
- B_.logger_.handle( e );
-}
-
-double
-eprop_readout_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis,
- const long,
- const long t_previous_trigger_spike,
- const double kappa,
- const bool average_gradient )
-{
- auto eprop_hist_it = get_eprop_history( t_previous_trigger_spike );
-
- double grad = 0.0; // gradient value to be calculated
- double L = 0.0; // error signal
- double z = 0.0; // spiking variable
- double z_bar = 0.0; // low-pass filtered spiking variable
-
- for ( long presyn_isi : presyn_isis )
- {
- z = 1.0; // set spiking variable to 1 for each incoming spike
-
- for ( long t = 0; t < presyn_isi; ++t )
- {
- assert( eprop_hist_it != eprop_history_.end() );
-
- L = eprop_hist_it->error_signal_;
-
- z_bar = V_.P_v_m_ * z_bar + V_.P_z_in_ * z;
- grad += L * z_bar;
- z = 0.0; // set spiking variable to 0 between spikes
-
- ++eprop_hist_it;
- }
- }
- presyn_isis.clear();
-
- const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps();
- if ( average_gradient )
- {
- grad /= learning_window;
- }
-
- return grad;
-}
-
-} // namespace nest
diff --git a/models/eprop_readout_bsshslm_2020.h b/models/eprop_readout_bsshslm_2020.h
deleted file mode 100644
index 36ae6e7134..0000000000
--- a/models/eprop_readout_bsshslm_2020.h
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * eprop_readout_bsshslm_2020.h
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef EPROP_READOUT_BSSHSLM_2020_H
-#define EPROP_READOUT_BSSHSLM_2020_H
-
-// nestkernel
-#include "connection.h"
-#include "eprop_archiving_node_impl.h"
-#include "eprop_archiving_node_readout.h"
-#include "event.h"
-#include "nest_types.h"
-#include "ring_buffer.h"
-#include "universal_data_logger.h"
-
-namespace nest
-{
-
-/* BeginUserDocs: neuron, e-prop plasticity, current-based
-
-Short description
-+++++++++++++++++
-
-Current-based leaky integrate readout neuron model with delta-shaped
-postsynaptic currents for e-prop plasticity
-
-Description
-+++++++++++
-
-``eprop_readout_bsshslm_2020`` is an implementation of an integrate-and-fire neuron model
-with delta-shaped postsynaptic currents used as readout neuron for eligibility propagation (e-prop) plasticity.
-
-E-prop plasticity was originally introduced and implemented in TensorFlow in [1]_.
-
-The suffix ``_bsshslm_2020`` follows the NEST convention to indicate in the
-model name the paper that introduced it by the first letter of the authors' last
-names and the publication year.
-
-The membrane voltage time course :math:`v_j^t` of the neuron :math:`j` is given by:
-
-.. math::
- v_j^t &= \kappa v_j^{t-1} + \zeta \sum_{i \neq j} W_{ji}^\text{out} z_i^{t-1} \,, \\
- \kappa &= e^{ -\frac{ \Delta t }{ \tau_\text{m} } } \,, \\
- \zeta &=
- \begin{cases}
- 1 & \text{if } \texttt{regular\_spike\_arrival} = \texttt{True} \\
- 1 - \kappa & \text{if } \texttt{regular\_spike\_arrival} = \texttt{False}
- \end{cases} \,, \\
-
-where :math:`W_{ji}^\text{out}` is the output synaptic weight matrix and
-:math:`z_i^{t-1}` is the recurrent presynaptic spike state variable.
-
-Descriptions of further parameters and variables can be found in the table below.
-
-The spike state variable of a presynaptic neuron is expressed by a Heaviside function:
-
-.. math::
- z_i^t = H \left( v_i^t - v_\text{th} \right) \,. \\
-
-An additional state variable and the corresponding differential equation
-represent a piecewise constant external current.
-
-See the documentation on the :doc:`iaf_psc_delta<../models/iaf_psc_delta/>` neuron model
-for more information on the integration of the subthreshold dynamics.
-
-The change of the synaptic weight is calculated from the gradient :math:`g` of
-the loss :math:`E` with respect to the synaptic weight :math:`W_{ji}`:
-:math:`\frac{ \text{d}E }{ \text{d} W_{ji} }`
-which depends on the presynaptic
-spikes :math:`z_i^{t-1}` and the learning signal :math:`L_j^t` emitted by the readout
-neurons.
-
-.. math::
- \frac{ \text{d} E }{ \text{d} W_{ji} } = \sum_t L_j^t \bar{z}_i^{t-1} \,. \\
-
-The presynaptic spike trains are low-pass filtered with the following exponential kernel:
-
-.. math::
- \bar{z}_i^t &=\mathcal{F}_\kappa(z_i^t) \,, \\
- \mathcal{F}_\kappa(z_i^t) &= \kappa \mathcal{F}_\kappa \left( z_i^{t-1} \right) + z_i^t \,, \\
- \mathcal{F}_\kappa(z_i^0) &= z_i^0 \,. \\
-
-Since readout neurons are leaky integrators without a spiking mechanism, the
-formula for computing the gradient lacks the surrogate gradient /
-pseudo-derivative and a firing regularization term.
-
-The learning signal :math:`L_j^t` is given by the non-plastic feedback weight
-matrix :math:`B_{jk}` and the continuous error signal :math:`e_k^t` emitted by
-readout neuron :math:`k`:
-
-.. math::
- L_j^t = B_{jk} e_k^t \,. \\
-
-The error signal depends on the selected loss function.
-If a mean squared error loss is selected, then:
-
-.. math::
- e_k^t = y_k^t - y_k^{*,t} \,, \\
-
-where the readout signal :math:`y_k^t` corresponds to the membrane voltage of
-readout neuron :math:`k` and :math:`y_k^{*,t}` is the real-valued target signal.
-
-If a cross-entropy loss is selected, then:
-
-.. math::
- e_k^t &= \pi_k^t - \pi_k^{*,t} \,, \\
- \pi_k^t &= \text{softmax}_k \left( y_1^t, ..., y_K^t \right) =
- \frac{ \exp \left( y_k^t\right) }{ \sum_{k'} \exp \left( y_{k'}^t \right) } \,, \\
-
-where the readout signal :math:`\pi_k^t` corresponds to the softmax of the
-membrane voltage of readout neuron :math:`k` and :math:`\pi_k^{*,t}` is the
-one-hot encoded target signal.
-
-Furthermore, the readout and target signal are zero before the onset of the
-learning window in each update interval.
-
-For more information on e-prop plasticity, see the documentation on the other e-prop models:
-
- * :doc:`eprop_iaf_bsshslm_2020<../models/eprop_iaf_bsshslm_2020/>`
- * :doc:`eprop_iaf_adapt_bsshslm_2020<../models/eprop_iaf_adapt_bsshslm_2020/>`
- * :doc:`eprop_synapse_bsshslm_2020<../models/eprop_synapse_bsshslm_2020/>`
- * :doc:`eprop_learning_signal_connection_bsshslm_2020<../models/eprop_learning_signal_connection_bsshslm_2020/>`
-
-Details on the event-based NEST implementation of e-prop can be found in [2]_.
-
-Parameters
-++++++++++
-
-The following parameters can be set in the status dictionary.
-
-========================= ======= ===================== ================== =====================================
-**Neuron parameters**
-----------------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-========================= ======= ===================== ================== =====================================
-``C_m`` pF :math:`C_\text{m}` 250.0 Capacitance of the membrane
-``E_L`` mV :math:`E_\text{L}` 0.0 Leak / resting membrane potential
-``I_e`` pA :math:`I_\text{e}` 0.0 Constant external input current
-``regular_spike_arrival`` Boolean ``True`` If ``True``, the input spikes arrive
- at the end of the time step, if
- ``False`` at the beginning
- (determines PSC scale)
-``tau_m`` ms :math:`\tau_\text{m}` 10.0 Time constant of the membrane
-``V_min`` mV :math:`v_\text{min}` negative maximum Absolute lower bound of the membrane
- value voltage
- representable by a
- ``double`` type in
- C++
-========================= ======= ===================== ================== =====================================
-
-========== ======= ===================== ==================== =========================================
-**E-prop parameters**
--------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-========== ======= ===================== ==================== =========================================
-``loss`` :math:`E` "mean_squared_error" Loss function
- ["mean_squared_error", "cross_entropy"]
-========== ======= ===================== ==================== =========================================
-
-Recordables
-+++++++++++
-
-The following state variables evolve during simulation and can be recorded.
-
-=============== ==== =============== ============= ================
-**Neuron state variables and recordables**
--------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-=============== ==== =============== ============= ================
-``V_m`` mV :math:`v_j` 0.0 Membrane voltage
-=============== ==== =============== ============= ================
-
-========================= ==== =============== ============= ===============================
-**E-prop state variables and recordables**
---------------------------------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-========================= ==== =============== ============= ===============================
-``error_signal`` mV :math:`L_j` 0.0 Error signal
-``readout_signal`` mV :math:`y_j` 0.0 Readout signal
-``readout_signal_unnorm`` mV 0.0 Unnormalized readout signal
-``target_signal`` mV :math:`y^*_j` 0.0 Target signal
-========================= ==== =============== ============= ===============================
-
-Usage
-+++++
-
-This model can only be used in combination with the other e-prop models
-and the network architecture requires specific wiring, input, and output.
-The usage is demonstrated in several
-:doc:`supervised regression and classification tasks <../auto_examples/eprop_plasticity/index>`
-reproducing, among others, the original proof-of-concept tasks in [1]_.
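-
-A sketch of supplying the target signal to a readout neuron; the generator
-values are arbitrary, and receptor type 2 corresponds to the target signal
-(see ``receptor_types`` in the status dictionary):
-
-.. code-block:: python
-
-    import nest
-
-    nrn_out = nest.Create("eprop_readout_bsshslm_2020", params={"loss": "mean_squared_error"})
-    gen_target = nest.Create(
-        "step_rate_generator",
-        params={"amplitude_times": [10.0, 20.0], "amplitude_values": [0.5, 1.0]},
-    )
-
-    nest.Connect(
-        gen_target,
-        nrn_out,
-        "all_to_all",
-        {"synapse_model": "rate_connection_delayed", "receptor_type": 2},
-    )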
-
-References
-++++++++++
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R,
- Maass W (2020). A solution to the learning dilemma for recurrent
- networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-Sends
-+++++
-
-LearningSignalConnectionEvent, DelayedRateConnectionEvent
-
-Receives
-++++++++
-
-SpikeEvent, CurrentEvent, DelayedRateConnectionEvent, DataLoggingRequest
-
-See also
-++++++++
-
-Examples using this model
-+++++++++++++++++++++++++
-
-.. listexamples:: eprop_readout_bsshslm_2020
-
-EndUserDocs */
-
-void register_eprop_readout_bsshslm_2020( const std::string& name );
-
-/**
- * @brief Class implementing a readout neuron model for e-prop plasticity.
- *
- * Class implementing a current-based leaky integrate readout neuron model with delta-shaped postsynaptic currents for
- * e-prop plasticity according to Bellec et al. (2020).
- */
-class eprop_readout_bsshslm_2020 : public EpropArchivingNodeReadout< true >
-{
-
-public:
- //! Default constructor.
- eprop_readout_bsshslm_2020();
-
- //! Copy constructor.
- eprop_readout_bsshslm_2020( const eprop_readout_bsshslm_2020& );
-
- using Node::handle;
- using Node::handles_test_event;
-
- using Node::sends_secondary_event;
-
- void
- sends_secondary_event( LearningSignalConnectionEvent& ) override
- {
- }
-
- void
- sends_secondary_event( DelayedRateConnectionEvent& ) override
- {
- }
-
- void handle( SpikeEvent& ) override;
- void handle( CurrentEvent& ) override;
- void handle( DelayedRateConnectionEvent& ) override;
- void handle( DataLoggingRequest& ) override;
-
- size_t handles_test_event( SpikeEvent&, size_t ) override;
- size_t handles_test_event( CurrentEvent&, size_t ) override;
- size_t handles_test_event( DelayedRateConnectionEvent&, size_t ) override;
- size_t handles_test_event( DataLoggingRequest&, size_t ) override;
-
- void get_status( DictionaryDatum& ) const override;
- void set_status( const DictionaryDatum& ) override;
-
-private:
- void init_buffers_() override;
- void pre_run_hook() override;
-
- void update( Time const&, const long, const long ) override;
-
- double compute_gradient( std::vector< long >& presyn_isis,
- const long t_previous_update,
- const long t_previous_trigger_spike,
- const double kappa,
- const bool average_gradient ) override;
-
- long get_shift() const override;
- bool is_eprop_recurrent_node() const override;
-
- //! Compute the error signal based on the mean-squared error loss.
- void compute_error_signal_mean_squared_error( const long lag );
-
- //! Compute the error signal based on the cross-entropy loss.
- void compute_error_signal_cross_entropy( const long lag );
-
- //! Compute the error signal based on a loss function.
- void ( eprop_readout_bsshslm_2020::*compute_error_signal )( const long lag );
-
- //! Map for storing a static set of recordables.
- friend class RecordablesMap< eprop_readout_bsshslm_2020 >;
-
- //! Logger for universal data supporting the data logging request / reply mechanism. Populated with a recordables map.
- friend class UniversalDataLogger< eprop_readout_bsshslm_2020 >;
-
- //! Structure of parameters.
- struct Parameters_
- {
- //! Capacitance of the membrane (pF).
- double C_m_;
-
- //! Leak / resting membrane potential (mV).
- double E_L_;
-
- //! Constant external input current (pA).
- double I_e_;
-
- //! Loss function ["mean_squared_error", "cross_entropy"].
- std::string loss_;
-
- //! If True, the input spikes arrive at the end of the time step, if False at the beginning (determines PSC scale).
- bool regular_spike_arrival_;
-
- //! Time constant of the membrane (ms).
- double tau_m_;
-
- //! Absolute lower bound of the membrane voltage relative to the leak membrane potential (mV).
- double V_min_;
-
- //! Default constructor.
- Parameters_();
-
- //! Get the parameters and their values.
- void get( DictionaryDatum& ) const;
-
- //! Set the parameters and throw errors in case of invalid values.
- double set( const DictionaryDatum&, Node* );
- };
-
- //! Structure of state variables.
- struct State_
- {
- //! Error signal. Deviation between the readout and the target signal.
- double error_signal_;
-
- //! Readout signal. Leaky integrated spikes emitted by the recurrent network.
- double readout_signal_;
-
- //! Unnormalized readout signal. Readout signal not yet divided by the readout signals of other readout neurons.
- double readout_signal_unnorm_;
-
- //! Target / teacher signal that the network is supposed to learn.
- double target_signal_;
-
- //! Input current (pA).
- double i_in_;
-
- //! Membrane voltage relative to the leak membrane potential (mV).
- double v_m_;
-
- //! Binary input spike state variable - 1.0 if the neuron has spiked in the previous time step and 0.0 otherwise.
- double z_in_;
-
- //! Default constructor.
- State_();
-
- //! Get the state variables and their values.
- void get( DictionaryDatum&, const Parameters_& ) const;
-
- //! Set the state variables.
- void set( const DictionaryDatum&, const Parameters_&, double, Node* );
- };
-
- //! Structure of buffers.
- struct Buffers_
- {
- //! Default constructor.
- Buffers_( eprop_readout_bsshslm_2020& );
-
- //! Copy constructor.
- Buffers_( const Buffers_&, eprop_readout_bsshslm_2020& );
-
- //! Normalization rate of the readout signal. Sum of the readout signals of all readout neurons.
- double normalization_rate_;
-
- //! Buffer for incoming spikes.
- RingBuffer spikes_;
-
- //! Buffer for incoming currents.
- RingBuffer currents_;
-
- //! Logger for universal data.
- UniversalDataLogger< eprop_readout_bsshslm_2020 > logger_;
- };
-
- //! Structure of internal variables.
- struct Variables_
- {
- //! Propagator matrix entry for evolving the membrane voltage (mathematical symbol "kappa" in user documentation).
- double P_v_m_;
-
- //! Propagator matrix entry for evolving the incoming spike state variables (mathematical symbol "zeta" in user
- //! documentation).
- double P_z_in_;
-
- //! Propagator matrix entry for evolving the incoming currents.
- double P_i_in_;
-
- //! If the loss requires communication between the readout neurons and thus a buffer for the exchanged signals.
- bool signal_to_other_readouts_;
- };
-
- //! Minimal rate receptor type. Start with 1 to forbid port 0 and avoid accidental creation of connections with no
- //! receptor type set.
- static const size_t MIN_RATE_RECEPTOR = 1;
-
- //! Enumeration of rate receptor types.
- enum RateSynapseTypes
- {
- READOUT_SIG = MIN_RATE_RECEPTOR,
- TARGET_SIG,
- SUP_RATE_RECEPTOR
- };
-
- //! Get the current value of the membrane voltage.
- double
- get_v_m_() const
- {
- return S_.v_m_ + P_.E_L_;
- }
-
- //! Get the current value of the normalized readout signal.
- double
- get_readout_signal_() const
- {
- return S_.readout_signal_;
- }
-
- //! Get the current value of the unnormalized readout signal.
- double
- get_readout_signal_unnorm_() const
- {
- return S_.readout_signal_unnorm_;
- }
-
- //! Get the current value of the target signal.
- double
- get_target_signal_() const
- {
- return S_.target_signal_;
- }
-
- //! Get the current value of the error signal.
- double
- get_error_signal_() const
- {
- return S_.error_signal_;
- }
-
- // the order in which the structure instances are defined is important for speed
-
- //! Structure of parameters.
- Parameters_ P_;
-
- //! Structure of state variables.
- State_ S_;
-
- //! Structure of internal variables.
- Variables_ V_;
-
- //! Structure of buffers.
- Buffers_ B_;
-
- //! Map storing a static set of recordables.
- static RecordablesMap< eprop_readout_bsshslm_2020 > recordablesMap_;
-};
-
-inline long
-eprop_readout_bsshslm_2020::get_shift() const
-{
- return offset_gen_ + delay_in_rec_ + delay_rec_out_;
-}
-
-inline bool
-eprop_readout_bsshslm_2020::is_eprop_recurrent_node() const
-{
- return false;
-}
-
-inline size_t
-eprop_readout_bsshslm_2020::handles_test_event( SpikeEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_readout_bsshslm_2020::handles_test_event( CurrentEvent&, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return 0;
-}
-
-inline size_t
-eprop_readout_bsshslm_2020::handles_test_event( DelayedRateConnectionEvent& e, size_t receptor_type )
-{
- size_t step_rate_model_id = kernel().model_manager.get_node_model_id( "step_rate_generator" );
- size_t model_id = e.get_sender().get_model_id();
-
- if ( step_rate_model_id == model_id and receptor_type != TARGET_SIG )
- {
- throw IllegalConnection(
- "eprop_readout_bsshslm_2020 neurons expect a connection with a step_rate_generator node through receptor_type "
- "2." );
- }
-
- if ( receptor_type < MIN_RATE_RECEPTOR or receptor_type >= SUP_RATE_RECEPTOR )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return receptor_type;
-}
-
-inline size_t
-eprop_readout_bsshslm_2020::handles_test_event( DataLoggingRequest& dlr, size_t receptor_type )
-{
- if ( receptor_type != 0 )
- {
- throw UnknownReceptorType( receptor_type, get_name() );
- }
-
- return B_.logger_.connect_logging_device( dlr, recordablesMap_ );
-}
-
-inline void
-eprop_readout_bsshslm_2020::get_status( DictionaryDatum& d ) const
-{
- P_.get( d );
- S_.get( d, P_ );
- ( *d )[ names::recordables ] = recordablesMap_.get_list();
-
- DictionaryDatum receptor_dict_ = new Dictionary();
- ( *receptor_dict_ )[ names::readout_signal ] = READOUT_SIG;
- ( *receptor_dict_ )[ names::target_signal ] = TARGET_SIG;
-
- ( *d )[ names::receptor_types ] = receptor_dict_;
-}
-
-inline void
-eprop_readout_bsshslm_2020::set_status( const DictionaryDatum& d )
-{
- // temporary copies in case of errors
- Parameters_ ptmp = P_;
- State_ stmp = S_;
-
- // make sure that ptmp and stmp are consistent - throw BadProperty if not
- const double delta_EL = ptmp.set( d, this );
- stmp.set( d, ptmp, delta_EL, this );
-
- P_ = ptmp;
- S_ = stmp;
-}
-
-} // namespace nest
-
-#endif // EPROP_READOUT_BSSHSLM_2020_H
diff --git a/models/eprop_synapse_bsshslm_2020.cpp b/models/eprop_synapse_bsshslm_2020.cpp
deleted file mode 100644
index ceb1dba4d1..0000000000
--- a/models/eprop_synapse_bsshslm_2020.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * eprop_synapse_bsshslm_2020.cpp
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "eprop_synapse_bsshslm_2020.h"
-
-// nestkernel
-#include "nest_impl.h"
-
-namespace nest
-{
-
-void
-register_eprop_synapse_bsshslm_2020( const std::string& name )
-{
- register_connection_model< eprop_synapse_bsshslm_2020 >( name );
-}
-
-EpropSynapseBSSHSLM2020CommonProperties::EpropSynapseBSSHSLM2020CommonProperties()
- : CommonSynapseProperties()
- , average_gradient_( false )
- , optimizer_cp_( new WeightOptimizerCommonPropertiesGradientDescent() )
-{
-}
-
-EpropSynapseBSSHSLM2020CommonProperties::EpropSynapseBSSHSLM2020CommonProperties(
- const EpropSynapseBSSHSLM2020CommonProperties& cp )
- : CommonSynapseProperties( cp )
- , average_gradient_( cp.average_gradient_ )
- , optimizer_cp_( cp.optimizer_cp_->clone() )
-{
-}
-
-EpropSynapseBSSHSLM2020CommonProperties::~EpropSynapseBSSHSLM2020CommonProperties()
-{
- delete optimizer_cp_;
-}
-
-void
-EpropSynapseBSSHSLM2020CommonProperties::get_status( DictionaryDatum& d ) const
-{
- CommonSynapseProperties::get_status( d );
- def< bool >( d, names::average_gradient, average_gradient_ );
- def< std::string >( d, names::optimizer, optimizer_cp_->get_name() );
- DictionaryDatum optimizer_dict = new Dictionary;
- optimizer_cp_->get_status( optimizer_dict );
- ( *d )[ names::optimizer ] = optimizer_dict;
-}
-
-void
-EpropSynapseBSSHSLM2020CommonProperties::set_status( const DictionaryDatum& d, ConnectorModel& cm )
-{
- CommonSynapseProperties::set_status( d, cm );
- updateValue< bool >( d, names::average_gradient, average_gradient_ );
-
- if ( d->known( names::optimizer ) )
- {
- DictionaryDatum optimizer_dict = getValue< DictionaryDatum >( d->lookup( names::optimizer ) );
-
- std::string new_optimizer;
- const bool set_optimizer = updateValue< std::string >( optimizer_dict, names::type, new_optimizer );
- if ( set_optimizer and new_optimizer != optimizer_cp_->get_name() )
- {
- if ( kernel().connection_manager.get_num_connections( cm.get_syn_id() ) > 0 )
- {
- throw BadParameter( "The optimizer cannot be changed because synapses have been created." );
- }
-
- // TODO: selection here should be based on an optimizer registry and a factory
- // delete is in if/else if because we must delete only when we are sure that we have a valid optimizer
- if ( new_optimizer == "gradient_descent" )
- {
- delete optimizer_cp_;
- optimizer_cp_ = new WeightOptimizerCommonPropertiesGradientDescent();
- }
- else if ( new_optimizer == "adam" )
- {
- delete optimizer_cp_;
- optimizer_cp_ = new WeightOptimizerCommonPropertiesAdam();
- }
- else
- {
- throw BadProperty( "optimizer from [\"gradient_descent\", \"adam\"] required." );
- }
- }
-
- // we can now set the defaults on the new optimizer common properties
- optimizer_cp_->set_status( optimizer_dict );
- }
-}
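The `set_status()` logic above only allows the optimizer type to be exchanged while no synapses of this model exist; afterwards, only the parameters of the already selected optimizer can still be changed. A minimal PyNEST sketch of that call pattern, assuming a NEST build that still provides `eprop_synapse_bsshslm_2020`:

    import nest

    nest.ResetKernel()

    # Select the Adam optimizer before any e-prop synapses are created;
    # once connections exist, changing "type" again raises the BadParameter above.
    nest.SetDefaults(
        "eprop_synapse_bsshslm_2020",
        {"optimizer": {"type": "adam", "eta": 5e-3, "batch_size": 1}},
    )

    # Parameters of the selected optimizer (e.g. eta) can still be adjusted later.
    nest.SetDefaults("eprop_synapse_bsshslm_2020", {"optimizer": {"eta": 1e-3}})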
-
-template <>
-void
-Connector< eprop_synapse_bsshslm_2020< TargetIdentifierPtrRport > >::disable_connection( const size_t lcid )
-{
- assert( not C_[ lcid ].is_disabled() );
- C_[ lcid ].disable();
- C_[ lcid ].delete_optimizer();
-}
-
-template <>
-void
-Connector< eprop_synapse_bsshslm_2020< TargetIdentifierIndex > >::disable_connection( const size_t lcid )
-{
- assert( not C_[ lcid ].is_disabled() );
- C_[ lcid ].disable();
- C_[ lcid ].delete_optimizer();
-}
-
-
-template <>
-Connector< eprop_synapse_bsshslm_2020< TargetIdentifierPtrRport > >::~Connector()
-{
- for ( auto& c : C_ )
- {
- c.delete_optimizer();
- }
- C_.clear();
-}
-
-template <>
-Connector< eprop_synapse_bsshslm_2020< TargetIdentifierIndex > >::~Connector()
-{
- for ( auto& c : C_ )
- {
- c.delete_optimizer();
- }
- C_.clear();
-}
-
-
-} // namespace nest
diff --git a/models/eprop_synapse_bsshslm_2020.h b/models/eprop_synapse_bsshslm_2020.h
deleted file mode 100644
index 3ac9144827..0000000000
--- a/models/eprop_synapse_bsshslm_2020.h
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * eprop_synapse_bsshslm_2020.h
- *
- * This file is part of NEST.
- *
- * Copyright (C) 2004 The NEST Initiative
- *
- * NEST is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * NEST is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NEST. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef EPROP_SYNAPSE_BSSHSLM_2020_H
-#define EPROP_SYNAPSE_BSSHSLM_2020_H
-
-// nestkernel
-#include "connection.h"
-#include "connector_base.h"
-#include "eprop_archiving_node.h"
-#include "target_identifier.h"
-#include "weight_optimizer.h"
-
-namespace nest
-{
-
-/* BeginUserDocs: synapse, e-prop plasticity
-
-Short description
-+++++++++++++++++
-
-Synapse type for e-prop plasticity
-
-Description
-+++++++++++
-
-``eprop_synapse_bsshslm_2020`` is an implementation of a connector model to create synapses between postsynaptic
-neurons :math:`j` and presynaptic neurons :math:`i` for eligibility propagation (e-prop) plasticity.
-
-E-prop plasticity was originally introduced and implemented in TensorFlow in [1]_.
-
-The suffix ``_bsshslm_2020`` follows the NEST convention to indicate in the
-model name the paper that introduced it by the first letter of the authors' last
-names and the publication year.
-
-The e-prop synapse collects the presynaptic spikes needed for calculating the
-weight update. When it is time to update, it triggers the calculation of the
-gradient which is specific to the post-synaptic neuron and is thus defined there.
-
-Eventually, it optimizes the weight with the specified optimizer.
-
-E-prop synapses require archiving of continuous quantities. Therefore e-prop
-synapses can only be connected to neuron models that are capable of
-archiving. So far, compatible models are ``eprop_iaf_bsshslm_2020``,
-``eprop_iaf_adapt_bsshslm_2020``, and ``eprop_readout_bsshslm_2020``.
-
-For more information on e-prop plasticity, see the documentation on the other e-prop models:
-
- * :doc:`eprop_iaf_bsshslm_2020<../models/eprop_iaf_bsshslm_2020/>`
- * :doc:`eprop_iaf_adapt_bsshslm_2020<../models/eprop_iaf_adapt_bsshslm_2020/>`
- * :doc:`eprop_readout_bsshslm_2020<../models/eprop_readout_bsshslm_2020/>`
- * :doc:`eprop_learning_signal_connection_bsshslm_2020<../models/eprop_learning_signal_connection_bsshslm_2020/>`
-
-For more information on the optimizers, see the documentation of the weight optimizer:
-
- * :doc:`weight_optimizer<../models/weight_optimizer/>`
-
-Details on the event-based NEST implementation of e-prop can be found in [2]_.
-
-.. warning::
-
- This synaptic plasticity rule does not take
- :ref:`precise spike timing ` into
- account. When calculating the weight update, the precise spike time part
- of the timestamp is ignored.
-
-Parameters
-++++++++++
-
-The following parameters can be set in the status dictionary.
-
-==================== ======= =============== ========= ======================================================
-**Common e-prop synapse parameters**
--------------------------------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-==================== ======= =============== ========= ======================================================
-``average_gradient`` Boolean ``False`` If ``True``, average the gradient over the learning
- window
-``optimizer`` {} Dictionary of optimizer parameters
-==================== ======= =============== ========= ======================================================
-
-============= ==== ========================= ======= ================================
-**Individual synapse parameters**
--------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-============= ==== ========================= ======= ================================
-``delay`` ms :math:`d_{ji}` 1.0 Dendritic delay
-``weight`` pA :math:`W_{ji}` 1.0 Initial value of synaptic weight
-============= ==== ========================= ======= ================================
-
-================= ==== ========================= ======= ==============================
-**Individual e-prop synapse parameters**
----------------------------------------------------------------------------------------
-Parameter Unit Math equivalent Default Description
-================= ==== ========================= ======= ==============================
-``tau_m_readout`` ms :math:`\tau_\text{m,out}` 10.0 Time constant for low-pass
- filtering of eligibility trace
-================= ==== ========================= ======= ==============================
-
-Recordables
-+++++++++++
-
-The following variables can be recorded.
-
-================== ==== =============== ============= ===============
-**Synapse recordables**
----------------------------------------------------------------------
-State variable Unit Math equivalent Initial value Description
-================== ==== =============== ============= ===============
-``weight`` pA :math:`B_{jk}` 1.0 Synaptic weight
-================== ==== =============== ============= ===============
-
-Usage
-+++++
-
-This model can only be used in combination with the other e-prop models
-and the network architecture requires specific wiring, input, and output.
-The usage is demonstrated in several
-:doc:`supervised regression and classification tasks <../auto_examples/eprop_plasticity/index>`
-reproducing among others the original proof-of-concept tasks in [1]_.
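A hedged sketch of how the parameters documented above are passed from PyNEST (not part of the original documentation; it assumes e-prop-capable neurons such as ``eprop_iaf_bsshslm_2020`` on both sides):

    import nest

    nest.ResetKernel()

    pre = nest.Create("eprop_iaf_bsshslm_2020", 5)
    post = nest.Create("eprop_iaf_bsshslm_2020", 5)

    syn_spec = {
        "synapse_model": "eprop_synapse_bsshslm_2020",
        "delay": nest.resolution,  # e-prop synapses require a delay of one simulation step
        "weight": 1.0,             # pA, initial synaptic weight
        "tau_m_readout": 20.0,     # ms, time constant for low-pass filtering the eligibility trace
    }

    nest.Connect(pre, post, {"rule": "all_to_all", "allow_autapses": False}, syn_spec)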
-
-Transmits
-+++++++++
-
-SpikeEvent, DSSpikeEvent
-
-References
-++++++++++
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R,
- Maass W (2020). A solution to the learning dilemma for recurrent
- networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-See also
-++++++++
-
-Examples using this model
-+++++++++++++++++++++++++
-
-.. listexamples:: eprop_synapse_bsshslm_2020
-
-EndUserDocs */
-
-/**
- * @brief Base class implementing common properties for e-prop synapses.
- *
- * Base class implementing common properties for the e-prop synapse model according to Bellec et al. (2020).
- *
- * This class in particular manages a pointer to weight-optimizer common properties to support
- * exchanging the weight optimizer at runtime. Setting the weight-optimizer common properties
- * determines the WO type. It can only be exchanged as long as no synapses for the model exist.
- * The WO CP object is responsible for providing individual optimizer objects to synapses upon
- * connection.
- *
- * @see WeightOptimizerCommonProperties
- */
-class EpropSynapseBSSHSLM2020CommonProperties : public CommonSynapseProperties
-{
-public:
- // Default constructor.
- EpropSynapseBSSHSLM2020CommonProperties();
-
- //! Copy constructor.
- EpropSynapseBSSHSLM2020CommonProperties( const EpropSynapseBSSHSLM2020CommonProperties& );
-
- //! Assignment operator.
- EpropSynapseBSSHSLM2020CommonProperties& operator=( const EpropSynapseBSSHSLM2020CommonProperties& ) = delete;
-
- //! Destructor.
- ~EpropSynapseBSSHSLM2020CommonProperties();
-
- //! Get parameter dictionary.
- void get_status( DictionaryDatum& d ) const;
-
- //! Update values in parameter dictionary.
- void set_status( const DictionaryDatum& d, ConnectorModel& cm );
-
- //! If True, average the gradient over the learning window.
- bool average_gradient_;
-
- /**
- * Pointer to common properties object for weight optimizer.
- *
- * @note Must only be changed as long as no synapses of the model exist.
- */
- WeightOptimizerCommonProperties* optimizer_cp_;
-};
-
-//! Register the eprop synapse model.
-void register_eprop_synapse_bsshslm_2020( const std::string& name );
-
-/**
- * @brief Class implementing a synapse model for e-prop plasticity.
- *
- * Class implementing a synapse model for e-prop plasticity according to Bellec et al. (2020).
- *
- * @note Several aspects of this synapse are in place to reproduce the Tensorflow implementation of Bellec et al.
- * (2020).
- *
- * @note Each synapse has an `optimizer_` object managed through a `WeightOptimizer*`, pointing to an object of
- * a specific weight optimizer type. This optimizer, drawing also on parameters in the `WeightOptimizerCommonProperties`
- * accessible via the synapse model's `CommonProperties::optimizer_cp_` pointer, computes the weight update for the
- * neuron. The actual optimizer type can be selected at runtime (before creating any synapses) by exchanging the
- * `optimizer_cp_` pointer. Individual optimizer objects are created by `check_connection()` when a synapse is actually
- * created. It is important that the constructors of `eprop_synapse_bsshslm_2020` **do not** create optimizer objects
- * and that the destructor **does not** delete optimizer objects; this currently leads to bugs when using Boost's
- * `spreadsort()` due to use of the copy constructor where it should suffice to use the move constructor. Therefore,
- * `check_connection()` creates the optimizer object when it is needed and specializations of `Connector::~Connector()`
- * and `Connector::disable_connection()` delete it by calling `delete_optimizer()`. A disadvantage of this approach is
- * that the `default_connection` in the connector model does not have an optimizer object, whence it is not possible to
- * set default (initial) values for the per-synapse optimizer.
- *
- * @note If we can find a way to modify our co-sorting of source and target tables in Boost's `spreadsort()` to only use
- * move operations, it should be possible to create the individual optimizers in the copy constructor of
- * `eprop_synapse_bsshslm_2020` and to delete it in the destructor. The `default_connection` can then own an optimizer
- * and default values could be set on it.
- */
-template < typename targetidentifierT >
-class eprop_synapse_bsshslm_2020 : public Connection< targetidentifierT >
-{
-
-public:
- //! Type of the common synapse properties.
- typedef EpropSynapseBSSHSLM2020CommonProperties CommonPropertiesType;
-
- //! Type of the connection base.
- typedef Connection< targetidentifierT > ConnectionBase;
-
- /**
- * Properties of the connection model.
- *
- * @note Does not support LBL at present because we cannot properly cast GenericModel common props in that case.
- */
- static constexpr ConnectionModelProperties properties = ConnectionModelProperties::HAS_DELAY
- | ConnectionModelProperties::IS_PRIMARY | ConnectionModelProperties::REQUIRES_EPROP_ARCHIVING
- | ConnectionModelProperties::SUPPORTS_HPC;
-
- //! Default constructor.
- eprop_synapse_bsshslm_2020();
-
- //! Destructor
- ~eprop_synapse_bsshslm_2020();
-
- //! Parameterized copy constructor.
- eprop_synapse_bsshslm_2020( const eprop_synapse_bsshslm_2020& );
-
- //! Assignment operator
- eprop_synapse_bsshslm_2020& operator=( const eprop_synapse_bsshslm_2020& );
-
- //! Move constructor
- eprop_synapse_bsshslm_2020( eprop_synapse_bsshslm_2020&& );
-
- //! Move assignment operator
- eprop_synapse_bsshslm_2020& operator=( eprop_synapse_bsshslm_2020&& );
-
- using ConnectionBase::get_delay;
- using ConnectionBase::get_delay_steps;
- using ConnectionBase::get_rport;
- using ConnectionBase::get_target;
-
- //! Get parameter dictionary.
- void get_status( DictionaryDatum& d ) const;
-
- //! Update values in parameter dictionary.
- void set_status( const DictionaryDatum& d, ConnectorModel& cm );
-
- //! Send the spike event.
- bool send( Event& e, size_t thread, const EpropSynapseBSSHSLM2020CommonProperties& cp );
-
- //! Dummy node for testing the connection.
- class ConnTestDummyNode : public ConnTestDummyNodeBase
- {
- public:
- using ConnTestDummyNodeBase::handles_test_event;
-
- size_t
- handles_test_event( SpikeEvent&, size_t )
- {
- return invalid_port;
- }
-
- size_t
- handles_test_event( DSSpikeEvent&, size_t )
- {
- return invalid_port;
- }
- };
-
- /**
- * Check if the target accepts the event and receptor type requested by the sender.
- *
- * @note This sets the optimizer_ member.
- */
- void check_connection( Node& s, Node& t, size_t receptor_type, const CommonPropertiesType& cp );
-
- //! Set the synaptic weight to the provided value.
- void
- set_weight( const double w )
- {
- weight_ = w;
- }
-
- //! Delete optimizer
- void delete_optimizer();
-
-private:
- //! Synaptic weight.
- double weight_;
-
- //! The time step when the previous spike arrived.
- long t_spike_previous_;
-
- //! The time step when the previous e-prop update was.
- long t_previous_update_;
-
- //! The time step when the next e-prop update will be.
- long t_next_update_;
-
- //! The time step when the spike arrived that triggered the previous e-prop update.
- long t_previous_trigger_spike_;
-
- //! %Time constant for low-pass filtering the eligibility trace.
- double tau_m_readout_;
-
- //! Low-pass filter of the eligibility trace.
- double kappa_;
-
- //! If this connection is between two recurrent neurons.
- bool is_recurrent_to_recurrent_conn_;
-
- //! Vector of presynaptic inter-spike-intervals.
- std::vector< long > presyn_isis_;
-
- /**
- * Optimizer
- *
- * @note Pointer is set by check_connection() and deleted by delete_optimizer().
- */
- WeightOptimizer* optimizer_;
-};
-
-template < typename targetidentifierT >
-constexpr ConnectionModelProperties eprop_synapse_bsshslm_2020< targetidentifierT >::properties;
-
-// Explicitly declare specializations of Connector methods that need to do special things for eprop_synapse_bsshslm_2020
-template <>
-void Connector< eprop_synapse_bsshslm_2020< TargetIdentifierPtrRport > >::disable_connection( const size_t lcid );
-
-template <>
-void Connector< eprop_synapse_bsshslm_2020< TargetIdentifierIndex > >::disable_connection( const size_t lcid );
-
-template <>
-Connector< eprop_synapse_bsshslm_2020< TargetIdentifierPtrRport > >::~Connector();
-
-template <>
-Connector< eprop_synapse_bsshslm_2020< TargetIdentifierIndex > >::~Connector();
-
-
-template < typename targetidentifierT >
-eprop_synapse_bsshslm_2020< targetidentifierT >::eprop_synapse_bsshslm_2020()
- : ConnectionBase()
- , weight_( 1.0 )
- , t_spike_previous_( 0 )
- , t_previous_update_( 0 )
- , t_next_update_( 0 )
- , t_previous_trigger_spike_( 0 )
- , tau_m_readout_( 10.0 )
- , kappa_( std::exp( -Time::get_resolution().get_ms() / tau_m_readout_ ) )
- , is_recurrent_to_recurrent_conn_( false )
- , optimizer_( nullptr )
-{
-}
-
-template < typename targetidentifierT >
-eprop_synapse_bsshslm_2020< targetidentifierT >::~eprop_synapse_bsshslm_2020()
-{
-}
-
-// This copy constructor is used to create instances from prototypes.
-// Therefore, only parameter values are copied.
-template < typename targetidentifierT >
-eprop_synapse_bsshslm_2020< targetidentifierT >::eprop_synapse_bsshslm_2020( const eprop_synapse_bsshslm_2020& es )
- : ConnectionBase( es )
- , weight_( es.weight_ )
- , t_spike_previous_( 0 )
- , t_previous_update_( 0 )
- , t_next_update_( kernel().simulation_manager.get_eprop_update_interval().get_steps() )
- , t_previous_trigger_spike_( 0 )
- , tau_m_readout_( es.tau_m_readout_ )
- , kappa_( std::exp( -Time::get_resolution().get_ms() / tau_m_readout_ ) )
- , is_recurrent_to_recurrent_conn_( es.is_recurrent_to_recurrent_conn_ )
- , optimizer_( es.optimizer_ )
-{
-}
-
-// This copy assignment operator is used to write a connection into the connection array.
-template < typename targetidentifierT >
-eprop_synapse_bsshslm_2020< targetidentifierT >&
-eprop_synapse_bsshslm_2020< targetidentifierT >::operator=( const eprop_synapse_bsshslm_2020& es )
-{
- if ( this == &es )
- {
- return *this;
- }
-
- ConnectionBase::operator=( es );
-
- weight_ = es.weight_;
- t_spike_previous_ = es.t_spike_previous_;
- t_previous_update_ = es.t_previous_update_;
- t_next_update_ = es.t_next_update_;
- t_previous_trigger_spike_ = es.t_previous_trigger_spike_;
- tau_m_readout_ = es.tau_m_readout_;
- kappa_ = es.kappa_;
- is_recurrent_to_recurrent_conn_ = es.is_recurrent_to_recurrent_conn_;
- optimizer_ = es.optimizer_;
-
- return *this;
-}
-
-template < typename targetidentifierT >
-eprop_synapse_bsshslm_2020< targetidentifierT >::eprop_synapse_bsshslm_2020( eprop_synapse_bsshslm_2020&& es )
- : ConnectionBase( es )
- , weight_( es.weight_ )
- , t_spike_previous_( 0 )
- , t_previous_update_( 0 )
- , t_next_update_( es.t_next_update_ )
- , t_previous_trigger_spike_( 0 )
- , tau_m_readout_( es.tau_m_readout_ )
- , kappa_( es.kappa_ )
- , is_recurrent_to_recurrent_conn_( es.is_recurrent_to_recurrent_conn_ )
- , optimizer_( es.optimizer_ )
-{
- es.optimizer_ = nullptr;
-}
-
-// This move assignment operator is used to write a connection into the connection array.
-template < typename targetidentifierT >
-eprop_synapse_bsshslm_2020< targetidentifierT >&
-eprop_synapse_bsshslm_2020< targetidentifierT >::operator=( eprop_synapse_bsshslm_2020&& es )
-{
- if ( this == &es )
- {
- return *this;
- }
-
- ConnectionBase::operator=( es );
-
- weight_ = es.weight_;
- t_spike_previous_ = es.t_spike_previous_;
- t_previous_update_ = es.t_previous_update_;
- t_next_update_ = es.t_next_update_;
- t_previous_trigger_spike_ = es.t_previous_trigger_spike_;
- tau_m_readout_ = es.tau_m_readout_;
- kappa_ = es.kappa_;
- is_recurrent_to_recurrent_conn_ = es.is_recurrent_to_recurrent_conn_;
-
- optimizer_ = es.optimizer_;
- es.optimizer_ = nullptr;
-
- return *this;
-}
-
-template < typename targetidentifierT >
-inline void
-eprop_synapse_bsshslm_2020< targetidentifierT >::check_connection( Node& s,
- Node& t,
- size_t receptor_type,
- const CommonPropertiesType& cp )
-{
- // When we get here, delay has been set so we can check it.
- if ( get_delay_steps() != 1 )
- {
- throw IllegalConnection( "eprop synapses currently require a delay of one simulation step" );
- }
-
- ConnTestDummyNode dummy_target;
- ConnectionBase::check_connection_( dummy_target, s, t, receptor_type );
-
- t.register_eprop_connection();
-
- optimizer_ = cp.optimizer_cp_->get_optimizer();
-}
-
-template < typename targetidentifierT >
-inline void
-eprop_synapse_bsshslm_2020< targetidentifierT >::delete_optimizer()
-{
- delete optimizer_;
- // do not set to nullptr to allow detection of double deletion
-}
-
-template < typename targetidentifierT >
-bool
-eprop_synapse_bsshslm_2020< targetidentifierT >::send( Event& e,
- size_t thread,
- const EpropSynapseBSSHSLM2020CommonProperties& cp )
-{
- Node* target = get_target( thread );
- assert( target );
-
- const long t_spike = e.get_stamp().get_steps();
- const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps();
- const long shift = target->get_shift();
-
- const long interval_step = ( t_spike - shift ) % update_interval;
-
- if ( target->is_eprop_recurrent_node() and interval_step == 0 )
- {
- return false;
- }
-
- if ( t_previous_trigger_spike_ == 0 )
- {
- t_previous_trigger_spike_ = t_spike;
- }
-
- if ( t_spike_previous_ > 0 )
- {
- const long t = t_spike >= t_next_update_ + shift ? t_next_update_ + shift : t_spike;
- presyn_isis_.push_back( t - t_spike_previous_ );
- }
-
- if ( t_spike > t_next_update_ + shift )
- {
- const long idx_current_update = ( t_spike - shift ) / update_interval;
- const long t_current_update = idx_current_update * update_interval;
-
- target->write_update_to_history( t_previous_update_, t_current_update );
-
- const double gradient = target->compute_gradient(
- presyn_isis_, t_previous_update_, t_previous_trigger_spike_, kappa_, cp.average_gradient_ );
-
- weight_ = optimizer_->optimized_weight( *cp.optimizer_cp_, idx_current_update, gradient, weight_ );
-
- t_previous_update_ = t_current_update;
- t_next_update_ = t_current_update + update_interval;
-
- t_previous_trigger_spike_ = t_spike;
- }
-
- t_spike_previous_ = t_spike;
-
- e.set_receiver( *target );
- e.set_weight( weight_ );
- e.set_delay_steps( get_delay_steps() );
- e.set_rport( get_rport() );
- e();
-
- return true;
-}
-
-template < typename targetidentifierT >
-void
-eprop_synapse_bsshslm_2020< targetidentifierT >::get_status( DictionaryDatum& d ) const
-{
- ConnectionBase::get_status( d );
- def< double >( d, names::weight, weight_ );
- def< double >( d, names::tau_m_readout, tau_m_readout_ );
- def< long >( d, names::size_of, sizeof( *this ) );
-
- DictionaryDatum optimizer_dict = new Dictionary();
-
- // The default_connection_ has no optimizer, therefore we need to protect it
- if ( optimizer_ )
- {
- optimizer_->get_status( optimizer_dict );
- ( *d )[ names::optimizer ] = optimizer_dict;
- }
-}
-
-template < typename targetidentifierT >
-void
-eprop_synapse_bsshslm_2020< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm )
-{
- ConnectionBase::set_status( d, cm );
- if ( d->known( names::optimizer ) and optimizer_ )
- {
- optimizer_->set_status( getValue< DictionaryDatum >( d->lookup( names::optimizer ) ) );
- }
-
- updateValue< double >( d, names::weight, weight_ );
-
- if ( updateValue< double >( d, names::tau_m_readout, tau_m_readout_ ) )
- {
- if ( tau_m_readout_ <= 0 )
- {
- throw BadProperty( "Membrane time constant of readout neuron tau_m_readout > 0 required." );
- }
- kappa_ = std::exp( -Time::get_resolution().get_ms() / tau_m_readout_ );
- }
-
- const auto& gcm =
- dynamic_cast< const GenericConnectorModel< eprop_synapse_bsshslm_2020< targetidentifierT > >& >( cm );
- const CommonPropertiesType& epcp = gcm.get_common_properties();
- if ( weight_ < epcp.optimizer_cp_->get_Wmin() )
- {
- throw BadProperty( "Minimal weight Wmin ≤ weight required." );
- }
-
- if ( weight_ > epcp.optimizer_cp_->get_Wmax() )
- {
- throw BadProperty( "weight ≤ maximal weight Wmax required." );
- }
-}
-
-} // namespace nest
-
-#endif // EPROP_SYNAPSE_BSSHSLM_2020_H
diff --git a/models/weight_optimizer.cpp b/models/weight_optimizer.cpp
index f38e81fc9e..560704c302 100644
--- a/models/weight_optimizer.cpp
+++ b/models/weight_optimizer.cpp
@@ -56,7 +56,7 @@ WeightOptimizerCommonProperties::WeightOptimizerCommonProperties( const WeightOp
void
WeightOptimizerCommonProperties::get_status( DictionaryDatum& d ) const
{
- def< std::string >( d, names::optimizer, get_name() );
+ def< std::string >( d, names::type, get_name() );
def< long >( d, names::batch_size, batch_size_ );
def< double >( d, names::eta, eta_ );
def< double >( d, names::Wmin, Wmin_ );
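With this rename, the optimizer type is reported under the ``type`` key of the nested optimizer dictionary instead of reusing the top-level ``optimizer`` name. A quick PyNEST check (a sketch; it assumes a build containing the renamed ``eprop_synapse`` model and that its defaults expose the optimizer dictionary in the same way as before):

    import nest

    nest.ResetKernel()
    optimizer_defaults = nest.GetDefaults("eprop_synapse")["optimizer"]
    print(optimizer_defaults["type"])  # e.g. "gradient_descent" for the default optimizer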
diff --git a/models/weight_optimizer.h b/models/weight_optimizer.h
index 3bed762225..27608e7c10 100644
--- a/models/weight_optimizer.h
+++ b/models/weight_optimizer.h
@@ -144,7 +144,7 @@ See also
Examples using this model
+++++++++++++++++++++++++
-.. listexamples:: eprop_synapse_bsshslm_2020
+.. listexamples:: eprop_synapse
EndUserDocs */
diff --git a/modelsets/eprop b/modelsets/eprop
index 0a9a5ed696..2d8af6f25d 100644
--- a/modelsets/eprop
+++ b/modelsets/eprop
@@ -12,12 +12,6 @@ step_rate_generator
rate_connection_delayed
static_synapse
-eprop_iaf_bsshslm_2020
-eprop_iaf_adapt_bsshslm_2020
-eprop_readout_bsshslm_2020
-eprop_synapse_bsshslm_2020
-eprop_learning_signal_connection_bsshslm_2020
-
eprop_iaf
eprop_iaf_adapt
eprop_iaf_psc_delta
diff --git a/modelsets/full b/modelsets/full
index 46d6c6ee5a..bee574382b 100644
--- a/modelsets/full
+++ b/modelsets/full
@@ -21,11 +21,6 @@ correlomatrix_detector
correlospinmatrix_detector
dc_generator
diffusion_connection
-eprop_iaf_bsshslm_2020
-eprop_iaf_adapt_bsshslm_2020
-eprop_readout_bsshslm_2020
-eprop_synapse_bsshslm_2020
-eprop_learning_signal_connection_bsshslm_2020
eprop_iaf
eprop_iaf_adapt
eprop_iaf_psc_delta
diff --git a/nestkernel/connector_base.h b/nestkernel/connector_base.h
index 7cdd91b1e8..91006e0307 100644
--- a/nestkernel/connector_base.h
+++ b/nestkernel/connector_base.h
@@ -59,7 +59,7 @@ namespace nest
*
* @note If any member functions need to do something special for a given connection type,
* declare specializations in the corresponding header file and define them in the corresponding
- * source file. For an example, see `eprop_synapse_bsshslm_2020`.
+ * source file. For an example, see `eprop_synapse`.
*/
class ConnectorBase
{
diff --git a/nestkernel/eprop_archiving_node.h b/nestkernel/eprop_archiving_node.h
index 04cfc2d3ba..894cc33da4 100644
--- a/nestkernel/eprop_archiving_node.h
+++ b/nestkernel/eprop_archiving_node.h
@@ -89,7 +89,7 @@ class EpropArchivingNode : public Node
typename std::vector< HistEntryT >::iterator get_eprop_history( const long time_step );
/**
- * @brief Erases the used eprop history for `bsshslm_2020` models.
+ * @brief Erases the used eprop history for `bsshslm_2020` models. ETODO: Explain in more abstract terms
*
* Erases e-prop history entries for update intervals during which no spikes were sent to the target neuron,
* and any entries older than the earliest time stamp required by the first update in the history.
@@ -114,7 +114,8 @@ class EpropArchivingNode : public Node
double get_eprop_history_duration() const;
protected:
- //! Returns correct shift for history depending on whether it is a normal or a bsshslm_2020 model.
+ //! Returns correct shift for history depending on whether it is a normal or a bsshslm_2020 model. ETODO: Explain in
+ //! more abstract terms
virtual long model_dependent_history_shift_() const = 0;
/**
diff --git a/nestkernel/nest_names.cpp b/nestkernel/nest_names.cpp
index 2b1d98435e..d9e1d619be 100644
--- a/nestkernel/nest_names.cpp
+++ b/nestkernel/nest_names.cpp
@@ -74,7 +74,6 @@ const Name asc_decay( "asc_decay" );
const Name asc_init( "asc_init" );
const Name asc_r( "asc_r" );
const Name available( "available" );
-const Name average_gradient( "average_gradient" );
const Name azimuth_angle( "azimuth_angle" );
const Name b( "b" );
@@ -305,7 +304,6 @@ const Name local_spike_counter( "local_spike_counter" );
const Name lookuptable_0( "lookuptable_0" );
const Name lookuptable_1( "lookuptable_1" );
const Name lookuptable_2( "lookuptable_2" );
-const Name loss( "loss" );
const Name lower_left( "lower_left" );
const Name m( "m" );
@@ -423,7 +421,6 @@ const Name rate_values( "rate_values" );
const Name ratio_ER_cyt( "ratio_ER_cyt" );
const Name readout_cycle_duration( "readout_cycle_duration" );
const Name readout_signal( "readout_signal" );
-const Name readout_signal_unnorm( "readout_signal_unnorm" );
const Name receptor_idx( "receptor_idx" );
const Name receptor_type( "receptor_type" );
const Name receptor_types( "receptor_types" );
@@ -439,7 +436,6 @@ const Name rectify_rate( "rectify_rate" );
const Name recv_buffer_size_secondary_events( "recv_buffer_size_secondary_events" );
const Name refractory_input( "refractory_input" );
const Name registered( "registered" );
-const Name regular_spike_arrival( "regular_spike_arrival" );
const Name relative_amplitude( "relative_amplitude" );
const Name requires_symmetric( "requires_symmetric" );
const Name reset_pattern( "reset_pattern" );
@@ -549,7 +545,6 @@ const Name tau_max( "tau_max" );
const Name tau_minus( "tau_minus" );
const Name tau_minus_stdp( "tau_minus_stdp" );
const Name tau_minus_triplet( "tau_minus_triplet" );
-const Name tau_m_readout( "tau_m_readout" );
const Name tau_n( "tau_n" );
const Name tau_plus( "tau_plus" );
const Name tau_plus_triplet( "tau_plus_triplet" );
diff --git a/nestkernel/nest_names.h b/nestkernel/nest_names.h
index 44a75f62d0..4aa720ff7b 100644
--- a/nestkernel/nest_names.h
+++ b/nestkernel/nest_names.h
@@ -100,7 +100,6 @@ extern const Name asc_decay;
extern const Name asc_init;
extern const Name asc_r;
extern const Name available;
-extern const Name average_gradient;
extern const Name azimuth_angle;
extern const Name b;
@@ -333,7 +332,6 @@ extern const Name local_spike_counter;
extern const Name lookuptable_0;
extern const Name lookuptable_1;
extern const Name lookuptable_2;
-extern const Name loss;
extern const Name lower_left;
extern const Name m;
@@ -451,7 +449,6 @@ extern const Name rate_values;
extern const Name ratio_ER_cyt;
extern const Name readout_cycle_duration;
extern const Name readout_signal;
-extern const Name readout_signal_unnorm;
extern const Name receptor_idx;
extern const Name receptor_type;
extern const Name receptor_types;
@@ -467,7 +464,6 @@ extern const Name rectify_rate;
extern const Name recv_buffer_size_secondary_events;
extern const Name refractory_input;
extern const Name registered;
-extern const Name regular_spike_arrival;
extern const Name relative_amplitude;
extern const Name requires_symmetric;
extern const Name reset_pattern;
@@ -575,7 +571,6 @@ extern const Name tau_max;
extern const Name tau_minus;
extern const Name tau_minus_stdp;
extern const Name tau_minus_triplet;
-extern const Name tau_m_readout;
extern const Name tau_n;
extern const Name tau_P;
extern const Name tau_plus;
diff --git a/nestkernel/node.cpp b/nestkernel/node.cpp
index 6f54cc1075..99a464c339 100644
--- a/nestkernel/node.cpp
+++ b/nestkernel/node.cpp
@@ -447,7 +447,7 @@ Node::handles_test_event( LearningSignalConnectionEvent&, size_t )
{
throw IllegalConnection(
"The target node cannot handle learning signal events or"
- " synapse is not of type eprop_learning_signal_connection_bsshslm_2020." );
+ " synapse is not of type eprop_learning_signal_connection." );
return invalid_port;
}
diff --git a/nestkernel/node.h b/nestkernel/node.h
index 5c41113f43..4da30450dc 100644
--- a/nestkernel/node.h
+++ b/nestkernel/node.h
@@ -488,6 +488,7 @@ class Node
*
* The time for the first entry of the update history is set to the neuron specific shift for `bsshslm_2020`
* models and to the negative transmission delay from the recurrent to the output layer otherwise.
+ * ETODO: Explain in more abstract terms
*
* @throws IllegalConnection
*/
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.png b/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.png
deleted file mode 100644
index 5899ac8a33..0000000000
Binary files a/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.png and /dev/null differ
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py b/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py
deleted file mode 100644
index fc734ed8d4..0000000000
--- a/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py
+++ /dev/null
@@ -1,945 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
-
-r"""
-Tutorial on learning to accumulate evidence with e-prop after Bellec et al. (2020)
-----------------------------------------------------------------------------------
-
-Training a classification model using supervised e-prop plasticity to accumulate evidence.
-
-Description
-~~~~~~~~~~~
-
-This script demonstrates supervised learning of a classification task with the eligibility propagation (e-prop)
-plasticity mechanism by Bellec et al. [1]_.
-
-This type of learning is demonstrated in the proof-of-concept task in [1]_. We based this script on their
-TensorFlow script given in [2]_.
-
-The task, a so-called evidence accumulation task, is inspired by behavioral tasks, where a lab animal (e.g., a
-mouse) runs along a track, gets cues on the left and right, and has to decide at the end of the track between
-taking a left and a right turn, of which only one is correct. After a number of iterations, the animal is able
-to infer the underlying rationale of the task. Here, the solution is to turn to the side on which more cues
-were presented.
-
-.. image:: eprop_supervised_classification_evidence-accumulation_bsshslm_2020.png
- :width: 70 %
- :alt: Schematic of network architecture. Same as Figure 1 in the code.
- :align: center
-
-Learning in the neural network model is achieved by optimizing the connection weights with e-prop plasticity.
-This plasticity rule requires a specific network architecture depicted in Figure 1. The neural network model
-consists of a recurrent network that receives input from spike generators and projects onto two readout
-neurons - one for the left and one for the right turn at the end. The input neuron population consists of four
-groups: one group providing background noise of a specific rate for some base activity throughout the
-experiment, one group providing the input spikes of the left cues and one group providing them for the right
-cues, and a last group defining the recall window, in which the network has to decide. Each readout neuron
-compares the network signal :math:`\pi_k` with the teacher target signal :math:`\pi_k^*`, which it receives from
-a rate generator. Since the decision is at the end and all the cues are relevant, the network has to keep the
-cues in memory. Additional adaptive neurons in the network enable this memory. The network's training error is
-assessed by employing a cross-entropy error loss.
-
-Details on the event-based NEST implementation of e-prop can be found in [3]_.
-
-References
-~~~~~~~~~~
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R, Maass W (2020). A solution to the
- learning dilemma for recurrent networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] https://github.com/IGITUGraz/eligibility_propagation/blob/master/Figure_3_and_S7_e_prop_tutorials/tutorial_evidence_accumulation_with_alif.py
-
-.. [3] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-""" # pylint: disable=line-too-long # noqa: E501
-
-# %% ###########################################################################################################
-# Import libraries
-# ~~~~~~~~~~~~~~~~
-# We begin by importing all libraries required for the simulation, analysis, and visualization.
-
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import nest
-import numpy as np
-from cycler import cycler
-from IPython.display import Image
-
-# %% ###########################################################################################################
-# Schematic of network architecture
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# This figure, identical to the one in the description, shows the required network architecture in the center,
-# the input and output of the classification task above, and lists of the required NEST device, neuron, and
-# synapse models below. The connections that must be established are numbered 1 to 7.
-
-try:
- Image(filename="./eprop_supervised_classification_evidence-accumulation_bsshslm_2020.png")
-except Exception:
- pass
-
-# %% ###########################################################################################################
-# Setup
-# ~~~~~
-
-# %% ###########################################################################################################
-# Initialize random generator
-# ...........................
-# We seed the numpy random generator, which will generate random initial weights as well as random input and
-# output.
-
-rng_seed = 1 # numpy random seed
-np.random.seed(rng_seed) # fix numpy random seed
-
-# %% ###########################################################################################################
-# Define timing of task
-# .....................
-# The task's temporal structure is then defined, once as time steps and once as durations in milliseconds.
-# Using a batch size larger than one aids the network in generalization, facilitating the solution to this task.
-# The original number of iterations requires distributed computing. Increasing the number of iterations
-enhances learning performance up to the point where overfitting occurs. If early stopping is enabled, the
-classification error is evaluated at regular intervals and training stops as soon as the error reaches the
-selected stopping criterion. After training, the performance can be tested over a number of test iterations.
-
-batch_size = 32 # batch size, 64 in reference [2], 32 in the README to reference [2]
-n_iter_train = 50 # number of training iterations, 2000 in reference [2]
-n_iter_test = 4 # number of iterations for final test
-do_early_stopping = True # if True, stop training as soon as stop criterion fulfilled
-n_iter_validate_every = 10 # number of training iterations before validation
-n_iter_early_stop = 8 # number of iterations to average over to evaluate early stopping condition
-stop_crit = 0.07 # error value corresponding to stop criterion for early stopping
-
-input = {
- "n_symbols": 4, # number of input populations, e.g. 4 = left, right, recall, noise
- "n_cues": 7, # number of cues given before decision
- "prob_group": 0.3, # probability with which one input group is present
- "spike_prob": 0.04, # spike probability of frozen input noise
-}
-
-steps = {
- "cue": 100, # time steps in one cue presentation
- "spacing": 50, # time steps of break between two cues
- "bg_noise": 1050, # time steps of background noise
- "recall": 150, # time steps of recall
-}
-
-steps["cues"] = input["n_cues"] * (steps["cue"] + steps["spacing"]) # time steps of all cues
-steps["sequence"] = steps["cues"] + steps["bg_noise"] + steps["recall"] # time steps of one full sequence
-steps["learning_window"] = steps["recall"] # time steps of window with non-zero learning signals
-
-steps.update(
- {
- "offset_gen": 1, # offset since generator signals start from time step 1
- "delay_in_rec": 1, # connection delay between input and recurrent neurons
- "delay_rec_out": 1, # connection delay between recurrent and output neurons
- "delay_out_norm": 1, # connection delay between output neurons for normalization
- "extension_sim": 1, # extra time step to close right-open simulation time interval in Simulate()
- "final_update": 3, # extra time steps to update all synapses at the end of task
- }
-)
-
-steps["delays"] = steps["delay_in_rec"] + steps["delay_rec_out"] + steps["delay_out_norm"] # time steps of delays
-
-steps["total_offset"] = steps["offset_gen"] + steps["delays"] # time steps of total offset
-
-duration = {"step": 1.0} # ms, temporal resolution of the simulation
-
-duration.update({key: value * duration["step"] for key, value in steps.items()}) # ms, durations
-
-# %% ###########################################################################################################
-# Set up simulation
-# .................
-# As last step of the setup, we reset the NEST kernel to remove all existing NEST simulation settings and
-# objects and set some NEST kernel parameters, some of which are e-prop-related.
-
-params_setup = {
- "eprop_learning_window": duration["learning_window"],
- "eprop_reset_neurons_on_update": True, # if True, reset dynamic variables at start of each update interval
- "eprop_update_interval": duration["sequence"], # ms, time interval for updating the synaptic weights
- "print_time": False, # if True, print time progress bar during simulation, set False if run as code cell
- "resolution": duration["step"],
- "total_num_virtual_procs": 1, # number of virtual processes, set in case of distributed computing
-}
-
-####################
-
-nest.ResetKernel()
-nest.set(**params_setup)
-nest.set_verbosity("M_FATAL")
-
-# %% ###########################################################################################################
-# Create neurons
-# ~~~~~~~~~~~~~~
-# We proceed by creating a certain number of input, recurrent, and readout neurons and setting their parameters.
-# Additionally, we already create an input spike generator and an output target rate generator, which we will
-# configure later. Within the recurrent network, alongside a population of regular neurons, we introduce a
-# population of adaptive neurons, to enhance the network's memory retention.
-
-n_in = 40 # number of input neurons
-n_ad = 50 # number of adaptive neurons
-n_reg = 50 # number of regular neurons
-n_rec = n_ad + n_reg # number of recurrent neurons
-n_out = 2 # number of readout neurons
-
-params_nrn_out = {
- "C_m": 1.0, # pF, membrane capacitance - takes effect only if neurons get current input (here not the case)
- "E_L": 0.0, # mV, leak / resting membrane potential
- "I_e": 0.0, # pA, external current input
- "loss": "cross_entropy", # loss function
- "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning
- "tau_m": 20.0, # ms, membrane time constant
- "V_m": 0.0, # mV, initial value of the membrane voltage
-}
-
-params_nrn_reg = {
- "beta": 1.0, # width scaling of the pseudo-derivative
- "C_m": 1.0,
- "c_reg": 300.0, # coefficient of firing rate regularization - 2*learning_window*(TF c_reg) for technical reasons
- "E_L": 0.0,
- "f_target": 10.0, # spikes/s, target firing rate for firing rate regularization
- "gamma": 0.3, # height scaling of the pseudo-derivative
- "I_e": 0.0,
- "regular_spike_arrival": True,
- "surrogate_gradient_function": "piecewise_linear", # surrogate gradient / pseudo-derivative function
- "t_ref": 5.0, # ms, duration of refractory period
- "tau_m": 20.0,
- "V_m": 0.0,
- "V_th": 0.6, # mV, spike threshold membrane voltage
-}
-
-# factors from the original pseudo-derivative definition are incorporated into the parameters
-params_nrn_reg["gamma"] /= params_nrn_reg["V_th"]
-params_nrn_reg["beta"] /= np.abs(params_nrn_reg["V_th"]) # prefactor is inside abs in the original definition
-
-params_nrn_ad = {
- "beta": 1.0,
- "adapt_tau": 2000.0, # ms, time constant of adaptive threshold
- "adaptation": 0.0, # initial value of the spike threshold adaptation
- "C_m": 1.0,
- "c_reg": 300.0,
- "E_L": 0.0,
- "f_target": 10.0,
- "gamma": 0.3,
- "I_e": 0.0,
- "regular_spike_arrival": True,
- "surrogate_gradient_function": "piecewise_linear",
- "t_ref": 5.0,
- "tau_m": 20.0,
- "V_m": 0.0,
- "V_th": 0.6,
-}
-
-params_nrn_ad["gamma"] /= params_nrn_ad["V_th"]
-params_nrn_ad["beta"] /= np.abs(params_nrn_ad["V_th"])
-
-params_nrn_ad["adapt_beta"] = 1.7 * (
- (1.0 - np.exp(-duration["step"] / params_nrn_ad["adapt_tau"]))
- / (1.0 - np.exp(-duration["step"] / params_nrn_ad["tau_m"]))
-) # prefactor of adaptive threshold
-
-####################
-
-# Intermediate parrot neurons required between input spike generators and recurrent neurons,
-# since devices cannot establish plastic synapses for technical reasons
-
-gen_spk_in = nest.Create("spike_generator", n_in)
-nrns_in = nest.Create("parrot_neuron", n_in)
-
-# The suffix _bsshslm_2020 follows the NEST convention to indicate in the model name the paper
-# that introduced it by the first letter of the authors' last names and the publication year.
-
-nrns_reg = nest.Create("eprop_iaf_bsshslm_2020", n_reg, params_nrn_reg)
-nrns_ad = nest.Create("eprop_iaf_adapt_bsshslm_2020", n_ad, params_nrn_ad)
-nrns_out = nest.Create("eprop_readout_bsshslm_2020", n_out, params_nrn_out)
-gen_rate_target = nest.Create("step_rate_generator", n_out)
-
-nrns_rec = nrns_reg + nrns_ad
-
-# %% ###########################################################################################################
-# Create recorders
-# ~~~~~~~~~~~~~~~~
-# We also create recorders, which, while not required for the training, will allow us to track various dynamic
-# variables of the neurons, spikes, and changes in synaptic weights. To save computing time and memory, the
-# recorders, the recorded variables, neurons, and synapses can be limited to the ones relevant to the
-# experiment, and the recording interval can be increased (see the documentation on the specific recorders). By
-# default, recordings are stored in memory but can also be written to file.
-
-n_record = 1 # number of neurons per type to record dynamic variables from - this script requires n_record >= 1
-n_record_w = 5 # number of senders and targets to record weights from - this script requires n_record_w >=1
-
-if n_record == 0 or n_record_w == 0:
- raise ValueError("n_record and n_record_w >= 1 required")
-
-params_mm_reg = {
- "interval": duration["step"], # interval between two recorded time points
- "record_from": ["V_m", "surrogate_gradient", "learning_signal"], # dynamic variables to record
- "start": duration["offset_gen"] + duration["delay_in_rec"], # start time of recording
- "label": "multimeter_reg",
-}
-
-params_mm_ad = {
- "interval": duration["step"],
- "record_from": params_mm_reg["record_from"] + ["V_th_adapt", "adaptation"],
- "start": duration["offset_gen"] + duration["delay_in_rec"],
- "label": "multimeter_ad",
-}
-
-params_mm_out = {
- "interval": duration["step"],
- "record_from": ["V_m", "readout_signal", "readout_signal_unnorm", "target_signal", "error_signal"],
- "start": duration["total_offset"],
- "label": "multimeter_out",
-}
-
-params_wr = {
- "senders": nrns_in[:n_record_w] + nrns_rec[:n_record_w], # limit senders to subsample weights to record
- "targets": nrns_rec[:n_record_w] + nrns_out, # limit targets to subsample weights to record from
- "start": duration["total_offset"],
- "label": "weight_recorder",
-}
-
-params_sr_in = {
- "start": duration["offset_gen"],
- "label": "spike_recorder_in",
-}
-
-params_sr_reg = {
- "start": duration["offset_gen"],
- "label": "spike_recorder_reg",
-}
-
-params_sr_ad = {
- "start": duration["offset_gen"],
- "label": "spike_recorder_ad",
-}
-
-####################
-
-mm_reg = nest.Create("multimeter", params_mm_reg)
-mm_ad = nest.Create("multimeter", params_mm_ad)
-mm_out = nest.Create("multimeter", params_mm_out)
-sr_in = nest.Create("spike_recorder", params_sr_in)
-sr_reg = nest.Create("spike_recorder", params_sr_reg)
-sr_ad = nest.Create("spike_recorder", params_sr_ad)
-wr = nest.Create("weight_recorder", params_wr)
-
-nrns_reg_record = nrns_reg[:n_record]
-nrns_ad_record = nrns_ad[:n_record]
-
-# %% ###########################################################################################################
-# Create connections
-# ~~~~~~~~~~~~~~~~~~
-# Now, we define the connectivity and set up the synaptic parameters, with the synaptic weights drawn from
-# normal distributions. After these preparations, we establish the enumerated connections of the core network,
-# as well as additional connections to the recorders.
-
-params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
-params_conn_one_to_one = {"rule": "one_to_one"}
-
-
-def calculate_glorot_dist(fan_in, fan_out):
- glorot_scale = 1.0 / max(1.0, (fan_in + fan_out) / 2.0)
- glorot_limit = np.sqrt(3.0 * glorot_scale)
- glorot_distribution = np.random.uniform(low=-glorot_limit, high=glorot_limit, size=(fan_in, fan_out))
- return glorot_distribution
-
-
-dtype_weights = np.float32 # data type of weights - for reproducing TF results set to np.float32
-weights_in_rec = np.array(np.random.randn(n_in, n_rec).T / np.sqrt(n_in), dtype=dtype_weights)
-weights_rec_rec = np.array(np.random.randn(n_rec, n_rec).T / np.sqrt(n_rec), dtype=dtype_weights)
-np.fill_diagonal(weights_rec_rec, 0.0) # since no autapses set corresponding weights to zero
-weights_rec_out = np.array(calculate_glorot_dist(n_rec, n_out).T, dtype=dtype_weights)
-weights_out_rec = np.array(np.random.randn(n_rec, n_out), dtype=dtype_weights)
-
-params_common_syn_eprop = {
- "optimizer": {
- "type": "adam", # algorithm to optimize the weights
- "batch_size": batch_size,
- "beta_1": 0.9, # exponential decay rate for 1st moment estimate of Adam optimizer
- "beta_2": 0.999, # exponential decay rate for 2nd moment raw estimate of Adam optimizer
- "epsilon": 1e-8, # small numerical stabilization constant of Adam optimizer
- "Wmin": -100.0, # pA, minimal limit of the synaptic weights
- "Wmax": 100.0, # pA, maximal limit of the synaptic weights
- },
- "average_gradient": True, # if True, average the gradient over the learning window
- "weight_recorder": wr,
-}
-
-eta_test = 0.0 # learning rate for test phase
-eta_train = 5e-3 # learning rate for training phase
-
-params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"], # ms, dendritic delay
- "tau_m_readout": params_nrn_out["tau_m"], # ms, for technical reasons pass readout neuron membrane time constant
-}
-
-params_syn_in = params_syn_base.copy()
-params_syn_in["weight"] = weights_in_rec # pA, initial values for the synaptic weights
-
-params_syn_rec = params_syn_base.copy()
-params_syn_rec["weight"] = weights_rec_rec
-
-params_syn_out = params_syn_base.copy()
-params_syn_out["weight"] = weights_rec_out
-
-params_syn_feedback = {
- "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
- "delay": duration["step"],
- "weight": weights_out_rec,
-}
-
-params_syn_out_out = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 1, # receptor type of readout neuron to receive other readout neuron's signals for softmax
- "weight": 1.0, # pA, weight 1.0 required for correct softmax computation for technical reasons
-}
-
-params_syn_rate_target = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 2, # receptor type over which readout neuron receives target signal
-}
-
-params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
-}
-
-params_init_optimizer = {
- "optimizer": {
- "m": 0.0, # initial 1st moment estimate m of Adam optimizer
- "v": 0.0, # initial 2nd moment raw estimate v of Adam optimizer
- }
-}
-
-####################
-
-nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
-nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static) # connection 1
-nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in) # connection 2
-nest.Connect(nrns_rec, nrns_rec, params_conn_all_to_all, params_syn_rec) # connection 3
-nest.Connect(nrns_rec, nrns_out, params_conn_all_to_all, params_syn_out) # connection 4
-nest.Connect(nrns_out, nrns_rec, params_conn_all_to_all, params_syn_feedback) # connection 5
-nest.Connect(gen_rate_target, nrns_out, params_conn_one_to_one, params_syn_rate_target) # connection 6
-nest.Connect(nrns_out, nrns_out, params_conn_all_to_all, params_syn_out_out) # connection 7
-
-nest.Connect(nrns_in, sr_in, params_conn_all_to_all, params_syn_static)
-nest.Connect(nrns_reg, sr_reg, params_conn_all_to_all, params_syn_static)
-nest.Connect(nrns_ad, sr_ad, params_conn_all_to_all, params_syn_static)
-
-nest.Connect(mm_reg, nrns_reg_record, params_conn_all_to_all, params_syn_static)
-nest.Connect(mm_ad, nrns_ad_record, params_conn_all_to_all, params_syn_static)
-nest.Connect(mm_out, nrns_out, params_conn_all_to_all, params_syn_static)
-
-# After creating the connections, we can individually initialize the optimizer's
-# dynamic variables for single synapses (here, as an example, for two connections).
-
-nest.GetConnections(nrns_rec[0], nrns_rec[1:3]).set([params_init_optimizer] * 2)
-
-# %% ###########################################################################################################
-# Create input and output
-# ~~~~~~~~~~~~~~~~~~~~~~~
-# We generate the input as four neuron populations, two producing the left and right cues, respectively, one the
-# recall signal and one the background input throughout the task. The sequence of cues is drawn with a
-# probability that favors one side. For each such sequence, the favored side, the solution or target, is
-# assigned randomly to the left or right.
-
-
-def generate_evidence_accumulation_input_output(batch_size, n_in, steps, input):
- n_pop_nrn = n_in // input["n_symbols"]
-
- prob_choices = np.array([input["prob_group"], 1 - input["prob_group"]], dtype=np.float32)
- idx = np.random.choice([0, 1], batch_size)
- probs = np.zeros((batch_size, 2), dtype=np.float32)
- probs[:, 0] = prob_choices[idx]
- probs[:, 1] = prob_choices[1 - idx]
-
- batched_cues = np.zeros((batch_size, input["n_cues"]), dtype=int)
- for b_idx in range(batch_size):
- batched_cues[b_idx, :] = np.random.choice([0, 1], input["n_cues"], p=probs[b_idx])
-
- input_spike_probs = np.zeros((batch_size, steps["sequence"], n_in))
-
- for b_idx in range(batch_size):
- for c_idx in range(input["n_cues"]):
- cue = batched_cues[b_idx, c_idx]
-
- step_start = c_idx * (steps["cue"] + steps["spacing"]) + steps["spacing"]
- step_stop = step_start + steps["cue"]
-
- pop_nrn_start = cue * n_pop_nrn
- pop_nrn_stop = pop_nrn_start + n_pop_nrn
-
- input_spike_probs[b_idx, step_start:step_stop, pop_nrn_start:pop_nrn_stop] = input["spike_prob"]
-
- input_spike_probs[:, -steps["recall"] :, 2 * n_pop_nrn : 3 * n_pop_nrn] = input["spike_prob"]
- input_spike_probs[:, :, 3 * n_pop_nrn :] = input["spike_prob"] / 4.0
- input_spike_bools = input_spike_probs > np.random.rand(input_spike_probs.size).reshape(input_spike_probs.shape)
- input_spike_bools[:, 0, :] = 0 # remove spikes in 0th time step of every sequence for technical reasons
-
- target_cues = np.zeros(batch_size, dtype=int)
- target_cues[:] = np.sum(batched_cues, axis=1) > int(input["n_cues"] / 2)
-
- return input_spike_bools, target_cues
-
-
-def get_params_task_input_output(n_iter_interval):
- iteration_offset = n_iter_interval * batch_size * duration["sequence"]
- dtype_in_spks = np.float32 # data type of input spikes - for reproducing TF results set to np.float32
-
- input_spike_bools, target_cues = generate_evidence_accumulation_input_output(batch_size, n_in, steps, input)
-
- input_spike_bools_arr = np.array(input_spike_bools).reshape(batch_size * steps["sequence"], n_in)
- timeline_task = (
- np.arange(0.0, batch_size * duration["sequence"], duration["step"]) + iteration_offset + duration["offset_gen"]
- )
-
- params_gen_spk_in = [
- {"spike_times": timeline_task[input_spike_bools_arr[:, nrn_in_idx]].astype(dtype_in_spks)}
- for nrn_in_idx in range(n_in)
- ]
-
- target_rate_changes = np.zeros((n_out, batch_size))
- target_rate_changes[np.array(target_cues), np.arange(batch_size)] = 1
-
- params_gen_rate_target = [
- {
- "amplitude_times": np.arange(0.0, batch_size * duration["sequence"], duration["sequence"])
- + iteration_offset
- + duration["total_offset"],
- "amplitude_values": target_rate_changes[nrn_out_idx],
- }
- for nrn_out_idx in range(n_out)
- ]
-
- return params_gen_spk_in, params_gen_rate_target
-
-
-# %% ###########################################################################################################
-# Force final update
-# ~~~~~~~~~~~~~~~~~~
-# Synapses only become active, that is, the correct weight update is calculated and applied, when they transmit
-# a spike. To still be able to read out the correct weights at the end of the simulation, we force a spike of
-# the presynaptic neuron, and thus an update of all synapses, including those that have not transmitted a spike
-# in the last update interval, by sending a strong spike to all neurons that form the presynaptic side of an
-# e-prop synapse. This step is required purely for technical reasons.
-
-gen_spk_final_update = nest.Create("spike_generator", 1)
-
-nest.Connect(gen_spk_final_update, nrns_in + nrns_rec, "all_to_all", {"weight": 1000.0})
-
-# %% ###########################################################################################################
-# Read out pre-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Before we begin training, we read out the initial weight matrices so that we can eventually compare them to
-# the optimized weights.
-
-
-def get_weights(pop_pre, pop_post):
- conns = nest.GetConnections(pop_pre, pop_post).get(["source", "target", "weight"])
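- # convert the global node IDs of sources and targets to zero-based indices for the weight matrix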
- conns["senders"] = np.array(conns["source"]) - np.min(conns["source"])
- conns["targets"] = np.array(conns["target"]) - np.min(conns["target"])
-
- conns["weight_matrix"] = np.zeros((len(pop_post), len(pop_pre)))
- conns["weight_matrix"][conns["targets"], conns["senders"]] = conns["weight"]
- return conns
-
-
-weights_pre_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Simulate and evaluate
-# ~~~~~~~~~~~~~~~~~~~~~
-# We train the network by simulating for a number of training iterations with the set learning rate. If early
-# stopping is turned on, we evaluate the network's performance on the validation set at regular intervals and,
-# if the error falls below a certain threshold, we stop the training early. Otherwise, we continue training
-# until the set number of iterations is reached. Finally, we evaluate the network's performance on the test
-# set. Throughout the training, we assess the network's training error by calculating a loss - in this case,
-# the cross-entropy error between the integrated recurrent network activity and the target rate.
-
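-# As a minimal, self-contained illustration of this loss (toy numbers, not data from the task): for two readout
-# neurons and two samples, the cross-entropy compares the softmax readout with the one-hot target, and the error
-# is one minus the accuracy of the argmax prediction.
-
-_toy_readout = np.array([[0.9, 0.2], [0.1, 0.8]])  # softmax readout, shape (n_out, n_samples)
-_toy_target = np.array([[1.0, 0.0], [0.0, 1.0]])  # one-hot target, same shape
-_toy_loss = -np.mean(np.sum(_toy_target * np.log(_toy_readout), axis=0))  # approx. 0.164
-_toy_error = 1.0 - np.mean(np.argmax(_toy_readout, axis=0) == np.argmax(_toy_target, axis=0))  # 0.0
-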
-
-class TrainingPipeline:
- def __init__(self):
- self.results_dict = {
- "error": [],
- "loss": [],
- "iteration": [],
- "label": [],
- }
- self.n_iter_sim = 0
- self.phase_label_previous = ""
- self.error = 0
- self.k_iter = 0
- self.early_stop = False
-
- def evaluate(self):
- events_mm_out = mm_out.get("events")
-
- readout_signal = events_mm_out["readout_signal"] # corresponds to softmax
- target_signal = events_mm_out["target_signal"]
- senders = events_mm_out["senders"]
- times = events_mm_out["times"]
-
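- # keep only the events recorded during the most recently simulated iteration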
- cond1 = times > (self.n_iter_sim - 1) * batch_size * duration["sequence"] + duration["total_offset"]
- cond2 = times <= self.n_iter_sim * batch_size * duration["sequence"] + duration["total_offset"]
- idc = cond1 & cond2
-
- readout_signal = np.array([readout_signal[idc][senders[idc] == i] for i in set(senders)])
- target_signal = np.array([target_signal[idc][senders[idc] == i] for i in set(senders)])
-
- readout_signal = readout_signal.reshape((n_out, 1, batch_size, steps["sequence"]))
- target_signal = target_signal.reshape((n_out, 1, batch_size, steps["sequence"]))
-
- readout_signal = readout_signal[:, :, :, -steps["learning_window"] :]
- target_signal = target_signal[:, :, :, -steps["learning_window"] :]
-
- loss = -np.mean(np.sum(target_signal * np.log(readout_signal), axis=0), axis=(1, 2))
-
- y_prediction = np.argmax(np.mean(readout_signal, axis=3), axis=0)
- y_target = np.argmax(np.mean(target_signal, axis=3), axis=0)
- accuracy = np.mean((y_target == y_prediction), axis=1)
- errors = 1.0 - accuracy
-
- self.results_dict["iteration"].append(self.n_iter_sim)
- self.results_dict["error"].extend(errors)
- self.results_dict["loss"].extend(loss)
- self.results_dict["label"].append(self.phase_label_previous)
-
- self.error = errors[0]
-
- def run_phase(self, phase_label, eta):
- params_common_syn_eprop["optimizer"]["eta"] = eta
- nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
- params_gen_spk_in, params_gen_rate_target = get_params_task_input_output(self.n_iter_sim)
- nest.SetStatus(gen_spk_in, params_gen_spk_in)
- nest.SetStatus(gen_rate_target, params_gen_rate_target)
-
- self.simulate("total_offset")
- self.simulate("extension_sim")
-
- if self.n_iter_sim > 0:
- self.evaluate()
-
- duration["sim"] = batch_size * duration["sequence"] - duration["total_offset"] - duration["extension_sim"]
-
- self.simulate("sim")
-
- self.n_iter_sim += 1
- self.phase_label_previous = phase_label
-
- def run_training(self):
- self.run_phase("training", eta_train)
-
- def run_validation(self):
- if do_early_stopping and self.k_iter % n_iter_validate_every == 0:
- self.run_phase("validation", eta_test)
-
- def run_early_stopping(self):
- if do_early_stopping and self.k_iter % n_iter_validate_every == 0:
- if self.k_iter > 0 and self.error < stop_crit:
- errors_early_stop = []
- for _ in range(n_iter_early_stop):
- self.run_phase("early-stopping", eta_test)
- errors_early_stop.append(self.error)
-
- self.early_stop = np.mean(errors_early_stop) < stop_crit
-
- def run_test(self):
- for _ in range(n_iter_test):
- self.run_phase("test", eta_test)
-
- def simulate(self, k):
- nest.Simulate(duration[k])
-
- def run(self):
- while self.k_iter < n_iter_train and not self.early_stop:
- self.run_validation()
- self.run_early_stopping()
- self.run_training()
- self.k_iter += 1
-
- self.run_test()
-
- self.simulate("total_offset")
- self.simulate("extension_sim")
-
- self.evaluate()
-
- duration["task"] = self.n_iter_sim * batch_size * duration["sequence"] + duration["total_offset"]
-
- gen_spk_final_update.set({"spike_times": [duration["task"] + duration["extension_sim"] + 1]})
-
- self.simulate("final_update")
-
- def get_results(self):
- for k, v in self.results_dict.items():
- self.results_dict[k] = np.array(v)
- return self.results_dict
-
-
-training_pipeline = TrainingPipeline()
-training_pipeline.run()
-
-results_dict = training_pipeline.get_results()
-n_iter_sim = training_pipeline.n_iter_sim
-
-# %% ###########################################################################################################
-# Read out post-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# After the training, we can read out the optimized final weights.
-
-weights_post_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Read out recorders
-# ~~~~~~~~~~~~~~~~~~
-# We can also retrieve the recorded history of the dynamic variables and weights, as well as detected spikes.
-
-events_mm_reg = mm_reg.get("events")
-events_mm_ad = mm_ad.get("events")
-events_mm_out = mm_out.get("events")
-events_sr_in = sr_in.get("events")
-events_sr_reg = sr_reg.get("events")
-events_sr_ad = sr_ad.get("events")
-events_wr = wr.get("events")
-
-# %% ###########################################################################################################
-# Plot results
-# ~~~~~~~~~~~~
-# Finally, we visualize the results in a series of plots.
-
-do_plotting = True # if True, plot the results
-
-if not do_plotting:
- exit()
-
-colors = {
- "blue": "#2854c5ff",
- "red": "#e04b40ff",
- "green": "#25aa2cff",
- "gold": "#f9c643ff",
- "white": "#ffffffff",
-}
-
-plt.rcParams.update(
- {
- "axes.spines.right": False,
- "axes.spines.top": False,
- "axes.prop_cycle": cycler(color=[colors[k] for k in ["blue", "red", "green", "gold"]]),
- }
-)
-
-# %% ###########################################################################################################
-# Plot learning performance
-# .........................
-# We begin with two plots visualizing the learning performance of the network: the loss and the error, both
-# plotted against the iterations.
-
-fig, axs = plt.subplots(2, 1, sharex=True)
-fig.suptitle("Learning performance")
-
-for color, label in zip(colors, set(results_dict["label"])):
- idc = results_dict["label"] == label
- axs[0].scatter(results_dict["iteration"][idc], results_dict["loss"][idc], label=label)
- axs[1].scatter(results_dict["iteration"][idc], results_dict["error"][idc], label=label)
-
-axs[0].set_ylabel(r"$\mathcal{L} = -\sum_{t,k} \pi_k^{*,t} \log \pi_k^t$")
-axs[1].set_ylabel("error")
-
-axs[-1].set_xlabel("iteration")
-axs[-1].legend(bbox_to_anchor=(1.05, 0.5), loc="center left")
-axs[-1].xaxis.get_major_locator().set_params(integer=True)
-
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot spikes and dynamic variables
-# .................................
-# This plotting routine shows how to plot all of the recorded dynamic variables and spikes across time. We take
-# one snapshot in the first iteration and one snapshot at the end.
-
-
-def plot_recordable(ax, events, recordable, ylabel, xlims):
- for sender in set(events["senders"]):
- idc_sender = events["senders"] == sender
- idc_times = (events["times"][idc_sender] > xlims[0]) & (events["times"][idc_sender] < xlims[1])
- ax.plot(events["times"][idc_sender][idc_times], events[recordable][idc_sender][idc_times], lw=0.5)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(events[recordable]) - np.min(events[recordable])) * 0.1
- ax.set_ylim(np.min(events[recordable]) - margin, np.max(events[recordable]) + margin)
-
-
-def plot_spikes(ax, events, ylabel, xlims):
- idc_times = (events["times"] > xlims[0]) & (events["times"] < xlims[1])
- senders_subset = events["senders"][idc_times]
- times_subset = events["times"][idc_times]
-
- ax.scatter(times_subset, senders_subset, s=0.1)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(senders_subset) - np.min(senders_subset)) * 0.1
- ax.set_ylim(np.min(senders_subset) - margin, np.max(senders_subset) + margin)
-
-
-for title, xlims in zip(
- ["Dynamic variables before training", "Dynamic variables after training"],
- [
- (0, steps["sequence"]),
- ((n_iter_sim - 1) * batch_size * steps["sequence"], n_iter_sim * batch_size * steps["sequence"]),
- ],
-):
- fig, axs = plt.subplots(14, 1, sharex=True, figsize=(8, 14), gridspec_kw={"hspace": 0.4, "left": 0.2})
- fig.suptitle(title)
-
- plot_spikes(axs[0], events_sr_in, r"$z_i$" + "\n", xlims)
- plot_spikes(axs[1], events_sr_reg, r"$z_j$" + "\n", xlims)
-
- plot_recordable(axs[2], events_mm_reg, "V_m", r"$v_j$" + "\n(mV)", xlims)
- plot_recordable(axs[3], events_mm_reg, "surrogate_gradient", r"$\psi_j$" + "\n", xlims)
- plot_recordable(axs[4], events_mm_reg, "learning_signal", r"$L_j$" + "\n(pA)", xlims)
-
- plot_spikes(axs[5], events_sr_ad, r"$z_j$" + "\n", xlims)
-
- plot_recordable(axs[6], events_mm_ad, "V_m", r"$v_j$" + "\n(mV)", xlims)
- plot_recordable(axs[7], events_mm_ad, "surrogate_gradient", r"$\psi_j$" + "\n", xlims)
- plot_recordable(axs[8], events_mm_ad, "V_th_adapt", r"$A_j$" + "\n(mV)", xlims)
- plot_recordable(axs[9], events_mm_ad, "learning_signal", r"$L_j$" + "\n(pA)", xlims)
-
- plot_recordable(axs[10], events_mm_out, "V_m", r"$v_k$" + "\n(mV)", xlims)
- plot_recordable(axs[11], events_mm_out, "target_signal", r"$\pi^*_k$" + "\n", xlims)
- plot_recordable(axs[12], events_mm_out, "readout_signal", r"$\pi_k$" + "\n", xlims)
- plot_recordable(axs[13], events_mm_out, "error_signal", r"$\pi_k-\pi^*_k$" + "\n", xlims)
-
- axs[-1].set_xlabel(r"$t$ (ms)")
- axs[-1].set_xlim(*xlims)
-
- fig.align_ylabels()
-
-# %% ###########################################################################################################
-# Plot weight time courses
-# ........................
-# Similarly, we can plot the weight histories. Note that the weight recorder, attached to the synapses, works
-# differently from the other recorders. Since synapses are only activated when they transmit a spike, the weight
-# recorder only records the weight at those moments. That is why the first weight recordings do not start in
-# the first time step and we add the initial weights manually.
-
-
-def plot_weight_time_course(ax, events, nrns, label, ylabel):
- sender_label, target_label = label.split("_")
- nrns_senders = nrns[sender_label]
- nrns_targets = nrns[target_label]
-
- for sender in set(events_wr["senders"]):
- for target in set(events_wr["targets"]):
- if sender in nrns_senders and target in nrns_targets:
- idc_syn = (events["senders"] == sender) & (events["targets"] == target)
- if np.any(idc_syn):
- idc_syn_pre = (weights_pre_train[label]["source"] == sender) & (
- weights_pre_train[label]["target"] == target
- )
- times = np.concatenate([[0.0], events["times"][idc_syn]])
-
- weights = np.concatenate(
- [np.array(weights_pre_train[label]["weight"])[idc_syn_pre], events["weights"][idc_syn]]
- )
- ax.step(times, weights, c=colors["blue"])
- ax.set_ylabel(ylabel)
- ax.set_ylim(-0.6, 0.6)
-
-
-fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
-fig.suptitle("Weight time courses")
-
-nrns = {
- "in": nrns_in.tolist(),
- "rec": nrns_rec.tolist(),
- "out": nrns_out.tolist(),
-}
-
-plot_weight_time_course(axs[0], events_wr, nrns, "in_rec", r"$W_\text{in}$ (pA)")
-plot_weight_time_course(axs[1], events_wr, nrns, "rec_rec", r"$W_\text{rec}$ (pA)")
-plot_weight_time_course(axs[2], events_wr, nrns, "rec_out", r"$W_\text{out}$ (pA)")
-
-axs[-1].set_xlabel(r"$t$ (ms)")
-axs[-1].set_xlim(0, duration["task"])
-
-fig.align_ylabels()
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot weight matrices
-# ....................
-# If one is not interested in the time course of the weights, it is possible to read out only the initial and
-# final weights, which requires less computing time and memory than the weight recorder approach. Here, we plot
-# the corresponding weight matrices before and after the optimization.
-
-cmap = mpl.colors.LinearSegmentedColormap.from_list(
- "cmap", ((0.0, colors["blue"]), (0.5, colors["white"]), (1.0, colors["red"]))
-)
-
-fig, axs = plt.subplots(3, 2, sharex="col", sharey="row")
-fig.suptitle("Weight matrices")
-
-all_w_extrema = []
-
-for k in weights_pre_train.keys():
- w_pre = weights_pre_train[k]["weight"]
- w_post = weights_post_train[k]["weight"]
- all_w_extrema.append([np.min(w_pre), np.max(w_pre), np.min(w_post), np.max(w_post)])
-
-args = {"cmap": cmap, "vmin": np.min(all_w_extrema), "vmax": np.max(all_w_extrema)}
-
-for i, weights in zip([0, 1], [weights_pre_train, weights_post_train]):
- axs[0, i].pcolormesh(weights["in_rec"]["weight_matrix"].T, **args)
- axs[1, i].pcolormesh(weights["rec_rec"]["weight_matrix"], **args)
- cmesh = axs[2, i].pcolormesh(weights["rec_out"]["weight_matrix"], **args)
-
- axs[2, i].set_xlabel("recurrent\nneurons")
-
-axs[0, 0].set_ylabel("input\nneurons")
-axs[1, 0].set_ylabel("recurrent\nneurons")
-axs[2, 0].set_ylabel("readout\nneurons")
-fig.align_ylabels(axs[:, 0])
-
-axs[0, 0].text(0.5, 1.1, "before training", transform=axs[0, 0].transAxes, ha="center")
-axs[0, 1].text(0.5, 1.1, "after training", transform=axs[0, 1].transAxes, ha="center")
-
-axs[2, 0].yaxis.get_major_locator().set_params(integer=True)
-
-cbar = plt.colorbar(cmesh, cax=axs[1, 1].inset_axes([1.1, 0.2, 0.05, 0.8]), label="weight (pA)")
-
-fig.tight_layout()
-
-plt.show()
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.png b/pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.png
deleted file mode 100644
index f078866bcf..0000000000
Binary files a/pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.png and /dev/null differ
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.py b/pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.py
deleted file mode 100644
index 48f0d07b94..0000000000
--- a/pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.py
+++ /dev/null
@@ -1,762 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# eprop_supervised_regression_handwriting_bsshslm_2020.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
-
-r"""
-Tutorial on learning to generate handwritten text with e-prop after Bellec et al. (2020)
-----------------------------------------------------------------------------------------
-
-Training a regression model using supervised e-prop plasticity to generate handwritten text
-
-Description
-~~~~~~~~~~~
-
-This script demonstrates supervised learning of a regression task with a recurrent spiking neural network that
-is equipped with the eligibility propagation (e-prop) plasticity mechanism by Bellec et al. [1]_.
-
-This type of learning is demonstrated in the proof-of-concept task in [1]_. We based this script on their
-TensorFlow script given in [2]_ and changed the task as well as the parameters slightly.
-
-In this task, the network learns to generate an arbitrary N-dimensional temporal pattern. Here, the network
-learns to reproduce with its overall spiking activity a two-dimensional, roughly two-second-long target signal
-which encodes the x and y coordinates of the handwritten word "chaos".
-
-.. image:: eprop_supervised_regression_handwriting_bsshslm_2020.png
- :width: 70 %
- :alt: Schematic of network architecture. Same as Figure 1 in the code.
- :align: center
-
-Learning in the neural network model is achieved by optimizing the connection weights with e-prop plasticity.
-This plasticity rule requires a specific network architecture depicted in Figure 1. The neural network model
-consists of a recurrent network that receives frozen noise input from spike generators and projects onto two
-readout neurons. Each individual readout signal denoted as :math:`y_k` is compared with a corresponding target
-signal represented as :math:`y_k^*`. The network's training error is assessed by employing a mean-squared error
-loss.
-
-Details on the event-based NEST implementation of e-prop can be found in [3]_.
-
-The development of this task and the hyper-parameter optimization were conducted by Agnes Korcsak-Gorzo and
-Charl Linssen, inspired by activities and feedback received at the CapoCaccia Workshop toward Neuromorphic
-Intelligence 2023.
-
-
-Get the chaos_handwriting.txt file:
-
-.. grid::
-
- .. grid-item-card::
- :columns: 3
-
- :download:`chaos_handwriting.txt`
-
-References
-~~~~~~~~~~
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R, Maass W (2020). A solution to the
- learning dilemma for recurrent networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] https://github.com/IGITUGraz/eligibility_propagation/blob/master/Figure_3_and_S7_e_prop_tutorials/tutorial_pattern_generation.py
-
-.. [3] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-""" # pylint: disable=line-too-long # noqa: E501
-
-# %% ###########################################################################################################
-# Import libraries
-# ~~~~~~~~~~~~~~~~
-# We begin by importing all libraries required for the simulation, analysis, and visualization.
-
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import nest
-import numpy as np
-from cycler import cycler
-from IPython.display import Image
-
-# %% ###########################################################################################################
-# Schematic of network architecture
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# This figure, identical to the one in the description, shows the required network architecture in the center,
-# the input and output of the pattern generation task above, and lists of the required NEST device, neuron, and
-# synapse models below. The connections that must be established are numbered 1 to 6.
-
-try:
- Image(filename="./eprop_supervised_regression_handwriting_bsshslm_2020.png")
-except Exception:
- pass
-
-# %% ###########################################################################################################
-# Setup
-# ~~~~~
-
-# %% ###########################################################################################################
-# Initialize random generator
-# ...........................
-# We seed the numpy random generator, which will generate random initial weights as well as random input and
-# output.
-
-rng_seed = 1 # numpy random seed
-np.random.seed(rng_seed) # fix numpy random seed
-
-# %% ###########################################################################################################
-# Define timing of task
-# .....................
-# The task's temporal structure is then defined, once as time steps and once as durations in milliseconds.
-# Increasing the number of iterations enhances learning performance.
-
-batch_size = 1 # batch size
-n_iter = 200 # number of iterations, 5000 to reach convergence as in the figure
-
-data_file_name = "chaos_handwriting.txt" # name of file with task data
-data = np.loadtxt(data_file_name)
-
-steps = {
- "data_point": 8, # time steps of one data point
-}
-
-steps["sequence"] = len(data) * steps["data_point"] # time steps of one full sequence
-steps["learning_window"] = steps["sequence"] # time steps of window with non-zero learning signals
-steps["task"] = n_iter * batch_size * steps["sequence"] # time steps of task
-
-steps.update(
- {
- "offset_gen": 1, # offset since generator signals start from time step 1
- "delay_in_rec": 1, # connection delay between input and recurrent neurons
- "delay_rec_out": 1, # connection delay between recurrent and output neurons
- "delay_out_norm": 1, # connection delay between output neurons for normalization
- "extension_sim": 1, # extra time step to close right-open simulation time interval in Simulate()
- }
-)
-
-steps["delays"] = steps["delay_in_rec"] + steps["delay_rec_out"] + steps["delay_out_norm"] # time steps of delays
-
-steps["total_offset"] = steps["offset_gen"] + steps["delays"] # time steps of total offset
-
-steps["sim"] = steps["task"] + steps["total_offset"] + steps["extension_sim"] # time steps of simulation
-
-duration = {"step": 1.0} # ms, temporal resolution of the simulation
-
-duration.update({key: value * duration["step"] for key, value in steps.items()}) # ms, durations
-
-# %% ###########################################################################################################
-# Set up simulation
-# .................
-# As last step of the setup, we reset the NEST kernel to remove all existing NEST simulation settings and
-# objects and set some NEST kernel parameters, some of which are e-prop-related.
-
-params_setup = {
- "eprop_learning_window": duration["learning_window"],
- "eprop_reset_neurons_on_update": True, # if True, reset dynamic variables at start of each update interval
- "eprop_update_interval": duration["sequence"], # ms, time interval for updating the synaptic weights
- "print_time": False, # if True, print time progress bar during simulation, set False if run as code cell
- "resolution": duration["step"],
- "total_num_virtual_procs": 1, # number of virtual processes, set in case of distributed computing
- "rng_seed": rng_seed, # seed for NEST random generator
-}
-
-####################
-
-nest.ResetKernel()
-nest.set(**params_setup)
-
-# %% ###########################################################################################################
-# Create neurons
-# ~~~~~~~~~~~~~~
-# We proceed by creating a certain number of input, recurrent, and readout neurons and setting their parameters.
-# Additionally, we already create an input spike generator and an output target rate generator, which we will
-# configure later.
-
-n_in = 100 # number of input neurons
-n_rec = 200 # number of recurrent neurons
-n_out = 2 # number of readout neurons
-
-params_nrn_out = {
- "C_m": 1.0, # pF, membrane capacitance - takes effect only if neurons get current input (here not the case)
- "E_L": 0.0, # mV, leak / resting membrane potential
- "I_e": 0.0, # pA, external current input
- "loss": "mean_squared_error", # loss function
- "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning
- "tau_m": 50.0, # ms, membrane time constant
- "V_m": 0.0, # mV, initial value of the membrane voltage
-}
-
-tau_m_mean = 30.0 # ms, mean of membrane time constant distribution
-
-params_nrn_rec = {
- "beta": 1.0, # width scaling of the pseudo-derivative
- "adapt_tau": 2000.0, # ms, time constant of adaptive threshold
- "C_m": 250.0,
- "c_reg": 150.0, # coefficient of firing rate regularization
- "E_L": 0.0,
- "f_target": 20.0, # spikes/s, target firing rate for firing rate regularization
- "gamma": 0.3, # height scaling of the pseudo-derivative
- "I_e": 0.0,
- "regular_spike_arrival": False,
- "surrogate_gradient_function": "piecewise_linear", # surrogate gradient / pseudo-derivative function
- "t_ref": 0.0, # ms, duration of refractory period
- "tau_m": nest.random.normal(mean=tau_m_mean, std=2.0),
- "V_m": 0.0,
- "V_th": 0.03, # mV, spike threshold membrane voltage
-}
-
-# factors from the original pseudo-derivative definition are incorporated into the parameters
-params_nrn_rec["gamma"] /= params_nrn_rec["V_th"]
-params_nrn_rec["beta"] /= np.abs(params_nrn_rec["V_th"]) # prefactor is inside abs in the original definition
-
-params_nrn_rec["adapt_beta"] = (
- 1.7 * (1.0 - np.exp(-1 / params_nrn_rec["adapt_tau"])) / (1.0 - np.exp(-1.0 / tau_m_mean))
-) # prefactor of adaptive threshold
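-# with adapt_tau = 2000 ms and tau_m_mean = 30 ms, this prefactor evaluates to
-# 1.7 * (1 - exp(-1 / 2000)) / (1 - exp(-1 / 30)), which is roughly 0.026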
-
-####################
-
-# Intermediate parrot neurons are required between the input spike generators and the recurrent neurons,
-# since devices cannot establish plastic synapses for technical reasons.
-
-gen_spk_in = nest.Create("spike_generator", n_in)
-nrns_in = nest.Create("parrot_neuron", n_in)
-
-# The suffix _bsshslm_2020 follows the NEST convention of indicating in the model name the paper
-# that introduced it, using the first letters of the authors' last names and the publication year.
-
-nrns_rec = nest.Create("eprop_iaf_adapt_bsshslm_2020", n_rec, params_nrn_rec)
-nrns_out = nest.Create("eprop_readout_bsshslm_2020", n_out, params_nrn_out)
-gen_rate_target = nest.Create("step_rate_generator", n_out)
-
-
-# %% ###########################################################################################################
-# Create recorders
-# ~~~~~~~~~~~~~~~~
-# We also create recorders, which, while not required for the training, will allow us to track various dynamic
-# variables of the neurons, spikes, and changes in synaptic weights. To save computing time and memory, the
-# recorders, the recorded variables, neurons, and synapses can be limited to the ones relevant to the
-# experiment, and the recording interval can be increased (see the documentation on the specific recorders). By
-# default, recordings are stored in memory but can also be written to file.
-
-n_record = 1 # number of neurons to record dynamic variables from - this script requires n_record >= 1
-n_record_w = 5 # number of senders and targets to record weights from - this script requires n_record_w >= 1
-
-if n_record == 0 or n_record_w == 0:
- raise ValueError("n_record and n_record_w >= 1 required")
-
-params_mm_rec = {
- "interval": duration["step"], # interval between two recorded time points
- "record_from": [
- "V_m",
- "surrogate_gradient",
- "learning_signal",
- "V_th_adapt",
- "adaptation",
- ], # dynamic variables to record
- "start": duration["offset_gen"] + duration["delay_in_rec"], # start time of recording
- "stop": duration["offset_gen"] + duration["delay_in_rec"] + duration["task"], # stop time of recording
- "label": "multimeter_rec",
-}
-
-params_mm_out = {
- "interval": duration["step"],
- "record_from": ["V_m", "readout_signal", "readout_signal_unnorm", "target_signal", "error_signal"],
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "multimeter_out",
-}
-
-params_wr = {
- "senders": nrns_in[:n_record_w] + nrns_rec[:n_record_w], # limit senders to subsample weights to record
- "targets": nrns_rec[:n_record_w] + nrns_out, # limit targets to subsample weights to record from
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "weight_recorder",
-}
-
-params_sr_in = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "spike_recorder_in",
-}
-
-params_sr_rec = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "spike_recorder_rec",
-}
-
-####################
-
-mm_rec = nest.Create("multimeter", params_mm_rec)
-mm_out = nest.Create("multimeter", params_mm_out)
-sr_in = nest.Create("spike_recorder", params_sr_in)
-sr_rec = nest.Create("spike_recorder", params_sr_rec)
-wr = nest.Create("weight_recorder", params_wr)
-
-nrns_rec_record = nrns_rec[:n_record]
-
-# %% ###########################################################################################################
-# Create connections
-# ~~~~~~~~~~~~~~~~~~
-# Now, we define the connectivity and set up the synaptic parameters, with the synaptic weights drawn from
-# normal distributions. After these preparations, we establish the enumerated connections of the core network,
-# as well as additional connections to the recorders.
-
-params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
-params_conn_one_to_one = {"rule": "one_to_one"}
-
-dtype_weights = np.float32 # data type of weights - for reproducing TF results set to np.float32
-weights_in_rec = np.array(np.random.randn(n_in, n_rec).T / np.sqrt(n_in), dtype=dtype_weights)
-weights_rec_rec = np.array(np.random.randn(n_rec, n_rec).T / np.sqrt(n_rec), dtype=dtype_weights)
-np.fill_diagonal(weights_rec_rec, 0.0) # since no autapses set corresponding weights to zero
-weights_rec_out = np.array(np.random.randn(n_rec, n_out).T / np.sqrt(n_rec), dtype=dtype_weights)
-weights_out_rec = np.array(np.random.randn(n_rec, n_out) / np.sqrt(n_rec), dtype=dtype_weights)
-
-params_common_syn_eprop = {
- "optimizer": {
- "type": "adam", # algorithm to optimize the weights
- "batch_size": batch_size,
- "beta_1": 0.9, # exponential decay rate for 1st moment estimate of Adam optimizer
- "beta_2": 0.999, # exponential decay rate for 2nd moment raw estimate of Adam optimizer
- "epsilon": 1e-8, # small numerical stabilization constant of Adam optimizer
- "eta": 5e-3, # learning rate
- "Wmin": -100.0, # pA, minimal limit of the synaptic weights
- "Wmax": 100.0, # pA, maximal limit of the synaptic weights
- },
- "average_gradient": False, # if True, average the gradient over the learning window
- "weight_recorder": wr,
-}
-
-params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"], # ms, dendritic delay
- "tau_m_readout": params_nrn_out["tau_m"], # ms, for technical reasons pass readout neuron membrane time constant
-}
-
-params_syn_in = params_syn_base.copy()
-params_syn_in["weight"] = weights_in_rec # pA, initial values for the synaptic weights
-
-params_syn_rec = params_syn_base.copy()
-params_syn_rec["weight"] = weights_rec_rec
-
-params_syn_out = params_syn_base.copy()
-params_syn_out["weight"] = weights_rec_out
-
-params_syn_feedback = {
- "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
- "delay": duration["step"],
- "weight": weights_out_rec,
-}
-
-params_syn_rate_target = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 2, # receptor type over which readout neuron receives target signal
-}
-
-params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
-}
-
-params_init_optimizer = {
- "optimizer": {
- "m": 0.0, # initial 1st moment estimate m of Adam optimizer
- "v": 0.0, # initial 2nd moment raw estimate v of Adam optimizer
- }
-}
-
-####################
-
-nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
-nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static) # connection 1
-nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in) # connection 2
-nest.Connect(nrns_rec, nrns_rec, params_conn_all_to_all, params_syn_rec) # connection 3
-nest.Connect(nrns_rec, nrns_out, params_conn_all_to_all, params_syn_out) # connection 4
-nest.Connect(nrns_out, nrns_rec, params_conn_all_to_all, params_syn_feedback) # connection 5
-nest.Connect(gen_rate_target, nrns_out, params_conn_one_to_one, params_syn_rate_target) # connection 6
-
-nest.Connect(nrns_in, sr_in, params_conn_all_to_all, params_syn_static)
-nest.Connect(nrns_rec, sr_rec, params_conn_all_to_all, params_syn_static)
-
-nest.Connect(mm_rec, nrns_rec_record, params_conn_all_to_all, params_syn_static)
-nest.Connect(mm_out, nrns_out, params_conn_all_to_all, params_syn_static)
-
-# After creating the connections, we can individually initialize the optimizer's
-# dynamic variables for single synapses (shown here for two example connections).
-
-nest.GetConnections(nrns_rec[0], nrns_rec[1:3]).set([params_init_optimizer] * 2)
-
-# %% ###########################################################################################################
-# Create input
-# ~~~~~~~~~~~~
-# We generate some frozen Poisson spike noise of a fixed rate that is repeated in each iteration and feed these
-# spike times to the previously created input spike generator. The network will use these spike times as a
-# temporal backbone for encoding the target signal into its recurrent spiking activity.
-
-input_spike_prob = 0.05 # spike probability of frozen input noise
-dtype_in_spks = np.float32 # data type of input spikes - for reproducing TF results set to np.float32
-
-input_spike_bools = (np.random.rand(steps["sequence"], n_in) < input_spike_prob).swapaxes(0, 1)
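-# input_spike_bools now has shape (n_in, steps["sequence"]): one Boolean spike train per input neuron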
-input_spike_bools[:, 0] = 0 # remove spikes in 0th time step of every sequence for technical reasons
-
-sequence_starts = np.arange(0.0, duration["task"], duration["sequence"]) + duration["offset_gen"]
-params_gen_spk_in = []
-for input_spike_bool in input_spike_bools:
- input_spike_times = np.arange(0.0, duration["sequence"], duration["step"])[input_spike_bool]
- input_spike_times_all = [input_spike_times + start for start in sequence_starts]
- params_gen_spk_in.append({"spike_times": np.hstack(input_spike_times_all).astype(dtype_in_spks)})
-
-####################
-
-nest.SetStatus(gen_spk_in, params_gen_spk_in)
-
-# %% ###########################################################################################################
-# Create output
-# ~~~~~~~~~~~~~
-# Then, we load the x and y values of an image of the word "chaos" written by hand and construct a roughly
-# two-second long target signal from it. This signal, like the input, is repeated for all iterations and fed
-# into the rate generator that was previously created.
-
-x_eval = np.arange(steps["sequence"]) / steps["data_point"]
-x_data = np.arange(steps["sequence"] // steps["data_point"])
-
-target_signal_list = []
-for y_data in np.cumsum(data, axis=0).T:
- y_data /= np.max(np.abs(y_data))
- y_data -= np.mean(y_data)
- target_signal_list.append(np.interp(x_eval, x_data, y_data))
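-
-# As a small illustration of the resampling above (toy numbers, unrelated to the handwriting data): np.interp
-# evaluates a piecewise-linear interpolant of the coarse data points on the finer time grid, for example
-_toy_interp = np.interp([0.0, 0.5, 1.0], [0, 1], [0.0, 2.0])  # -> array([0., 1., 2.])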
-
-params_gen_rate_target = []
-
-for target_signal in target_signal_list:
- params_gen_rate_target.append(
- {
- "amplitude_times": np.arange(0.0, duration["task"], duration["step"]) + duration["total_offset"],
- "amplitude_values": np.tile(target_signal, n_iter * batch_size),
- }
- )
-
-####################
-
-nest.SetStatus(gen_rate_target, params_gen_rate_target)
-
-# %% ###########################################################################################################
-# Force final update
-# ~~~~~~~~~~~~~~~~~~
-# Synapses only become active, that is, the correct weight update is calculated and applied, when they transmit
-# a spike. To still be able to read out the correct weights at the end of the simulation, we force a spike of
-# the presynaptic neuron, and thus an update of all synapses, including those that have not transmitted a spike
-# in the last update interval, by sending a strong spike to all neurons that form the presynaptic side of an
-# e-prop synapse. This step is required purely for technical reasons.
-
-gen_spk_final_update = nest.Create("spike_generator", 1, {"spike_times": [duration["task"] + duration["delays"]]})
-
-nest.Connect(gen_spk_final_update, nrns_in + nrns_rec, "all_to_all", {"weight": 1000.0})
-
-# %% ###########################################################################################################
-# Read out pre-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Before we begin training, we read out the initial weight matrices so that we can eventually compare them to
-# the optimized weights.
-
-
-def get_weights(pop_pre, pop_post):
- conns = nest.GetConnections(pop_pre, pop_post).get(["source", "target", "weight"])
- conns["senders"] = np.array(conns["source"]) - np.min(conns["source"])
- conns["targets"] = np.array(conns["target"]) - np.min(conns["target"])
-
- conns["weight_matrix"] = np.zeros((len(pop_post), len(pop_pre)))
- conns["weight_matrix"][conns["targets"], conns["senders"]] = conns["weight"]
- return conns
-
-
-weights_pre_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Simulate
-# ~~~~~~~~
-# We train the network by simulating for a set simulation time, determined by the number of iterations, the
-# batch size, and the length of one sequence.
-
-nest.Simulate(duration["sim"])
-
-# %% ###########################################################################################################
-# Read out post-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# After the training, we can read out the optimized final weights.
-
-weights_post_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Read out recorders
-# ~~~~~~~~~~~~~~~~~~
-# We can also retrieve the recorded history of the dynamic variables and weights, as well as detected spikes.
-
-events_mm_rec = mm_rec.get("events")
-events_mm_out = mm_out.get("events")
-events_sr_in = sr_in.get("events")
-events_sr_rec = sr_rec.get("events")
-events_wr = wr.get("events")
-
-# %% ###########################################################################################################
-# Evaluate training error
-# ~~~~~~~~~~~~~~~~~~~~~~~
-# We evaluate the network's training error by calculating a loss - in this case, the mean squared error between
-# the integrated recurrent network activity and the target rate.
-
-readout_signal = events_mm_out["readout_signal"]
-target_signal = events_mm_out["target_signal"]
-senders = events_mm_out["senders"]
-
-loss_list = []
-for sender in set(senders):
- idc = senders == sender
- error = (readout_signal[idc] - target_signal[idc]) ** 2
- loss_list.append(0.5 * np.add.reduceat(error, np.arange(0, steps["task"], steps["sequence"])))
-
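-# np.add.reduceat sums the squared error within each sequence; a toy sketch of the same pattern
-# (made-up numbers, two sequences of two time steps each):
-_toy_sums = np.add.reduceat(np.array([1.0, 1.0, 2.0, 2.0]), [0, 2])  # -> array([2., 4.])
-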
-readout_signal = np.array([readout_signal[senders == i] for i in set(senders)])
-target_signal = np.array([target_signal[senders == i] for i in set(senders)])
-
-readout_signal = readout_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-target_signal = target_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-
-loss = 0.5 * np.mean(np.sum((readout_signal - target_signal) ** 2, axis=3), axis=(0, 2))
-
-# %% ###########################################################################################################
-# Plot results
-# ~~~~~~~~~~~~
-# Finally, we visualize the results in a series of plots.
-
-do_plotting = True # if True, plot the results
-
-if not do_plotting:
- exit()
-
-colors = {
- "blue": "#2854c5ff",
- "red": "#e04b40ff",
- "white": "#ffffffff",
-}
-
-plt.rcParams.update(
- {
- "axes.spines.right": False,
- "axes.spines.top": False,
- "axes.prop_cycle": cycler(color=[colors["blue"], colors["red"]]),
- }
-)
-
-# %% ###########################################################################################################
-# Plot pattern
-# ............
-# First, we visualize the created pattern and plot the target for comparison. The outputs of the two readout
-# neurons encode the horizontal and vertical coordinates of the pattern, respectively.
-
-fig, ax = plt.subplots()
-fig.suptitle("Pattern")
-
-ax.plot(readout_signal[0, -1, 0, :], -readout_signal[1, -1, 0, :], c=colors["red"], label="readout")
-
-ax.plot(target_signal[0, -1, 0, :], -target_signal[1, -1, 0, :], c=colors["blue"], label="target")
-
-ax.set_xlabel(r"$y_0$ and $y^*_0$")
-ax.set_ylabel(r"$y_1$ and $y^*_1$")
-
-ax.axis("equal")
-
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot learning performance
-# .........................
-# We begin with a plot visualizing the learning performance of the network: the loss plotted against the
-# iterations.
-
-fig, ax = plt.subplots()
-fig.suptitle("Learning performance")
-
-ax.plot(range(1, n_iter + 1), loss_list[0], label=r"$\mathcal{L}_0$", alpha=0.8, c=colors["blue"], ls="--")
-ax.plot(range(1, n_iter + 1), loss_list[1], label=r"$\mathcal{L}_1$", alpha=0.8, c=colors["blue"], ls="dotted")
-ax.plot(range(1, n_iter + 1), loss, label=r"$\mathcal{L}$", c=colors["blue"])
-ax.set_ylabel(r"\mathcal{L} = \frac{1}{2} \sum_{t,k} \left( y_k^t -y_k^{*,t}\right)^2$")
-ax.set_xlabel("iteration")
-ax.set_xlim(1, n_iter)
-ax.xaxis.get_major_locator().set_params(integer=True)
-ax.legend(bbox_to_anchor=(1.01, 0.5), loc="center left")
-
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot spikes and dynamic variables
-# .................................
-# This plotting routine shows how to plot all of the recorded dynamic variables and spikes across time. We take
-# one snapshot in the first iteration and one snapshot at the end.
-
-
-def plot_recordable(ax, events, recordable, ylabel, xlims):
- for sender in set(events["senders"]):
- idc_sender = events["senders"] == sender
- idc_times = (events["times"][idc_sender] > xlims[0]) & (events["times"][idc_sender] < xlims[1])
- ax.plot(events["times"][idc_sender][idc_times], events[recordable][idc_sender][idc_times], lw=0.5)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(events[recordable]) - np.min(events[recordable])) * 0.1
- ax.set_ylim(np.min(events[recordable]) - margin, np.max(events[recordable]) + margin)
-
-
-def plot_spikes(ax, events, ylabel, xlims):
- idc_times = (events["times"] > xlims[0]) & (events["times"] < xlims[1])
- senders_subset = events["senders"][idc_times]
- times_subset = events["times"][idc_times]
-
- ax.scatter(times_subset, senders_subset, s=0.1)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(senders_subset) - np.min(senders_subset)) * 0.1
- ax.set_ylim(np.min(senders_subset) - margin, np.max(senders_subset) + margin)
-
-
-for title, xlims in zip(
- ["Dynamic variables before training", "Dynamic variables after training"],
- [(0, steps["sequence"]), (steps["task"] - steps["sequence"], steps["task"])],
-):
- fig, axs = plt.subplots(10, 1, sharex=True, figsize=(8, 12), gridspec_kw={"hspace": 0.4, "left": 0.2})
- fig.suptitle(title)
-
- plot_spikes(axs[0], events_sr_in, r"$z_i$" + "\n", xlims)
- plot_spikes(axs[1], events_sr_rec, r"$z_j$" + "\n", xlims)
-
- plot_recordable(axs[2], events_mm_rec, "V_m", r"$v_j$" + "\n(mV)", xlims)
- plot_recordable(axs[3], events_mm_rec, "surrogate_gradient", r"$\psi_j$" + "\n", xlims)
- plot_recordable(axs[4], events_mm_rec, "V_th_adapt", r"$A_j$" + "\n(mV)", xlims)
- plot_recordable(axs[5], events_mm_rec, "learning_signal", r"$L_j$" + "\n(pA)", xlims)
-
- plot_recordable(axs[6], events_mm_out, "V_m", r"$v_k$" + "\n(mV)", xlims)
- plot_recordable(axs[7], events_mm_out, "target_signal", r"$y^*_k$" + "\n", xlims)
- plot_recordable(axs[8], events_mm_out, "readout_signal", r"$y_k$" + "\n", xlims)
- plot_recordable(axs[9], events_mm_out, "error_signal", r"$y_k-y^*_k$" + "\n", xlims)
-
- axs[-1].set_xlabel(r"$t$ (ms)")
- axs[-1].set_xlim(*xlims)
-
- fig.align_ylabels()
-
-# %% ###########################################################################################################
-# Plot weight time courses
-# ........................
-# Similarly, we can plot the weight histories. Note that the weight recorder, attached to the synapses, works
-# differently from the other recorders. Since synapses are only activated when they transmit a spike, the weight
-# recorder only records the weight at those moments. That is why the first weight recordings do not start in
-# the first time step and we add the initial weights manually.
-
-
-def plot_weight_time_course(ax, events, nrns, label, ylabel):
- sender_label, target_label = label.split("_")
- nrns_senders = nrns[sender_label]
- nrns_targets = nrns[target_label]
-
- for sender in set(events_wr["senders"]):
- for target in set(events_wr["targets"]):
- if sender in nrns_senders and target in nrns_targets:
- idc_syn = (events["senders"] == sender) & (events["targets"] == target)
- if np.any(idc_syn):
- idc_syn_pre = (weights_pre_train[label]["source"] == sender) & (
- weights_pre_train[label]["target"] == target
- )
- times = np.concatenate([[0.0], events["times"][idc_syn]])
-
- weights = np.concatenate(
- [np.array(weights_pre_train[label]["weight"])[idc_syn_pre], events["weights"][idc_syn]]
- )
- ax.step(times, weights, c=colors["blue"])
- ax.set_ylabel(ylabel)
- ax.set_ylim(-0.6, 0.6)
-
-
-fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
-fig.suptitle("Weight time courses")
-
-nrns = {
- "in": nrns_in.tolist(),
- "rec": nrns_rec.tolist(),
- "out": nrns_out.tolist(),
-}
-
-plot_weight_time_course(axs[0], events_wr, nrns, "in_rec", r"$W_\text{in}$ (pA)")
-plot_weight_time_course(axs[1], events_wr, nrns, "rec_rec", r"$W_\text{rec}$ (pA)")
-plot_weight_time_course(axs[2], events_wr, nrns, "rec_out", r"$W_\text{out}$ (pA)")
-
-axs[-1].set_xlabel(r"$t$ (ms)")
-axs[-1].set_xlim(0, duration["task"])
-
-fig.align_ylabels()
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot weight matrices
-# ....................
-# If one is not interested in the time course of the weights, it is possible to read out only the initial and
-# final weights, which requires less computing time and memory than the weight recorder approach. Here, we plot
-# the corresponding weight matrices before and after the optimization.
-
-cmap = mpl.colors.LinearSegmentedColormap.from_list(
- "cmap", ((0.0, colors["blue"]), (0.5, colors["white"]), (1.0, colors["red"]))
-)
-
-fig, axs = plt.subplots(3, 2, sharex="col", sharey="row")
-fig.suptitle("Weight matrices")
-
-all_w_extrema = []
-
-for k in weights_pre_train.keys():
- w_pre = weights_pre_train[k]["weight"]
- w_post = weights_post_train[k]["weight"]
- all_w_extrema.append([np.min(w_pre), np.max(w_pre), np.min(w_post), np.max(w_post)])
-
-args = {"cmap": cmap, "vmin": np.min(all_w_extrema), "vmax": np.max(all_w_extrema)}
-
-for i, weights in zip([0, 1], [weights_pre_train, weights_post_train]):
- axs[0, i].pcolormesh(weights["in_rec"]["weight_matrix"].T, **args)
- axs[1, i].pcolormesh(weights["rec_rec"]["weight_matrix"], **args)
- cmesh = axs[2, i].pcolormesh(weights["rec_out"]["weight_matrix"], **args)
-
- axs[2, i].set_xlabel("recurrent\nneurons")
-
-axs[0, 0].set_ylabel("input\nneurons")
-axs[1, 0].set_ylabel("recurrent\nneurons")
-axs[2, 0].set_ylabel("readout\nneurons")
-fig.align_ylabels(axs[:, 0])
-
-axs[0, 0].text(0.5, 1.1, "before training", transform=axs[0, 0].transAxes, ha="center")
-axs[0, 1].text(0.5, 1.1, "after training", transform=axs[0, 1].transAxes, ha="center")
-
-axs[2, 0].yaxis.get_major_locator().set_params(integer=True)
-
-cbar = plt.colorbar(cmesh, cax=axs[1, 1].inset_axes([1.1, 0.2, 0.05, 0.8]), label="weight (pA)")
-
-fig.tight_layout()
-
-plt.show()
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.png b/pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.png
deleted file mode 100644
index 7d81e3edf1..0000000000
Binary files a/pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.png and /dev/null differ
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.py b/pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.py
deleted file mode 100644
index 40a32ae3ef..0000000000
--- a/pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.py
+++ /dev/null
@@ -1,743 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# eprop_supervised_regression_lemniscate_bsshslm_2020.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
-
-r"""
-Tutorial on learning to generate a lemniscate with e-prop after Bellec et al. (2020)
-------------------------------------------------------------------------------------
-
-Training a regression model using supervised e-prop plasticity to generate a lemniscate
-
-Description
-~~~~~~~~~~~
-
-This script demonstrates supervised learning of a regression task with a recurrent spiking neural network that
-is equipped with the eligibility propagation (e-prop) plasticity mechanism by Bellec et al. [1]_.
-
-This type of learning is demonstrated in the proof-of-concept task in [1]_. We based this script on their
-TensorFlow script given in [2]_ and changed the task as well as the parameters slightly.
-
-In this task, the network learns to generate an arbitrary N-dimensional temporal pattern. Here, the network
-learns to reproduce with its overall spiking activity a two-dimensional, roughly one-second-long target signal
-which encodes the x and y coordinates of a lemniscate.
-
-.. image:: eprop_supervised_regression_lemniscate_bsshslm_2020.png
- :width: 70 %
- :alt: Schematic of network architecture. Same as Figure 1 in the code.
- :align: center
-
-Learning in the neural network model is achieved by optimizing the connection weights with e-prop plasticity.
-This plasticity rule requires a specific network architecture depicted in Figure 1. The neural network model
-consists of a recurrent network that receives frozen noise input from spike generators and projects onto two
-readout neurons. Each individual readout signal denoted as :math:`y_k` is compared with a corresponding target
-signal represented as :math:`y_k^*`. The network's training error is assessed by employing a mean-squared error
-loss.
-
-Details on the event-based NEST implementation of e-prop can be found in [3]_.
-
-The development of this task and the hyper-parameter optimization were conducted by Agnes Korcsak-Gorzo and
-Charl Linssen, inspired by activities and feedback received at the CapoCaccia Workshop toward Neuromorphic
-Intelligence 2023.
-
-References
-~~~~~~~~~~
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R, Maass W (2020). A solution to the
- learning dilemma for recurrent networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] https://github.com/IGITUGraz/eligibility_propagation/blob/master/Figure_3_and_S7_e_prop_tutorials/tutorial_pattern_generation.py
-
-.. [3] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-""" # pylint: disable=line-too-long # noqa: E501
-
-# %% ###########################################################################################################
-# Import libraries
-# ~~~~~~~~~~~~~~~~
-# We begin by importing all libraries required for the simulation, analysis, and visualization.
-
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import nest
-import numpy as np
-from cycler import cycler
-from IPython.display import Image
-
-# %% ###########################################################################################################
-# Schematic of network architecture
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# This figure, identical to the one in the description, shows the required network architecture in the center,
-# the input and output of the pattern generation task above, and lists of the required NEST device, neuron, and
-# synapse models below. The connections that must be established are numbered 1 to 6.
-
-try:
- Image(filename="./eprop_supervised_regression_lemniscate_bsshslm_2020.png")
-except Exception:
- pass
-
-# %% ###########################################################################################################
-# Setup
-# ~~~~~
-
-# %% ###########################################################################################################
-# Initialize random generator
-# ...........................
-# We seed the numpy random generator, which will generate random initial weights as well as random input and
-# output.
-
-rng_seed = 1 # numpy random seed
-np.random.seed(rng_seed) # fix numpy random seed
-
-# %% ###########################################################################################################
-# Define timing of task
-# .....................
-# The task's temporal structure is then defined, once as time steps and once as durations in milliseconds.
-# Increasing the number of iterations enhances learning performance.
-
-batch_size = 1 # batch size
-n_iter = 200 # number of iterations, 5000 to reach convergence as in the figure
-
-steps = {
- "sequence": 1258, # time steps of one full sequence
-}
-
-steps["learning_window"] = steps["sequence"] # time steps of window with non-zero learning signals
-steps["task"] = n_iter * batch_size * steps["sequence"] # time steps of task
-
-steps.update(
- {
- "offset_gen": 1, # offset since generator signals start from time step 1
- "delay_in_rec": 1, # connection delay between input and recurrent neurons
- "delay_rec_out": 1, # connection delay between recurrent and output neurons
- "delay_out_norm": 1, # connection delay between output neurons for normalization
- "extension_sim": 1, # extra time step to close right-open simulation time interval in Simulate()
- }
-)
-
-steps["delays"] = steps["delay_in_rec"] + steps["delay_rec_out"] + steps["delay_out_norm"] # time steps of delays
-
-steps["total_offset"] = steps["offset_gen"] + steps["delays"] # time steps of total offset
-
-steps["sim"] = steps["task"] + steps["total_offset"] + steps["extension_sim"] # time steps of simulation
-
-duration = {"step": 1.0} # ms, temporal resolution of the simulation
-
-duration.update({key: value * duration["step"] for key, value in steps.items()}) # ms, durations
-
-# %% ###########################################################################################################
-# Set up simulation
-# .................
-# As the last step of the setup, we reset the NEST kernel to remove all existing NEST simulation settings and
-# objects and set some NEST kernel parameters, some of which are e-prop-related.
-
-params_setup = {
- "eprop_learning_window": duration["learning_window"],
- "eprop_reset_neurons_on_update": True, # if True, reset dynamic variables at start of each update interval
- "eprop_update_interval": duration["sequence"], # ms, time interval for updating the synaptic weights
- "print_time": False, # if True, print time progress bar during simulation, set False if run as code cell
- "resolution": duration["step"],
- "total_num_virtual_procs": 1, # number of virtual processes, set in case of distributed computing
- "rng_seed": rng_seed, # seed for NEST random generator
-}
-
-####################
-
-nest.ResetKernel()
-nest.set(**params_setup)
-
-# %% ###########################################################################################################
-# Create neurons
-# ~~~~~~~~~~~~~~
-# We proceed by creating a certain number of input, recurrent, and readout neurons and setting their parameters.
-# Additionally, we already create an input spike generator and an output target rate generator, which we will
-# configure later.
-
-n_in = 100 # number of input neurons
-n_rec = 200 # number of recurrent neurons
-n_out = 2 # number of readout neurons
-
-params_nrn_out = {
- "C_m": 1.0, # pF, membrane capacitance - takes effect only if neurons get current input (here not the case)
- "E_L": 0.0, # mV, leak / resting membrane potential
- "I_e": 0.0, # pA, external current input
- "loss": "mean_squared_error", # loss function
- "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning
- "tau_m": 50.0, # ms, membrane time constant
- "V_m": 0.0, # mV, initial value of the membrane voltage
-}
-
-tau_m_mean = 30.0 # ms, mean of membrane time constant distribution
-
-params_nrn_rec = {
- "beta": 1.0, # width scaling of the pseudo-derivative
- "adapt_tau": 2000.0, # ms, time constant of adaptive threshold
- "C_m": 250.0,
- "c_reg": 150.0, # coefficient of firing rate regularization
- "E_L": 0.0,
- "f_target": 20.0, # spikes/s, target firing rate for firing rate regularization
- "gamma": 0.3, # height scaling of the pseudo-derivative
- "I_e": 0.0,
- "regular_spike_arrival": False,
- "surrogate_gradient_function": "piecewise_linear", # surrogate gradient / pseudo-derivative function
- "t_ref": 0.0, # ms, duration of refractory period
- "tau_m": nest.random.normal(mean=tau_m_mean, std=2.0),
- "V_m": 0.0,
- "V_th": 0.03, # mV, spike threshold membrane voltage
-}
-
-# factors from the original pseudo-derivative definition are incorporated into the parameters
-params_nrn_rec["gamma"] /= params_nrn_rec["V_th"]
-params_nrn_rec["beta"] /= np.abs(params_nrn_rec["V_th"]) # prefactor is inside abs in the original definition
-
-params_nrn_rec["adapt_beta"] = (
- 1.7 * (1.0 - np.exp(-1 / params_nrn_rec["adapt_tau"])) / (1.0 - np.exp(-1.0 / tau_m_mean))
-) # prefactor of adaptive threshold
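-# For orientation: with adapt_tau = 2000.0 ms and tau_m_mean = 30.0 ms, the expression above evaluates
-# to approximately 0.026.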
-
-####################
-
-# Intermediate parrot neurons required between input spike generators and recurrent neurons,
-# since devices cannot establish plastic synapses for technical reasons
-
-gen_spk_in = nest.Create("spike_generator", n_in)
-nrns_in = nest.Create("parrot_neuron", n_in)
-
-# The suffix _bsshslm_2020 follows the NEST convention of indicating in the model name the paper that
-# introduced it, using the first letters of the authors' last names and the publication year.
-
-nrns_rec = nest.Create("eprop_iaf_adapt_bsshslm_2020", n_rec, params_nrn_rec)
-nrns_out = nest.Create("eprop_readout_bsshslm_2020", n_out, params_nrn_out)
-gen_rate_target = nest.Create("step_rate_generator", n_out)
-
-
-# %% ###########################################################################################################
-# Create recorders
-# ~~~~~~~~~~~~~~~~
-# We also create recorders, which, while not required for the training, will allow us to track various dynamic
-# variables of the neurons, spikes, and changes in synaptic weights. To save computing time and memory, the
-# recorders, the recorded variables, neurons, and synapses can be limited to the ones relevant to the
-# experiment, and the recording interval can be increased (see the documentation on the specific recorders). By
-# default, recordings are stored in memory but can also be written to file.
-
-n_record = 1 # number of neurons to record dynamic variables from - this script requires n_record >= 1
-n_record_w = 5 # number of senders and targets to record weights from - this script requires n_record_w >= 1
-
-if n_record == 0 or n_record_w == 0:
- raise ValueError("n_record and n_record_w >= 1 required")
-
-params_mm_rec = {
- "interval": duration["step"], # interval between two recorded time points
- "record_from": [
- "V_m",
- "surrogate_gradient",
- "learning_signal",
- "V_th_adapt",
- "adaptation",
- ], # dynamic variables to record
- "start": duration["offset_gen"] + duration["delay_in_rec"], # start time of recording
- "stop": duration["offset_gen"] + duration["delay_in_rec"] + duration["task"], # stop time of recording
- "label": "multimeter_rec",
-}
-
-params_mm_out = {
- "interval": duration["step"],
- "record_from": ["V_m", "readout_signal", "readout_signal_unnorm", "target_signal", "error_signal"],
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "multimeter_out",
-}
-
-params_wr = {
- "senders": nrns_in[:n_record_w] + nrns_rec[:n_record_w], # limit senders to subsample weights to record
- "targets": nrns_rec[:n_record_w] + nrns_out, # limit targets to subsample weights to record from
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
-}
-
-params_sr_in = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "spike_recorder_in",
-}
-
-params_sr_rec = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "spike_recorder_rec",
-}
-
-####################
-
-mm_rec = nest.Create("multimeter", params_mm_rec)
-mm_out = nest.Create("multimeter", params_mm_out)
-sr_in = nest.Create("spike_recorder", params_sr_in)
-sr_rec = nest.Create("spike_recorder", params_sr_rec)
-wr = nest.Create("weight_recorder", params_wr)
-
-nrns_rec_record = nrns_rec[:n_record]
-
-# %% ###########################################################################################################
-# Create connections
-# ~~~~~~~~~~~~~~~~~~
-# Now, we define the connectivity and set up the synaptic parameters, with the synaptic weights drawn from
-# normal distributions. After these preparations, we establish the enumerated connections of the core network,
-# as well as additional connections to the recorders.
-
-params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
-params_conn_one_to_one = {"rule": "one_to_one"}
-
-dtype_weights = np.float32 # data type of weights - for reproducing TF results set to np.float32
-weights_in_rec = np.array(np.random.randn(n_in, n_rec).T / np.sqrt(n_in), dtype=dtype_weights)
-weights_rec_rec = np.array(np.random.randn(n_rec, n_rec).T / np.sqrt(n_rec), dtype=dtype_weights)
-np.fill_diagonal(weights_rec_rec, 0.0) # since autapses are not allowed, set the corresponding weights to zero
-weights_rec_out = np.array(np.random.randn(n_rec, n_out).T / np.sqrt(n_rec), dtype=dtype_weights)
-weights_out_rec = np.array(np.random.randn(n_rec, n_out) / np.sqrt(n_rec), dtype=dtype_weights)
-
-params_common_syn_eprop = {
- "optimizer": {
- "type": "adam", # algorithm to optimize the weights
- "batch_size": batch_size,
- "beta_1": 0.9, # exponential decay rate for 1st moment estimate of Adam optimizer
- "beta_2": 0.999, # exponential decay rate for 2nd moment raw estimate of Adam optimizer
- "epsilon": 1e-8, # small numerical stabilization constant of Adam optimizer
- "eta": 5e-3, # learning rate
- "Wmin": -100.0, # pA, minimal limit of the synaptic weights
- "Wmax": 100.0, # pA, maximal limit of the synaptic weights
- },
- "average_gradient": False, # if True, average the gradient over the learning window
- "weight_recorder": wr,
-}
-
-params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"], # ms, dendritic delay
- "tau_m_readout": params_nrn_out["tau_m"], # ms, for technical reasons pass readout neuron membrane time constant
-}
-
-params_syn_in = params_syn_base.copy()
-params_syn_in["weight"] = weights_in_rec # pA, initial values for the synaptic weights
-
-params_syn_rec = params_syn_base.copy()
-params_syn_rec["weight"] = weights_rec_rec
-
-params_syn_out = params_syn_base.copy()
-params_syn_out["weight"] = weights_rec_out
-
-params_syn_feedback = {
- "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
- "delay": duration["step"],
- "weight": weights_out_rec,
-}
-
-params_syn_rate_target = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 2, # receptor type over which readout neuron receives target signal
-}
-
-params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
-}
-
-params_init_optimizer = {
- "optimizer": {
- "m": 0.0, # initial 1st moment estimate m of Adam optimizer
- "v": 0.0, # initial 2nd moment raw estimate v of Adam optimizer
- }
-}
-
-####################
-
-nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
-nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static) # connection 1
-nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in) # connection 2
-nest.Connect(nrns_rec, nrns_rec, params_conn_all_to_all, params_syn_rec) # connection 3
-nest.Connect(nrns_rec, nrns_out, params_conn_all_to_all, params_syn_out) # connection 4
-nest.Connect(nrns_out, nrns_rec, params_conn_all_to_all, params_syn_feedback) # connection 5
-nest.Connect(gen_rate_target, nrns_out, params_conn_one_to_one, params_syn_rate_target) # connection 6
-
-nest.Connect(nrns_in, sr_in, params_conn_all_to_all, params_syn_static)
-nest.Connect(nrns_rec, sr_rec, params_conn_all_to_all, params_syn_static)
-
-nest.Connect(mm_rec, nrns_rec_record, params_conn_all_to_all, params_syn_static)
-nest.Connect(mm_out, nrns_out, params_conn_all_to_all, params_syn_static)
-
-# After creating the connections, we can individually initialize the optimizer's
-# dynamic variables for single synapses (shown here as an example for two connections).
-
-nest.GetConnections(nrns_rec[0], nrns_rec[1:3]).set([params_init_optimizer] * 2)
-
-# %% ###########################################################################################################
-# Create input
-# ~~~~~~~~~~~~
-# We generate some frozen Poisson spike noise of a fixed rate that is repeated in each iteration and feed these
-# spike times to the previously created input spike generator. The network will use these spike times as a
-# temporal backbone for encoding the target signal into its recurrent spiking activity.
-
-input_spike_prob = 0.05 # spike probability of frozen input noise
-dtype_in_spks = np.float32 # data type of input spikes - for reproducing TF results set to np.float32
-
-input_spike_bools = (np.random.rand(steps["sequence"], n_in) < input_spike_prob).swapaxes(0, 1)
-input_spike_bools[:, 0] = 0 # remove spikes in 0th time step of every sequence for technical reasons
-
-sequence_starts = np.arange(0.0, duration["task"], duration["sequence"]) + duration["offset_gen"]
-params_gen_spk_in = []
-for input_spike_bool in input_spike_bools:
- input_spike_times = np.arange(0.0, duration["sequence"], duration["step"])[input_spike_bool]
- input_spike_times_all = [input_spike_times + start for start in sequence_starts]
- params_gen_spk_in.append({"spike_times": np.hstack(input_spike_times_all).astype(dtype_in_spks)})
-
-####################
-
-nest.SetStatus(gen_spk_in, params_gen_spk_in)
-
-# %% ###########################################################################################################
-# Create output
-# ~~~~~~~~~~~~~
-# Then, we construct the x and y coordinates of a lemniscate as the two-dimensional target signal: two sine
-# waves with a 1:2 frequency ratio that together trace a figure-eight over one sequence of roughly 1.3 seconds.
-# This signal, like the input, is repeated for all iterations and fed into the rate generators created above.
-
-target_signal_list = [
- np.sin(np.linspace(0.0, 2.0 * np.pi, steps["sequence"])),
- np.sin(np.linspace(0.0, 4.0 * np.pi, steps["sequence"])),
-]
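-# Note: the two components above are sine waves with a 1:2 frequency ratio; plotted against each other
-# (see the "Plot pattern" section below), they trace the figure-eight Lissajous curve of the lemniscate.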
-
-params_gen_rate_target = []
-
-for target_signal in target_signal_list:
- params_gen_rate_target.append(
- {
- "amplitude_times": np.arange(0.0, duration["task"], duration["step"]) + duration["total_offset"],
- "amplitude_values": np.tile(target_signal, n_iter * batch_size),
- }
- )
-
-####################
-
-nest.SetStatus(gen_rate_target, params_gen_rate_target)
-
-# %% ###########################################################################################################
-# Force final update
-# ~~~~~~~~~~~~~~~~~~
-# Synapses only become active, that is, the correct weight update is only calculated and applied, when they
-# transmit a spike. To still be able to read out the correct weights at the end of the simulation, we force the
-# presynaptic neurons to spike and thus trigger an update of all synapses, including those that have not
-# transmitted a spike in the last update interval, by sending a strong spike to all neurons that form the
-# presynaptic side of an eprop synapse. This step is required purely for technical reasons.
-
-gen_spk_final_update = nest.Create("spike_generator", 1, {"spike_times": [duration["task"] + duration["delays"]]})
-
-nest.Connect(gen_spk_final_update, nrns_in + nrns_rec, "all_to_all", {"weight": 1000.0})
-
-# %% ###########################################################################################################
-# Read out pre-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Before we begin training, we read out the initial weight matrices so that we can eventually compare them to
-# the optimized weights.
-
-
-def get_weights(pop_pre, pop_post):
- conns = nest.GetConnections(pop_pre, pop_post).get(["source", "target", "weight"])
- conns["senders"] = np.array(conns["source"]) - np.min(conns["source"])
- conns["targets"] = np.array(conns["target"]) - np.min(conns["target"])
-
- conns["weight_matrix"] = np.zeros((len(pop_post), len(pop_pre)))
- conns["weight_matrix"][conns["targets"], conns["senders"]] = conns["weight"]
- return conns
-
-
-weights_pre_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Simulate
-# ~~~~~~~~
-# We train the network by simulating for a set simulation time, determined by the number of iterations, the
-# batch size, and the length of one sequence.
-
-nest.Simulate(duration["sim"])
-
-# %% ###########################################################################################################
-# Read out post-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# After the training, we can read out the optimized final weights.
-
-weights_post_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Read out recorders
-# ~~~~~~~~~~~~~~~~~~
-# We can also retrieve the recorded history of the dynamic variables and weights, as well as detected spikes.
-
-events_mm_rec = mm_rec.get("events")
-events_mm_out = mm_out.get("events")
-events_sr_in = sr_in.get("events")
-events_sr_rec = sr_rec.get("events")
-events_wr = wr.get("events")
-
-# %% ###########################################################################################################
-# Evaluate training error
-# ~~~~~~~~~~~~~~~~~~~~~~~
-# We evaluate the network's training error by calculating a loss - in this case, the mean squared error between
-# the integrated recurrent network activity and the target rate.
-
-readout_signal = events_mm_out["readout_signal"]
-target_signal = events_mm_out["target_signal"]
-senders = events_mm_out["senders"]
-
-loss_list = []
-for sender in set(senders):
- idc = senders == sender
- error = (readout_signal[idc] - target_signal[idc]) ** 2
- loss_list.append(0.5 * np.add.reduceat(error, np.arange(0, steps["task"], steps["sequence"])))
-
-readout_signal = np.array([readout_signal[senders == i] for i in set(senders)])
-target_signal = np.array([target_signal[senders == i] for i in set(senders)])
-
-readout_signal = readout_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-target_signal = target_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-
-loss = 0.5 * np.mean(np.sum((readout_signal - target_signal) ** 2, axis=3), axis=(0, 2))
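-
-# As a rough consistency check (batch_size = 1 here), this loss should coincide with the mean of the two
-# per-readout losses computed above, e.g., np.allclose(loss, np.mean(loss_list, axis=0)) should hold.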
-
-# %% ###########################################################################################################
-# Plot results
-# ~~~~~~~~~~~~
-# Finally, we plot the results in a series of figures.
-
-do_plotting = True # if True, plot the results
-
-if not do_plotting:
- exit()
-
-colors = {
- "blue": "#2854c5ff",
- "red": "#e04b40ff",
- "white": "#ffffffff",
-}
-
-plt.rcParams.update(
- {
- "axes.spines.right": False,
- "axes.spines.top": False,
- "axes.prop_cycle": cycler(color=[colors["blue"], colors["red"]]),
- }
-)
-
-# %% ###########################################################################################################
-# Plot pattern
-# ............
-# First, we visualize the created pattern and plot the target for comparison. The outputs of the two readout
-# neurons encode the horizontal and vertical coordinate of the pattern respectively.
-
-fig, ax = plt.subplots()
-fig.suptitle("Pattern")
-
-ax.plot(readout_signal[0, -1, 0, :], -readout_signal[1, -1, 0, :], c=colors["red"], label="readout")
-
-ax.plot(target_signal[0, -1, 0, :], -target_signal[1, -1, 0, :], c=colors["blue"], label="target")
-
-ax.set_xlabel(r"$y_0$ and $y^*_0$")
-ax.set_ylabel(r"$y_1$ and $y^*_1$")
-
-ax.axis("equal")
-
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot learning performance
-# .........................
-# Next, we plot the learning performance of the network: the loss plotted against the iterations.
-
-fig, ax = plt.subplots()
-fig.suptitle("Learning performance")
-
-ax.plot(range(1, n_iter + 1), loss_list[0], label=r"$\mathcal{L}_0$", alpha=0.8, c=colors["blue"], ls="--")
-ax.plot(range(1, n_iter + 1), loss_list[1], label=r"$\mathcal{L}_1$", alpha=0.8, c=colors["blue"], ls="dotted")
-ax.plot(range(1, n_iter + 1), loss, label=r"$\mathcal{L}$", c=colors["blue"])
-ax.set_ylabel(r"$\mathcal{L} = \frac{1}{2} \sum_{t,k} \left( y_k^t -y_k^{*,t}\right)^2$")
-ax.set_xlabel("iteration")
-ax.set_xlim(1, n_iter)
-ax.xaxis.get_major_locator().set_params(integer=True)
-ax.legend(bbox_to_anchor=(1.01, 0.5), loc="center left")
-
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot spikes and dynamic variables
-# .................................
-# This plotting routine shows how to plot all of the recorded dynamic variables and spikes across time. We take
-# one snapshot in the first iteration and one snapshot at the end.
-
-
-def plot_recordable(ax, events, recordable, ylabel, xlims):
- for sender in set(events["senders"]):
- idc_sender = events["senders"] == sender
- idc_times = (events["times"][idc_sender] > xlims[0]) & (events["times"][idc_sender] < xlims[1])
- ax.plot(events["times"][idc_sender][idc_times], events[recordable][idc_sender][idc_times], lw=0.5)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(events[recordable]) - np.min(events[recordable])) * 0.1
- ax.set_ylim(np.min(events[recordable]) - margin, np.max(events[recordable]) + margin)
-
-
-def plot_spikes(ax, events, ylabel, xlims):
- idc_times = (events["times"] > xlims[0]) & (events["times"] < xlims[1])
- senders_subset = events["senders"][idc_times]
- times_subset = events["times"][idc_times]
-
- ax.scatter(times_subset, senders_subset, s=0.1)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(senders_subset) - np.min(senders_subset)) * 0.1
- ax.set_ylim(np.min(senders_subset) - margin, np.max(senders_subset) + margin)
-
-
-for title, xlims in zip(
- ["Dynamic variables before training", "Dynamic variables after training"],
- [(0, steps["sequence"]), (steps["task"] - steps["sequence"], steps["task"])],
-):
- fig, axs = plt.subplots(10, 1, sharex=True, figsize=(8, 12), gridspec_kw={"hspace": 0.4, "left": 0.2})
- fig.suptitle(title)
-
- plot_spikes(axs[0], events_sr_in, r"$z_i$" + "\n", xlims)
- plot_spikes(axs[1], events_sr_rec, r"$z_j$" + "\n", xlims)
-
- plot_recordable(axs[2], events_mm_rec, "V_m", r"$v_j$" + "\n(mV)", xlims)
- plot_recordable(axs[3], events_mm_rec, "surrogate_gradient", r"$\psi_j$" + "\n", xlims)
- plot_recordable(axs[4], events_mm_rec, "V_th_adapt", r"$A_j$" + "\n(mV)", xlims)
- plot_recordable(axs[5], events_mm_rec, "learning_signal", r"$L_j$" + "\n(pA)", xlims)
-
- plot_recordable(axs[6], events_mm_out, "V_m", r"$v_k$" + "\n(mV)", xlims)
- plot_recordable(axs[7], events_mm_out, "target_signal", r"$y^*_k$" + "\n", xlims)
- plot_recordable(axs[8], events_mm_out, "readout_signal", r"$y_k$" + "\n", xlims)
- plot_recordable(axs[9], events_mm_out, "error_signal", r"$y_k-y^*_k$" + "\n", xlims)
-
- axs[-1].set_xlabel(r"$t$ (ms)")
- axs[-1].set_xlim(*xlims)
-
- fig.align_ylabels()
-
-# %% ###########################################################################################################
-# Plot weight time courses
-# ........................
-# Similarly, we can plot the weight histories. Note that the weight recorder, attached to the synapses, works
-# differently from the other recorders. Since synapses only get activated when they transmit a spike, the weight
-# recorder only records the weight at those moments. That is why the first recorded weights do not appear in
-# the first time step and we add the initial weights manually.
-
-
-def plot_weight_time_course(ax, events, nrns, label, ylabel):
- sender_label, target_label = label.split("_")
- nrns_senders = nrns[sender_label]
- nrns_targets = nrns[target_label]
-
- for sender in set(events_wr["senders"]):
- for target in set(events_wr["targets"]):
- if sender in nrns_senders and target in nrns_targets:
- idc_syn = (events["senders"] == sender) & (events["targets"] == target)
- if np.any(idc_syn):
- idc_syn_pre = (weights_pre_train[label]["source"] == sender) & (
- weights_pre_train[label]["target"] == target
- )
- times = np.concatenate([[0.0], events["times"][idc_syn]])
-
- weights = np.concatenate(
- [np.array(weights_pre_train[label]["weight"])[idc_syn_pre], events["weights"][idc_syn]]
- )
- ax.step(times, weights, c=colors["blue"])
- ax.set_ylabel(ylabel)
- ax.set_ylim(-0.6, 0.6)
-
-
-fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
-fig.suptitle("Weight time courses")
-
-nrns = {
- "in": nrns_in.tolist(),
- "rec": nrns_rec.tolist(),
- "out": nrns_out.tolist(),
-}
-
-plot_weight_time_course(axs[0], events_wr, nrns, "in_rec", r"$W_\text{in}$ (pA)")
-plot_weight_time_course(axs[1], events_wr, nrns, "rec_rec", r"$W_\text{rec}$ (pA)")
-plot_weight_time_course(axs[2], events_wr, nrns, "rec_out", r"$W_\text{out}$ (pA)")
-
-axs[-1].set_xlabel(r"$t$ (ms)")
-axs[-1].set_xlim(0, duration["task"])
-
-fig.align_ylabels()
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot weight matrices
-# ....................
-# If one is not interested in the time course of the weights, it is possible to read out only the initial and
-# final weights, which requires less computing time and memory than the weight recorder approach. Here, we plot
-# the corresponding weight matrices before and after the optimization.
-
-cmap = mpl.colors.LinearSegmentedColormap.from_list(
- "cmap", ((0.0, colors["blue"]), (0.5, colors["white"]), (1.0, colors["red"]))
-)
-
-fig, axs = plt.subplots(3, 2, sharex="col", sharey="row")
-fig.suptitle("Weight matrices")
-
-all_w_extrema = []
-
-for k in weights_pre_train.keys():
- w_pre = weights_pre_train[k]["weight"]
- w_post = weights_post_train[k]["weight"]
- all_w_extrema.append([np.min(w_pre), np.max(w_pre), np.min(w_post), np.max(w_post)])
-
-args = {"cmap": cmap, "vmin": np.min(all_w_extrema), "vmax": np.max(all_w_extrema)}
-
-for i, weights in zip([0, 1], [weights_pre_train, weights_post_train]):
- axs[0, i].pcolormesh(weights["in_rec"]["weight_matrix"].T, **args)
- axs[1, i].pcolormesh(weights["rec_rec"]["weight_matrix"], **args)
- cmesh = axs[2, i].pcolormesh(weights["rec_out"]["weight_matrix"], **args)
-
- axs[2, i].set_xlabel("recurrent\nneurons")
-
-axs[0, 0].set_ylabel("input\nneurons")
-axs[1, 0].set_ylabel("recurrent\nneurons")
-axs[2, 0].set_ylabel("readout\nneurons")
-fig.align_ylabels(axs[:, 0])
-
-axs[0, 0].text(0.5, 1.1, "before training", transform=axs[0, 0].transAxes, ha="center")
-axs[0, 1].text(0.5, 1.1, "after training", transform=axs[0, 1].transAxes, ha="center")
-
-axs[2, 0].yaxis.get_major_locator().set_params(integer=True)
-
-cbar = plt.colorbar(cmesh, cax=axs[1, 1].inset_axes([1.1, 0.2, 0.05, 0.8]), label="weight (pA)")
-
-fig.tight_layout()
-
-plt.show()
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.png b/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.png
deleted file mode 100644
index 83a4bfdc4a..0000000000
Binary files a/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.png and /dev/null differ
diff --git a/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.py b/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.py
deleted file mode 100644
index 46387e4df5..0000000000
--- a/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.py
+++ /dev/null
@@ -1,692 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# eprop_supervised_regression_sine-waves_bsshslm_2020.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
-
-r"""
-Tutorial on learning to generate sine waves with e-prop after Bellec et al. (2020)
-----------------------------------------------------------------------------------
-
-Training a regression model using supervised e-prop plasticity to generate sine waves
-
-Description
-~~~~~~~~~~~
-
-This script demonstrates supervised learning of a regression task with a recurrent spiking neural network that
-is equipped with the eligibility propagation (e-prop) plasticity mechanism by Bellec et al. [1]_.
-
-This type of learning is demonstrated at the proof-of-concept task in [1]_. We based this script on their
-TensorFlow script given in [2]_.
-
-In this task, the network learns to generate an arbitrary N-dimensional temporal pattern. Here, the
-network learns to reproduce with its overall spiking activity a one-dimensional, one-second-long target signal
-which is a superposition of four sine waves of different amplitudes, phases, and periods.
-
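-Up to a constant offset and normalization (see the construction of the target signal in the code below),
-the target is approximately of the form
-
-.. math:: y^*(t) \propto \sum_{i=1}^{4} A_i \sin\left( \frac{2 \pi t}{T_i} + \phi_i \right) \,,
-
-with amplitudes :math:`A_i` and phases :math:`\phi_i` drawn randomly and fixed periods :math:`T_i`.
-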
-.. image:: eprop_supervised_regression_sine-waves_bsshslm_2020.png
- :width: 70 %
- :alt: Schematic of network architecture. Same as Figure 1 in the code.
- :align: center
-
-Learning in the neural network model is achieved by optimizing the connection weights with e-prop plasticity.
-This plasticity rule requires a specific network architecture depicted in Figure 1. The neural network model
-consists of a recurrent network that receives frozen noise input from spike generators and projects onto one
-readout neuron. The readout neuron compares the network signal :math:`y` with the teacher target signal
-:math:`y^*`, which it receives from a rate generator. In scenarios with multiple readout neurons, each individual
-readout signal denoted as :math:`y_k` is compared with a corresponding target signal represented as
-:math:`y_k^*`. The network's training error is assessed by employing a mean-squared error loss.
-
-Details on the event-based NEST implementation of e-prop can be found in [3]_.
-
-References
-~~~~~~~~~~
-
-.. [1] Bellec G, Scherr F, Subramoney A, Hajek E, Salaj D, Legenstein R, Maass W (2020). A solution to the
- learning dilemma for recurrent networks of spiking neurons. Nature Communications, 11:3625.
- https://doi.org/10.1038/s41467-020-17236-y
-
-.. [2] https://github.com/IGITUGraz/eligibility_propagation/blob/master/Figure_3_and_S7_e_prop_tutorials/tutorial_pattern_generation.py
-
-.. [3] Korcsak-Gorzo A, Stapmanns J, Espinoza Valverde JA, Plesser HE,
- Dahmen D, Bolten M, Van Albada SJ, Diesmann M. Event-based
- implementation of eligibility propagation (in preparation)
-
-""" # pylint: disable=line-too-long # noqa: E501
-
-# %% ###########################################################################################################
-# Import libraries
-# ~~~~~~~~~~~~~~~~
-# We begin by importing all libraries required for the simulation, analysis, and visualization.
-
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import nest
-import numpy as np
-from cycler import cycler
-from IPython.display import Image
-
-# %% ###########################################################################################################
-# Schematic of network architecture
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# This figure, identical to the one in the description, shows the required network architecture in the center,
-# the input and output of the pattern generation task above, and lists of the required NEST device, neuron, and
-# synapse models below. The connections that must be established are numbered 1 to 6.
-
-try:
- Image(filename="./eprop_supervised_regression_sine-waves_bsshslm_2020.png")
-except Exception:
- pass
-
-# %% ###########################################################################################################
-# Setup
-# ~~~~~
-
-# %% ###########################################################################################################
-# Initialize random generator
-# ...........................
-# We seed the numpy random generator, which will generate random initial weights as well as random input and
-# output.
-
-rng_seed = 1 # numpy random seed
-np.random.seed(rng_seed) # fix numpy random seed
-
-# %% ###########################################################################################################
-# Define timing of task
-# .....................
-# The task's temporal structure is then defined, once as time steps and once as durations in milliseconds.
-# Increasing the number of iterations enhances learning performance.
-
-batch_size = 1 # batch size, 1 in reference [2]
-n_iter = 200 # number of iterations, 2000 in reference [2]
-
-steps = {
- "sequence": 1000, # time steps of one full sequence
-}
-
-steps["learning_window"] = steps["sequence"] # time steps of window with non-zero learning signals
-steps["task"] = n_iter * batch_size * steps["sequence"] # time steps of task
-
-steps.update(
- {
- "offset_gen": 1, # offset since generator signals start from time step 1
- "delay_in_rec": 1, # connection delay between input and recurrent neurons
- "delay_rec_out": 1, # connection delay between recurrent and output neurons
- "delay_out_norm": 1, # connection delay between output neurons for normalization
- "extension_sim": 1, # extra time step to close right-open simulation time interval in Simulate()
- }
-)
-
-steps["delays"] = steps["delay_in_rec"] + steps["delay_rec_out"] + steps["delay_out_norm"] # time steps of delays
-
-steps["total_offset"] = steps["offset_gen"] + steps["delays"] # time steps of total offset
-
-steps["sim"] = steps["task"] + steps["total_offset"] + steps["extension_sim"] # time steps of simulation
-
-duration = {"step": 1.0} # ms, temporal resolution of the simulation
-
-duration.update({key: value * duration["step"] for key, value in steps.items()}) # ms, durations
-
-# %% ###########################################################################################################
-# Set up simulation
-# .................
-# As the last step of the setup, we reset the NEST kernel to remove all existing NEST simulation settings and
-# objects and set some NEST kernel parameters, some of which are e-prop-related.
-
-params_setup = {
- "eprop_learning_window": duration["learning_window"],
- "eprop_reset_neurons_on_update": True, # if True, reset dynamic variables at start of each update interval
- "eprop_update_interval": duration["sequence"], # ms, time interval for updating the synaptic weights
- "print_time": False, # if True, print time progress bar during simulation, set False if run as code cell
- "resolution": duration["step"],
- "total_num_virtual_procs": 1, # number of virtual processes, set in case of distributed computing
-}
-
-####################
-
-nest.ResetKernel()
-nest.set(**params_setup)
-
-# %% ###########################################################################################################
-# Create neurons
-# ~~~~~~~~~~~~~~
-# We proceed by creating a certain number of input, recurrent, and readout neurons and setting their parameters.
-# Additionally, we already create an input spike generator and an output target rate generator, which we will
-# configure later.
-
-n_in = 100 # number of input neurons
-n_rec = 100 # number of recurrent neurons
-n_out = 1 # number of readout neurons
-
-params_nrn_out = {
- "C_m": 1.0, # pF, membrane capacitance - takes effect only if neurons get current input (here not the case)
- "E_L": 0.0, # mV, leak / resting membrane potential
- "I_e": 0.0, # pA, external current input
- "loss": "mean_squared_error", # loss function
- "regular_spike_arrival": False, # If True, input spikes arrive at end of time step, if False at beginning
- "tau_m": 30.0, # ms, membrane time constant
- "V_m": 0.0, # mV, initial value of the membrane voltage
-}
-
-params_nrn_rec = {
- "beta": 1.0, # width scaling of the pseudo-derivative
- "C_m": 1.0,
- "c_reg": 300.0, # coefficient of firing rate regularization
- "E_L": 0.0,
- "f_target": 10.0, # spikes/s, target firing rate for firing rate regularization
- "gamma": 0.3, # height scaling of the pseudo-derivative
- "I_e": 0.0,
- "regular_spike_arrival": False,
- "surrogate_gradient_function": "piecewise_linear", # surrogate gradient / pseudo-derivative function
- "t_ref": 0.0, # ms, duration of refractory period
- "tau_m": 30.0,
- "V_m": 0.0,
- "V_th": 0.03, # mV, spike threshold membrane voltage
-}
-
-# factors from the original pseudo-derivative definition are incorporated into the parameters
-params_nrn_rec["gamma"] /= params_nrn_rec["V_th"]
-params_nrn_rec["beta"] /= np.abs(params_nrn_rec["V_th"]) # prefactor is inside abs in the original definition
-
-####################
-
-# Intermediate parrot neurons required between input spike generators and recurrent neurons,
-# since devices cannot establish plastic synapses for technical reasons
-
-gen_spk_in = nest.Create("spike_generator", n_in)
-nrns_in = nest.Create("parrot_neuron", n_in)
-
-# The suffix _bsshslm_2020 follows the NEST convention of indicating in the model name the paper that
-# introduced it, using the first letters of the authors' last names and the publication year.
-
-nrns_rec = nest.Create("eprop_iaf_bsshslm_2020", n_rec, params_nrn_rec)
-nrns_out = nest.Create("eprop_readout_bsshslm_2020", n_out, params_nrn_out)
-gen_rate_target = nest.Create("step_rate_generator", n_out)
-
-
-# %% ###########################################################################################################
-# Create recorders
-# ~~~~~~~~~~~~~~~~
-# We also create recorders, which, while not required for the training, will allow us to track various dynamic
-# variables of the neurons, spikes, and changes in synaptic weights. To save computing time and memory, the
-# recorders, the recorded variables, neurons, and synapses can be limited to the ones relevant to the
-# experiment, and the recording interval can be increased (see the documentation on the specific recorders). By
-# default, recordings are stored in memory but can also be written to file.
-
-n_record = 1 # number of neurons to record dynamic variables from - this script requires n_record >= 1
-n_record_w = 5 # number of senders and targets to record weights from - this script requires n_record_w >= 1
-
-if n_record == 0 or n_record_w == 0:
- raise ValueError("n_record and n_record_w >= 1 required")
-
-params_mm_rec = {
- "interval": duration["step"], # interval between two recorded time points
- "record_from": ["V_m", "surrogate_gradient", "learning_signal"], # dynamic variables to record
- "start": duration["offset_gen"] + duration["delay_in_rec"], # start time of recording
- "stop": duration["offset_gen"] + duration["delay_in_rec"] + duration["task"], # stop time of recording
- "label": "multimeter_rec",
-}
-
-params_mm_out = {
- "interval": duration["step"],
- "record_from": ["V_m", "readout_signal", "readout_signal_unnorm", "target_signal", "error_signal"],
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "multimeter_out",
-}
-
-params_wr = {
- "senders": nrns_in[:n_record_w] + nrns_rec[:n_record_w], # limit senders to subsample weights to record
- "targets": nrns_rec[:n_record_w] + nrns_out, # limit targets to subsample weights to record from
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "weight_recorder",
-}
-
-params_sr_in = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "spike_recorder_in",
-}
-
-params_sr_rec = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- "label": "spike_recorder_rec",
-}
-
-####################
-
-mm_rec = nest.Create("multimeter", params_mm_rec)
-mm_out = nest.Create("multimeter", params_mm_out)
-sr_in = nest.Create("spike_recorder", params_sr_in)
-sr_rec = nest.Create("spike_recorder", params_sr_rec)
-wr = nest.Create("weight_recorder", params_wr)
-
-nrns_rec_record = nrns_rec[:n_record]
-
-# %% ###########################################################################################################
-# Create connections
-# ~~~~~~~~~~~~~~~~~~
-# Now, we define the connectivity and set up the synaptic parameters, with the synaptic weights drawn from
-# normal distributions. After these preparations, we establish the enumerated connections of the core network,
-# as well as additional connections to the recorders.
-
-params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
-params_conn_one_to_one = {"rule": "one_to_one"}
-
-dtype_weights = np.float32 # data type of weights - for reproducing TF results set to np.float32
-weights_in_rec = np.array(np.random.randn(n_in, n_rec).T / np.sqrt(n_in), dtype=dtype_weights)
-weights_rec_rec = np.array(np.random.randn(n_rec, n_rec).T / np.sqrt(n_rec), dtype=dtype_weights)
-np.fill_diagonal(weights_rec_rec, 0.0) # since autapses are not allowed, set the corresponding weights to zero
-weights_rec_out = np.array(np.random.randn(n_rec, n_out).T / np.sqrt(n_rec), dtype=dtype_weights)
-weights_out_rec = np.array(np.random.randn(n_rec, n_out) / np.sqrt(n_rec), dtype=dtype_weights)
-
-params_common_syn_eprop = {
- "optimizer": {
- "type": "gradient_descent", # algorithm to optimize the weights
- "batch_size": batch_size,
- "eta": 1e-4, # learning rate
- "Wmin": -100.0, # pA, minimal limit of the synaptic weights
- "Wmax": 100.0, # pA, maximal limit of the synaptic weights
- },
- "average_gradient": False, # if True, average the gradient over the learning window
- "weight_recorder": wr,
-}
-
-params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"], # ms, dendritic delay
- "tau_m_readout": params_nrn_out["tau_m"], # ms, for technical reasons pass readout neuron membrane time constant
-}
-
-params_syn_in = params_syn_base.copy()
-params_syn_in["weight"] = weights_in_rec # pA, initial values for the synaptic weights
-
-params_syn_rec = params_syn_base.copy()
-params_syn_rec["weight"] = weights_rec_rec
-
-params_syn_out = params_syn_base.copy()
-params_syn_out["weight"] = weights_rec_out
-
-params_syn_feedback = {
- "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
- "delay": duration["step"],
- "weight": weights_out_rec,
-}
-
-params_syn_rate_target = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 2, # receptor type over which readout neuron receives target signal
-}
-
-params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
-}
-
-####################
-
-nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
-nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static) # connection 1
-nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in) # connection 2
-nest.Connect(nrns_rec, nrns_rec, params_conn_all_to_all, params_syn_rec) # connection 3
-nest.Connect(nrns_rec, nrns_out, params_conn_all_to_all, params_syn_out) # connection 4
-nest.Connect(nrns_out, nrns_rec, params_conn_all_to_all, params_syn_feedback) # connection 5
-nest.Connect(gen_rate_target, nrns_out, params_conn_one_to_one, params_syn_rate_target) # connection 6
-
-nest.Connect(nrns_in, sr_in, params_conn_all_to_all, params_syn_static)
-nest.Connect(nrns_rec, sr_rec, params_conn_all_to_all, params_syn_static)
-
-nest.Connect(mm_rec, nrns_rec_record, params_conn_all_to_all, params_syn_static)
-nest.Connect(mm_out, nrns_out, params_conn_all_to_all, params_syn_static)
-
-# %% ###########################################################################################################
-# Create input
-# ~~~~~~~~~~~~
-# We generate some frozen Poisson spike noise of a fixed rate that is repeated in each iteration and feed these
-# spike times to the previously created input spike generator. The network will use these spike times as a
-# temporal backbone for encoding the target signal into its recurrent spiking activity.
-
-input_spike_prob = 0.05 # spike probability of frozen input noise
-dtype_in_spks = np.float32 # data type of input spikes - for reproducing TF results set to np.float32
-
-input_spike_bools = (np.random.rand(steps["sequence"], n_in) < input_spike_prob).swapaxes(0, 1)
-input_spike_bools[:, 0] = 0 # remove spikes in 0th time step of every sequence for technical reasons
-
-sequence_starts = np.arange(0.0, duration["task"], duration["sequence"]) + duration["offset_gen"]
-params_gen_spk_in = []
-for input_spike_bool in input_spike_bools:
- input_spike_times = np.arange(0.0, duration["sequence"], duration["step"])[input_spike_bool]
- input_spike_times_all = [input_spike_times + start for start in sequence_starts]
- params_gen_spk_in.append({"spike_times": np.hstack(input_spike_times_all).astype(dtype_in_spks)})
-
-####################
-
-nest.SetStatus(gen_spk_in, params_gen_spk_in)
-
-# %% ###########################################################################################################
-# Create output
-# ~~~~~~~~~~~~~
-# Then, as a superposition of four sine waves with various periods, amplitudes, and phases, we construct a
-# one-second target signal. This signal, like the input, is repeated for all iterations and fed into the rate
-# generator that was previously created.
-
-
-def generate_superimposed_sines(steps_sequence, periods):
- n_sines = len(periods)
-
- amplitudes = np.random.uniform(low=0.5, high=2.0, size=n_sines)
- phases = np.random.uniform(low=0.0, high=2.0 * np.pi, size=n_sines)
-
- sines = [
- A * np.sin(np.linspace(phi, phi + 2.0 * np.pi * (steps_sequence // T), steps_sequence))
- for A, phi, T in zip(amplitudes, phases, periods)
- ]
-
- superposition = sum(sines)
- superposition -= superposition[0]
- superposition /= max(np.abs(superposition).max(), 1e-6)
- return superposition
-
-
-target_signal = generate_superimposed_sines(steps["sequence"], [1000, 500, 333, 200]) # periods in steps
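-
-# By construction, the target starts at zero and its maximum absolute value is normalized to one; a quick
-# sanity check could be, e.g., assert target_signal[0] == 0.0 and np.abs(target_signal).max() <= 1.0.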
-
-params_gen_rate_target = {
- "amplitude_times": np.arange(0.0, duration["task"], duration["step"]) + duration["total_offset"],
- "amplitude_values": np.tile(target_signal, n_iter * batch_size),
-}
-
-####################
-
-nest.SetStatus(gen_rate_target, params_gen_rate_target)
-
-# %% ###########################################################################################################
-# Force final update
-# ~~~~~~~~~~~~~~~~~~
-# Synapses only become active, that is, the correct weight update is only calculated and applied, when they
-# transmit a spike. To still be able to read out the correct weights at the end of the simulation, we force the
-# presynaptic neurons to spike and thus trigger an update of all synapses, including those that have not
-# transmitted a spike in the last update interval, by sending a strong spike to all neurons that form the
-# presynaptic side of an eprop synapse. This step is required purely for technical reasons.
-
-gen_spk_final_update = nest.Create("spike_generator", 1, {"spike_times": [duration["task"] + duration["delays"]]})
-
-nest.Connect(gen_spk_final_update, nrns_in + nrns_rec, "all_to_all", {"weight": 1000.0})
-
-# %% ###########################################################################################################
-# Read out pre-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Before we begin training, we read out the initial weight matrices so that we can eventually compare them to
-# the optimized weights.
-
-
-def get_weights(pop_pre, pop_post):
- conns = nest.GetConnections(pop_pre, pop_post).get(["source", "target", "weight"])
- conns["senders"] = np.array(conns["source"]) - np.min(conns["source"])
- conns["targets"] = np.array(conns["target"]) - np.min(conns["target"])
-
- conns["weight_matrix"] = np.zeros((len(pop_post), len(pop_pre)))
- conns["weight_matrix"][conns["targets"], conns["senders"]] = conns["weight"]
- return conns
-
-
-weights_pre_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Simulate
-# ~~~~~~~~
-# We train the network by simulating for a set simulation time, determined by the number of iterations, the
-# batch size, and the length of one sequence.
-
-nest.Simulate(duration["sim"])
-
-# %% ###########################################################################################################
-# Read out post-training weights
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# After the training, we can read out the optimized final weights.
-
-weights_post_train = {
- "in_rec": get_weights(nrns_in, nrns_rec),
- "rec_rec": get_weights(nrns_rec, nrns_rec),
- "rec_out": get_weights(nrns_rec, nrns_out),
-}
-
-# %% ###########################################################################################################
-# Read out recorders
-# ~~~~~~~~~~~~~~~~~~
-# We can also retrieve the recorded history of the dynamic variables and weights, as well as detected spikes.
-
-events_mm_rec = mm_rec.get("events")
-events_mm_out = mm_out.get("events")
-events_sr_in = sr_in.get("events")
-events_sr_rec = sr_rec.get("events")
-events_wr = wr.get("events")
-
-# %% ###########################################################################################################
-# Evaluate training error
-# ~~~~~~~~~~~~~~~~~~~~~~~
-# We evaluate the network's training error by calculating a loss - in this case, the mean squared error between
-# the integrated recurrent network activity and the target rate.
-
-readout_signal = events_mm_out["readout_signal"]
-target_signal = events_mm_out["target_signal"]
-senders = events_mm_out["senders"]
-
-readout_signal = np.array([readout_signal[senders == i] for i in set(senders)])
-target_signal = np.array([target_signal[senders == i] for i in set(senders)])
-
-readout_signal = readout_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-target_signal = target_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-
-loss = 0.5 * np.mean(np.sum((readout_signal - target_signal) ** 2, axis=3), axis=(0, 2))
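-
-# The resulting loss array contains one scalar per training iteration (loss.shape == (n_iter,)), which is
-# plotted against the iteration number below.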
-
-# %% ###########################################################################################################
-# Plot results
-# ~~~~~~~~~~~~
-# Finally, we plot the results in a series of figures.
-
-do_plotting = True # if True, plot the results
-
-if not do_plotting:
- exit()
-
-colors = {
- "blue": "#2854c5ff",
- "red": "#e04b40ff",
- "white": "#ffffffff",
-}
-
-plt.rcParams.update(
- {
- "axes.spines.right": False,
- "axes.spines.top": False,
- "axes.prop_cycle": cycler(color=[colors["blue"], colors["red"]]),
- }
-)
-
-# %% ###########################################################################################################
-# Plot learning performance
-# .........................
-# We begin with a plot visualizing the learning performance of the network: the loss plotted against the
-# iterations.
-
-fig, ax = plt.subplots()
-fig.suptitle("Learning performance")
-
-ax.plot(range(1, n_iter + 1), loss)
-ax.set_ylabel(r"$\mathcal{L} = \frac{1}{2} \sum_{t,k} \left( y_k^t -y_k^{*,t}\right)^2$")
-ax.set_xlabel("iteration")
-ax.set_xlim(1, n_iter)
-ax.xaxis.get_major_locator().set_params(integer=True)
-
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot spikes and dynamic variables
-# .................................
-# This plotting routine shows how to plot all of the recorded dynamic variables and spikes across time. We take
-# one snapshot in the first iteration and one snapshot at the end.
-
-
-def plot_recordable(ax, events, recordable, ylabel, xlims):
- for sender in set(events["senders"]):
- idc_sender = events["senders"] == sender
- idc_times = (events["times"][idc_sender] > xlims[0]) & (events["times"][idc_sender] < xlims[1])
- ax.plot(events["times"][idc_sender][idc_times], events[recordable][idc_sender][idc_times], lw=0.5)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(events[recordable]) - np.min(events[recordable])) * 0.1
- ax.set_ylim(np.min(events[recordable]) - margin, np.max(events[recordable]) + margin)
-
-
-def plot_spikes(ax, events, ylabel, xlims):
- idc_times = (events["times"] > xlims[0]) & (events["times"] < xlims[1])
- senders_subset = events["senders"][idc_times]
- times_subset = events["times"][idc_times]
-
- ax.scatter(times_subset, senders_subset, s=0.1)
- ax.set_ylabel(ylabel)
- margin = np.abs(np.max(senders_subset) - np.min(senders_subset)) * 0.1
- ax.set_ylim(np.min(senders_subset) - margin, np.max(senders_subset) + margin)
-
-
-for title, xlims in zip(
- ["Dynamic variables before training", "Dynamic variables after training"],
- [(0, steps["sequence"]), (steps["task"] - steps["sequence"], steps["task"])],
-):
- fig, axs = plt.subplots(9, 1, sharex=True, figsize=(6, 8), gridspec_kw={"hspace": 0.4, "left": 0.2})
- fig.suptitle(title)
-
- plot_spikes(axs[0], events_sr_in, r"$z_i$" + "\n", xlims)
- plot_spikes(axs[1], events_sr_rec, r"$z_j$" + "\n", xlims)
-
- plot_recordable(axs[2], events_mm_rec, "V_m", r"$v_j$" + "\n(mV)", xlims)
- plot_recordable(axs[3], events_mm_rec, "surrogate_gradient", r"$\psi_j$" + "\n", xlims)
- plot_recordable(axs[4], events_mm_rec, "learning_signal", r"$L_j$" + "\n(pA)", xlims)
-
- plot_recordable(axs[5], events_mm_out, "V_m", r"$v_k$" + "\n(mV)", xlims)
- plot_recordable(axs[6], events_mm_out, "target_signal", r"$y^*_k$" + "\n", xlims)
- plot_recordable(axs[7], events_mm_out, "readout_signal", r"$y_k$" + "\n", xlims)
- plot_recordable(axs[8], events_mm_out, "error_signal", r"$y_k-y^*_k$" + "\n", xlims)
-
- axs[-1].set_xlabel(r"$t$ (ms)")
- axs[-1].set_xlim(*xlims)
-
- fig.align_ylabels()
-
-# %% ###########################################################################################################
-# Plot weight time courses
-# ........................
-# Similarly, we can plot the weight histories. Note that the weight recorder, attached to the synapses, works
-# differently from the other recorders. Since synapses only get activated when they transmit a spike, the weight
-# recorder only records the weight at those moments. That is why the first recorded weights do not appear in
-# the first time step and we add the initial weights manually.
-
-
-def plot_weight_time_course(ax, events, nrns, label, ylabel):
- sender_label, target_label = label.split("_")
- nrns_senders = nrns[sender_label]
- nrns_targets = nrns[target_label]
-
- for sender in set(events_wr["senders"]):
- for target in set(events_wr["targets"]):
- if sender in nrns_senders and target in nrns_targets:
- idc_syn = (events["senders"] == sender) & (events["targets"] == target)
- if np.any(idc_syn):
- idc_syn_pre = (weights_pre_train[label]["source"] == sender) & (
- weights_pre_train[label]["target"] == target
- )
- times = np.concatenate([[0.0], events["times"][idc_syn]])
-
- weights = np.concatenate(
- [np.array(weights_pre_train[label]["weight"])[idc_syn_pre], events["weights"][idc_syn]]
- )
- ax.step(times, weights, c=colors["blue"])
- ax.set_ylabel(ylabel)
- ax.set_ylim(-0.6, 0.6)
-
-
-fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
-fig.suptitle("Weight time courses")
-
-nrns = {
- "in": nrns_in.tolist(),
- "rec": nrns_rec.tolist(),
- "out": nrns_out.tolist(),
-}
-
-plot_weight_time_course(axs[0], events_wr, nrns, "in_rec", r"$W_\text{in}$ (pA)")
-plot_weight_time_course(axs[1], events_wr, nrns, "rec_rec", r"$W_\text{rec}$ (pA)")
-plot_weight_time_course(axs[2], events_wr, nrns, "rec_out", r"$W_\text{out}$ (pA)")
-
-axs[-1].set_xlabel(r"$t$ (ms)")
-axs[-1].set_xlim(0, duration["task"])
-
-fig.align_ylabels()
-fig.tight_layout()
-
-# %% ###########################################################################################################
-# Plot weight matrices
-# ....................
-# If one is not interested in the time course of the weights, it is possible to read out only the initial and
-# final weights, which requires less computing time and memory than the weight recorder approach. Here, we plot
-# the corresponding weight matrices before and after the optimization.
-
-cmap = mpl.colors.LinearSegmentedColormap.from_list(
- "cmap", ((0.0, colors["blue"]), (0.5, colors["white"]), (1.0, colors["red"]))
-)
-
-fig, axs = plt.subplots(3, 2, sharex="col", sharey="row")
-fig.suptitle("Weight matrices")
-
-all_w_extrema = []
-
-for k in weights_pre_train.keys():
- w_pre = weights_pre_train[k]["weight"]
- w_post = weights_post_train[k]["weight"]
- all_w_extrema.append([np.min(w_pre), np.max(w_pre), np.min(w_post), np.max(w_post)])
-
-args = {"cmap": cmap, "vmin": np.min(all_w_extrema), "vmax": np.max(all_w_extrema)}
-
-for i, weights in zip([0, 1], [weights_pre_train, weights_post_train]):
- axs[0, i].pcolormesh(weights["in_rec"]["weight_matrix"].T, **args)
- axs[1, i].pcolormesh(weights["rec_rec"]["weight_matrix"], **args)
- cmesh = axs[2, i].pcolormesh(weights["rec_out"]["weight_matrix"], **args)
-
- axs[2, i].set_xlabel("recurrent\nneurons")
-
-axs[0, 0].set_ylabel("input\nneurons")
-axs[1, 0].set_ylabel("recurrent\nneurons")
-axs[2, 0].set_ylabel("readout\nneurons")
-fig.align_ylabels(axs[:, 0])
-
-axs[0, 0].text(0.5, 1.1, "before training", transform=axs[0, 0].transAxes, ha="center")
-axs[0, 1].text(0.5, 1.1, "after training", transform=axs[0, 1].transAxes, ha="center")
-
-axs[2, 0].yaxis.get_major_locator().set_params(integer=True)
-
-cbar = plt.colorbar(cmesh, cax=axs[1, 1].inset_axes([1.1, 0.2, 0.05, 0.8]), label="weight (pA)")
-
-fig.tight_layout()
-
-plt.show()
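The comment on the weight matrices above points out that one can read out only the initial and final weights instead of attaching a weight recorder. A minimal sketch of such a read-out helper, kept consistent with the keys the plotting code uses (source, target, weight, weight_matrix); the example's actual helper is not shown in this hunk, so treat this as an illustration rather than the original code:

    import numpy as np
    import nest

    def get_weights(pop_pre, pop_post):
        # Snapshot all weights between two populations and arrange them as a
        # dense matrix indexed by (target, source).
        conns = nest.GetConnections(pop_pre, pop_post).get(["source", "target", "weight"])
        senders = np.array(conns["source"])
        targets = np.array(conns["target"])
        conns["weight_matrix"] = np.zeros((len(pop_post), len(pop_pre)))
        conns["weight_matrix"][targets - targets.min(), senders - senders.min()] = conns["weight"]
        return conns

    # Called once before and once after nest.Simulate(), for example:
    # weights_pre_train = {"in_rec": get_weights(nrns_in, nrns_rec), ...}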
diff --git a/testsuite/pytests/sli2py_connect/test_common_properties_setting.py b/testsuite/pytests/sli2py_connect/test_common_properties_setting.py
index 2d1f2ee687..bb5235f285 100644
--- a/testsuite/pytests/sli2py_connect/test_common_properties_setting.py
+++ b/testsuite/pytests/sli2py_connect/test_common_properties_setting.py
@@ -37,18 +37,24 @@ def set_volume_transmitter():
def set_default_delay_resolution():
- nest.resolution = nest.GetDefaults("eprop_synapse_bsshslm_2020")["delay"]
+ nest.resolution = nest.GetDefaults("eprop_synapse")["delay"]
+
+
+def filter_matching_keys(d, target):
+ """Return a copy of d that contains only the keys in target."""
+
+ return {k: v for k, v in d.items() if k in target}
# This list shall contain all synapse models extending the CommonSynapseProperties class.
# For each model, specify which parameter to test with and which test value to use. A
# setup function can be provided if preparations are required. Also provide the supported neuron model.
common_prop_models = {
- "eprop_synapse_bsshslm_2020": {
- "parameter": "average_gradient",
- "value": not nest.GetDefaults("eprop_synapse_bsshslm_2020")["average_gradient"],
+ "eprop_synapse": {
+ "parameter": "optimizer",
+ "value": {"type": "adam"},
"setup": set_default_delay_resolution,
- "neuron": "eprop_iaf_bsshslm_2020",
+ "neuron": "eprop_iaf",
},
"jonke_synapse": {"parameter": "tau_plus", "value": 10, "setup": None, "neuron": "iaf_psc_alpha"},
"stdp_dopamine_synapse": {
@@ -85,6 +91,8 @@ def test_set_common_properties(syn_model, specs):
nest.SetDefaults(syn_model, {specs["parameter"]: specs["value"]})
new_val = nest.GetDefaults(syn_model)[specs["parameter"]]
+ if isinstance(specs["value"], dict):
+ new_val = filter_matching_keys(new_val, specs["value"])
assert new_val == specs["value"]
@@ -102,11 +110,16 @@ def test_copy_common_properties(syn_model, specs):
new_model = syn_model + "_copy"
nest.CopyModel(syn_model, new_model)
new_val = nest.GetDefaults(new_model)[specs["parameter"]]
+ if isinstance(specs["value"], dict):
+ new_val = filter_matching_keys(new_val, specs["value"])
assert new_val == specs["value"]
# Set parameter back on copied model, original must not be changed
nest.SetDefaults(new_model, {specs["parameter"]: old_val})
- assert nest.GetDefaults(syn_model)[specs["parameter"]] == specs["value"]
+ check_val = nest.GetDefaults(syn_model)[specs["parameter"]]
+ if isinstance(specs["value"], dict):
+ check_val = filter_matching_keys(check_val, specs["value"])
+ assert check_val == specs["value"]
@pytest.mark.parametrize("syn_model, specs", available_cp_models.items())
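For the dict-valued "optimizer" parameter introduced above, GetDefaults returns the complete optimizer dictionary, including entries the test never set, so a plain equality check against {"type": "adam"} would fail. filter_matching_keys therefore restricts the comparison to the keys that were set. A minimal sketch of that comparison; the extra keys ("eta", "batch_size") are assumed here purely for illustration:

    # What GetDefaults("eprop_synapse")["optimizer"] might contain (extra keys assumed):
    returned = {"type": "adam", "eta": 1e-4, "batch_size": 1}
    expected = {"type": "adam"}  # the value set by the test
    filtered = {k: v for k, v in returned.items() if k in expected}
    assert filtered == expected  # passes, whereas returned == expected would not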
diff --git a/testsuite/pytests/sli2py_recording/test_multimeter_stepping.py b/testsuite/pytests/sli2py_recording/test_multimeter_stepping.py
index c800c5ae61..d09e4bf716 100644
--- a/testsuite/pytests/sli2py_recording/test_multimeter_stepping.py
+++ b/testsuite/pytests/sli2py_recording/test_multimeter_stepping.py
@@ -29,7 +29,6 @@
import pytest
skip_models = [
- "eprop_readout_bsshslm_2020", # extra timestep added to some recordables in update function
"erfc_neuron", # binary neuron
"ginzburg_neuron", # binary neuron
"mcculloch_pitts_neuron", # binary neuron
diff --git a/testsuite/pytests/sli2py_regressions/test_issue_77.py b/testsuite/pytests/sli2py_regressions/test_issue_77.py
index ba1bb8c151..7431368819 100644
--- a/testsuite/pytests/sli2py_regressions/test_issue_77.py
+++ b/testsuite/pytests/sli2py_regressions/test_issue_77.py
@@ -58,9 +58,6 @@
"music_rate_in_proxy", # MUSIC device
"music_rate_out_proxy", # MUSIC device
"astrocyte_lr_1994", # does not send spikes
- "eprop_readout_bsshslm_2020", # does not send spikes
- "eprop_iaf_bsshslm_2020", # does not support stdp synapses
- "eprop_iaf_adapt_bsshslm_2020", # does not support stdp synapses
"eprop_readout", # does not send spikes
"eprop_iaf", # does not support stdp synapses
"eprop_iaf_adapt", # does not support stdp synapses
diff --git a/testsuite/pytests/test_eprop_bsshslm_2020_plasticity.py b/testsuite/pytests/test_eprop_bsshslm_2020_plasticity.py
deleted file mode 100644
index 69ed7d8fff..0000000000
--- a/testsuite/pytests/test_eprop_bsshslm_2020_plasticity.py
+++ /dev/null
@@ -1,923 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# test_eprop_bsshslm_2020_plasticity.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Test functionality of e-prop plasticity.
-"""
-
-import nest
-import numpy as np
-import pytest
-
-nest.set_verbosity("M_WARNING")
-
-supported_source_models = ["eprop_iaf_bsshslm_2020", "eprop_iaf_adapt_bsshslm_2020"]
-supported_target_models = supported_source_models + ["eprop_readout_bsshslm_2020"]
-
-
-@pytest.fixture(autouse=True)
-def fix_resolution():
- nest.ResetKernel()
-
-
-@pytest.mark.parametrize("source_model", supported_source_models)
-@pytest.mark.parametrize("target_model", supported_target_models)
-def test_connect_with_eprop_synapse(source_model, target_model):
- """
- Ensure that the restriction to supported neuron models works.
- """
-
- # Connect supported models with e-prop synapse
- src = nest.Create(source_model)
- tgt = nest.Create(target_model)
- nest.Connect(src, tgt, "all_to_all", {"synapse_model": "eprop_synapse_bsshslm_2020", "delay": nest.resolution})
-
-
-@pytest.mark.parametrize("target_model", set(nest.node_models) - set(supported_target_models))
-def test_unsupported_model_raises(target_model):
- """
- Confirm that connecting a non-eprop neuron as target via an eprop_synapse_bsshslm_2020 raises an error.
- """
-
- src_nrn = nest.Create(supported_source_models[0])
- tgt_nrn = nest.Create(target_model)
-
- with pytest.raises(nest.kernel.NESTError):
- nest.Connect(src_nrn, tgt_nrn, "all_to_all", {"synapse_model": "eprop_synapse_bsshslm_2020"})
-
-
-def test_eprop_regression():
- """
- Test correct computation of losses for a regression task (for details on the task, see
- nest-simulator/pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.py)
- by comparing the simulated losses with
-
- 1. NEST reference losses to catch scenarios in which the e-prop model does not work as
- intended (e.g., potential future changes to the NEST code base or a faulty installation).
- These reference losses were obtained from a simulation with the verified NEST e-prop
- implementation run with Linux 4.15.0-213-generic, Python v3.11.6, Numpy v1.26.0, and
- NEST@3304c6b5c.
-
- 2. TensorFlow reference losses to check the faithfulness to the original model. These
- reference losses were obtained from a simulation with the original TensorFlow implementation
- (https://github.com/INM-6/eligibility_propagation/blob/eprop_in_nest/Figure_3_and_S7_e_prop_tutorials/tutorial_pattern_generation.py,
- a modified fork of the original model at
- https://github.com/IGITUGraz/eligibility_propagation) run with Linux 4.15.0-213-generic,
- Python v3.6.10, Numpy v1.18.0, TensorFlow v1.15.0, and
- INM6/eligibility_propagation@7df7d2627.
- """ # pylint: disable=line-too-long # noqa: E501
-
- # Initialize random generator
- rng_seed = 1
- np.random.seed(rng_seed)
-
- # Define timing of task
-
- batch_size = 1
- n_iter = 5
-
- steps = {
- "sequence": 1000,
- }
-
- steps["learning_window"] = steps["sequence"]
- steps["task"] = n_iter * batch_size * steps["sequence"]
-
- steps.update(
- {
- "offset_gen": 1,
- "delay_in_rec": 1,
- "delay_rec_out": 1,
- "delay_out_norm": 1,
- "extension_sim": 1,
- }
- )
-
- steps["delays"] = steps["delay_in_rec"] + steps["delay_rec_out"] + steps["delay_out_norm"]
-
- steps["total_offset"] = steps["offset_gen"] + steps["delays"]
-
- steps["sim"] = steps["task"] + steps["total_offset"] + steps["extension_sim"]
-
- duration = {"step": 1.0}
-
- duration.update({key: value * duration["step"] for key, value in steps.items()})
-
- # Set up simulation
-
- params_setup = {
- "eprop_learning_window": duration["learning_window"],
- "eprop_reset_neurons_on_update": True,
- "eprop_update_interval": duration["sequence"],
- "print_time": False,
- "resolution": duration["step"],
- "total_num_virtual_procs": 1,
- }
-
- nest.ResetKernel()
- nest.set(**params_setup)
-
- # Create neurons
-
- n_in = 100
- n_rec = 100
- n_out = 1
-
- params_nrn_out = {
- "C_m": 1.0,
- "E_L": 0.0,
- "I_e": 0.0,
- "loss": "mean_squared_error",
- "regular_spike_arrival": False,
- "tau_m": 30.0,
- "V_m": 0.0,
- }
-
- params_nrn_rec = {
- "beta": 1.0,
- "C_m": 1.0,
- "c_reg": 300.0,
- "E_L": 0.0,
- "f_target": 10.0,
- "gamma": 0.3,
- "I_e": 0.0,
- "regular_spike_arrival": False,
- "surrogate_gradient_function": "piecewise_linear",
- "t_ref": 0.0,
- "tau_m": 30.0,
- "V_m": 0.0,
- "V_th": 0.03,
- }
-
- params_nrn_rec["gamma"] /= params_nrn_rec["V_th"]
- params_nrn_rec["beta"] /= np.abs(params_nrn_rec["V_th"])
-
- gen_spk_in = nest.Create("spike_generator", n_in)
- nrns_in = nest.Create("parrot_neuron", n_in)
- nrns_rec = nest.Create("eprop_iaf_bsshslm_2020", n_rec, params_nrn_rec)
- nrns_out = nest.Create("eprop_readout_bsshslm_2020", n_out, params_nrn_out)
- gen_rate_target = nest.Create("step_rate_generator", n_out)
-
- # Create recorders
-
- n_record = 1
- n_record_w = 1
-
- params_mm_rec = {
- "interval": duration["sequence"],
- "record_from": ["V_m", "surrogate_gradient", "learning_signal"],
- "start": duration["offset_gen"] + duration["delay_in_rec"],
- "stop": duration["offset_gen"] + duration["delay_in_rec"] + duration["task"],
- }
-
- params_mm_out = {
- "interval": duration["step"],
- "record_from": ["V_m", "readout_signal", "readout_signal_unnorm", "target_signal", "error_signal"],
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- }
-
- params_wr = {
- "senders": nrns_in[:n_record_w] + nrns_rec[:n_record_w],
- "targets": nrns_rec[:n_record_w] + nrns_out,
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- }
-
- params_sr = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- }
-
- mm_rec = nest.Create("multimeter", params_mm_rec)
- mm_out = nest.Create("multimeter", params_mm_out)
- sr = nest.Create("spike_recorder", params_sr)
- wr = nest.Create("weight_recorder", params_wr)
-
- nrns_rec_record = nrns_rec[:n_record]
-
- # Create connections
-
- params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
- params_conn_one_to_one = {"rule": "one_to_one"}
-
- dtype_weights = np.float32
- weights_in_rec = np.array(np.random.randn(n_in, n_rec).T / np.sqrt(n_in), dtype=dtype_weights)
- weights_rec_rec = np.array(np.random.randn(n_rec, n_rec).T / np.sqrt(n_rec), dtype=dtype_weights)
- np.fill_diagonal(weights_rec_rec, 0.0)
- weights_rec_out = np.array(np.random.randn(n_rec, n_out).T / np.sqrt(n_rec), dtype=dtype_weights)
- weights_out_rec = np.array(np.random.randn(n_rec, n_out) / np.sqrt(n_rec), dtype=dtype_weights)
-
- params_common_syn_eprop = {
- "optimizer": {
- "type": "gradient_descent",
- "batch_size": batch_size,
- "eta": 1e-4,
- "Wmin": -100.0,
- "Wmax": 100.0,
- },
- "average_gradient": False,
- "weight_recorder": wr,
- }
-
- params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"],
- "tau_m_readout": params_nrn_out["tau_m"],
- }
-
- params_syn_in = params_syn_base.copy()
- params_syn_in["weight"] = weights_in_rec
-
- params_syn_rec = params_syn_base.copy()
- params_syn_rec["weight"] = weights_rec_rec
-
- params_syn_out = params_syn_base.copy()
- params_syn_out["weight"] = weights_rec_out
-
- params_syn_feedback = {
- "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
- "delay": duration["step"],
- "weight": weights_out_rec,
- }
-
- params_syn_rate_target = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 2,
- }
-
- params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
- }
-
- nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
- nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static)
- nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in)
- nest.Connect(nrns_rec, nrns_rec, params_conn_all_to_all, params_syn_rec)
- nest.Connect(nrns_rec, nrns_out, params_conn_all_to_all, params_syn_out)
- nest.Connect(nrns_out, nrns_rec, params_conn_all_to_all, params_syn_feedback)
- nest.Connect(gen_rate_target, nrns_out, params_conn_one_to_one, params_syn_rate_target)
-
- nest.Connect(nrns_in + nrns_rec, sr, params_conn_all_to_all, params_syn_static)
-
- nest.Connect(mm_rec, nrns_rec_record, params_conn_all_to_all, params_syn_static)
- nest.Connect(mm_out, nrns_out, params_conn_all_to_all, params_syn_static)
-
- # Create input
-
- input_spike_prob = 0.05
- dtype_in_spks = np.float32
-
- input_spike_bools = (np.random.rand(steps["sequence"], n_in) < input_spike_prob).swapaxes(0, 1)
- input_spike_bools[:, 0] = 0
-
- sequence_starts = np.arange(0.0, duration["task"], duration["sequence"]) + duration["offset_gen"]
- params_gen_spk_in = []
- for input_spike_bool in input_spike_bools:
- input_spike_times = np.arange(0.0, duration["sequence"], duration["step"])[input_spike_bool]
- input_spike_times_all = [input_spike_times + start for start in sequence_starts]
- params_gen_spk_in.append({"spike_times": np.hstack(input_spike_times_all).astype(dtype_in_spks)})
-
- nest.SetStatus(gen_spk_in, params_gen_spk_in)
-
- # Create output
-
- def generate_superimposed_sines(steps_sequence, periods):
- n_sines = len(periods)
-
- amplitudes = np.random.uniform(low=0.5, high=2.0, size=n_sines)
- phases = np.random.uniform(low=0.0, high=2.0 * np.pi, size=n_sines)
-
- sines = [
- A * np.sin(np.linspace(phi, phi + 2.0 * np.pi * (steps_sequence // T), steps_sequence))
- for A, phi, T in zip(amplitudes, phases, periods)
- ]
-
- superposition = sum(sines)
- superposition -= superposition[0]
- superposition /= max(np.abs(superposition).max(), 1e-6)
- return superposition
-
- target_signal = generate_superimposed_sines(steps["sequence"], [1000, 500, 333, 200])
-
- params_gen_rate_target = {
- "amplitude_times": np.arange(0.0, duration["task"], duration["step"]) + duration["total_offset"],
- "amplitude_values": np.tile(target_signal, n_iter * batch_size),
- }
-
- nest.SetStatus(gen_rate_target, params_gen_rate_target)
-
- # Simulate
-
- nest.Simulate(duration["sim"])
-
- # Read out recorders
-
- events_mm_out = mm_out.get("events")
-
- # Evaluate training error
-
- readout_signal = events_mm_out["readout_signal"]
- target_signal = events_mm_out["target_signal"]
- senders = events_mm_out["senders"]
-
- readout_signal = np.array([readout_signal[senders == i] for i in set(senders)])
- target_signal = np.array([target_signal[senders == i] for i in set(senders)])
-
- readout_signal = readout_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
- target_signal = target_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-
- loss = 0.5 * np.mean(np.sum((readout_signal - target_signal) ** 2, axis=3), axis=(0, 2))
-
- # Verify results
- loss_nest_reference = [
- 101.964356999041,
- 103.466731126205,
- 103.340607074771,
- 103.680244037686,
- 104.412775748752,
- ]
-
- loss_tf_reference = np.array(
- [
- 101.964363098144,
- 103.466735839843,
- 103.340606689453,
- 103.680244445800,
- 104.412780761718,
- ]
- )
-
- assert np.allclose(loss, loss_tf_reference, rtol=1e-7)
- assert np.allclose(loss, loss_nest_reference, rtol=1e-8)
-
-
-@pytest.mark.parametrize(
- "batch_size,loss_nest_reference",
- [
- (
- 1,
- [
- 0.741152550006,
- 0.740388187700,
- 0.665785233177,
- 0.663644193322,
- 0.729428962844,
- ],
- ),
- (
- 2,
- [
- 0.702163370672,
- 0.735555303152,
- 0.740354864111,
- 0.683882815282,
- 0.707841122268,
- ],
- ),
- ],
-)
-def test_eprop_classification(batch_size, loss_nest_reference):
- """
- Test correct computation of losses for a classification task (for details on the task, see
- nest-simulator/pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py)
- by comparing the simulated losses with
-
- 1. NEST reference losses to catch scenarios in which the e-prop model does not work as
- intended (e.g., potential future changes to the NEST code base or a faulty installation).
- These reference losses were obtained from a simulation with the verified NEST e-prop
- implementation run with Linux 4.15.0-213-generic, Python v3.11.6, Numpy v1.26.0, and
- NEST@3304c6b5c.
-
- 2. TensorFlow reference losses to check the faithfulness to the original model. These
- reference losses were obtained from a simulation with the original TensorFlow implementation
- (https://github.com/INM-6/eligibility_propagation/blob/eprop_in_nest/Figure_3_and_S7_e_prop_tutorials/tutorial_evidence_accumulation_with_alif.py,
- a modified fork of the original model at
- https://github.com/IGITUGraz/eligibility_propagation) run with Linux 4.15.0-213-generic,
- Python v3.6.10, Numpy v1.18.0, TensorFlow v1.15.0, and
- INM6/eligibility_propagation@7df7d2627.
- """ # pylint: disable=line-too-long # noqa: E501
-
- # Initialize random generator
-
- rng_seed = 1
- np.random.seed(rng_seed)
-
- # Define timing of task
-
- n_iter = 5
-
- input = {
- "n_symbols": 4,
- "n_cues": 7,
- "prob_group": 0.3,
- "spike_prob": 0.04,
- }
-
- steps = {
- "cue": 100,
- "spacing": 50,
- "bg_noise": 1050,
- "recall": 150,
- }
-
- steps["cues"] = input["n_cues"] * (steps["cue"] + steps["spacing"])
- steps["sequence"] = steps["cues"] + steps["bg_noise"] + steps["recall"]
- steps["learning_window"] = steps["recall"]
- steps["task"] = n_iter * batch_size * steps["sequence"]
-
- steps.update(
- {
- "offset_gen": 1,
- "delay_in_rec": 1,
- "delay_rec_out": 1,
- "delay_out_norm": 1,
- "extension_sim": 1,
- }
- )
-
- steps["delays"] = steps["delay_in_rec"] + steps["delay_rec_out"] + steps["delay_out_norm"]
-
- steps["total_offset"] = steps["offset_gen"] + steps["delays"]
-
- steps["sim"] = steps["task"] + steps["total_offset"] + steps["extension_sim"]
-
- duration = {"step": 1.0}
-
- duration.update({key: value * duration["step"] for key, value in steps.items()})
-
- # Set up simulation
-
- params_setup = {
- "eprop_learning_window": duration["learning_window"],
- "eprop_reset_neurons_on_update": True,
- "eprop_update_interval": duration["sequence"],
- "print_time": False,
- "resolution": duration["step"],
- "total_num_virtual_procs": 1,
- }
-
- nest.ResetKernel()
- nest.set(**params_setup)
-
- # Create neurons
-
- n_in = 40
- n_ad = 50
- n_reg = 50
- n_rec = n_ad + n_reg
- n_out = 2
-
- params_nrn_out = {
- "C_m": 1.0,
- "E_L": 0.0,
- "I_e": 0.0,
- "loss": "cross_entropy",
- "regular_spike_arrival": False,
- "tau_m": 20.0,
- "V_m": 0.0,
- }
-
- params_nrn_reg = {
- "beta": 1.0,
- "C_m": 1.0,
- "c_reg": 300.0,
- "E_L": 0.0,
- "f_target": 10.0,
- "gamma": 0.3,
- "I_e": 0.0,
- "regular_spike_arrival": True,
- "surrogate_gradient_function": "piecewise_linear",
- "t_ref": 5.0,
- "tau_m": 20.0,
- "V_m": 0.0,
- "V_th": 0.6,
- }
-
- params_nrn_reg["gamma"] /= params_nrn_reg["V_th"]
- params_nrn_reg["beta"] /= np.abs(params_nrn_reg["V_th"])
-
- params_nrn_ad = {
- "beta": 1.0,
- "adapt_tau": 2000.0,
- "adaptation": 0.0,
- "C_m": 1.0,
- "c_reg": 300.0,
- "E_L": 0.0,
- "f_target": 10.0,
- "gamma": 0.3,
- "I_e": 0.0,
- "regular_spike_arrival": True,
- "surrogate_gradient_function": "piecewise_linear",
- "t_ref": 5.0,
- "tau_m": 20.0,
- "V_m": 0.0,
- "V_th": 0.6,
- }
-
- params_nrn_ad["gamma"] /= params_nrn_ad["V_th"]
- params_nrn_ad["beta"] /= np.abs(params_nrn_ad["V_th"])
-
- params_nrn_ad["adapt_beta"] = 1.7 * (
- (1.0 - np.exp(-duration["step"] / params_nrn_ad["adapt_tau"]))
- / (1.0 - np.exp(-duration["step"] / params_nrn_ad["tau_m"]))
- )
-
- gen_spk_in = nest.Create("spike_generator", n_in)
- nrns_in = nest.Create("parrot_neuron", n_in)
- nrns_reg = nest.Create("eprop_iaf_bsshslm_2020", n_reg, params_nrn_reg)
- nrns_ad = nest.Create("eprop_iaf_adapt_bsshslm_2020", n_ad, params_nrn_ad)
- nrns_out = nest.Create("eprop_readout_bsshslm_2020", n_out, params_nrn_out)
- gen_rate_target = nest.Create("step_rate_generator", n_out)
-
- nrns_rec = nrns_reg + nrns_ad
-
- # Create recorders
-
- n_record = 1
- n_record_w = 1
-
- params_mm_reg = {
- "interval": duration["step"],
- "record_from": ["V_m", "surrogate_gradient", "learning_signal"],
- "start": duration["offset_gen"] + duration["delay_in_rec"],
- "stop": duration["offset_gen"] + duration["delay_in_rec"] + duration["task"],
- }
-
- params_mm_ad = {
- "interval": duration["step"],
- "record_from": params_mm_reg["record_from"] + ["V_th_adapt", "adaptation"],
- "start": duration["offset_gen"] + duration["delay_in_rec"],
- "stop": duration["offset_gen"] + duration["delay_in_rec"] + duration["task"],
- }
-
- params_mm_out = {
- "interval": duration["step"],
- "record_from": ["V_m", "readout_signal", "readout_signal_unnorm", "target_signal", "error_signal"],
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- }
-
- params_wr = {
- "senders": nrns_in[:n_record_w] + nrns_rec[:n_record_w],
- "targets": nrns_rec[:n_record_w] + nrns_out,
- "start": duration["total_offset"],
- "stop": duration["total_offset"] + duration["task"],
- }
-
- params_sr = {
- "start": duration["offset_gen"],
- "stop": duration["total_offset"] + duration["task"],
- }
-
- mm_reg = nest.Create("multimeter", params_mm_reg)
- mm_ad = nest.Create("multimeter", params_mm_ad)
- mm_out = nest.Create("multimeter", params_mm_out)
- sr = nest.Create("spike_recorder", params_sr)
- wr = nest.Create("weight_recorder", params_wr)
-
- nrns_reg_record = nrns_reg[:n_record]
- nrns_ad_record = nrns_ad[:n_record]
-
- # Create connections
-
- params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
- params_conn_one_to_one = {"rule": "one_to_one"}
-
- def calculate_glorot_dist(fan_in, fan_out):
- glorot_scale = 1.0 / max(1.0, (fan_in + fan_out) / 2.0)
- glorot_limit = np.sqrt(3.0 * glorot_scale)
- glorot_distribution = np.random.uniform(low=-glorot_limit, high=glorot_limit, size=(fan_in, fan_out))
- return glorot_distribution
-
- dtype_weights = np.float32
- weights_in_rec = np.array(np.random.randn(n_in, n_rec).T / np.sqrt(n_in), dtype=dtype_weights)
- weights_rec_rec = np.array(np.random.randn(n_rec, n_rec).T / np.sqrt(n_rec), dtype=dtype_weights)
- np.fill_diagonal(weights_rec_rec, 0.0)
- weights_rec_out = np.array(calculate_glorot_dist(n_rec, n_out).T, dtype=dtype_weights)
- weights_out_rec = np.array(np.random.randn(n_rec, n_out), dtype=dtype_weights)
-
- params_common_syn_eprop = {
- "optimizer": {
- "type": "adam",
- "batch_size": batch_size,
- "beta_1": 0.9,
- "beta_2": 0.999,
- "epsilon": 1e-8,
- "eta": 5e-3,
- "Wmin": -100.0,
- "Wmax": 100.0,
- },
- "average_gradient": True,
- "weight_recorder": wr,
- }
-
- params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"],
- "tau_m_readout": params_nrn_out["tau_m"],
- }
-
- params_syn_in = params_syn_base.copy()
- params_syn_in["weight"] = weights_in_rec
-
- params_syn_rec = params_syn_base.copy()
- params_syn_rec["weight"] = weights_rec_rec
-
- params_syn_out = params_syn_base.copy()
- params_syn_out["weight"] = weights_rec_out
-
- params_syn_feedback = {
- "synapse_model": "eprop_learning_signal_connection_bsshslm_2020",
- "delay": duration["step"],
- "weight": weights_out_rec,
- }
-
- params_syn_out_out = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 1,
- "weight": 1.0,
- }
-
- params_syn_rate_target = {
- "synapse_model": "rate_connection_delayed",
- "delay": duration["step"],
- "receptor_type": 2,
- }
-
- params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
- }
-
- params_init_optimizer = {
- "optimizer": {
- "m": 0.0,
- "v": 0.0,
- }
- }
-
- nest.SetDefaults("eprop_synapse_bsshslm_2020", params_common_syn_eprop)
-
- nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static)
- nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in)
- nest.Connect(nrns_rec, nrns_rec, params_conn_all_to_all, params_syn_rec)
- nest.Connect(nrns_rec, nrns_out, params_conn_all_to_all, params_syn_out)
- nest.Connect(nrns_out, nrns_rec, params_conn_all_to_all, params_syn_feedback)
- nest.Connect(gen_rate_target, nrns_out, params_conn_one_to_one, params_syn_rate_target)
- nest.Connect(nrns_out, nrns_out, params_conn_all_to_all, params_syn_out_out)
-
- nest.Connect(nrns_in + nrns_rec, sr, params_conn_all_to_all, params_syn_static)
-
- nest.Connect(mm_reg, nrns_reg_record, params_conn_all_to_all, params_syn_static)
- nest.Connect(mm_ad, nrns_ad_record, params_conn_all_to_all, params_syn_static)
- nest.Connect(mm_out, nrns_out, params_conn_all_to_all, params_syn_static)
-
- nest.GetConnections(nrns_rec[0], nrns_rec[1:3]).set([params_init_optimizer] * 2)
-
- # Create input and output
-
- def generate_evidence_accumulation_input_output(batch_size, n_in, steps, input):
- n_pop_nrn = n_in // input["n_symbols"]
-
- prob_choices = np.array([input["prob_group"], 1 - input["prob_group"]], dtype=np.float32)
- idx = np.random.choice([0, 1], batch_size)
- probs = np.zeros((batch_size, 2), dtype=np.float32)
- probs[:, 0] = prob_choices[idx]
- probs[:, 1] = prob_choices[1 - idx]
-
- batched_cues = np.zeros((batch_size, input["n_cues"]), dtype=int)
- for b_idx in range(batch_size):
- batched_cues[b_idx, :] = np.random.choice([0, 1], input["n_cues"], p=probs[b_idx])
-
- input_spike_probs = np.zeros((batch_size, steps["sequence"], n_in))
-
- for b_idx in range(batch_size):
- for c_idx in range(input["n_cues"]):
- cue = batched_cues[b_idx, c_idx]
-
- step_start = c_idx * (steps["cue"] + steps["spacing"]) + steps["spacing"]
- step_stop = step_start + steps["cue"]
-
- pop_nrn_start = cue * n_pop_nrn
- pop_nrn_stop = pop_nrn_start + n_pop_nrn
-
- input_spike_probs[b_idx, step_start:step_stop, pop_nrn_start:pop_nrn_stop] = input["spike_prob"]
-
- input_spike_probs[:, -steps["recall"] :, 2 * n_pop_nrn : 3 * n_pop_nrn] = input["spike_prob"]
- input_spike_probs[:, :, 3 * n_pop_nrn :] = input["spike_prob"] / 4.0
- input_spike_bools = input_spike_probs > np.random.rand(input_spike_probs.size).reshape(input_spike_probs.shape)
- input_spike_bools[:, 0, :] = 0
-
- target_cues = np.zeros(batch_size, dtype=int)
- target_cues[:] = np.sum(batched_cues, axis=1) > int(input["n_cues"] / 2)
-
- return input_spike_bools, target_cues
-
- dtype_in_spks = np.float32
-
- input_spike_bools_list = []
- target_cues_list = []
-
- for _ in range(n_iter):
- input_spike_bools, target_cues = generate_evidence_accumulation_input_output(batch_size, n_in, steps, input)
- input_spike_bools_list.append(input_spike_bools)
- target_cues_list.extend(target_cues)
-
- input_spike_bools_arr = np.array(input_spike_bools_list).reshape(steps["task"], n_in)
- timeline_task = np.arange(0.0, duration["task"], duration["step"]) + duration["offset_gen"]
-
- params_gen_spk_in = [
- {"spike_times": timeline_task[input_spike_bools_arr[:, nrn_in_idx]].astype(dtype_in_spks)}
- for nrn_in_idx in range(n_in)
- ]
-
- target_rate_changes = np.zeros((n_out, batch_size * n_iter))
- target_rate_changes[np.array(target_cues_list), np.arange(batch_size * n_iter)] = 1
-
- params_gen_rate_target = [
- {
- "amplitude_times": np.arange(0.0, duration["task"], duration["sequence"]) + duration["total_offset"],
- "amplitude_values": target_rate_changes[nrn_out_idx],
- }
- for nrn_out_idx in range(n_out)
- ]
-
- nest.SetStatus(gen_spk_in, params_gen_spk_in)
- nest.SetStatus(gen_rate_target, params_gen_rate_target)
-
- # Simulate
-
- nest.Simulate(duration["sim"])
-
- # Read out recorders
-
- events_mm_out = mm_out.get("events")
-
- # Evaluate training error
-
- readout_signal = events_mm_out["readout_signal"]
- target_signal = events_mm_out["target_signal"]
- senders = events_mm_out["senders"]
-
- readout_signal = np.array([readout_signal[senders == i] for i in set(senders)])
- target_signal = np.array([target_signal[senders == i] for i in set(senders)])
-
- readout_signal = readout_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
- target_signal = target_signal.reshape((n_out, n_iter, batch_size, steps["sequence"]))
-
- readout_signal = readout_signal[:, :, :, -steps["learning_window"] :]
- target_signal = target_signal[:, :, :, -steps["learning_window"] :]
-
- loss = -np.mean(np.sum(target_signal * np.log(readout_signal), axis=0), axis=(1, 2))
-
- # Verify results
-
- loss_tf_reference = np.array(
- [
- 0.741152524948,
- 0.740388214588,
- 0.665785133838,
- 0.663644134998,
- 0.729429066181,
- ]
- )
-
- if batch_size == 1:
- assert np.allclose(loss, loss_tf_reference, rtol=1e-6)
- assert np.allclose(loss, loss_nest_reference, rtol=1e-8)
-
-
-@pytest.mark.parametrize("source_model", supported_source_models)
-def test_unsupported_surrogate_gradient(source_model):
- """
- Confirm that selecting an unsupported surrogate gradient raises an error.
- """
-
- params_nrn_rec = {
- "surrogate_gradient_function": "unsupported_surrogate_gradient",
- }
-
- with pytest.raises(nest.kernel.NESTErrors.BadProperty):
- nest.SetDefaults(source_model, params_nrn_rec)
-
-
-@pytest.mark.parametrize(
- "neuron_model,eprop_history_duration_reference",
- [
- ("eprop_iaf_bsshslm_2020", np.hstack([np.arange(x, y) for x, y in [[1, 3], [1, 61], [21, 61], [41, 48]]])),
- (
- "eprop_readout_bsshslm_2020",
- np.hstack([np.arange(x, y) for x, y in [[1, 4], [2, 22], [21, 61], [21, 61], [41, 47]]]),
- ),
- ],
-)
-def test_eprop_history_cleaning(neuron_model, eprop_history_duration_reference):
- """
- Test the e-prop archiving mechanism's cleaning process by ensuring that the length of the `eprop_history`
- buffer matches the expected values based on a given input firing pattern. These reference length values
- were obtained from a simulation with the verified NEST e-prop implementation run with Linux 5.8.7-1-default,
- Python v3.12.5, Numpy v2.0.1, and NEST@3a1c2c914.
- """
-
- # Define timing of task
-
- duration = {"step": 1.0, "sequence": 20.0}
-
- # Set up simulation
-
- params_setup = {
- "print_time": False,
- "resolution": duration["step"],
- "eprop_update_interval": duration["sequence"],
- "total_num_virtual_procs": 1,
- }
-
- nest.ResetKernel()
- nest.set(**params_setup)
-
- # Create neurons
-
- gen_spk_in = nest.Create("spike_generator", 3)
- nrns_in = nest.Create("parrot_neuron", 3)
- nrns_rec = nest.Create(neuron_model, 1)
-
- # Create recorders
-
- params_mm_rec = {
- "interval": duration["step"],
- "record_from": ["eprop_history_duration"],
- }
-
- mm_rec = nest.Create("multimeter", params_mm_rec)
-
- # Create connections
-
- params_conn_all_to_all = {"rule": "all_to_all", "allow_autapses": False}
- params_conn_one_to_one = {"rule": "one_to_one"}
-
- params_syn_base = {
- "synapse_model": "eprop_synapse_bsshslm_2020",
- "delay": duration["step"],
- "weight": 1.0,
- }
-
- params_syn_static = {
- "synapse_model": "static_synapse",
- "delay": duration["step"],
- }
-
- params_syn_in = params_syn_base.copy()
-
- nest.Connect(gen_spk_in, nrns_in, params_conn_one_to_one, params_syn_static)
- nest.Connect(nrns_in, nrns_rec, params_conn_all_to_all, params_syn_in)
- nest.Connect(mm_rec, nrns_rec, params_conn_all_to_all, params_syn_static)
-
- # Create input
-
- input_spike_times = [
- [10.0, 20.0, 50.0, 60.0],
- [10.0, 30.0, 50.0, 90.0],
- [40.0, 60.0],
- ]
-
- params_gen_spk_in = [{"spike_times": spike_times} for spike_times in input_spike_times]
-
- nest.SetStatus(gen_spk_in, params_gen_spk_in)
-
- # Simulate
-
- nest.Simulate(110.0)
-
- # Evaluate training error
-
- events_mm_rec = mm_rec.get("events")
-
- eprop_history_duration = events_mm_rec["eprop_history_duration"]
- senders = events_mm_rec["senders"]
-
- eprop_history_duration = np.array([eprop_history_duration[senders == i] for i in set(senders)])[0]
-
- assert np.allclose(eprop_history_duration, eprop_history_duration_reference, rtol=1e-8)
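The deleted file above exercised only the suffixed bsshslm_2020 models; its counterpart for the renamed models is not part of this hunk. For orientation, a minimal sketch of how the same connectivity would be written with the suffix-free names introduced by this change (model and synapse names taken from the hunks above, the delay-equals-resolution pattern from test_sp/test_disconnect.py further below); this is an illustration, not code from the replacement test:

    import nest

    nest.ResetKernel()

    # Suffix-free model names used after this change
    nrns_rec = nest.Create("eprop_iaf", 2)
    nrns_out = nest.Create("eprop_readout", 1)

    # Recurrent-to-readout connections use the e-prop synapse; the learning
    # signal travels back from readout to recurrent neurons.
    nest.Connect(nrns_rec, nrns_out, "all_to_all",
                 {"synapse_model": "eprop_synapse", "delay": nest.resolution})
    nest.Connect(nrns_out, nrns_rec, "all_to_all",
                 {"synapse_model": "eprop_learning_signal_connection"})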
diff --git a/testsuite/pytests/test_labeled_synapses.py b/testsuite/pytests/test_labeled_synapses.py
index 0ff60ceec7..b8e3fd5eb9 100644
--- a/testsuite/pytests/test_labeled_synapses.py
+++ b/testsuite/pytests/test_labeled_synapses.py
@@ -57,13 +57,6 @@ def default_network(self, syn_model):
self.urbanczik_synapses = ["urbanczik_synapse", "urbanczik_synapse_lbl", "urbanczik_synapse_hpc"]
- self.eprop_synapses_bsshslm_2020 = ["eprop_synapse_bsshslm_2020", "eprop_synapse_bsshslm_2020_hpc"]
- self.eprop_connections_bsshslm_2020 = [
- "eprop_learning_signal_connection_bsshslm_2020",
- "eprop_learning_signal_connection_bsshslm_2020_lbl",
- "eprop_learning_signal_connection_bsshslm_2020_hpc",
- ]
-
self.eprop_synapses = ["eprop_synapse", "eprop_synapse_hpc"]
self.eprop_connections = [
"eprop_learning_signal_connection",
@@ -95,12 +88,6 @@ def default_network(self, syn_model):
syns = nest.GetDefaults("pp_cond_exp_mc_urbanczik")["receptor_types"]
r_type = syns["soma_exc"]
- if syn_model in self.eprop_synapses_bsshslm_2020:
- neurons = nest.Create("eprop_iaf_bsshslm_2020", 5)
-
- if syn_model in self.eprop_connections_bsshslm_2020:
- neurons = nest.Create("eprop_readout_bsshslm_2020", 5) + nest.Create("eprop_iaf_bsshslm_2020", 5)
-
if syn_model in self.eprop_synapses:
neurons = nest.Create("eprop_iaf", 5)
@@ -210,13 +197,7 @@ def test_SetLabelToNotLabeledSynapse(self):
nest.SetDefaults(syn, {"synapse_label": 123})
# plain connection
- if (
- syn
- in self.eprop_connections_bsshslm_2020
- + self.eprop_connections
- + self.eprop_synapses_bsshslm_2020
- + self.eprop_synapses
- ):
+ if syn in self.eprop_connections + self.eprop_synapses:
# try set on connect
with self.assertRaises(nest.kernel.NESTError):
nest.Connect(
diff --git a/testsuite/pytests/test_refractory.py b/testsuite/pytests/test_refractory.py
index fe49b2662a..8ce8c96371 100644
--- a/testsuite/pytests/test_refractory.py
+++ b/testsuite/pytests/test_refractory.py
@@ -58,8 +58,6 @@
neurons_interspike_ps = ["iaf_psc_alpha_ps", "iaf_psc_delta_ps", "iaf_psc_exp_ps"]
neurons_eprop = [
- "eprop_iaf_bsshslm_2020",
- "eprop_iaf_adapt_bsshslm_2020",
"eprop_iaf",
"eprop_iaf_adapt",
]
@@ -86,7 +84,6 @@
"iaf_psc_exp_ps_lossless", # This one use presice times
"siegert_neuron", # This one does not connect to voltmeter
"step_rate_generator", # No regular neuron model
- "eprop_readout_bsshslm_2020", # This one does not spike
"eprop_readout", # This one does not spike
"iaf_tum_2000", # Hijacks the offset field, see #2912
"iaf_bw_2001", # Hijacks the offset field, see #2912
diff --git a/testsuite/pytests/test_sp/test_disconnect.py b/testsuite/pytests/test_sp/test_disconnect.py
index 79ad552435..1aa91cc8a6 100644
--- a/testsuite/pytests/test_sp/test_disconnect.py
+++ b/testsuite/pytests/test_sp/test_disconnect.py
@@ -73,12 +73,7 @@ def test_synapse_deletion_one_to_one_no_sp(self):
syn_dict = {"synapse_model": syn_model}
- if "eprop_synapse_bsshslm_2020" in syn_model:
- neurons = nest.Create("eprop_iaf_bsshslm_2020", 4)
- syn_dict["delay"] = nest.resolution
- elif "eprop_learning_signal_connection_bsshslm_2020" in syn_model:
- neurons = nest.Create("eprop_readout_bsshslm_2020", 2) + nest.Create("eprop_iaf_bsshslm_2020", 2)
- elif "eprop_synapse" in syn_model:
+ if "eprop_synapse" in syn_model:
neurons = nest.Create("eprop_iaf", 4)
syn_dict["delay"] = nest.resolution
elif "eprop_learning_signal_connection" in syn_model:
diff --git a/testsuite/pytests/test_sp/test_disconnect_multiple.py b/testsuite/pytests/test_sp/test_disconnect_multiple.py
index a3eeebf0ed..b9b11a569c 100644
--- a/testsuite/pytests/test_sp/test_disconnect_multiple.py
+++ b/testsuite/pytests/test_sp/test_disconnect_multiple.py
@@ -49,11 +49,6 @@ def setUp(self):
"urbanczik_synapse",
"urbanczik_synapse_lbl",
"urbanczik_synapse_hpc",
- "eprop_synapse_bsshslm_2020",
- "eprop_synapse_bsshslm_2020_hpc",
- "eprop_learning_signal_connection_bsshslm_2020",
- "eprop_learning_signal_connection_bsshslm_2020_lbl",
- "eprop_learning_signal_connection_bsshslm_2020_hpc",
"eprop_synapse",
"eprop_synapse_hpc",
"eprop_learning_signal_connection",
diff --git a/testsuite/regressiontests/ticket-310.sli b/testsuite/regressiontests/ticket-310.sli
index eed11a5b25..f5e54b0127 100644
--- a/testsuite/regressiontests/ticket-310.sli
+++ b/testsuite/regressiontests/ticket-310.sli
@@ -39,9 +39,6 @@
/skip_list [ /iaf_chxk_2008 % non-standard spiking conditions
/correlospinmatrix_detector % not a neuron
- /eprop_iaf_bsshslm_2020 % no ArchivingNode, thus no t_spike
- /eprop_iaf_adapt_bsshslm_2020 % no ArchivingNode, thus no t_spike
- /eprop_readout_bsshslm_2020 % no ArchivingNode, thus no t_spike
/eprop_iaf % no ArchivingNode, thus no t_spike
/eprop_iaf_adapt % no ArchivingNode, thus no t_spike
/eprop_iaf_psc_delta % no ArchivingNode, thus no t_spike
diff --git a/testsuite/regressiontests/ticket-421.sli b/testsuite/regressiontests/ticket-421.sli
index a8ccb8b038..b67c44af8f 100644
--- a/testsuite/regressiontests/ticket-421.sli
+++ b/testsuite/regressiontests/ticket-421.sli
@@ -53,7 +53,6 @@ Author: Hans Ekkehard Plesser, 2010-05-05
/aeif_psc_exp /aeif_psc_alpha /aeif_psc_delta /aeif_cond_beta_multisynapse /hh_cond_exp_traub
/hh_cond_beta_gap_traub /hh_psc_alpha /hh_psc_alpha_clopath /hh_psc_alpha_gap /ht_neuron /ht_neuron_fs
/iaf_cond_exp_sfa_rr /izhikevich
- /eprop_iaf_bsshslm_2020 /eprop_iaf_adapt_bsshslm_2020 /eprop_readout_bsshslm_2020
/eprop_iaf /eprop_iaf_adapt /eprop_iaf_psc_delta /eprop_iaf_psc_delta_adapt /eprop_readout] def
% use power-of-two resolution to avoid round-off problems
diff --git a/testsuite/regressiontests/ticket-618.sli b/testsuite/regressiontests/ticket-618.sli
index fe56d45ebd..f9c7312280 100644
--- a/testsuite/regressiontests/ticket-618.sli
+++ b/testsuite/regressiontests/ticket-618.sli
@@ -46,8 +46,7 @@ Author: Hans Ekkehard Plesser, 2012-12-11
M_ERROR setverbosity
-/excluded_models [ /eprop_iaf_bsshslm_2020 /eprop_iaf_adapt_bsshslm_2020 /eprop_readout_bsshslm_2020
- /eprop_iaf /eprop_iaf_adapt /eprop_iaf_psc_delta /eprop_iaf_psc_delta_adapt /eprop_readout /iaf_bw_2001 ] def
+/excluded_models [ /eprop_iaf /eprop_iaf_adapt /eprop_iaf_psc_delta /eprop_iaf_psc_delta_adapt /eprop_readout /iaf_bw_2001 ] def
{
GetKernelStatus /node_models get