#### 研究领域：混沌系统，洛伦兹吸引子，储备池计算，循环神经网络

Jason Platt   | 作者

#### 3. 储备池计算的结构

using Random
using SparseArrays
using Arpack
using LinearAlgebra

"""
    rc(D, N, ρSR, ρA, α, σ, σb, β; random_state=111111)

Structure holding the reservoir computer.

# Arguments
- `D::Int` : input system dimension
- `N::Int` : reservoir dimension
- `ρSR::Float64` : spectral radius of the adjacency matrix `A`
- `ρA::Float64` : density of `A`
- `α::Float64` : leak rate
- `σ::Float64` : input strength
- `σb::Float64` : input bias
- `β::Float64` : Tikhonov regularization
- `random_state::Int` : random seed (keyword)
"""
struct rc
    D::Int                           # input system dimension
    N::Int                           # reservoir dimension
    ρSR::Float64                     # spectral radius of A
    ρA::Float64                      # density of A
    α::Float64                       # leak rate
    σ::Float64                       # input strength
    σb::Float64                      # input bias
    β::Float64                       # Tikhonov regularization
    A::SparseMatrixCSC{Float64,Int}  # reservoir adjacency matrix (field was missing; new() already passed it)
    Win::Array{Float64,2}            # input matrix
    Wout::Array{Float64,2}           # output (readout) matrix; filled in by training
    rng::MersenneTwister             # random number generator

    # Inner constructor: builds A and Win, allocates an (untrained) Wout.
    function rc(D, N, ρSR, ρA, α, σ, σb, β; random_state=111111)
        rng = MersenneTwister(random_state)
        A, Win, Wout = initialize_weights(D, N, ρSR, ρA, σ, rng)
        return new(D, N, ρSR, ρA, α, σ, σb, β, A, Win, Wout, rng)
    end
end

"""
    initialize_weights(D, N, ρSR, ρA, σ, rng)

Build the reservoir weights: the sparse adjacency matrix `A` (density
`ρA`, spectral radius `ρSR`), the input matrix `Win` with entries
uniform on [-σ, σ], and an uninitialized D×N readout `Wout`.
"""
function initialize_weights(D, N, ρSR, ρA, σ, rng)
    # fix: parameter was named SR while the body referenced ρSR (UndefVarError)
    A = get_connection_matrix(N, ρA, ρSR, rng)
    # Uniform(-σ, σ) sampled with the stdlib only (Distributions.jl was
    # never imported, so `rand(rng, Uniform(-σ, σ), N, D)` could not run)
    Win = σ .* (2.0 .* rand(rng, N, D) .- 1.0)
    Wout = Array{Float64}(undef, (D, N))
    return A, Win, Wout
end

"""
    get_connection_matrix(N, ρA, ρSR, rng)

Return an N×N sparse random matrix with density `ρA`, nonzero entries
drawn uniform on [-1, 1], rescaled so its spectral radius equals `ρSR`.
"""
function get_connection_matrix(N, ρA, ρSR, rng)
    # nonzeros uniform on [-1, 1] with the stdlib only (the original
    # used Distributions.Uniform, which is never imported in this file)
    rf(rng, n) = 2.0 .* rand(rng, n) .- 1.0

    # N×N sparse random matrix
    A = sprand(rng, N, N, ρA, rf)

    # eigs returns the largest-magnitude eigenvalue first by default;
    # rescale so the spectral radius becomes ρSR
    eig, _ = eigs(A, nev=1)
    maxeig = abs(eig[1])
    return A .* (ρSR / maxeig)
end

#### 4. 训练储备池计算

"""
    train_RC(rc::rc, u; spinup=100, readout=false)

Train the reservoir computer: drive the reservoir with the data `u`,
then set `rc.Wout` by Tikhonov-regularized regression, discarding the
first `spinup` columns as transient.

u ∈ D×T, r ∈ N×T.

# Arguments
- `rc` : reservoir struct
- `u` : array of data of shape D×Time
- `spinup` : amount of data to use for spinup
- `readout` : if true, return the readout `rc.Wout*r` over the training data

# Returns
`rc.Wout*r` when `readout == true`; otherwise the last reservoir state
`r[:, end]`, which can be used to predict forward from the training data.
"""
function train_RC(rc::rc, u; spinup=100, readout=false)
    r = generate(rc, u)
    compute_Wout(rc, r[:, 1+spinup:end], u[:, 1+spinup:end])
    if readout
        return rc.Wout * r
    end
    return r[:, end]
end

"""
    driven_rc!(rtp1, rc::rc, rt, ut)

Single data-driven (leaky tanh) reservoir update: writes r(t+1) into
`rtp1` given the current state `rt` = r(t) and the input `ut` = u(t).

# Arguments
- `rtp1` : output buffer for r(t+1), mutated in place
- `rc` : reservoir computer
- `rt` : r(t)
- `ut` : u(t)
"""
function driven_rc!(rtp1, rc::rc, rt, ut)
    # α blends the nonlinear update with the previous state (leaky integration)
    rtp1 .= rc.α .* tanh.(rc.A*rt .+ rc.Win*ut .+ rc.σb) .+ (1 - rc.α) .* rt
    return rtp1
end

"""
    generate(rc::rc, u)

Drive the reservoir with the data `u` (D×T) and return the N×T matrix
of reservoir states, starting from r(1) = 0.
"""
function generate(rc::rc, u)
    T = size(u, 2)
    r = Array{Float64}(undef, (rc.N, T))
    r[:, 1] .= 0.0
    for t in 2:T
        # fix: call driven_rc! (driven_esn! does not exist), and pass a
        # view — r[:, t] makes a copy, so the in-place update would never
        # have been written back into r
        driven_rc!(view(r, :, t), rc, view(r, :, t-1), u[:, t-1])
    end
    return r
end

"""
    compute_Wout(rc::rc, r, u)

Set `rc.Wout` by ridge regression, Wout = u rᵀ (r rᵀ + βI)⁻¹, solved
through the symmetric linear system rather than an explicit inverse.
"""
function compute_Wout(rc::rc, r, u)
    # fix: the regularization comes from this rc (was `esn.β`, undefined)
    rc.Wout[:, :] .= (Symmetric(r*r' + rc.β*I) \ (r*u'))'
end

#### 5. 预测

"""
    forecast_RC(rc::rc, nsteps; uspin=nothing, r0=nothing)

Forecast `nsteps` forward in time from the end of `uspin` or from the
reservoir state `r0`. Make sure to train the RC before forecasting.
Requires either `uspin` or `r0`; if both are provided, `uspin` is used
to set `r0`. Returns the D×nsteps prediction `rc.Wout*r`.
"""
function forecast_RC(rc::rc, nsteps; uspin=nothing, r0=nothing)
    if !isnothing(uspin)
        rspin = generate(rc, uspin)
        r0 = rspin[:, end]
    end
    # input validation, not an internal invariant — don't rely on @assert
    isnothing(r0) && throw(ArgumentError("provide either uspin or r0"))

    # initialize the array of hidden states and set r(1) = r0
    rfc = Array{Float64}(undef, (rc.N, nsteps))
    rfc[:, 1] = r0

    # iterate the autonomous RC for the forecast
    for t in 1:nsteps-1
        # fix: call auto_rc! (auto_esn! does not exist), and pass a view —
        # rfc[:, t+1] makes a copy, so the update would never reach rfc
        auto_rc!(view(rfc, :, t+1), rc, view(rfc, :, t))
    end
    # fix: readout uses this rc's Wout (was `esn.Wout`, undefined)
    return rc.Wout * rfc
end
"""
    auto_rc!(rtp1, rc::rc, rt)

Autonomous RC update rule: the reservoir's own readout `Wout*r(t)` is
fed back as the input, so the system runs free of data.
"""
function auto_rc!(rtp1, rc::rc, rt)
    # parenthesize Wout*rt so we form a length-D vector first instead of
    # the N×N product Win*Wout
    rtp1 .= rc.α .* tanh.(rc.A*rt .+ rc.Win*(rc.Wout*rt) .+ rc.σb) .+ (1 - rc.α) .* rt
    return rtp1
end

#### 6. 洛伦兹吸引子示例

using BasicReservoirComputing
using Plots
"""
    L63_ex()

Lorenz-63 example: build, train, and forecast with a reservoir computer,
then plot the prediction against the truth. Relies on `make_lor63` and
`plot_prediction`, which are not defined in this file.
"""
function L63_ex()
    # make the training and testing data (not defined here)
    utrain, utest = make_lor63(train_time=100, test_time=20, n_test=100)

    # define the RC
    D = 3; N = 200; σ = 0.084; α = 0.6; SR = 0.8; ρA = 0.02; β = 8.5e-8; σb = 1.6; Δt = 0.01
    # fix: the constructor signature is rc(D, N, ρSR, ρA, α, σ, σb, β) —
    # the original passed Δt (not a constructor argument), scrambled the
    # argument order, and had an unbalanced closing parenthesis
    reservoir = rc(D, N, SR, ρA, α, σ, σb, β)

    # training
    nspin = 100
    train_RC(reservoir, utrain, spinup=nspin)

    # forecast and compare to truth
    utrue = utest[:, nspin:end]
    nsteps = size(utrue, 2)
    # fix: pass the reservoir instance (the original passed the type `rc`)
    upred = forecast_RC(reservoir, nsteps, uspin=utest[:, 1:nspin])

    # now plot results (not defined here)
    t = collect(0:nsteps-1) .* Δt
    p = plot_prediction(t, upred, utrue)
    display(p)
end
L63_ex()

#### 8. 如何找到正确的参数

1、固定参数

2、通过线性回归训练Wout

3、通过一系列长期预测评估损失函数

"""
    Loss(data, spinup_steps, rc::rc)

Loss function for an individual forecast: spin up on the first
`spinup_steps` columns of `data`, forecast the remainder, and return
the norm of the exponentially down-weighted prediction error (early
errors count more, since chaotic trajectories diverge).
"""
function Loss(data, spinup_steps, rc::rc)
    # fix: the annotation was ::esn, a type that does not exist
    uspin = data[:, 1:spinup_steps]
    utrue = data[:, spinup_steps:end]
    Ttrue = size(utrue, 2)
    upred = forecast_RC(rc, Ttrue; uspin=uspin)

    err = upred .- utrue
    # weight decays from ≈1 down to e⁻¹ across the forecast horizon
    weight = exp.(-(1:Ttrue) ./ Ttrue)
    return norm(err .* weight')
end
"""
    obj(θ)

The objective function to minimize over the optimization.

# Arguments
- `θ` : parameter array `[SR, α, logσ, σb, logβ]` (σ and β are
  optimized in log space and exponentiated here)

# Returns
The summed validation loss over all forecasts.

NOTE(review): this relies on a global/closure `f` providing
`train_data`, `N`, `ρA`, `spinup_steps`, and `valid_data_list` — it is
defined outside this chunk; confirm against the surrounding code.
"""
function obj(θ)
    SR, α, logσ, σb, logβ = θ
    D, _ = size(f.train_data)

    # build the RC — fix: the constructor is `rc`, not `esn`, and the
    # local must not be named `rc` or it would shadow the type
    reservoir = rc(D, f.N, SR, f.ρA, α, exp(logσ), σb, exp(logβ))

    # train the readout on the training data
    train_RC(reservoir, f.train_data)

    # compute the loss over the n_valid validation forecasts
    n_valid = length(f.valid_data_list)
    loss = zeros(n_valid)
    for i in 1:n_valid
        loss[i] = Loss(f.valid_data_list[i], f.spinup_steps, reservoir)
    end

    # objective is the sum of the loss functions
    return sum(loss)
end

#### 9. 总结

https://github.com/japlatt/BasicReservoirComputing

1. Jason A. Platt et al. “Robust forecasting using predictive generalized synchronization in reservoir computing”. In: Chaos 31 (2021), p. 123118. URL: https://doi.org/10.1063/5.0066013

2. Platt,  J. A., Penny, S. G., Smith, T. A., Chen, T.-C., & Abarbanel, H. D.  I. (2022). A Systematic Exploration of Reservoir Computing for  Forecasting Complex Spatiotemporal Dynamics. arXiv http://arxiv.org/abs/2201.08910

Penny, S. G., Smith, T. A., Chen, T.-C., Platt, J. A.,  Lin, H.-Y., Goodliff, M., & Abarbanel, H. D. I. (2021). Integrating  Recurrent Neural Networks with Data Assimilation for Scalable  Data-Driven State Estimation. arXiv [cs.LG]. Opgehaal van http://arxiv.org/abs/2109.12269

Zhixin Lu, Brian R. Hunt, and Edward Ott. “Attractor reconstruction by machine learning”. In: Chaos: An Interdisciplinary Journal of Nonlinear Science 28.6 (2018), p. 061104. DOI: 10.1063/1.5039508

Data Driven Modeling of Complex Systems: A Reservoir Computing Tutorial

https://towardsdatascience.com/data-driven-modeling-of-complex-systems-8a96dc92abf9