class SHAInet::LSTMLayer

Defined in:

shainet/rnn/lstm_layer.cr

Constructors

Instance Method Summary

Instance methods inherited from class SHAInet::Layer

activation_function : Float32 | Float64 | Int32 | Int64 -> {Float64, Float64}, activations : Matrix(Float64), biases : Matrix(Float64), biases=(biases : Matrix(Float64)), clone, input_sums : Matrix(Float64), input_sums=(input_sums : Matrix(Float64)), inspect, l_size : Int32, n_type : String, n_type=(n_type : String), neurons : Array(SHAInet::Neuron), neurons=(neurons : Array(SHAInet::Neuron)), propagate_forward_exp(prev_layer : Layer), random_seed, sigma_primes : Matrix(Float64), size : Int32, type_change(new_neuron_type : String), weights : Matrix(Float64), weights=(weights : Matrix(Float64))

Constructor methods inherited from class SHAInet::Layer

new(n_type : String, l_size : Int32, activation_function : ActivationFunction = SHAInet.sigmoid)

Constructor Detail

def self.new(n_type : String, l_size : Int32, activation_function : ActivationFunction = SHAInet.tanh)
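A minimal construction sketch. Only the signature and the SHAInet.tanh default activation are given on this page; the "memory" n_type string and the shard require path are assumptions, and setup_gate_params may be redundant if the constructor already initializes the gates.

require "shainet"

# Build a 4-unit LSTM layer; SHAInet.tanh is the documented default activation.
# NOTE: "memory" is a hypothetical n_type value; the accepted strings are not
# listed on this page.
layer = SHAInet::LSTMLayer.new("memory", 4)

# Initialize the gate weight/bias arrays (see Instance Method Detail below).
# Included defensively; the constructor may already do this.
layer.setup_gate_params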

Instance Method Detail

def accumulate_gate_gradients

def activate_sequence(sequence : Array(Array(GenNum)))

def activate_step

def backprop_sequence
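A sketch of one forward/backward pass over a short sequence, continuing from the layer built above. The input width of two values per step and the call order are assumptions; only the method names and signatures come from this page. activate_step would instead advance a single time step.

# One Array(GenNum) per time step; Float64 satisfies GenNum.
sequence = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]

layer.activate_sequence(sequence) # forward pass through every step
layer.backprop_sequence           # backpropagation through time
layer.accumulate_gate_gradients   # fold step gradients into the *_grad arrays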
def cell_state : Array(Float64)

def cell_state=(cell_state : Array(Float64))

def forget_b_grad : Array(Float64)

def forget_b_grad=(forget_b_grad : Array(Float64))

def forget_bias : Array(Float64)

def forget_bias=(forget_bias : Array(Float64))

def forget_w_grad : Array(Array(Float64))

def forget_w_grad=(forget_w_grad : Array(Array(Float64)))

def forget_weights : Array(Array(Float64))

def forget_weights=(forget_weights : Array(Array(Float64)))

def hidden_state : Array(Float64)

def hidden_state=(hidden_state : Array(Float64))
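hidden_state and cell_state expose the layer's recurrent state as plain Array(Float64) values, which makes saving and re-seeding state straightforward. A sketch, assuming one entry per unit (size is inherited from SHAInet::Layer):

h = layer.hidden_state # current hidden state, Array(Float64)
c = layer.cell_state   # current cell state

# Both are writable, e.g. to zero the state by hand
# (reset_state below is the documented way to clear it).
layer.hidden_state = Array(Float64).new(layer.size, 0.0)
layer.cell_state = Array(Float64).new(layer.size, 0.0)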
def input_b_grad : Array(Float64)

def input_b_grad=(input_b_grad : Array(Float64))

def input_bias : Array(Float64)

def input_bias=(input_bias : Array(Float64))

def input_w_grad : Array(Array(Float64))

def input_w_grad=(input_w_grad : Array(Array(Float64)))

def input_weights : Array(Array(Float64))

def input_weights=(input_weights : Array(Array(Float64)))

def output_b_grad : Array(Float64)

def output_b_grad=(output_b_grad : Array(Float64))

def output_bias : Array(Float64)

def output_bias=(output_bias : Array(Float64))

def output_w_grad : Array(Array(Float64))

def output_w_grad=(output_w_grad : Array(Array(Float64)))

def output_weights : Array(Array(Float64))

def output_weights=(output_weights : Array(Array(Float64)))

def recurrent_synapses : Array(Array(Synapse))

def recurrent_synapses=(recurrent_synapses : Array(Array(Synapse)))
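Each gate (forget, input, output) follows the same pattern: *_weights : Array(Array(Float64)) with a matching *_w_grad, and *_bias : Array(Float64) with a matching *_b_grad. A sketch of snapshotting and restoring the forget gate; the row/column layout of the weight matrix is not documented on this page:

saved_w = layer.forget_weights.map(&.dup) # deep-copy the nested arrays
saved_b = layer.forget_bias.dup

# ... train, then roll the gate back to the snapshot:
layer.forget_weights = saved_w
layer.forget_bias = saved_b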
def reset_state

def setup_gate_params

def update_gate_params(lr : Float64)

def zero_gate_gradients
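Putting the bookkeeping methods together, a hedged per-sequence training step, reusing layer and sequence from the earlier sketches. The ordering of resetting, zeroing, accumulation, and update is an assumption inferred from the method names; only update_gate_params(lr : Float64) taking a learning rate is given on this page.

learning_rate = 0.01

10.times do
  layer.reset_state                 # clear hidden/cell state between sequences
  layer.zero_gate_gradients         # clear previously accumulated gate gradients
  layer.activate_sequence(sequence) # forward pass
  layer.backprop_sequence           # backprop through time
  layer.accumulate_gate_gradients
  layer.update_gate_params(learning_rate) # SGD-style gate parameter update
end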