class SHAInet::CNN

Defined in:

shainet/cnn/cnn.cr

Constant Summary

COST_FUNCTIONS = ["mse", "c_ent", "exp", "hel_d", "kld", "gkld", "ita_sai_d"]
Log = ::Log.for(self)

Constructor Detail

def self.new #

[View source]
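A minimal sketch of constructing a network; the constructor takes no arguments and layers are then appended with the add_* methods listed below.

```crystal
require "shainet"

# The constructor takes no arguments; the network starts with no layers.
cnn = SHAInet::CNN.new
```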

Instance Method Detail

def add_conv(filters_num : Int32, window_size : Int32, stride : Int32, padding : Int32, activation_function : ActivationFunction = SHAInet.none) #

[View source]
def add_dropout(drop_percent : Int32 = 5) #

[View source]
def add_fconnect(l_size : Int32, activation_function : ActivationFunction = SHAInet.none) #

[View source]
def add_input(input_volume : Array(Int32)) #

[View source]
def add_maxpool(pool : Int32, stride : Int32) #

[View source]
def add_relu(l_relu_slope : Float64 = 0.0) #

[View source]
def add_softmax(range : Range(Int32, Int32) = (0..-1)) #

[View source]
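A hedged sketch that exercises the builder methods above (add_input through add_softmax). All layer sizes are illustrative, and the input volume is assumed to be given as [width, height, channels].

```crystal
cnn = SHAInet::CNN.new

cnn.add_input([28, 28, 1])         # assumed order: [width, height, channels]
cnn.add_conv(filters_num: 8, window_size: 3, stride: 1, padding: 1)
cnn.add_relu                       # leaky slope defaults to 0.0
cnn.add_maxpool(pool: 2, stride: 2)
cnn.add_dropout(drop_percent: 10)  # drop 10% of the activations
cnn.add_fconnect(l_size: 10)
cnn.add_softmax                    # defaults to the full output range (0..-1)
```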
def alpha : Float64 #

Parameters for Adam


[View source]
def alpha=(alpha : Float64) #

Parameters for Adam


[View source]
def beta1 : Float64 #

[View source]
def beta2 : Float64 #

[View source]
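The Adam hyperparameters are plain accessors; only alpha has a setter in this listing. A small sketch with an illustrative value:

```crystal
cnn.alpha = 0.001   # Adam step size (illustrative value, not the library default)
puts cnn.beta1      # decay rates and epsilon are read-only here
puts cnn.beta2
puts cnn.epsilon
```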
def check_nan : Bool #

[View source]
def delta_max : Float64 #

Parameters for Rprop


[View source]
def delta_max=(delta_max : Float64) #

Parameters for Rprop


[View source]
def delta_min : Float64 #

Parameters for Rprop


[View source]
def delta_min=(delta_min : Float64) #

Parameters for Rprop


[View source]
def epsilon : Float64 #

[View source]
def error_signal : Array(Float64) #

[View source]
def etah_minus : Float64 #

Parameters for Rprop


[View source]
def etah_minus=(etah_minus : Float64) #

Parameters for Rprop


[View source]
def etah_plus : Float64 #

Parameters for Rprop


[View source]
def etah_plus=(etah_plus : Float64) #

Parameters for Rprop


[View source]
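The Rprop step-size controls work the same way; the values below are assumptions for illustration, not library defaults.

```crystal
cnn.etah_plus  = 1.2    # factor for growing the update step
cnn.etah_minus = 0.5    # factor for shrinking the update step
cnn.delta_min  = 1e-6   # smallest allowed update step
cnn.delta_max  = 50.0   # largest allowed update step
```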
def evaluate(input_data : Array(Array(Array(GenNum))), expected_output : Array(GenNum), cost_function : CostFunction = SHAInet.quadratic_cost) #

[View source]
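A sketch of evaluating one labelled sample against the network. The input layout ([channels][rows][cols]) and the one-hot label are assumptions; the cost function falls back to the quadratic default.

```crystal
# One 28x28 single-channel volume and a one-hot label over 10 classes (assumed shapes)
input    = Array.new(1) { Array.new(28) { Array.new(28) { rand } } }
expected = Array.new(10, 0.0).tap { |a| a[3] = 1.0 }

cnn.evaluate(input, expected)   # uses SHAInet.quadratic_cost by default
puts cnn.error_signal           # per-output error after the evaluation
```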
def get_cost_proc(function_name : String) : CostFunction #

[View source]
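The names accepted by get_cost_proc are the entries of the COST_FUNCTIONS constant above, e.g.:

```crystal
mse = cnn.get_cost_proc("mse")     # any name from COST_FUNCTIONS
cnn.evaluate(input, expected, mse) # reusing the sample data from the evaluate sketch
```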
def inspect(what : String | Symbol) #

[View source]
def layers : Array(CNNLayer | ConvLayer) #

General network parameters


[View source]
def learning_rate : Float64 #

Parameters for SGD + Momentum


[View source]
def learning_rate=(learning_rate : Float64) #

Parameters for SGD + Momentum


[View source]
def log_summary(e) #

[View source]
def mean_error : Float64 #

[View source]
def momentum : Float64 #

Parameters for SGD + Momentum


[View source]
def momentum=(momentum : Float64) #

Parameters for SGD + Momentum


[View source]
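The SGD-with-momentum parameters are assigned the same way; the values are illustrative only.

```crystal
cnn.learning_rate = 0.05
cnn.momentum      = 0.9
```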
def output : Array(Float64) #

[View source]
def prev_mean_error : Float64 #

[View source]
def propagate_backwards #

[View source]
def run(input_data : Array(Array(Array(GenNum))), stealth : Bool = true) : Array(Float64) #

[View source]
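A forward pass returns the output-layer activations as Array(Float64); a sketch reusing the input volume from the evaluate sketch (stealth presumably only controls logging of the run):

```crystal
output = cnn.run(input)                  # quiet run, stealth: true by default
output = cnn.run(input, stealth: false)  # assumed: also logs the forward pass
```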
def time_step : Int32 #

[View source]
def total_error : Float64 #

[View source]
def train(data : NamedTuple(input: Array(Array(Array(Float64))), output: Array(Float64)), training_type : Symbol | String, cost_function : Symbol | String | CostFunction = :mse, epochs : Int32 = 1, error_threshold : Float64 = 0.0, log_each : Int32 = 1000) #

Online training: updates the weights and biases after each data point (stochastic gradient descent).


[View source]
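A hedged sketch of online training, reusing the sample volume and label from the evaluate sketch. The training-type symbol (:sgdm) is an assumption; only the parameter names come from the signature above.

```crystal
sample = {input: input, output: expected}  # NamedTuple matching the documented signature

cnn.train(sample,
  training_type:   :sgdm,   # assumed symbol for SGD + momentum
  cost_function:   :mse,
  epochs:          100,
  error_threshold: 1.0e-5,
  log_each:        10)
```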
def train_batch(data : Array(CNNPair), training_type : Symbol | String, cost_function : Symbol | String | CostFunction = :mse, epochs : Int32 = 1, error_threshold : Float64 = 0.0, log_each : Int32 = 1000, mini_batch_size : Int32 | Nil = nil) #

Batch training: updates the weights and biases using the summed gradients of all data points in the batch (gradient descent).


[View source]
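Batch training takes an array of CNNPair records, assumed here to be input/output named tuples like the one used with #train; mini_batch_size splits the batch when given. The :adam symbol is an assumption.

```crystal
data = [
  {input: input, output: expected},
  # ... more pairs
]

cnn.train_batch(data,
  training_type:   :adam,    # assumed symbol for the Adam updater
  cost_function:   :c_ent,   # any entry of COST_FUNCTIONS
  epochs:          50,
  mini_batch_size: 16,
  log_each:        5)
```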
def update_output_gradients(cost_function_derivatives : Array(Float64)) #

Update the output layer gradients manually


[View source]
def update_wb(training_type : Symbol | String, batch : Bool = false) #

Goes over all layers and updates the weights and biases, based on the chosen training type.


[View source]
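Taken together, #update_output_gradients, #propagate_backwards and #update_wb allow a manual training step when you want to supply your own cost derivatives; a sketch under that assumption, reusing the earlier sample data:

```crystal
out = cnn.run(input)

# Derivative of the quadratic cost w.r.t. each output: d_i = out_i - target_i
derivatives = out.map_with_index { |o, i| o - expected[i] }

cnn.update_output_gradients(derivatives)  # seed the output-layer gradients
cnn.propagate_backwards                   # backpropagate through all layers
cnn.update_wb(:sgdm)                      # assumed training-type symbol
```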
def verify_data(data : Array(CNNPair)) #

[View source]