class SHAInet::CNN
Superclass hierarchy:
- SHAInet::CNN
- Reference
- Object
Defined in:
shainet/cnn/cnn.cr
Constant Summary
- COST_FUNCTIONS = ["mse", "c_ent", "exp", "hel_d", "kld", "gkld", "ita_sai_d"]
- Log = ::Log.for(self)
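The names in COST_FUNCTIONS are the string identifiers that #get_cost_proc resolves to a CostFunction, and the same names (as a String or Symbol) are accepted by #train and #train_batch for their cost_function argument. A minimal sketch, assuming `cnn` is an already constructed network:

```crystal
# Resolve a documented cost-function name into a CostFunction proc.
cost_proc = cnn.get_cost_proc("mse")

# The same names may be passed directly to #train / #train_batch,
# e.g. cost_function: :mse, cost_function: "c_ent", or cost_function: cost_proc.
```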
Constructors
Instance Method Summary
- #add_conv(filters_num : Int32, window_size : Int32, stride : Int32, padding : Int32, activation_function : ActivationFunction = SHAInet.none)
- #add_dropout(drop_percent : Int32 = 5)
- #add_fconnect(l_size : Int32, activation_function : ActivationFunction = SHAInet.none)
- #add_input(input_volume : Array(Int32))
- #add_maxpool(pool : Int32, stride : Int32)
- #add_relu(l_relu_slope : Float64 = 0.0)
- #add_softmax(range : Range(Int32, Int32) = (0..-1))
- #alpha : Float64
  Parameters for Adam
- #alpha=(alpha : Float64)
  Parameters for Adam
- #beta1 : Float64
- #beta2 : Float64
- #check_nan : Bool
- #delta_max : Float64
  Parameters for Rprop
- #delta_max=(delta_max : Float64)
  Parameters for Rprop
- #delta_min : Float64
  Parameters for Rprop
- #delta_min=(delta_min : Float64)
  Parameters for Rprop
- #epsilon : Float64
- #error_signal : Array(Float64)
- #etah_minus : Float64
  Parameters for Rprop
- #etah_minus=(etah_minus : Float64)
  Parameters for Rprop
- #etah_plus : Float64
  Parameters for Rprop
- #etah_plus=(etah_plus : Float64)
  Parameters for Rprop
- #evaluate(input_data : Array(Array(Array(GenNum))), expected_output : Array(GenNum), cost_function : CostFunction = SHAInet.quadratic_cost)
- #get_cost_proc(function_name : String) : CostFunction
- #inspect(what : String | Symbol)
- #layers : Array(CNNLayer | ConvLayer)
  General network parameters
- #learning_rate : Float64
  Parameters for SGD + Momentum
- #learning_rate=(learning_rate : Float64)
  Parameters for SGD + Momentum
- #log_summary(e)
- #mean_error : Float64
- #momentum : Float64
  Parameters for SGD + Momentum
- #momentum=(momentum : Float64)
  Parameters for SGD + Momentum
- #output : Array(Float64)
- #prev_mean_error : Float64
- #propagate_backwards
- #run(input_data : Array(Array(Array(GenNum))), stealth : Bool = true) : Array(Float64)
- #time_step : Int32
- #total_error : Float64
- #train(data : NamedTuple(input: Array(Array(Array(Float64))), output: Array(Float64)), training_type : Symbol | String, cost_function : Symbol | String | CostFunction = :mse, epochs : Int32 = 1, error_threshold : Float64 = 0.0, log_each : Int32 = 1000)
  Online training: updates weights/biases after each data point (stochastic gradient descent); see the usage sketch after this summary.
- #train_batch(data : Array(CNNPair), training_type : Symbol | String, cost_function : Symbol | String | CostFunction = :mse, epochs : Int32 = 1, error_threshold : Float64 = 0.0, log_each : Int32 = 1000, mini_batch_size : Int32 | Nil = nil)
  Batch training: updates weights/biases using the gradient summed over all data points in the batch (or over each mini-batch when mini_batch_size is given).
- #update_output_gradients(cost_function_derivatives : Array(Float64))
  Update the output layer gradients manually; see the sketch under Instance Method Detail for how this composes with #propagate_backwards and #update_wb.
- #update_wb(training_type : Symbol | String, batch : Bool = false)
  Go over all layers and update the weights and biases, based on the chosen learning type.
- #verify_data(data : Array(CNNPair))
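Taken together, the add_* methods build the network layer by layer, #run performs a forward pass, and #train fits the weights. The sketch below is assembled from the signatures listed above; the no-argument constructor, the :sgdm training_type symbol, and the [width, height, channels] ordering of the input volume are assumptions (check the library's README and source), and the hyper-parameters are purely illustrative:

```crystal
require "shainet"

# Build a small CNN layer by layer (hyper-parameters are illustrative only).
cnn = SHAInet::CNN.new                 # constructor assumed to take no arguments
cnn.add_input([28, 28, 1])             # input volume, assumed [width, height, channels]
cnn.add_conv(filters_num: 8, window_size: 3, stride: 1, padding: 1)
cnn.add_relu                           # l_relu_slope defaults to 0.0, i.e. plain ReLU
cnn.add_maxpool(pool: 2, stride: 2)
cnn.add_fconnect(l_size: 10)           # activation_function defaults to SHAInet.none
cnn.add_softmax                        # normalize the 10 outputs into a distribution

# A single dummy sample with the right shape: 28x28x1 input, one-hot 10-class target.
input_volume = Array.new(28) { Array.new(28) { Array.new(1, 0.0) } }
expected     = Array.new(10, 0.0).tap { |a| a[3] = 1.0 }

# Forward pass: returns Array(Float64) with one value per output neuron.
output = cnn.run(input_volume)

# Online (per-sample) training, per the #train signature above.
cnn.train({input: input_volume, output: expected},
          training_type: :sgdm,        # assumed symbol for SGD + Momentum
          cost_function: :mse,         # any name from COST_FUNCTIONS, or a CostFunction proc
          epochs: 100,
          log_each: 10)
```

For batch training, #train_batch takes an Array(CNNPair) instead of a single named tuple and accepts an optional mini_batch_size.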
Constructor Detail
Instance Method Detail
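The lower-level methods (#update_output_gradients, #propagate_backwards, #update_wb) can in principle be combined into a custom training step. The ordering below, like the cost-derivative convention and the :sgdm training_type, is an assumption sketched from the method names and descriptions rather than documented behavior; verify it against shainet/cnn/cnn.cr:

```crystal
# One hand-rolled training step (cnn, input_volume and expected as in the sketch above).
output = cnn.run(input_volume)

# Derivative of the cost w.r.t. each output neuron; here a quadratic cost
# 0.5 * (y - t)^2, whose derivative is (y - t). The sign convention the
# library expects may differ.
cost_derivatives = output.map_with_index { |y, i| y - expected[i] }

cnn.update_output_gradients(cost_derivatives) # seed the output-layer gradients manually
cnn.propagate_backwards                       # push gradients back through all layers
cnn.update_wb(:sgdm)                          # apply the chosen update rule to every layer
```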