Source: https://www.youtube.com/watch?v=XmK9f5IV8Uw#t=568.604741
#include <stdio.h>
#include <iostream>

// parenthesize the whole expansion so the macro behaves correctly inside larger expressions
#define MAX2(a,b) ((a) > (b) ? (a) : (b))
class Neuron
{
public:
    Neuron()
        : w_(2.0), b_(1.0)
    {}

    Neuron(const double& w_input, const double& b_input)
        : w_(w_input), b_(b_input)
    {}

public: // attributes
    double w_;               // weight of one input
    double b_;               // bias
    double input_, output_;  // saved for back-prop

public: // behaviors
    double getAct(const double& x)
    {
        // for a linear (identity) activation function
        return x;

        // for a ReLU activation function
        //return MAX2(0.0, x);
    }

    double feedForward(const double& _input)
    {
        input_ = _input;

        // output y = f(sigma), where sigma = w_ * x + b_.
        // For multiple inputs, sigma = w0_ * x0_ + w1_ * x1_ + ... + b
        // (see the multi-input sketch after main() below).
        const double sigma = w_ * input_ + b_;
        output_ = getAct(sigma);
        return output_;
    }

    double getActGrad(const double& x)
    {
        // derivative of the identity activation is 1;
        // for ReLU it would be (x > 0.0) ? 1.0 : 0.0
        return 1.0;
    }

    void propBackward(const double& target)
    {
        const double alpha = 0.1; // learning rate
        const double grad = (output_ - target) * getActGrad(output_);
        w_ -= alpha * grad * input_; // the trailing input_ comes from d(wx+b)/dw = x
        b_ -= alpha * grad * 1.0;    // the trailing 1.0 comes from d(wx+b)/db = 1
    }

    void feedForwardAndPrint(const double& input)
    {
        printf("%f %f \n", input, feedForward(input));
    }
};
int main()
{
    // initialize my_neuron
    Neuron my_neuron(2.0, 1.0);

    for (int r = 0; r < 100; r++)
    {
        std::cout << "Training :: " << r << std::endl;
        my_neuron.feedForwardAndPrint(1.0);
        my_neuron.propBackward(4.0);
        my_neuron.feedForwardAndPrint(1.0);
        std::cout << "w = " << my_neuron.w_ << " b = " << my_neuron.b_ << std::endl;
    }

    return 0;
}
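A note on what propBackward is actually computing: the code never names a loss function, but its update rule matches one step of gradient descent on a half squared-error loss. That framing is an assumption added here, not something stated in the code itself. With the identity activation f(σ) = σ:

L = \frac{1}{2}(y - t)^2, \qquad \frac{\partial L}{\partial w} = (y - t)\,f'(\sigma)\,x, \qquad \frac{\partial L}{\partial b} = (y - t)\,f'(\sigma)

Plugging in the values from main() (w = 2, b = 1, x = 1, target t = 4): y = 2·1 + 1 = 3, grad = (3 − 4)·1 = −1, so w becomes 2 − 0.1·(−1)·1 = 2.1 and b becomes 1 − 0.1·(−1) = 1.1, and the next forward pass gives 2.1·1 + 1.1 = 3.2. Each iteration moves the output a bit closer to the target 4.0, which is what the printed training log should show.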
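The comment in feedForward mentions the multi-input case, sigma = w0*x0 + w1*x1 + ... + b, but the class above only implements a single input. Below is a minimal sketch of that extension, keeping the same identity activation and the same update rule; the class name VectorNeuron and the sample inputs are illustrative assumptions, not part of the original code.

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical multi-input variant of the Neuron class above.
class VectorNeuron
{
public:
    VectorNeuron(const std::vector<double>& w, double b)
        : w_(w), b_(b)
    {}

    std::vector<double> w_;   // one weight per input
    double b_;                // bias
    std::vector<double> x_;   // saved inputs for back-prop
    double output_ = 0.0;

    double feedForward(const std::vector<double>& x)
    {
        x_ = x;               // assumes x.size() == w_.size()
        double sigma = b_;
        for (std::size_t i = 0; i < w_.size(); i++)
            sigma += w_[i] * x_[i];
        output_ = sigma;      // identity activation, as in the single-input version
        return output_;
    }

    void propBackward(double target)
    {
        const double alpha = 0.1;               // learning rate
        const double grad = output_ - target;   // (y - t) * f'(sigma), with f' = 1
        for (std::size_t i = 0; i < w_.size(); i++)
            w_[i] -= alpha * grad * x_[i];      // d(sigma)/dw_i = x_i
        b_ -= alpha * grad;                     // d(sigma)/db = 1
    }
};

int main()
{
    VectorNeuron n({ 2.0, -1.0 }, 1.0);
    for (int r = 0; r < 100; r++)
    {
        printf("%f\n", n.feedForward({ 1.0, 0.5 }));
        n.propBackward(4.0);
    }
    return 0;
}

Keeping the weights in a std::vector lets the update loop in propBackward mirror the per-weight rule w_i -= alpha * grad * x_i that the single-input version applies to its one weight.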