// Simple feed-forward neural network (one hidden layer) trained with backpropagation.
#include <iostream>
#include <fstream>
#include <vector>
#include <map>
#include <cmath>
// Marsaglia-style xorshift pseudo-random generator with fixed seeds.
// State lives in function-local statics, so the sequence is deterministic
// across runs (and the function is not thread-safe).
unsigned long xor128(){
    static unsigned long x = 123456789, y = 362436069, z = 521288629, w = 88675123;
    const unsigned long t = x ^ (x << 11);
    x = y;
    y = z;
    z = w;
    w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
    return w;
}
// Uniform pseudo-random double in [0, 1), quantized to steps of 1e-6.
double frand(){
    const unsigned long bucket = xor128() % 1000000;
    return bucket / static_cast<double>(1000000);
}
// Fully-connected feed-forward neural network with one hidden layer,
// logistic-sigmoid activations, and online backpropagation with momentum.
class NeuralNetwork {
// Learning rate (nu) and momentum coefficient (alpha).
// FIX: in-class initialization of a non-integral `static const` is a GCC
// extension and rejected by standard C++; `constexpr` is the conforming form.
static constexpr double nu = 0.8;
static constexpr double alpha = 0.75;
int N_in, N_out, N_hide;                                    // layer sizes: input, output, hidden
std::vector<double> output_in, output_out, output_hide;     // activations from the last forward pass
std::vector<double> h_hide, h_out;                          // biases (hidden layer, output layer)
std::vector< std::vector<double> > w_in_hide, w_hide_out;   // weights: [in][hide] and [hide][out]
std::vector<double> dh_hide, dh_out;                        // previous bias deltas (momentum terms)
std::vector< std::vector<double> > dw_in_hide, dw_hide_out; // previous weight deltas (momentum terms)
// Logistic sigmoid with threshold a: 1 / (1 + e^(a - x)); a = 0 gives the standard form.
double sigmoid(double x, double a){
    return 1.0 / (1.0 + exp(a - x));
}
// Sigmoid derivative expressed in terms of the sigmoid's output fx.
double sigmoid_dash(double fx){
    return fx * (1.0 - fx);
}
// Mean squared error of the network's output against the target vector `out`.
double error_rate(const std::vector<double>& in, const std::vector<double>& out){
    std::vector<double> ret = forward_propagation(in);
    if (ret.empty()) return 0.0;      // degenerate 0-output network: avoid division by zero
    double res = 0.0;
    for (size_t i = 0; i < ret.size(); i++){
        const double diff = ret[i] - out[i];
        res += diff * diff;           // (a-b)^2 == |a-b|^2; fabs was redundant
    }
    return res / ret.size();
}
// One online backpropagation step (with momentum) for a single (input, target) pair.
void back_propagation(const std::vector<double>& in, const std::vector<double>& out){
    std::vector<double> sig_out(N_out), sig_hide(N_hide);
    std::vector<double> ret = forward_propagation(in);
    // Output-layer deltas.
    for (int i = 0; i < N_out; i++){
        sig_out[i] = (out[i] - ret[i]) * sigmoid_dash(ret[i]);
    }
    // Hidden-layer deltas plus hidden->output weight updates.
    // FIX: the original updated w_hide_out[i][j] *before* adding it into `sum`,
    // so the hidden delta was computed with the already-modified weight.
    // Backprop must propagate the error through the weights as they were
    // during the forward pass, so read the weight before updating it.
    for (int i = 0; i < N_hide; i++){
        double sum = 0.0;
        for (int j = 0; j < N_out; j++){
            sum += sig_out[j] * w_hide_out[i][j];
            dw_hide_out[i][j] = nu * sig_out[j] * output_hide[i] + alpha * dw_hide_out[i][j];
            w_hide_out[i][j] += dw_hide_out[i][j];
        }
        sig_hide[i] = sum * sigmoid_dash(output_hide[i]);
    }
    // Output-layer bias updates.
    for (int i = 0; i < N_out; i++){
        dh_out[i] = nu * sig_out[i] + alpha * dh_out[i];
        h_out[i] += dh_out[i];
    }
    // Input->hidden weight updates.
    for (int i = 0; i < N_in; i++){
        for (int j = 0; j < N_hide; j++){
            dw_in_hide[i][j] = nu * sig_hide[j] * output_in[i] + alpha * dw_in_hide[i][j];
            w_in_hide[i][j] += dw_in_hide[i][j];
        }
    }
    // Hidden-layer bias updates.
    for (int i = 0; i < N_hide; i++){
        dh_hide[i] = nu * sig_hide[i] + alpha * dh_hide[i];
        h_hide[i] += dh_hide[i];
    }
}
// Forward pass: caches each layer's activations and returns the output layer.
// `in` must hold at least N_in values.
std::vector<double> forward_propagation(const std::vector<double>& in){
    for (int i = 0; i < N_in; i++) output_in[i] = in[i];
    for (int i = 0; i < N_hide; i++){
        double sum = 0.0;
        for (int j = 0; j < N_in; j++){
            sum += output_in[j] * w_in_hide[j][i];
        }
        output_hide[i] = sigmoid(sum + h_hide[i], 0.0);
    }
    for (int i = 0; i < N_out; i++){
        double sum = 0.0;
        for (int j = 0; j < N_hide; j++){
            sum += output_hide[j] * w_hide_out[j][i];
        }
        output_out[i] = sigmoid(sum + h_out[i], 0.0);
    }
    return output_out;
}
public:
// Builds an n_in -> n_hide -> n_out network.  Weights start uniformly in
// [-0.3, 0.3); biases and momentum terms start at zero.
NeuralNetwork(int n_in, int n_out, int n_hide):
N_in(n_in), N_out(n_out), N_hide(n_hide),
output_in(n_in, 0.0), output_out(n_out, 0.0),
output_hide(n_hide, 0.0),
h_hide(n_hide, 0.0),
h_out(n_out, 0.0),
w_in_hide(n_in, std::vector<double>(n_hide, 0.0)),
w_hide_out(n_hide, std::vector<double>(n_out, 0.0)),
dh_hide(n_hide, 0.0),
dh_out(n_out, 0.0),
dw_in_hide(n_in, std::vector<double>(n_hide, 0.0)),
dw_hide_out(n_hide, std::vector<double>(n_out, 0.0)){
    for (int i = 0; i < n_in; i++){
        for (int j = 0; j < n_hide; j++){
            w_in_hide[i][j] = frand() * 0.6 - 0.3;
        }
    }
    for (int i = 0; i < n_hide; i++){
        for (int j = 0; j < n_out; j++){
            w_hide_out[i][j] = frand() * 0.6 - 0.3;
        }
    }
}
// Runs `loop` full passes over the training file.  Expected file format:
//   <case count> then, per case, N_in input values followed by N_out targets.
// The file is re-opened and re-read on every pass.
// FIX: the original performed no stream checks, so a missing or truncated
// file left `train_case`/`tmp` uninitialized; now training stops cleanly.
void train(const std::string& filename, int loop){
    std::vector<double> in, out;
    for (int i = 0; i < loop; i++){
        std::ifstream ifs(filename.c_str());
        int train_case = 0;
        if (!(ifs >> train_case)) return;   // unreadable file: nothing to train on
        for (int j = 0; j < train_case; j++){
            double tmp;
            in.clear();
            out.clear();
            for (int k = 0; k < N_in; k++){
                if (!(ifs >> tmp)) return;  // truncated data: stop cleanly
                in.push_back(tmp);
            }
            for (int k = 0; k < N_out; k++){
                if (!(ifs >> tmp)) return;
                out.push_back(tmp);
            }
            back_propagation(in, out);
        }
    }
}
// Reads N_in whitespace-separated values per sample from stdin until EOF,
// printing the network's outputs space-separated, one sample per line.
void test(){
    std::vector<double> v(N_in);
    while (std::cin >> v[0]){
        for (int i = 1; i < N_in; i++) std::cin >> v[i];
        std::vector<double> ret = forward_propagation(v);
        for (size_t i = 0; i < ret.size(); i++){
            std::cout << ret[i] << " ";
        }
        std::cout << std::endl;
    }
}
};
int main(){
NeuralNetwork neural(2,1,2);
neural.train("train.in", 5000);
neural.test();
return 0;
}