(將28*28=784個(gè)像素的灰度值標(biāo)準(zhǔn)化為(0,1)的實(shí)數(shù)作為輸入層的數(shù)據(jù))
BP神經(jīng)網(wǎng)絡(luò)的基本原理
BP神經(jīng)網(wǎng)絡(luò)的C++實(shí)現(xiàn)
將BP神經(jīng)網(wǎng)絡(luò)應(yīng)用于手寫(xiě)數(shù)字識(shí)別
坑點(diǎn)
存在的疑惑
參考資料:機(jī)器學(xué)習(xí)(西瓜書(shū)) - 周志華
如圖所示,一個(gè)簡(jiǎn)單的BP網(wǎng)絡(luò)包含輸入層,隱藏層和輸出層。
給定輸入值 $x_1, x_2, \ldots, x_n$,隱藏層和輸出層的輸出值分別如下式所示。
$v, w$ 為連接權(quán),$\gamma, \theta$ 為閾值。$f$ 為激活函數(shù),一般取 sigmoid 函數(shù) $f(x) = \dfrac{1}{1+e^{-x}}$。
它有很好的性質(zhì),為求解梯度提供了便利
對(duì)訓(xùn)練例 $(x_1, y_1), \ldots, (x_n, y_n)$,假定神經(jīng)網(wǎng)絡(luò)的輸出為 $\hat{y}_1, \ldots, \hat{y}_n$,則定義網(wǎng)絡(luò)的均方誤差為
我們可以根據(jù)網(wǎng)絡(luò)的均方誤差,采取梯度下降的方法,調(diào)整連接權(quán)與閾值的參數(shù)值,例如
為盡量減小均方誤差,$\Delta v$ 應(yīng)取均方誤差關(guān)于 $v_{ij}$ 的負(fù)梯度方向。
我們令
其中 $\eta \in (0,1)$ 稱(chēng)為學(xué)習(xí)率,控制著算法每一輪迭代中的更新步長(zhǎng),若太大容易震蕩,太小收斂速度又會(huì)過(guò)慢。
這里略去使用鏈?zhǔn)角髮?dǎo)法則求解梯度的具體過(guò)程,僅給出結(jié)果。
從輸入數(shù)據(jù)計(jì)算輸出數(shù)據(jù)和誤差的過(guò)程稱(chēng)為前向傳播。而調(diào)整權(quán)值和閾值則從輸出到輸入的方向進(jìn)行,稱(chēng)為反向傳播(back propagation)。
算法的流程圖如下
根據(jù)西瓜書(shū)上的推導(dǎo)與流程,實(shí)現(xiàn)了BP神經(jīng)網(wǎng)絡(luò)的基本框架
const int NX = 784, NB = 500, NY = 10;  // node counts: input layer X, hidden layer B, output layer Y
const double eta = 0.1;                 // learning rate

struct Node {
    double val{};            // activation value (recomputed every forward pass)
    double bias{};           // threshold (theta / gamma in the watermelon book's notation)
    vector<double> weight;   // weights of the edges leaving this node
} x[NX], b[NB], y[NY];       // input layer X, hidden layer B, output layer Y

double g[NY], e[NB];             // output-/hidden-layer gradients for back-propagation
double trainx[NX], trainy[NY];   // current sample: normalized pixels / one-hot label

// Logistic activation; its derivative f'(z) = f(z)(1 - f(z)) makes the
// gradient terms below cheap to compute.
double sigmoid(double x) { return 1.0 / (1.0 + exp(-x)); }

// Uniform random weight in (-1, 1).
// FIX: the original `rand() % 10 / 5.0 - 1` produced only the 10 discrete
// values {-1.0, -0.8, ..., 0.8} and never covered the documented range.
double get_rand_weight() { return 2.0 * rand() / (RAND_MAX + 1.0) - 1.0; }

// Uniform random bias in (-0.01, 0.01) (same coarse-grid defect fixed as above).
double get_rand_bias() { return 0.02 * rand() / (RAND_MAX + 1.0) - 0.01; }

// Initialize all connection weights and thresholds with small random values.
void init() {
    for (int i = 0; i < NX; i++) {
        // Input nodes carry raw data, so they need no bias of their own.
        for (int j = 0; j < NB; j++) x[i].weight.push_back(get_rand_weight());
    }
    for (int i = 0; i < NB; i++) {
        b[i].bias = get_rand_bias();
        for (int j = 0; j < NY; j++) b[i].weight.push_back(get_rand_weight());
    }
    for (int i = 0; i < NY; i++) y[i].bias = get_rand_bias();
}

// Forward propagation: compute hidden and output activations from trainx.
void forward() {
    // The accumulators persist between calls, so they MUST be cleared first!
    for (int i = 0; i < NB; i++) b[i].val = 0;
    for (int i = 0; i < NY; i++) y[i].val = 0;
    // Load the current sample into the input layer.
    for (int i = 0; i < NX; i++) x[i].val = trainx[i];
    // Input -> hidden weighted sums.
    for (int i = 0; i < NX; i++)
        for (int j = 0; j < NB; j++) b[j].val += x[i].val * x[i].weight[j];
    // Hidden activations (threshold is subtracted before the sigmoid).
    for (int i = 0; i < NB; i++) b[i].val = sigmoid(b[i].val - b[i].bias);
    // Hidden -> output weighted sums.
    for (int i = 0; i < NB; i++)
        for (int j = 0; j < NY; j++) y[j].val += b[i].val * b[i].weight[j];
    // Output activations.
    for (int i = 0; i < NY; i++) y[i].val = sigmoid(y[i].val - y[i].bias);
}

// Back propagation: one gradient-descent step toward the target trainy.
void back() {
    // Output-layer gradient g and hidden-layer gradient e
    // (eqs. 5.10 and 5.15 in the watermelon book).
    for (int i = 0; i < NY; i++)
        g[i] = y[i].val * (1 - y[i].val) * (trainy[i] - y[i].val);
    for (int i = 0; i < NB; i++) {
        double res = 0;
        for (int j = 0; j < NY; j++) res += b[i].weight[j] * g[j];
        e[i] = b[i].val * (1 - b[i].val) * res;
    }
    // Update w, theta, v, gamma. Note that e was computed from the OLD
    // hidden->output weights above, so this update order is correct.
    // Biases use -= because the net input is (sum - bias): d(net)/d(bias) = -1.
    for (int i = 0; i < NB; i++)
        for (int j = 0; j < NY; j++) b[i].weight[j] += eta * b[i].val * g[j];
    for (int i = 0; i < NY; i++) y[i].bias -= eta * g[i];
    for (int i = 0; i < NX; i++)
        for (int j = 0; j < NB; j++) x[i].weight[j] += eta * x[i].val * e[j];
    for (int i = 0; i < NB; i++) b[i].bias -= eta * e[i];
}
數(shù)據(jù)處理過(guò)程參考了這篇博客https://www.cnblogs.com/alphainf/p/16395313.html
使用MNIST數(shù)據(jù)集,可以從官網(wǎng)http://yann.lecun.com/exdb/mnist/獲取。
訓(xùn)練集包含60000組28*28的手寫(xiě)數(shù)字灰度圖像,以及每個(gè)圖像對(duì)應(yīng)的正確數(shù)字0~9。
我們可以將28*28=784個(gè)像素的灰度值標(biāo)準(zhǔn)化為(0,1)的實(shí)數(shù),作為輸入層的數(shù)據(jù)。
輸出層的節(jié)點(diǎn)數(shù)設(shè)為10,$y_0$ 到 $y_9$ 分別表示輸入圖像為0~9的概率。
隱藏層節(jié)點(diǎn)數(shù)量可以自行設(shè)定,這里取500。
FILE *fImg, *fAns; fImg=fopen("train-images.idx3-ubyte","rb"); fseek(fImg, 16, SEEK_SET); fAns=fopen("train-labels.idx1-ubyte","rb"); fseek(fAns, 8, SEEK_SET); //讀入一張新的圖片 //除了前16字節(jié),接下來(lái)的信息都是一張一張的圖片 //每張圖片大小為28*28 = 784 = NX,每個(gè)char表示該像素對(duì)應(yīng)的灰度,范圍為0至255 unsigned char img[NX], ans; fread(img, 1, NX, fImg); for (int i = 0; i < NX; i++) trainx[i] = (double)img[i] / 255.0; //讀入該圖片對(duì)應(yīng)的答案 //除了前8字節(jié),第k個(gè)字節(jié)對(duì)應(yīng)第k張圖片的正確答案 fread(&ans,1,1,fAns); for(int i = 0; i < NY; i++) trainy[i] = (i == ans) ? 1 : 0;
下面這段代碼,可以粗略地將圖像和訓(xùn)練過(guò)程可視化。
for (int i = 0; i < 28; i++) { for (int j = 0; j < 28; j++) { if (trainx[i * 28 + j] != 0) cout << 'X'; else cout << ' '; } cout << endl; } cout << "Test Case #" << Case <<", result is " << res << ", answer is " << (int)ans << endl;
完整代碼如下
#include <iostream>
#include <cstdio>   // FIX: FILE/fopen/fread were used without including <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>
using namespace std;

const int NX = 784, NB = 500, NY = 10;  // node counts: input layer X, hidden layer B, output layer Y
const double eta = 0.1;                 // learning rate

struct Node {
    double val{};            // activation value (recomputed every forward pass)
    double bias{};           // threshold (theta / gamma in the watermelon book's notation)
    vector<double> weight;   // weights of the edges leaving this node
} x[NX], b[NB], y[NY];       // input layer X, hidden layer B, output layer Y

double g[NY], e[NB];             // output-/hidden-layer gradients for back-propagation
double trainx[NX], trainy[NY];   // current sample: normalized pixels / one-hot label

// Logistic activation; f'(z) = f(z)(1 - f(z)) keeps the gradients cheap.
double sigmoid(double x) { return 1.0 / (1.0 + exp(-x)); }

// Uniform random weight in (-1, 1).
// FIX: the original `rand() % 10 / 5.0 - 1` produced only the 10 discrete
// values {-1.0, -0.8, ..., 0.8} and never covered the documented range.
double get_rand_weight() { return 2.0 * rand() / (RAND_MAX + 1.0) - 1.0; }

// Uniform random bias in (-0.01, 0.01) (same coarse-grid defect fixed as above).
double get_rand_bias() { return 0.02 * rand() / (RAND_MAX + 1.0) - 0.01; }

// Initialize all connection weights and thresholds with small random values.
void init() {
    for (int i = 0; i < NX; i++) {
        // Input nodes carry raw data, so they need no bias of their own.
        for (int j = 0; j < NB; j++) x[i].weight.push_back(get_rand_weight());
    }
    for (int i = 0; i < NB; i++) {
        b[i].bias = get_rand_bias();
        for (int j = 0; j < NY; j++) b[i].weight.push_back(get_rand_weight());
    }
    for (int i = 0; i < NY; i++) y[i].bias = get_rand_bias();
}

// Forward propagation: compute hidden and output activations from trainx.
void forward() {
    // The accumulators persist between calls, so they MUST be cleared first!
    for (int i = 0; i < NB; i++) b[i].val = 0;
    for (int i = 0; i < NY; i++) y[i].val = 0;
    // Load the current sample into the input layer.
    for (int i = 0; i < NX; i++) x[i].val = trainx[i];
    // Input -> hidden weighted sums.
    for (int i = 0; i < NX; i++)
        for (int j = 0; j < NB; j++) b[j].val += x[i].val * x[i].weight[j];
    // Hidden activations (threshold is subtracted before the sigmoid).
    for (int i = 0; i < NB; i++) b[i].val = sigmoid(b[i].val - b[i].bias);
    // Hidden -> output weighted sums.
    for (int i = 0; i < NB; i++)
        for (int j = 0; j < NY; j++) y[j].val += b[i].val * b[i].weight[j];
    // Output activations.
    for (int i = 0; i < NY; i++) y[i].val = sigmoid(y[i].val - y[i].bias);
}

// Back propagation: one gradient-descent step toward the target trainy.
void back() {
    // Output-layer gradient g and hidden-layer gradient e
    // (eqs. 5.10 and 5.15 in the watermelon book).
    for (int i = 0; i < NY; i++)
        g[i] = y[i].val * (1 - y[i].val) * (trainy[i] - y[i].val);
    for (int i = 0; i < NB; i++) {
        double res = 0;
        for (int j = 0; j < NY; j++) res += b[i].weight[j] * g[j];
        e[i] = b[i].val * (1 - b[i].val) * res;
    }
    // Update w, theta, v, gamma. Note that e was computed from the OLD
    // hidden->output weights above, so this update order is correct.
    // Biases use -= because the net input is (sum - bias): d(net)/d(bias) = -1.
    for (int i = 0; i < NB; i++)
        for (int j = 0; j < NY; j++) b[i].weight[j] += eta * b[i].val * g[j];
    for (int i = 0; i < NY; i++) y[i].bias -= eta * g[i];
    for (int i = 0; i < NX; i++)
        for (int j = 0; j < NB; j++) x[i].weight[j] += eta * x[i].val * e[j];
    for (int i = 0; i < NB; i++) b[i].bias -= eta * e[i];
}

FILE *fImg, *fAns;
int result[1000000] = {0};  // per-sample outcome: 1 = correct, 0 = wrong

// Train on the Case-th sample: read one image+label, run a forward pass,
// record whether the prediction was right, then back-propagate.
void train(int Case) {
    // Each image is 28*28 = 784 = NX bytes, one gray level (0..255) per pixel.
    // FIX: fread results were previously ignored; bail out on truncated data.
    unsigned char img[NX], ans;
    if (fread(img, 1, NX, fImg) != (size_t)NX) return;
    for (int i = 0; i < NX; i++) trainx[i] = (double)img[i] / 255.0;
    // After the 8-byte header, the k-th label byte matches the k-th image.
    if (fread(&ans, 1, 1, fAns) != 1) return;
    for (int i = 0; i < NY; i++) trainy[i] = (i == ans) ? 1 : 0;
    // Forward pass; the predicted digit is the output node with the largest value.
    forward();
    int res = 0;
    for (int i = 0; i <= 9; i++)
        if (y[i].val > y[res].val) res = i;
    result[Case] = (res == ans) ? 1 : 0;
    // (Optional: see the ASCII visualization snippet in the article body.)
    back();
    // Every P cases, print the number correct among the last P samples.
    int P = 100, cnt = 0;
    if (Case % P == 0) {
        for (int i = 0; i < P; i++) cnt += result[Case - i];
        cout << Case << " " << cnt << endl;
    }
}

int main() {
    fImg = fopen("train-images.idx3-ubyte", "rb");
    fAns = fopen("train-labels.idx1-ubyte", "rb");
    // FIX: fail fast instead of crashing inside fread when a file is missing.
    if (fImg == NULL || fAns == NULL) {
        cerr << "cannot open MNIST data files" << endl;
        return 1;
    }
    fseek(fImg, 16, SEEK_SET);  // skip the 16-byte image-file header
    fseek(fAns, 8, SEEK_SET);   // skip the 8-byte label-file header
    freopen("result.txt", "w", stdout);
    init();
    for (int Case = 1; Case <= 60000; Case++) {
        train(Case);
    }
    fclose(fImg);  // FIX: the original leaked both file handles
    fclose(fAns);
    return 0;
}
程序每訓(xùn)練100次,會(huì)輸出過(guò)去一百次測(cè)試的正確率,在60000次測(cè)試后,平均正確率達(dá)到85%左右
訓(xùn)練次數(shù)-正確率曲線如圖所示
前向傳播之前要把節(jié)點(diǎn)原有的輸入值清零。
一開(kāi)始將閾值和連接權(quán)用同樣的(-1,1)隨機(jī)數(shù)生成器,發(fā)現(xiàn)網(wǎng)絡(luò)會(huì)失效,只會(huì)輸出同一個(gè)答案。嘗試將閾值調(diào)小后才提升了準(zhǔn)確率。
代碼中對(duì)于閾值的反向傳播處理可能存在問(wèn)題。
聯(lián)系客服