#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include "nndefs.h"
#include "datamat.h"
#include "params.h"
#include "neural.h"

float gasdev(int *idum, float dzdiv);
int sign (float x, float y);
float sign_of_arg (float x);
float Max (float a, float b);
float Random(float randz);
void Logit(const char* fmt, ...);
static char FMT_E[] = "%e ";
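/* afunc() and dafunc() -- the activation function and its derivative -- are
   defined elsewhere (presumably in neural.h). A minimal sketch of the usual
   choice, the logistic sigmoid with its derivative written in terms of the
   activation value (an assumption, not necessarily the shipped definitions):

       static float afunc (float x)  { return 1.0f / (1.0f + (float) exp(-x)); }
       static float dafunc (float y) { return y * (1.0f - y); }
*/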


NEURAL *NCreateNeural(void)
{
    NEURAL *pN;

    pN = (NEURAL*) malloc (sizeof(NEURAL));

    pN->m_version = REVLEVEL;
    pN->m_istate = pN->m_ninputs = pN->m_noutputs = pN->m_nhidden = 0;
    pN->m_params = NULL;
    pN->m_sumerr2 = 0;
    pN->m_cnt = 0;
    pN->m_itmax = 2;
    pN->m_dm = NULL;

    pN->m_ni = NULL;
    pN->m_nout = NULL;
    pN->m_hinputw = NULL;
    pN->m_houtputv = NULL;
    pN->m_htheta = NULL;
    pN->m_oinputw = NULL;
    pN->m_otheta = NULL;

    pN->m_olastoutputv = NULL;
    pN->m_hlastoutputv = NULL;

#ifdef VERBOSE
    Logit ("CNeural null constructor\n");
#endif
    return pN;
}

void NDeleteNeural(NEURAL *pN)
{
#ifdef VERBOSE
    Logit ("CNeural destructor\n");
#endif

    if (pN->m_istate & NN_PARAMSLOADED) free (pN->m_params);

    if (pN->m_noutputs) {
#ifdef VERBOSE
        Logit (" Destroying in=%d hid=%d out=%d\n", pN->m_ninputs, pN->m_nhidden, pN->m_noutputs);
#endif
        free_2d_floats (pN->m_hinputw, pN->m_nhidden);
        free_2d_floats (pN->m_oinputw, pN->m_noutputs);
#ifdef NN_STHRU
        free_2d_floats (pN->m_iinputw, pN->m_noutputs);
#endif
        free (pN->m_ni);
        free (pN->m_houtputv);
        free (pN->m_htheta);
        free (pN->m_otheta);
        free (pN->m_nout);
        free (pN->m_olastoutputv);
    }
    if (pN->m_istate & NN_DYNAMIC) {
        if (pN->m_noutputs) {
#ifdef VERBOSE
            Logit (" Destroy dynamic\n");
#endif
            free_2d_floats (pN->m_hlastdelta, pN->m_nhidden);
            free_2d_floats (pN->m_olastdelta, pN->m_noutputs);
#ifdef NN_STHRU
            free_2d_floats (pN->m_ilastdelta, pN->m_noutputs);
#endif
            free (pN->m_hlastvar);
            free (pN->m_hlearn);
            free (pN->m_htlearn);
            free (pN->m_olastvar);
            free (pN->m_otraining);
            free (pN->m_olearn);
            free (pN->m_otlearn);
            free (pN->m_hlastoutputv);
        }
    }
    if (pN->m_istate & NN_DATAMAT) {
#ifdef VERBOSE
        Logit (" Destroy DM\n");
#endif
        free (pN->m_dm);
    }
    free (pN);    /* the NEURAL struct itself was malloc'd in NCreateNeural */
}

// add another element to a vector

float *ReallocVector(int num, float *arptr, float init)
{
    float *temp;
    int i;

    temp = (float *) malloc (sizeof(float)*(num+1));
    for (i=0; i<num; i++) temp[i] = arptr[i];
    free (arptr);
    temp[num] = init;
    return temp;
}

// Add another row to a matrix (grow the first dimension)

float **ReallocFloats1(int first, int second, float **arptr, float ran)
{
    float **temp;
    int i, j;

    temp = alloc_2d_floats (first+1, second);
    for (i=0; i < first; i++)
        for (j=0; j < second; j++)
            temp[i][j] = arptr[i][j];
    for (j=0; j < second; j++)
        temp[first][j] = Random(ran);
    free_2d_floats (arptr, first);
    return temp;
}

// Add another column to a matrix (grow the second dimension)

float **ReallocFloats2(int first, int second, float **arptr, float ran)
{
    float **temp;
    int i, j;

    temp = alloc_2d_floats (first, second+1);
    for (i=0; i < first; i++)
        for (j=0; j < second; j++)
            temp[i][j] = arptr[i][j];
    for (j=0; j < first; j++)
        temp[j][second] = Random(ran);
    free_2d_floats (arptr, first);
    return temp;
}
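/* Orientation note: m_hinputw is [hidden][input], so a new hidden neuron adds
   a row (ReallocFloats1); m_oinputw is [output][hidden], so a new hidden
   neuron adds a column (ReallocFloats2). */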

// Add another hidden neuron to a dynamic learning neural net

void NAddHidden(NEURAL *pN)
{
    int oldhid;

    if (pN->m_nhidden >= pN->m_params->m_AImaxhid) return;
    oldhid = pN->m_nhidden;
    pN->m_nhidden++;
#ifdef VERBOSE
    Logit ("CNeural AddHidden Adding hidden neuron now=%d\n", pN->m_nhidden);
#endif

    pN->m_hinputw    = ReallocFloats1(oldhid, pN->m_ninputs, pN->m_hinputw, pN->m_params->m_randz);
    pN->m_oinputw    = ReallocFloats2(pN->m_noutputs, oldhid, pN->m_oinputw, pN->m_params->m_randz);
    pN->m_hlastdelta = ReallocFloats1(oldhid, pN->m_ninputs, pN->m_hlastdelta, pN->m_params->m_randz);
    pN->m_olastdelta = ReallocFloats2(pN->m_noutputs, oldhid, pN->m_olastdelta, pN->m_params->m_randz);
#ifdef NN_STHRU
    /* FIXME? m_ilastdelta is [noutputs][ninputs]; its shape does not depend on
       the hidden-unit count, yet this grows its second dimension. */
    pN->m_ilastdelta = ReallocFloats2(pN->m_noutputs, pN->m_ninputs, pN->m_ilastdelta, pN->m_params->m_randz);
#endif

    pN->m_houtputv = ReallocVector(oldhid, pN->m_houtputv, 0.0f);
    pN->m_htheta   = ReallocVector(oldhid, pN->m_htheta, Random(pN->m_params->m_randz));
    pN->m_hlastvar = ReallocVector(oldhid, pN->m_hlastvar, 0.0f);
    pN->m_hlearn   = ReallocVector(oldhid, pN->m_hlearn, pN->m_params->m_Hlearning_rate);
    pN->m_htlearn  = ReallocVector(oldhid, pN->m_htlearn, pN->m_params->m_tlearning_rate);
}

float NGetROutput(NEURAL *pN, const int neuron)
{
    return (DRescale(pN->m_dm, pN->m_nout[neuron], 'O', neuron));
}

float NGetRInput(NEURAL *pN, const int neuron)
{
    return (DRescale(pN->m_dm, pN->m_ni[neuron], 'I', neuron));
}

void NSetRInput(NEURAL *pN, const int neuron, float f)
{
    pN->m_ni[neuron] = DScale(pN->m_dm, f, 'I', neuron);
}

// Attach the Design Matrix to the Dynamic Neural Net

void NSetDM(NEURAL *pN, DATAMAT *a)
{
    pN->m_dm = a;
    pN->m_istate |= NN_DATAMAT;
}

// Back-propagate output error to weights

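/* Weight updates follow the generalized delta rule with momentum:
       delta_w = learn * VAR * input  +  alpha * last_delta
   where VAR = ERR * dafunc(output) at the output layer; at the hidden layer
   VAR is the output-layer error propagated back through m_oinputw, times
   dafunc(hidden output). */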
int NBackProp1(NEURAL *pN, int cnt)
{
    int i, j, k;
    static int idummy = -13;
    float deltaw, ERR, VAR, sum1;

    // calculate the outputs of the hidden layer

    for (i=0; i<pN->m_ninputs; i++) pN->m_ni[i] = pN->m_dm->m_iarray[i][cnt];
    // add Gaussian noise to the inputs
    if (pN->m_params->m_inrandzdiv != 0.0)
        for (i=0; i<pN->m_ninputs; i++) pN->m_ni[i] *= (1 + gasdev(&idummy, pN->m_params->m_inrandzdiv));

    for (i=0; i < pN->m_nhidden; i++) {
        for (sum1=0, j=0; j < pN->m_ninputs; j++)
            sum1 += (pN->m_ni[j] * pN->m_hinputw[i][j]);
        sum1 += pN->m_htheta[i];
        pN->m_houtputv[i] = afunc (sum1);
    }

    // connect hidden layer 1 to output neurons

    for (j=0; j < pN->m_noutputs; j++) {
        for (sum1=0, i=0; i < pN->m_nhidden; i++)
            sum1 += (pN->m_houtputv[i] * pN->m_oinputw[j][i]);
#ifdef NN_STHRU
        if (pN->m_params->m_TrainFlags & TF_NOINTOOUT) {
            for (i=0; i < pN->m_ninputs; i++)
                sum1 += (pN->m_ni[i] * pN->m_iinputw[j][i]);
        }
#endif
        sum1 += pN->m_otheta[j];
        pN->m_nout[j] = afunc (sum1);
    }

    // BP output layer to hidden
    for (i=0; i < pN->m_noutputs; i++) {
        if (pN->m_params->m_inrandzdiv == 0.0)
            ERR = (pN->m_dm->m_oarray[i][cnt] - pN->m_nout[i]);
        else    // add Gaussian noise to the target
            ERR = (pN->m_dm->m_oarray[i][cnt] *
                   (1 + gasdev(&idummy, pN->m_params->m_inrandzdiv))
                   - pN->m_nout[i]);

        pN->m_sumerr2 += ERR * ERR;
        VAR = ERR * dafunc (pN->m_nout[i]);
        pN->m_olastvar[i] = VAR;
        for (j=0; j < pN->m_nhidden; j++) {
            deltaw = (pN->m_olearn[i] * VAR * pN->m_houtputv[j]) +
                     (pN->m_params->m_alpha * pN->m_olastdelta[i][j]);
            pN->m_olastdelta[i][j] = deltaw;
            pN->m_oinputw[i][j] += deltaw;
        }
#ifdef NN_STHRU
        if (pN->m_params->m_TrainFlags & TF_NOINTOOUT) {
            for (j=0; j < pN->m_ninputs; j++) {
                deltaw = (pN->m_olearn[i] * VAR * pN->m_ni[j] * pN->m_params->m_inoutlearn) +
                         (pN->m_params->m_alpha * pN->m_ilastdelta[i][j]);
                pN->m_ilastdelta[i][j] = deltaw;
                pN->m_iinputw[i][j] += deltaw;
            }
        }
#endif
        pN->m_otheta[i] += (pN->m_otlearn[i] * VAR);
    }
    // BP hidden layer to input
    for (j=0; j < pN->m_nhidden; j++) {
        for (VAR = 0.f, i=0; i < pN->m_noutputs; i++)
            VAR += (pN->m_olastvar[i] * pN->m_oinputw[i][j]);
        VAR *= dafunc(pN->m_houtputv[j]);
        for (k=0; k<pN->m_ninputs; k++) {
            deltaw = (pN->m_hlearn[j] * VAR * pN->m_ni[k]) +
                     (pN->m_params->m_alpha * pN->m_hlastdelta[j][k]);
            pN->m_hlastdelta[j][k] = deltaw;
            pN->m_hinputw[j][k] += deltaw;
        }
        // pN->m_hlastvar[j] = VAR;   /* only if more hidden layers */
        pN->m_htheta[j] += (pN->m_htlearn[j] * VAR);
    }
    return 0;
}

void NFeedForward(NEURAL *pN)
{
    int i, j;
    float sum1;

    /* calculate the outputs of the hidden layer */

    for (i=0; i < pN->m_nhidden; i++) {
        for (sum1=0.f, j=0; j < pN->m_ninputs; j++)
            sum1 += (pN->m_ni[j] * pN->m_hinputw[i][j]);
        sum1 += pN->m_htheta[i];
        pN->m_houtputv[i] = afunc (sum1);
    }

    /* connect hidden layer 1 to output neurons */

    for (j=0; j < pN->m_noutputs; j++) {
        for (sum1=0.f, i=0; i < pN->m_nhidden; i++)
            sum1 += (pN->m_houtputv[i] * pN->m_oinputw[j][i]);
#ifdef NN_STHRU
        if (pN->m_params->m_TrainFlags & TF_NOINTOOUT) {
            for (i=0; i < pN->m_ninputs; i++)
                sum1 += (pN->m_ni[i] * pN->m_iinputw[j][i]);
        }
#endif
        sum1 += pN->m_otheta[j];
        pN->m_nout[j] = afunc (sum1);
    }
}


void NClearDelta(NEURAL *pN)
{
    int i, j;

    for (i=0; i<pN->m_nhidden; i++)
        for (j=0; j<pN->m_ninputs; j++) pN->m_hlastdelta[i][j] = 0.f;
    for (i=0; i<pN->m_noutputs; i++)
        for (j=0; j<pN->m_nhidden; j++) pN->m_olastdelta[i][j] = 0.f;
#ifdef NN_STHRU
    for (i=0; i<pN->m_noutputs; i++)
        for (j=0; j<pN->m_ninputs; j++) pN->m_ilastdelta[i][j] = 0.f;
#endif
}

void NInitializeNetwork(NEURAL *pN)
{
    int i, n;

    /* randomize weights */

    // de-allocate extra hidden units
    free_2d_floats (pN->m_hinputw, pN->m_nhidden);
    free_2d_floats (pN->m_oinputw, pN->m_noutputs);
    free_2d_floats (pN->m_hlastdelta, pN->m_nhidden);
    free_2d_floats (pN->m_olastdelta, pN->m_noutputs);
#ifdef NN_STHRU
    free_2d_floats (pN->m_ilastdelta, pN->m_noutputs);
#endif

    free (pN->m_houtputv);
    free (pN->m_htheta);
    free (pN->m_hlastvar);
    free (pN->m_hlearn);
    free (pN->m_htlearn);

    if (pN->m_params->m_goodness == 0) pN->m_nhidden = pN->m_params->m_AImaxhid;
    else pN->m_nhidden = 1;

    pN->m_hinputw = alloc_2d_floats (pN->m_nhidden, pN->m_ninputs);
    pN->m_oinputw = alloc_2d_floats (pN->m_noutputs, pN->m_nhidden);
    pN->m_hlastdelta = alloc_2d_floats (pN->m_nhidden, pN->m_ninputs);
    pN->m_olastdelta = alloc_2d_floats (pN->m_noutputs, pN->m_nhidden);
#ifdef NN_STHRU
    pN->m_ilastdelta = alloc_2d_floats (pN->m_noutputs, pN->m_ninputs);
#endif

    pN->m_houtputv = (float *) malloc (sizeof(float)*pN->m_nhidden);
    pN->m_htheta   = (float *) malloc (sizeof(float)*pN->m_nhidden);
    pN->m_hlastvar = (float *) malloc (sizeof(float)*pN->m_nhidden);
    pN->m_hlearn   = (float *) malloc (sizeof(float)*pN->m_nhidden);
    pN->m_htlearn  = (float *) malloc (sizeof(float)*pN->m_nhidden);

    pN->m_sumerr2 = 0;
    pN->m_cnt = 0;

    srand(pN->m_params->m_seed);

    for (n=0; n < pN->m_ninputs; n++) pN->m_ni[n] = 0.f;
    for (n=0; n < pN->m_noutputs; n++) pN->m_nout[n] = 0.f;
    for (i=0; i < pN->m_nhidden; i++) {
        for (n=0; n < pN->m_ninputs; n++) {
            pN->m_hinputw[i][n] = Random(pN->m_params->m_randz);
            pN->m_hlastdelta[i][n] = 0.f;
        }
        pN->m_htheta[i] = Random(pN->m_params->m_randz);
        pN->m_htlearn[i] = pN->m_params->m_tlearning_rate;
        pN->m_hlearn[i] = pN->m_params->m_Hlearning_rate;
        pN->m_houtputv[i] = pN->m_hlastvar[i] = 0.f;
    }
    for (i=0; i < pN->m_noutputs; i++) {
        for (n=0; n < pN->m_nhidden; n++) {
            pN->m_oinputw[i][n] = Random(pN->m_params->m_randz);
            pN->m_olastdelta[i][n] = 0.f;
        }
        pN->m_otheta[i] = Random(pN->m_params->m_randz);
        pN->m_otlearn[i] = pN->m_params->m_tlearning_rate;
        pN->m_olearn[i] = pN->m_params->m_learning_rate;
        pN->m_olastvar[i] = 0.f;
#ifdef NN_STHRU
        for (n=0; n < pN->m_ninputs; n++) {
            pN->m_iinputw[i][n] = Random(pN->m_params->m_randz);
            pN->m_ilastdelta[i][n] = 0.f;
        }
#endif
    }
    // save_weights();
    return;
}

// returns 1 when training is finished, 2 at the end of an eon, else 0

int NQuickTrain(NEURAL *pN, int mode, int increment)
{
    static int train_cnt;
    int dograph;
    int lastrow;

    if (mode == -1) {        // init
        NAI(pN, -1, 0);      // init AI routine
        train_cnt = 0;
        NClearDelta(pN);
        pN->m_sumerr2 = 0;
        return 0;
    }
    pN->m_sumerr2 = 0;
    dograph = 0;
    lastrow = pN->m_dm->m_numrows;

redo:

    // NewTrainingSet(train_cnt, 0);

    if (train_cnt == lastrow) {
        ++pN->m_cnt;
        --increment;

        if (pN->m_cnt >= pN->m_params->m_cnt_max) {
#ifdef VERBOSE
            Logit("Done Training\n");
#endif
            NAI(pN, -2, 0);  // deinit AI routine
            return 1;
        }
        if ((pN->m_cnt % pN->m_params->m_eon) == 0) {
            if (NAI(pN, 0, 0)) {   // adjust_learning_heuristics
                NAI(pN, -2, 0);    // deinit AI routine
                return 1;
            }
            return 2;
        }

        train_cnt = 0;
        if (!increment) return dograph;
        pN->m_sumerr2 = 0;
        goto redo;
    }
    NBackProp1(pN, train_cnt);     // back-propagate error to weights
    train_cnt++;
    goto redo;
}

void NNewTrainingSet(NEURAL *pN, int t, int flag)
{
    int i, k;

    if (!flag) {
        for (k=0; k<pN->m_ninputs; k++) pN->m_ni[k] = pN->m_dm->m_iarray[k][t];
        for (i=0; i<pN->m_noutputs; i++) pN->m_otraining[i] = pN->m_dm->m_oarray[i][t];
    } else {
        for (k=0; k<pN->m_ninputs; k++) pN->m_ni[k] = pN->m_dm->m_itarray[k][t];
        for (i=0; i<pN->m_noutputs; i++) pN->m_otraining[i] = pN->m_dm->m_otarray[i][t];
    }

    return;
}

/* Incremental learning: adjust_learning_heuristics */
int NAI(NEURAL *pN, int flag, int a)
{
    // extern int keytran;
    // extern int ninp, noutp;
    static float RSQ, NWT, LASTRSQ;
    static int nomorehidinc, nmin, nmax;
    static int lasthid;
    static float lastmin, lastmax, min, max;
    int i;
    static int first;
    static float sumdiv, pers;
    static float lastsum;
    static int lastnmin, lastnmax;
    char cmin, cmax, cnmin, cnmax, csum;

    if (flag == -1) {    /* init */
        LASTRSQ = RSQ = lastmin = lastmax = lastsum = 0;
        lastnmin = lastnmax = 0;
        nomorehidinc = 0;
        lasthid = 0;
        first = 1;
        if (pN->m_params->m_eon < 0) pN->m_params->m_eon = 10;
        i = (int) ((pN->m_params->m_cnt_max - pN->m_cnt) / pN->m_params->m_eon);
        if (i < 1) i = 2;
        // if (!a) InitGraph("AI TRAINING", 6, i+1, 0.0, 1.0);

        // The following is for the first graph

        RSQ = NCalcRsquare(pN);
        NWT = NCheckTol(pN, &min, &max, &nmin, &nmax);
        pN->m_stats[0] = 1;
        pN->m_stats[1] = min;
        pN->m_stats[2] = max;
        pN->m_stats[3] = (float) nmin;
        pN->m_stats[4] = (float) nmax;
        pN->m_stats[5] = RSQ;
#ifdef VERBOSE
        Logit ("sumerr=%f min=%f max=%f nmin=%d nmax=%d rsq=%f\n", pN->m_sumerr2, min, max, nmin, nmax, RSQ);
#endif
        return 0;
    }
    if (flag == -2) {    /* deinit */
        // if (!a) CloseGraph();
        return 0;
    }
    lasthid++;

    /* different goodness algorithms
       0 = no measurement
       1 = inc evenly through cnt_max
       2 = same as 3, manual inc
       3 = inc on factors
    */

    switch (pN->m_params->m_goodness) {
    default:
        return 0;
        break;
    case 0:    /* old training method */
    case 1:    /* same as 3, evenly through eons */
    case 2:    /* same as 3, but manual hidden inc */
    case 3:    /* number within tolerance */
        if (first) { sumdiv = pN->m_sumerr2; first = 0; }
        RSQ = NCalcRsquare(pN);
        NWT = NCheckTol(pN, &min, &max, &nmin, &nmax);

        pN->m_stats[0] = pN->m_sumerr2;
        pN->m_stats[1] = min;
        pN->m_stats[2] = max;
        pN->m_stats[3] = (float) nmin;
        pN->m_stats[4] = (float) nmax;
        pN->m_stats[5] = RSQ;

        if ((pN->m_params->m_TrainFlags & TF_STOPONTOL) && (NWT == pN->m_dm->m_numrows)) return 1;
        if ((pN->m_params->m_TrainFlags & TF_STOPONERR) && (pN->m_sumerr2 < pN->m_params->m_errtol)) return 1;
        if ((pN->m_params->m_TrainFlags & TF_STOPONRSQ) && (RSQ > pN->m_params->m_goodrsq)) return 1;

        if ((min + pN->m_params->m_nosigninc) < lastmin) cmin = 'y'; else cmin = 'n';
        if ((max + pN->m_params->m_nosigninc) < lastmax) cmax = 'y'; else cmax = 'n';
        if (nmin < lastnmin) cnmin = 'y'; else cnmin = 'n';
        if (nmax > lastnmax) cnmax = 'y'; else cnmax = 'n';

        if (sumdiv == 0.) pers = 0;
        else pers = (lastsum - pN->m_sumerr2) / sumdiv;
        if (pers > pN->m_params->m_nosigninc) csum = 'y'; else csum = 'n';

        lastmin = min;
        lastmax = max;
        lastnmin = nmin;
        lastnmax = nmax;
        lastsum = pN->m_sumerr2;

#ifdef VERBOSE
        Logit ("NWT=%6.0f RSq=%6f", NWT, RSQ);
        Logit (" min=%6f max=%6f", min, max);
        Logit (" nmin=%4d nmax=%4d", nmin, nmax);
        Logit (" %c%c%c%c%c\n", cmin, cmax, cnmin, cnmax, csum);
#endif
        if ((pN->m_params->m_goodness == 0) || (pN->m_params->m_goodness == 2)) return 0;

        if (pN->m_params->m_goodness == 1) {
            if (lasthid < (pN->m_params->m_cnt_max / pN->m_params->m_AImaxhid / pN->m_params->m_eon)) return 0;
            lasthid = 0;
            goto incrnow;
        }
        if (pN->m_nhidden == 1) goto incrnow;
        /* the bitwise OR equals 'n' only when every flag is 'n' (no improvement) */
        if (!((cmin | cmax | cnmin | cnmax | csum) == 'n')) return 0;
        if ((pN->m_params->m_cnt_max - pN->m_cnt) / pN->m_params->m_eon < pN->m_params->m_cnt_max / pN->m_params->m_eon / 5) nomorehidinc = 1;

        break;

    }    /* end of goodness switch */

    if (nomorehidinc) return 0;
    if (lasthid < (pN->m_params->m_cnt_max / pN->m_params->m_eon / 20)) return 0;
    lasthid = 0;

incrnow:
    if (pN->m_params->m_goodness == 0) return 0;
    if (pN->m_nhidden >= pN->m_params->m_AImaxhid) return 0;

    /* decrement learning rates */

    for (i=0; i < pN->m_nhidden; i++) {
        pN->m_htlearn[i] -= (pN->m_htlearn[i] * pN->m_params->m_hiddegrad);
        pN->m_hlearn[i] -= (pN->m_hlearn[i] * pN->m_params->m_hiddegrad);
    }

    /* create another hidden neuron */
    NAddHidden(pN);
    return 0;    /* continue */
}

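/* NCalcRsquare makes two passes over the training matrix. Pass 1 accumulates
   the per-output mean of the observed values and of the residuals; pass 2
   accumulates SStot = sum((obs - mean_obs)^2) and SSres = sum((resid -
   mean_res)^2). The result is the average over all outputs of
       R^2 = 1 - SSres / SStot
   clamped to [0, 1]. */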
float NCalcRsquare(NEURAL *pN)
{
    double resid, rrq, sum;
    double sumres[32], sumobs[32];
    double averes[32], aveobs[32];
    double rq[32];
    int i, loop;
    int train_cnt;

    for (i=0; i<32; i++) sumres[i] = sumobs[i] = 0.;
    train_cnt = 0;
    loop = 1;

top:
    if (train_cnt == pN->m_dm->m_numrows) {
        if (loop == 1) {
            for (i=0; i<pN->m_noutputs; i++) {
                aveobs[i] = sumobs[i] / (float) train_cnt;
                averes[i] = sumres[i] / (float) train_cnt;
                sumres[i] = sumobs[i] = 0.;
            }
            train_cnt = 0;
            loop = 2;
        }
        else {
            for (i=0; i<pN->m_noutputs; i++) {
                rq[i] = 1 - sumres[i] / sumobs[i];
                if (rq[i] < 0.0) rq[i] = 0.0;
                if (rq[i] > 1.0) rq[i] = 1.0;
            }
            sum = 0.0;
            for (i=0; i<pN->m_noutputs; i++) sum += rq[i];
            rrq = sum / (float) pN->m_noutputs;
            return (float) rrq;
        }
    }
    NNewTrainingSet(pN, train_cnt, 0);

    NFeedForward(pN);

    for (i=0; i<pN->m_noutputs; i++) {
        resid = (double) DGetOutputVal(pN->m_dm, train_cnt, i) - pN->m_nout[i];
        if (loop == 1) {
            sumobs[i] += DGetOutputVal(pN->m_dm, train_cnt, i);   /* observed */
            sumres[i] += resid;                                   /* model error */
        }
        else {
            sumobs[i] += pow(DGetOutputVal(pN->m_dm, train_cnt, i) - aveobs[i], 2);  /* observed */
            sumres[i] += pow(resid - averes[i], 2);                                  /* model error */
        }
    }
    train_cnt++;
    goto top;
}

float NCheckTol(NEURAL *pN, float *min, float *max, int *nmin, int *nmax)
{
    int i, t, si;
    float temp;

    t = 0; *max = 0.f; *min = 0.f;
    *nmin = 0; *nmax = 0;

    for (i=0; i < pN->m_dm->m_numrows; i++) {

        NNewTrainingSet(pN, i, 0);

        NFeedForward(pN);    /* feed forward to output */

        for (si=0; si<pN->m_noutputs; si++) {
            temp = pN->m_nout[si] - pN->m_otraining[si];
            if (temp > *max) *max = temp;          /* largest overshoot */
            if (temp < -*min) *min = -temp;        /* largest undershoot, stored as a magnitude */
            temp = (float) fabs(temp);
            if (temp < pN->m_params->m_tol) t++;   /* count of outputs within tolerance */
            if (pN->m_nout[si] > (pN->m_otraining[si] + pN->m_params->m_tol)) (*nmax)++;
            if (pN->m_nout[si] < (pN->m_otraining[si] - pN->m_params->m_tol)) (*nmin)++;
        }
    }
    return (float) t;
}


//#define itmax 200
#define EPS 1.0e-10

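/* Nfrprmn: Polak-Ribiere conjugate-gradient minimizer (after the Numerical
   Recipes routine FRPRMN). Each iteration line-minimizes along the current
   direction via Nlinmin, then builds the next conjugate direction using
       gam = ((xi + g) . xi) / (g . g)
   where g holds the previous (negated) gradient and xi the freshly computed
   gradient from NforwardDIffGradient. */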
void Nfrprmn(NEURAL *pN, float *p, float ftol, int *iter, float *fret)
{
    int j, its, n;
    float gg, gam, fp, dgg;

    n = pN->m_NDIM;

    fp = NErrorFunction(pN, p);
    NforwardDIffGradient(pN, p, pN->m_xi);
    for (j=0; j<n; j++) {
        pN->m_g[j] = -pN->m_xi[j];
        pN->m_xi[j] = pN->m_h[j] = pN->m_g[j];
    }
    for (its=1; its<=pN->m_itmax; its++) {
        *iter = its;
        Nlinmin(pN, p, pN->m_xi, fret);
        if (2.0*fabs(*fret-fp) <= ftol*(fabs(*fret)+fabs(fp)+EPS)) {
            return;    /* converged to tolerance */
        }
        fp = NErrorFunction(pN, p);
        NforwardDIffGradient(pN, p, pN->m_xi);
        dgg = gg = 0;
        for (j=0; j<n; j++) {
            gg += pN->m_g[j]*pN->m_g[j];
            // dgg += pN->m_xi[j]*pN->m_xi[j];              /* Fletcher-Reeves */
            dgg += (pN->m_xi[j]+pN->m_g[j])*pN->m_xi[j];    /* Polak-Ribiere */
        }
        if (gg == 0.0) {
            return;    /* gradient is exactly zero */
        }
        gam = dgg/gg;
        for (j=0; j<n; j++) {
            pN->m_g[j] = -pN->m_xi[j];
            pN->m_xi[j] = pN->m_h[j] = pN->m_g[j] + gam*pN->m_h[j];
        }
    }
    //nrerror("Too many iterations in FRPRMN");
}

#undef EPS

#define ITMAX 100
#define CGOLD 0.3819660f
#define ZEPS 1.0e-10
#define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a))
#define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d);

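/* Nbrent: one-dimensional minimization of Nf1dim by Brent's method --
   inverse parabolic interpolation with a golden-section fallback (after the
   Numerical Recipes routine BRENT). ax, bx, cx must bracket a minimum. */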
float Nbrent(NEURAL *pN, float ax, float bx, float cx, float tol,
             float *xmin)
{
    int iter;
    float a, b, d, etemp, fu, fv, fw, fx, p, q, r, tol1, tol2, u, v, w, x, xm;
    float e = 0;

    a = ((ax < cx) ? ax : cx);
    b = ((ax > cx) ? ax : cx);
    x = w = v = bx;
    fw = fv = fx = Nf1dim(pN, x);
    for (iter=1; iter<=ITMAX; iter++) {
        xm = 0.5f*(a+b);
        tol2 = (float) (2.0f*(tol1 = (float) (tol*fabs(x)+ZEPS)));
        if (fabs(x-xm) <= (tol2-0.5f*(b-a))) {
            *xmin = x;
            return fx;
        }
        if (fabs(e) > tol1) {
            r = (x-w)*(fx-fv);
            q = (x-v)*(fx-fw);
            p = (x-v)*q-(x-w)*r;
            q = 2.0f*(q-r);
            if (q > 0.0) p = -p;
            q = (float) fabs(q);
            etemp = e;
            e = d;
            if (fabs(p) >= fabs(0.5*q*etemp) || p <= q*(a-x) || p >= q*(b-x))
                d = CGOLD*(e = (x >= xm ? a-x : b-x));
            else {
                d = p/q;
                u = x+d;
                if (u-a < tol2 || b-u < tol2)
                    d = (float) SIGN(tol1, xm-x);
            }
        } else {
            d = CGOLD*(e = (x >= xm ? a-x : b-x));
        }
        u = (float) (fabs(d) >= tol1 ? x+d : x+SIGN(tol1, d));
        fu = Nf1dim(pN, u);
        if (fu <= fx) {
            if (u >= x) a = x; else b = x;
            SHFT(v, w, x, u)
            SHFT(fv, fw, fx, fu)
        } else {
            if (u < x) a = u; else b = u;
            if (fu <= fw || w == x) {
                v = w;
                w = u;
                fv = fw;
                fw = fu;
            } else if (fu <= fv || v == x || v == w) {
                v = u;
                fv = fu;
            }
        }
    }
    Nrerror("Too many iterations in BRENT");
    *xmin = x;
    return fx;
}

#undef ITMAX
#undef CGOLD
#undef ZEPS
#undef SIGN

#define GOLD 1.618034f
#define GLIMIT 100.0f
#define TINY 1.0e-20
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a))
#define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d);

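/* Nmnbrak: given initial points ax, bx, searches downhill by golden-ratio
   expansion and parabolic extrapolation until it returns three points that
   bracket a minimum of Nf1dim (after the Numerical Recipes routine MNBRAK).
   The loop counter guards against a function with no bracketable minimum. */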
void Nmnbrak(NEURAL *pN, float *ax, float *bx, float *cx, float *fa, float *fb,
             float *fc)
{
    float ulim, u, r, q, fu, dum;
    long loopcount;

    *fa = Nf1dim(pN, *ax);
    *fb = Nf1dim(pN, *bx);
    if (*fb > *fa) {
        SHFT(dum, *ax, *bx, dum)
        SHFT(dum, *fb, *fa, dum)
    }
    *cx = (*bx) + GOLD*(*bx-*ax);
    *fc = Nf1dim(pN, *cx);

    loopcount = 0;
    while (*fb > *fc) {
        if (++loopcount > 1000) {
            Logit ("mnbrak loopcount\n");
            exit(1);
        }
        r = (*bx-*ax)*(*fb-*fc);
        q = (*bx-*cx)*(*fb-*fa);
        u = (float) ((*bx) - ((*bx-*cx)*q - (*bx-*ax)*r) /
            (2.0f*SIGN(MAX(fabs(q-r), TINY), q-r)));
        ulim = (*bx) + GLIMIT*(*cx-*bx);
        if ((*bx-u)*(u-*cx) > 0.0) {
            fu = Nf1dim(pN, u);
            if (fu < *fc) {
                *ax = (*bx);
                *bx = u;
                *fa = (*fb);
                *fb = fu;
                return;
            } else if (fu > *fb) {
                *cx = u;
                *fc = fu;
                return;
            }
            u = (*cx) + GOLD*(*cx-*bx);
            fu = Nf1dim(pN, u);
        } else if ((*cx-u)*(u-ulim) > 0.0) {
            fu = Nf1dim(pN, u);
            if (fu < *fc) {
                SHFT(*bx, *cx, u, *cx+GOLD*(*cx-*bx))
                SHFT(*fb, *fc, fu, Nf1dim(pN, u))
            }
        } else if ((u-ulim)*(ulim-*cx) >= 0.0) {
            u = ulim;
            fu = Nf1dim(pN, u);
        } else {
            u = (*cx) + GOLD*(*cx-*bx);
            fu = Nf1dim(pN, u);
        }
        SHFT(*ax, *bx, *cx, u)
        SHFT(*fa, *fb, *fc, fu)
    }
}

#undef GOLD
#undef GLIMIT
#undef TINY
#undef MAX
#undef SIGN
#undef SHFT
float Nf1dim(NEURAL *pN, float x)
{
    int j;
    float f;

    /* evaluate the error along the current search line: f(x) = E(pcom + x*xicom) */
    for (j=0; j<pN->m_NDIM; j++) pN->m_xt[j] = pN->m_pcom[j] + x*pN->m_xicom[j];
    f = NErrorFunction(pN, pN->m_xt);
    return f;
}

#define TOL 2.0e-4

void Nlinmin(NEURAL *pN, float *p, float *xi, float *fret)
{
    int j, n;
    float xx, xmin, fx, fb, fa, bx, ax;

    n = pN->m_NDIM;

    for (j=0; j<n; j++) {
        pN->m_pcom[j] = p[j];
        pN->m_xicom[j] = xi[j];
    }
    ax = 0;
    xx = 1;
    Nmnbrak(pN, &ax, &xx, &bx, &fa, &fx, &fb);
    *fret = Nbrent(pN, ax, xx, bx, (float) TOL, &xmin);
    for (j=0; j<n; j++) {
        xi[j] *= xmin;
        p[j] += xi[j];
    }
}

#undef TOL

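/* Forward-difference approximation to the gradient of NErrorFunction:
       g[j] ~ (f(x0 + h_j*e_j) - f(x0)) / h_j
   with step h_j = sqrt(eta) * Max(|x0[j]|, sign_of_arg(x0[j])), chosen to
   balance truncation error against round-off in f. */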
void NforwardDIffGradient (NEURAL *pN, float *x0, float *g)
{
    int j;
    double sqrteta, stepsizej, tempj, fj, eta, fc;

    // eta = 2.22E-16;
    eta = 2.22E-9;

    fc = NErrorFunction(pN, x0);
    sqrteta = sqrt (eta);
    for (j = 0; j < pN->m_NDIM; ++j) {
        stepsizej = sqrteta * Max ((float) fabs (x0[j]), sign_of_arg (x0[j]));
        tempj = x0[j];
        x0[j] += (float) stepsizej;

        stepsizej = x0[j] - tempj;    // reduce finite-precision error in the step
        fj = NErrorFunction (pN, x0);
        g[j] = (float) ((fj - fc) / stepsizej);

        x0[j] = (float) tempj;        // restore x0[j]
    }
}   // end NforwardDIffGradient

void Nrerror(char *error_text)
{
    Logit ("***ERROR %s\n", error_text);
    exit(1);
}

// NErrorFunction is called by the CG routine to evaluate the total system
// error. It takes the weight vector "x" from the CG routine, copies it into
// the neural-network weight structures, then runs the entire training matrix
// through the network to compute the total squared error.

float NErrorFunction(NEURAL *pN, float *x)
{
    int i, n, traincnt;
    double errorsq;

    for (i=0; i < pN->m_nhidden; i++) {
        for (n=0; n < pN->m_ninputs; n++) pN->m_hinputw[i][n] = *x++;
    }
    for (i=0; i < pN->m_noutputs; i++) {
        for (n=0; n < pN->m_nhidden; n++) pN->m_oinputw[i][n] = *x++;
#ifdef NN_STHRU
        if (pN->m_params->m_TrainFlags & TF_NOINTOOUT)
            for (n=0; n < pN->m_ninputs; n++) pN->m_iinputw[i][n] = *x++;
#endif
    }

    errorsq = 0.0;
    for (traincnt=0; traincnt<pN->m_dm->m_numrows; traincnt++) {
        for (i=0; i<pN->m_ninputs; i++) pN->m_ni[i] = pN->m_dm->m_iarray[i][traincnt];
        NFeedForward(pN);    // calculate outputs
        for (i=0; i<pN->m_noutputs; i++)
            errorsq += pow ((pN->m_dm->m_oarray[i][traincnt] - pN->m_nout[i]), 2);
    }
    return (float) errorsq;
}

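/* NCGTrain: flattens the weight matrices into one parameter vector, runs the
   conjugate-gradient minimizer Nfrprmn over it (iteration budget scaled by
   training progress), then frees the work vectors. The refined weights remain
   in effect in the network because NErrorFunction copies its argument into
   the weight structures on every evaluation. */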
void NCGTrain(NEURAL *pN)
{
    int i, j, n;
    float fret;
    int iter;

    if (pN->m_cnt < 201) return;

    pN->m_NDIM = pN->m_nhidden*pN->m_ninputs;
    pN->m_NDIM += pN->m_noutputs*pN->m_nhidden;

#ifdef NN_STHRU
    if (pN->m_params->m_TrainFlags & TF_NOINTOOUT)
        pN->m_NDIM += pN->m_noutputs*pN->m_ninputs;
#endif

    pN->m_startp = (float *) malloc (sizeof(float)*pN->m_NDIM);
    pN->m_pcom   = (float *) malloc (sizeof(float)*pN->m_NDIM);
    pN->m_xicom  = (float *) malloc (sizeof(float)*pN->m_NDIM);
    pN->m_xt     = (float *) malloc (sizeof(float)*pN->m_NDIM);
    pN->m_g      = (float *) malloc (sizeof(float)*pN->m_NDIM);
    pN->m_h      = (float *) malloc (sizeof(float)*pN->m_NDIM);
    pN->m_xi     = (float *) malloc (sizeof(float)*pN->m_NDIM);

    j = 0;
    for (i=0; i < pN->m_nhidden; i++) {
        for (n=0; n < pN->m_ninputs; n++) pN->m_startp[j++] = pN->m_hinputw[i][n];
    }
    for (i=0; i < pN->m_noutputs; i++) {
        for (n=0; n < pN->m_nhidden; n++) pN->m_startp[j++] = pN->m_oinputw[i][n];
#ifdef NN_STHRU
        if (pN->m_params->m_TrainFlags & TF_NOINTOOUT)
            for (n=0; n < pN->m_ninputs; n++) pN->m_startp[j++] = pN->m_iinputw[i][n];
#endif
    }

#define FTOL 1.0e-6

    // frprmn(pN->m_startp, pN->m_params->m_tol, &iter, &fret);
    if (pN->m_cnt < 500) pN->m_itmax = 2;
    else pN->m_itmax = (int) (pN->m_cnt * 20 / pN->m_params->m_cnt_max);
    Nfrprmn(pN, pN->m_startp, (float) FTOL, &iter, &fret);

    Logit ("frprmn iter=%d fret=%f\n", iter, fret);

    free (pN->m_startp);
    free (pN->m_pcom);
    free (pN->m_xicom);
    free (pN->m_xt);
    free (pN->m_g);
    free (pN->m_h);
    free (pN->m_xi);
}

int ntransl(char *cdummy)
{
    int val = 0;

    if (cdummy[0] != 'N') return -1;
    sscanf(&cdummy[1], "%d", &val);
    // if (val > 12) return -1;
    return val;
}

int NImportNetwork (NEURAL *pN, FILE *fd)
{
    int i, j, sel, stat, h, o;
    char cdummy[80];

    stat = ImportParams(fd, pN->m_params);
    if (stat) return stat;
    pN->m_sumerr2 = 0;

top:
    stat = fscanf (fd, "%79s", cdummy);
    if (stat == EOF) goto errorexit;

    sel = ntransl(cdummy);
    switch (sel) {
    case 99:
        pN->m_dm = DCreateDataMat();
        stat = DImportDataMat(pN->m_dm, fd);
#ifdef VERBOSE
        Logit("Finished import NN\n");
#endif
        return stat;
        break;
    default:
        goto errorexit;
        break;
    case 1:
        fscanf (fd, "%u %d %d %d %ld ", &pN->m_istate, &pN->m_ninputs, &pN->m_nhidden, &pN->m_noutputs, &pN->m_cnt);
        pN->m_istate |= NN_PARAMSLOADED;
        pN->m_ni = (float*) malloc (sizeof(float)*pN->m_ninputs);
        pN->m_houtputv = (float*) malloc (sizeof(float)*pN->m_nhidden);
        pN->m_htheta = (float*) malloc (sizeof(float)*pN->m_nhidden);
        pN->m_otheta = (float*) malloc (sizeof(float)*pN->m_noutputs);
        pN->m_nout = (float*) malloc (sizeof(float)*pN->m_noutputs);
        pN->m_hinputw = alloc_2d_floats (pN->m_nhidden, pN->m_ninputs);
        pN->m_oinputw = alloc_2d_floats (pN->m_noutputs, pN->m_nhidden);
#ifdef NN_STHRU
        pN->m_iinputw = alloc_2d_floats (pN->m_noutputs, pN->m_ninputs);
#endif

        if (pN->m_istate & NN_DYNAMIC) {
            pN->m_hlastvar = (float*) malloc (sizeof(float)*pN->m_nhidden);
            pN->m_hlearn = (float*) malloc (sizeof(float)*pN->m_nhidden);
            pN->m_htlearn = (float*) malloc (sizeof(float)*pN->m_nhidden);
            pN->m_olastvar = (float*) malloc (sizeof(float)*pN->m_noutputs);
            pN->m_otraining = (float*) malloc (sizeof(float)*pN->m_noutputs);
            pN->m_olearn = (float*) malloc (sizeof(float)*pN->m_noutputs);
            pN->m_otlearn = (float*) malloc (sizeof(float)*pN->m_noutputs);
            pN->m_hlastdelta = alloc_2d_floats (pN->m_nhidden, pN->m_ninputs);
            pN->m_olastdelta = alloc_2d_floats (pN->m_noutputs, pN->m_nhidden);
#ifdef NN_STHRU
            pN->m_ilastdelta = alloc_2d_floats (pN->m_noutputs, pN->m_ninputs);
#endif
        }

        for (i=0; i<pN->m_ninputs; i++) pN->m_ni[i] = 0;
        for (h=0; h<pN->m_nhidden; h++) {
            for (i=0; i<pN->m_ninputs; i++) pN->m_hinputw[h][i] = 0;
            pN->m_houtputv[h] = pN->m_htheta[h] = 0;
        }
        for (o=0; o<pN->m_noutputs; o++) {
            for (h=0; h<pN->m_nhidden; h++) pN->m_oinputw[o][h] = 0;
#ifdef NN_STHRU
            for (i=0; i<pN->m_ninputs; i++) pN->m_iinputw[o][i] = 0;
#endif
            pN->m_otheta[o] = pN->m_nout[o] = 0;
        }
        break;
    case 2:
        for (i=0; i<pN->m_ninputs; i++)
            for (j=0; j<pN->m_nhidden; j++) fscanf (fd, "%f ", &pN->m_hinputw[j][i]);
        break;
    case 3:
        for (i=0; i<pN->m_nhidden; i++) fscanf (fd, "%f ", &pN->m_htheta[i]);
        break;
    case 4:
        for (i=0; i<pN->m_noutputs; i++)
            for (j=0; j<pN->m_nhidden; j++) fscanf (fd, "%f ", &pN->m_oinputw[i][j]);
        break;
    case 5:
        for (i=0; i<pN->m_noutputs; i++) fscanf (fd, "%f ", &pN->m_otheta[i]);
        break;
    // Now load data for the dynamic part of the CNeural class
    case 6:
        for (i=0; i<pN->m_nhidden; i++) fscanf (fd, "%f", &pN->m_hlastvar[i]);
        break;
    case 7:
        for (i=0; i<pN->m_nhidden; i++) fscanf (fd, "%f", &pN->m_hlearn[i]);
        break;
    case 8:
        for (i=0; i<pN->m_nhidden; i++) fscanf (fd, "%f", &pN->m_htlearn[i]);
        break;
    case 9:
        for (i=0; i<pN->m_noutputs; i++) fscanf (fd, "%f", &pN->m_olastvar[i]);
        break;
    case 10:
        for (i=0; i<pN->m_noutputs; i++) fscanf (fd, "%f", &pN->m_olearn[i]);
        break;
    case 11:
        for (i=0; i<pN->m_noutputs; i++) fscanf (fd, "%f", &pN->m_otlearn[i]);
        break;
#ifdef NN_STHRU
    case 12:
        for (i=0; i<pN->m_noutputs; i++)
            for (j=0; j<pN->m_ninputs; j++) fscanf (fd, "%f", &pN->m_iinputw[i][j]);
        break;
#endif
    }
    goto top;

errorexit:
    return -1;
}

NEURAL *LoadNetwork (char *filename)
{
    FILE *fd;
    int stat;
    PARAMS *tparams;
    NEURAL *tneural;

    fd = fopen(filename, "r");
    if (fd == NULL) return NULL;

    tparams = (PARAMS*) malloc (sizeof(PARAMS));
    tneural = NCreateNeural();    /* all fields start out zeroed/NULL */
    tneural->m_params = tparams;
    tneural->m_istate |= NN_PARAMSLOADED;

    stat = NImportNetwork(tneural, fd);
    fclose(fd);

    if (stat) {
        NDeleteNeural (tneural);
#ifdef VERBOSE
        Logit ("Error importing network\n");
#endif
        return NULL;
    }
    return tneural;
}

void DumpNeural(NEURAL *pN, FILE *fd)
{
    int i, j;
    static char fmt1[] = "Dump of Neural Network inp=%d hid=%d outp=%d\n";
    static char fmt2[] = "ni = ";
    static char fmt3[] = "\nhinputw = ";
    static char fmt4[] = "\nhoutputv = ";
    static char fmt5[] = "\nhtheta = ";
    static char fmt6[] = "\noinputw = ";
    static char fmt7[] = "otheta = ";
    static char fmt8[] = "\nnout = ";

    static char fmt21[] = "\nhlastvar = ";
    static char fmt22[] = "\nhlearn = ";
    static char fmt23[] = "\nhtlearn = ";
    static char fmt24[] = "\nhlastdelta = ";
    static char fmt25[] = "\nolastvar = ";
    static char fmt26[] = "\notraining = ";
    static char fmt27[] = "\nolearn = ";
    static char fmt28[] = "\notlearn = ";
    static char fmt29[] = "\nolastdelta = ";
    static char fmt30[] = "\nilastdelta = ";
    static char fmt31[] = "\niinputw = ";

    fprintf (fd, fmt1, pN->m_ninputs, pN->m_nhidden, pN->m_noutputs);

    if (!pN->m_ninputs) return;
    if (!pN->m_nhidden) return;
    if (!pN->m_noutputs) return;

    fprintf (fd, fmt2);
    for (i=0; i<pN->m_ninputs; i++)
        fprintf (fd, FMT_E, pN->m_ni[i]);
    fprintf (fd, fmt3);
    for (i=0; i<pN->m_ninputs; i++) {
        for (j=0; j<pN->m_nhidden; j++) fprintf (fd, FMT_E, pN->m_hinputw[j][i]);
    }

    fprintf (fd, fmt4);
    for (i=0; i<pN->m_nhidden; i++) fprintf (fd, FMT_E, pN->m_houtputv[i]);
    fprintf (fd, fmt5);
    for (i=0; i<pN->m_nhidden; i++) fprintf (fd, FMT_E, pN->m_htheta[i]);

    fprintf (fd, fmt6);
    for (i=0; i<pN->m_noutputs; i++) {
        for (j=0; j<pN->m_nhidden; j++) fprintf (fd, FMT_E, pN->m_oinputw[i][j]);
        fprintf (fd, "\n");
    }
    fprintf (fd, fmt7);
    for (i=0; i<pN->m_noutputs; i++) fprintf (fd, FMT_E, pN->m_otheta[i]);

#ifdef NN_STHRU
    fprintf (fd, fmt31);
    for (i=0; i<pN->m_noutputs; i++)
        for (j=0; j<pN->m_ninputs; j++) fprintf (fd, FMT_E, pN->m_iinputw[i][j]);
#endif

    fprintf (fd, fmt8);
    for (i=0; i<pN->m_noutputs; i++) fprintf (fd, FMT_E, pN->m_nout[i]);
    fprintf (fd, "\n");

    if (pN->m_istate & NN_PARAMSLOADED) DumpParams(pN->m_params, fd);

    if (pN->m_istate & NN_DYNAMIC) {

        // dneural part

        fprintf (fd, fmt21);
        for (i=0; i<pN->m_nhidden; i++) fprintf (fd, FMT_E, pN->m_hlastvar[i]);
        fprintf (fd, fmt22);
        for (i=0; i<pN->m_nhidden; i++) fprintf (fd, FMT_E, pN->m_hlearn[i]);
        fprintf (fd, fmt23);
        for (i=0; i<pN->m_nhidden; i++) fprintf (fd, FMT_E, pN->m_htlearn[i]);
        fprintf (fd, fmt24);
        for (i=0; i<pN->m_ninputs; i++) {
            for (j=0; j<pN->m_nhidden; j++) fprintf (fd, FMT_E, pN->m_hlastdelta[j][i]);
        }

        fprintf (fd, fmt25);
        for (i=0; i<pN->m_noutputs; i++) fprintf (fd, FMT_E, pN->m_olastvar[i]);
        fprintf (fd, fmt26);
        for (i=0; i<pN->m_noutputs; i++) fprintf (fd, FMT_E, pN->m_otraining[i]);
        fprintf (fd, fmt27);
        for (i=0; i<pN->m_noutputs; i++) fprintf (fd, FMT_E, pN->m_olearn[i]);
        fprintf (fd, fmt28);
        for (i=0; i<pN->m_noutputs; i++) fprintf (fd, FMT_E, pN->m_otlearn[i]);
        fprintf (fd, fmt29);
        for (i=0; i<pN->m_noutputs; i++) {
            for (j=0; j<pN->m_nhidden; j++) fprintf (fd, FMT_E, pN->m_olastdelta[i][j]);
        }
#ifdef NN_STHRU
        fprintf (fd, fmt30);
        for (i=0; i<pN->m_noutputs; i++) {
            for (j=0; j<pN->m_ninputs; j++) fprintf (fd, FMT_E, pN->m_ilastdelta[i][j]);
        }
#endif
        fprintf (fd, "\n\n");
    }
    if (pN->m_dm != NULL) DumpDataMat(pN->m_dm, fd);
    fprintf (fd, "\n\n");
}

void NInterrogate(NEURAL *pN, float *Ivec, float *Ovec)
{
    int i;

    for (i=0; i < pN->m_ninputs; i++) pN->m_ni[i] = DScale(pN->m_dm, Ivec[i], 'I', i);
    NFeedForward(pN);
    for (i=0; i < pN->m_noutputs; i++) Ovec[i] = DRescale(pN->m_dm, pN->m_nout[i], 'O', i);
}
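/* A minimal usage sketch (illustrative only -- the file name and the
   2-input, 1-output shape are assumptions, not part of this module):

       NEURAL *net = LoadNetwork ("mynet.net");
       if (net != NULL) {
           float in[2] = { 0.5f, 0.25f };
           float out[1];
           NInterrogate (net, in, out);
           Logit ("output = %e\n", out[0]);
           NDeleteNeural (net);
       }
*/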