home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Media Share 9
/
MEDIASHARE_09.ISO
/
cprog
/
neuron.zip
/
NEURON.C
< prev
next >
Wrap
C/C++ Source or Header
|
1992-07-06
|
34KB
|
925 lines
/* Multilayer and/or interconnected network models */
/* Enables the study of many kinds of networks, */
/* including auto-associative networks (Perceptron, Hopfield) */
/* Computation module */
/* TC 2.0 NEURON.C E.FARHI
versions: 0.0 09/91 experimental version
1.0 10/91 multi-layers network
2.0 11/91 first version of a multi-model network
2.1 01/92 adding learn_opt
2.2 02/92 debugging, add noise
2.3 03/92 debug, simulated tempering
2.5 05/92 modifying learn_opt, add SHAKER
learn_verify */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* VERSION 2.5 */
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* ------------------------------------------------------------ */
/* initialization beginning */
/* ------------------------------------------------------------ */
/* Library uses */
#include <math.h> /* for exp */
#include <stdlib.h> /* for random and malloc */
#include <time.h> /* for network init on internal clock */
#include <stdio.h> /* for printf(debugging) */
/* Every printf can be removed for transparent (silent) use */
/* Network Constants -------------------------------------- */
/* internal use */
#define NB_LAYERS_MAXI 3 /* defines the maxi nb of different layers */
#define NB_LEVELS_MAXI 3 /* defines the maxi nb of levels NB_LEV>=NB_LAY is better */
#define NB_N_LAYER_MAXI 40 /* nb maxi of neurons in a layer*/
#define NB_BACKP_MAXI 10 /* before shaker */
#define LEARN_FAIL 1000 /* test for learning failure */
#define MAX_RAND 32767.0
#define TEMPERING_OFF 0
#define TEMPERING_ON 255 /* some different tempering modes */
#define MSG_NOISE 1 /* enables an external use */
#define MSG_STD 0
#define MSG_ERR 255
/* Definition of 'networks' types ----------------------------- */
/* You can add things if you wish */
/* Per-neuron flag. */
typedef struct
{
char activity; /* non-zero if the neuron is active (inactive neurons contribute s=0) */
} FLAG_N; /* neuron flag */
/* Per-layer flags. */
typedef struct
{
char interconnection ; /* non-zero if the layer is a Hopfield (intra-connected) layer */
char bias ; /* if 0, no bias */
} FLAG_L; /* flag for layer */
/* Formal neuron: state, potential, local error, bias and the
   intra-layer (Hopfield) weights attached to it. */
typedef struct
{
float intra[NB_N_LAYER_MAXI]; /* INTRA-layer weight, bi-directional link, Hopfield type */
float sum, /* synaptic potential (weighted input sum) */
state, /* neural state=f(sum) */
loc_err, /* local error used by backpropagation */
bias; /* neural bias */
FLAG_N flag;
} NEURONS; /* formal neuron model */
/* note : high or low state */
typedef struct /* struct containing all the info for a network */
{
int nb_layers, /* HIGHEST layer index (loops use <=; there are nb_layers+1 layers) */
nb_n_layer[NB_LAYERS_MAXI], /* highest neuron index per layer (loops run n<=nb_n_layer) */
nb_levels, /* highest level index in the network */
layer_order[NB_LEVELS_MAXI]; /* which physical layer backs each logical level */
/* some layers can be used for different levels */
FLAG_L flag[NB_LAYERS_MAXI];
float err_threshold, /* error threshold that ends learning */
coefficient, /* accentuates the weight (=link) variations in the gradient */
coef_out; /* output coefficient for continuum neuron */
float beta, /* 1/thermodynamic temperature; negative beta flags stochastic neurons */
high_threshold, /* potential thresholds for short mem */
low_threshold,
weight_threshold, /* clamp bound for synaptic links */
threshold, /* switch threshold for learning and sigmoid */
noise, /* noise amplitude for the synaptic potential */
high_state, /* state bounds for neurons */
low_state;
unsigned char tempering; /* simulated tempering mode (TEMPERING_ON/OFF) */
float t_tempering_max,
t_tempering_min,
cte_tempering;
float energy; /* last computed network energy */
float inter[NB_LEVELS_MAXI][NB_N_LAYER_MAXI][NB_N_LAYER_MAXI];
/* INTER-levels coefs lev->lev+1 multi-perceptron */
NEURONS neuron[NB_N_LAYER_MAXI][NB_LAYERS_MAXI];
} NETWORKS; /* ISN'T IT BIG ? */
/* NOTE: layer = physical neuron layer */
/* level = logical neuron layer */
/* local variables */
char msg_prg=0; /* global message */
/* procedures declaration ------------------------------ */
/* NB : Have a look at the doc */
float sqr(float);
float Rnd(float);
float sigmoid(float,float,float);
float sigmoid_derivative(float,float,float);
int *list_norm(int);
int *list_alea(int,int);
float in_limits(float,float,float);
int equal(float,float);
float weight_sum(NETWORKS *,int,int); /* important */
float level_state(NETWORKS *,int); /* important */
float level_tempering(NETWORKS *,int);
float network_state(NETWORKS *,float[]);
float neuron_state(NETWORKS *,float);
float backpropagation(NETWORKS *,float[],float[]); /* important */
float learn_prototype(NETWORKS *,int[],int,float[],float[],int);
float learn_list(NETWORKS *,int[],int,float[],float[]);
int learn_opt(NETWORKS *,int[],int,float[],float[]); /* important */
int learn_norm(NETWORKS *,int[],int,float[],float[]);
int learn_test(NETWORKS *,int,long,long);
void init_alea_network(NETWORKS *);
void init_0_network(NETWORKS *);
void init_flag_network(NETWORKS *,int,int,int,int);
void init_var_network(NETWORKS *,float,float,float,float,float,float,float,float,float,float,float,float,float,float);
float sqr_sum(NETWORKS *);
float compare_output(NETWORKS *,float[]);
float learn_verify(NETWORKS *,int[],int,float[],float[]);
void print_network(NETWORKS *);
void info_struct_network(NETWORKS *);
void info_vars_network(NETWORKS *);
/* ------------------------------------------------------------ */
/* procedures definition beginning */
/* ------------------------------------------------------------ */
float sqr(float x) /* square of x */
{
    return x * x;
}
/* Returns a uniformly distributed float in [0, x].
 * Fix: the original divided by the hard-coded constant MAX_RAND (32767.0);
 * on any platform where RAND_MAX differs the result could exceed x.
 * Dividing by RAND_MAX is portable and identical on the original
 * Turbo C target, where RAND_MAX == 32767. */
float Rnd(float x)
{
    return (float)rand() / (float)RAND_MAX * x;
}
float sigmoid(float beta, float sum, float threshold) /* sigmoid function */
{ /* = transfert function */
int beta2=abs(beta);
return(1/(1+exp(-2*beta2*(sum-threshold))));
} /* usually threshold=0 */
float sigmoid_derivative(float beta, float sum, float threshold)
{ /* derivative of the sigmoid transfer function: f' = f * (1 - f) */
    float f = sigmoid(beta, sum, threshold);
    return f * (1 - f);
}
/* procedures for defining a learning pedagogy */
/* Generates a random learning order: an array of size_line+1 prototype
 * indices, each drawn uniformly in [0, nb_prototypes].
 * Fixes vs. original:
 *  - `rand()*(nb_prototypes+1)` was an int multiplication that could
 *    overflow before the division; scaling is now done in double;
 *  - the hard-coded divisor 32768.0 is replaced by RAND_MAX+1.0 so the
 *    range is correct for any platform's RAND_MAX;
 *  - the malloc result is checked (returns NULL on failure);
 *  - the K&R definition is converted to the ANSI form already used by
 *    the prototype at the top of the file.
 * The caller owns the returned array and must free() it. */
int *list_alea(int size_line, int nb_prototypes)
{
    int *list;
    int i;
    list = malloc((size_line + 1) * sizeof *list);
    if (list == NULL)
        return NULL;
    for (i = 0; i <= size_line; i++)
        list[i] = (int)((double)rand() / ((double)RAND_MAX + 1.0)
                        * (nb_prototypes + 1));
    return list;
}
/* Generates the normal (identity) learning list {0, 1, ..., nb_prototypes}.
 * Fixes vs. original: malloc checked (returns NULL on failure) and the
 * K&R definition converted to the ANSI form of its prototype.
 * The caller owns the returned array and must free() it. */
int *list_norm(int nb_prototypes)
{
    int *list;
    int i;
    list = malloc((nb_prototypes + 1) * sizeof *list);
    if (list == NULL)
        return NULL;
    for (i = 0; i <= nb_prototypes; i++)
        list[i] = i;
    return list;
}
float in_limits(float x, float high, float low)
{ /* clamps x into the closed interval [low, high] */
    if (x > high)
        return high;
    if (x < low)
        return low;
    return x;
}
/* True when var is within 1 % of ref.
 * Fix: the original tested (var < ref*1.01 && var > 0.99*ref), whose
 * bounds invert for negative ref (always false) and which never matched
 * ref == 0, even for var == 0.  Comparing the absolute deviation against
 * 1 % of |ref| is sign-safe and keeps the positive-ref behavior. */
int equal(float var, float ref)
{
    return fabs(var - ref) <= 0.01f * fabs(ref);
}
/* ------------------------------------------------------------ */
/* network state procedures */
/* ------------------------------------------------------------ */
/* Computes the synaptic potential (weighted input sum) of one neuron of
   logical level 'level', perturbs it with uniform noise, and clamps it
   into [low_threshold, high_threshold].
   Contributions (only when the neuron's activity flag is set):
     - the neuron's bias,
     - the Hopfield INTRA-layer sum, if the layer is interconnected,
     - the Perceptron INTER-level sum from the preceding level (level>=1). */
float weight_sum(NETWORKS *network,int neuron,int level)
{ /* weight sum of a neuron in a level */
int n,c,c_1;
float b,s=0;
c=(*network).layer_order[level]; /* physical layer backing this level */
if ((*network).neuron[neuron][c].flag.activity) /* active neuron ? */
{
s=(*network).neuron[neuron][c].bias;
if ((*network).flag[c].interconnection) /* Hopfield ? */
for (n=0; n<=(*network).nb_n_layer[c]; n++) /* INTRA-layer Hopfield sum */
s+=(*network).neuron[n][c].state*(*network).neuron[neuron][c].intra[n];
if (level >= 1) /* not the input level */
{
c_1=(*network).layer_order[level-1]; /* layer of the preceding level */
for (n=0; n<=(*network).nb_n_layer[c_1]; n++)
/* INTER-level Perceptron sum */
s+=(*network).neuron[n][c_1].state*(*network).inter[level-1][n][neuron];
} /* s stays 0 for an inactive neuron or a Perceptron input */
}
b=Rnd(2*(*network).noise)-(*network).noise; /* uniform noise in [-noise,+noise] */
if (s==0)
s=b/(*network).beta; /* pure noise, scaled by the temperature 1/beta */
else
s*=1+b; /* multiplicative noise on a non-zero potential */
s=in_limits(s,(*network).high_threshold,(*network).low_threshold);
return (s);
}
float neuron_state(NETWORKS *network, float s)
{ /* maps a synaptic potential s to a neural state */
    float p = sigmoid(network->beta, s, network->threshold);
    if (network->beta < 0)
    {
        /* negative beta flags a stochastic neuron: fire high with probability p */
        return (Rnd(1) < p) ? network->high_state : network->low_state;
    }
    /* continuum neuron: state interpolated between low_state and high_state */
    return (network->high_state - network->low_state) * p + network->low_state;
}
/* if T<0 it is a stochastic neuron */
/* if T -> 0 the neuron becomes determinist */
/* Updates the state of one logical level using a SYNCHRONOUS update: all
   new states are computed from the old states, buffered in state[][],
   then committed at once.  Returns the level energy
   -0.5 * sum(state_i * potential_i), accumulated only for level != 0.
   The input level (0) is left unmodified unless its layer is a Hopfield
   (interconnected) layer. */
float level_state(NETWORKS *network,int level)
{ /* compute the state of a logic layer */
int layer,neuron;
float s,state2,
energy=0; /* level energy */
float state[NB_N_LAYER_MAXI][NB_LAYERS_MAXI]; /* buffer for the parallel update */
layer=(*network).layer_order[level];
for (neuron=0; neuron<=(*network).nb_n_layer[layer]; neuron++)
{
s=weight_sum(network,neuron,level); /* synaptic potential */
state2=neuron_state(network,s);
state[neuron][layer]=state2; /* buffered: don't disturb the other neurons yet */
if (level != 0)
{
energy+=state2*s; /* energy=sum(i<>j)[-.5*Jij.Etat(i).Etat(j) ] */
(*network).neuron[neuron][layer].sum=s; /* saving results, parallel calculation */
}
}
if ((level>0) || ((*network).flag[layer].interconnection))
/* input unmodified except for Hopfield */
for (neuron=0; neuron<=(*network).nb_n_layer[layer]; neuron++)
/* commit the buffered states */
(*network).neuron[neuron][layer].state=state[neuron][layer];
return(-0.5*energy);
}
float network_state(NETWORKS *network, float input[])
{ /* clamps input on level 0, then propagates level by level towards the
     output; stores and returns the total network energy */
    int n, lev;
    int in_layer = network->layer_order[0];
    float e = 0;
    /* impose the input pattern on the first level's layer */
    for (n = 0; n <= network->nb_n_layer[in_layer]; n++)
        network->neuron[n][in_layer].state = input[n];
    /* propagate forward, summing each level's energy */
    for (lev = 0; lev <= network->nb_levels; lev++)
        e += level_state(network, lev);
    network->energy = e;
    return e;
}
/* ------------------------------------------------------------ */
/* learning procedures */
/* ------------------------------------------------------------ */
/* One pass of gradient backpropagation for a single (input, output) pair:
   runs the network forward, then walks the levels from output to input,
   computing each neuron's local error and updating biases, intra-layer
   (Hopfield) weights and inter-level (Perceptron) weights, each clamped
   to +/- weight_threshold.  Returns the half squared error of the output
   layer against 'output'. */
float backpropagation(NETWORKS *network,float input[],float output[]) /* simple learning procedure */
{ /* gradient backpropagation algorithm */
int neuron,
layer,
layer1, /* next layer */
layer_1, /* preceding layer */
level,
n;
float error,
var,
err_tot=0; /* global network error */
network_state(network,input); /* forward pass first */
for (level = (*network).nb_levels; level >= 0; level--)
{ /* progressing towards input */
layer=(*network).layer_order[level];
if (level > 0) /* except input */
layer_1=(*network).layer_order[level-1];
if (level < (*network).nb_levels) /* except output */
layer1=(*network).layer_order[level+1];
/* error calculation */
for (neuron = 0; neuron <= (*network).nb_n_layer[layer]; neuron++)
{
error=0;
if (level < (*network).nb_levels)
{ /* hidden level: error backpropagated from the next layer */
for (n = 0; n <= (*network).nb_n_layer[layer1]; n++)
/* on next layer : inter layer Perceptron */
error+=(*network).neuron[n][layer1].loc_err*(*network).inter[level][neuron][n];
}
else
{ /* output layer: direct difference with the target */
error=(*network).neuron[neuron][layer].state-output[neuron];
err_tot+=0.5*error*error;
} /* rough error */
error*=sigmoid_derivative((*network).beta,(*network).neuron[neuron][layer].sum,(*network).threshold);
(*network).neuron[neuron][layer].loc_err=error; /* local partial error (non Hopfield) */
}
for (neuron=0; neuron<=(*network).nb_n_layer[layer]; neuron++)
{
error=(*network).neuron[neuron][layer].loc_err;
if ((*network).flag[layer].interconnection)
{ /* Hopfield layer: fold in the intra-layer error contributions */
var=sigmoid_derivative((*network).beta,(*network).neuron[neuron][layer].sum,(*network).threshold);
for (n=0; n<=(*network).nb_n_layer[layer]; n++)
error+=(*network).neuron[n][layer].loc_err*(*network).neuron[neuron][layer].intra[n]*var;
(*network).neuron[neuron][layer].loc_err=error; /* local error */
}
/* modifying weights */
error*=-(*network).coefficient; /* accentuates the weight variation */
if ((*network).flag[layer].bias)
/* else bias remains at 0 */
{
var=(*network).neuron[neuron][layer].bias+error;
(*network).neuron[neuron][layer].bias=in_limits(var,(*network).weight_threshold,-(*network).weight_threshold);
}
if ((*network).flag[layer].interconnection)
for (n=0; n<=(*network).nb_n_layer[layer]; n++)
/* update intra-layer (Hopfield) weights */
{
var=(*network).neuron[neuron][layer].intra[n]+error*(*network).neuron[n][layer].state;
(*network).neuron[neuron][layer].intra[n]=in_limits(var,(*network).weight_threshold,-(*network).weight_threshold);
}
if (level > 0)
for (n=0; n<=(*network).nb_n_layer[layer_1]; n++)
/* update inter-level (Perceptron) weights */
{
var=(*network).inter[level-1][n][neuron]+error*(*network).neuron[n][layer_1].state;
(*network).inter[level-1][n][neuron]=in_limits(var,(*network).weight_threshold,-(*network).weight_threshold);
}
}
}
return(err_tot);
}
/* Learns prototype number list_prototypes[k] by running backpropagation
   nb_proto times.  Returns the error of the last pass.
   If msg_prg==MSG_NOISE on entry (the "SHAKER"), the network noise is
   doubled for this prototype; the caller (learn_opt) restores it. */
float learn_prototype(NETWORKS *network,int list_prototypes[],int k,float inputs[],float outputs[],int nb_proto)
{ /* learns a prototype to the net nb_proto times */
int j,NCin,NCout,nb_back=0,noise=0;
/* NOTE(review): 'noise' is an int but receives the float (*network).noise
   below — fractional noise values truncate to 0 and silently disable the
   tempering branch; confirm whether this is intended */
float err_tot,t,t0; /* output error */
float input[NB_N_LAYER_MAXI],
output[NB_N_LAYER_MAXI];
/* neurons nb in 1st and last level */
NCin=(*network).nb_n_layer[(*network).layer_order[0]];
NCout=(*network).nb_n_layer[(*network).layer_order[(*network).nb_levels]];
for (j=0; j<=NCin; j++) /* unpack the prototype: (input,output) pair */
input[j]=inputs[list_prototypes[k]*(NCin+1)+j];
for (j=0; j<=NCout; j++)
output[j]=outputs[list_prototypes[k]*(NCout+1)+j];
t0=1/(*network).beta; /* nominal temperature */
if (msg_prg==MSG_NOISE)
{
printf(" with SHAKER ");
noise=(*network).noise;
(*network).noise*=2; /* doubled noise; restored by the caller */
msg_prg=MSG_STD;
}
do
{ /* possibly simulated tempering */
t=-1;
if ((noise>0) && ((*network).tempering != TEMPERING_OFF)) /* thermic noise */
{
t=(*network).t_tempering_min+(*network).t_tempering_max*exp(-(float)nb_back/(*network).cte_tempering);
printf(".");
}
if (t<t0) t=t0;
/* NOTE(review): t is computed and clamped but never written back into
   (*network).beta, so the tempering schedule appears to have no effect
   on the backpropagation below — verify against the version history */
nb_back++;
err_tot=backpropagation(network,input,output);
}
while (nb_back<nb_proto);
(*network).beta=1/t0; /* restores beta to its entry value (t0 = 1/beta) */
return(err_tot); /* sends back prototype error */
}
float learn_list(NETWORKS *network, int list_prototypes[], int size_list, float inputs[], float outputs[])
{ /* supervised learning: one backpropagation pass over every prototype of
     the list; returns the mean error per prototype.
     list_prototypes[] : prototype order to learn
     size_list         : highest index in the list
     inputs/outputs    : packed prototype data */
    int idx;
    float total = 0;
    for (idx = 0; idx <= size_list; idx++)
        total += learn_prototype(network, list_prototypes, idx, inputs, outputs, 1);
    return total / (size_list + 1);
}
/* Optimized supervised learning: each prototype carries a repetition
   priority that rises when its error is above the running mean and falls
   when below; when progress stalls the noise is doubled ("Global Shaker").
   Requires the normal list *list_norm(..)={0,1,..,n}.
   Returns the number of learning turns, negated on failure.
   Fixes vs. original:
     - priority_proto was allocated but never freed (memory leak);
     - the malloc result was not checked (returns -1 on failure);
     - failure was detected with i==LEARN_FAIL, which is almost never hit
       because i advances by priority sums > 1; failure is now detected by
       the residual error once the iteration budget is exhausted;
     - braces added around the nested if/else to make the (correct but
       ambiguous-looking) dangling-else binding explicit. */
int learn_opt(NETWORKS *network,int list_prototypes[],int size_list,float inputs[],float outputs[])
{
    int k, i = 0;
    float error_mean,
          error_m, error_m0,
          err_max, err_max0,
          error,
          noise = (*network).noise; /* saved to restore after a global shake */
    int *priority_proto;
    priority_proto = malloc(sizeof *priority_proto * (size_list + 1));
    if (priority_proto == NULL)
        return -1; /* allocation failure counts as a learning failure */
    error_mean = (*network).err_threshold * 4;
    error_m0 = 0;
    err_max0 = 0;
    for (k = 0; k <= size_list; k++) /* init learning priority */
        priority_proto[k] = 1;
    do
    {
        error_m = 0;
        err_max = 0;
        printf("\nI learn all prototypes especially those I don't know well.\n");
        for (k = 0; k <= size_list; k++) /* look for the unknown prototype and the max error */
        {
            error = learn_prototype(network, list_prototypes, k, inputs, outputs, priority_proto[k]);
            error_m += error;
            if (err_max < error) err_max = error;
            i += priority_proto[k];
            printf("\nPrototype No: %3i. Priority %3i. Error: %5.2f. Energy %4.1f.", k, priority_proto[k], error, (*network).energy);
            if (error >= error_mean * 1.05)
            { /* badly known: raise its priority, or shake if already maxed out */
                if (priority_proto[k] < NB_BACKP_MAXI)
                    priority_proto[k]++;
                else
                {
                    msg_prg = MSG_NOISE;
                    priority_proto[k] /= 2;
                }
            }
            else if ((error <= error_mean * .95) && (priority_proto[k] > 1))
                priority_proto[k]--; /* well known: lower its priority */
        }
        error_mean = error_m / (size_list + 1);
        printf("\nTurn: %3i. Mean error %5.2f. Max error %5.2f.", i, error_mean, err_max);
        if ((equal(error_m, error_m0)) && (equal(err_max, err_max0)))
        { /* no progress since the last turn: double the noise */
            (*network).noise *= 2;
            printf("\nGlobal Shaker");
        }
        else
        { /* progress: remember the errors, restore the nominal noise */
            error_m0 = error_m;
            err_max0 = err_max;
            (*network).noise = noise;
        }
    }
    while ((err_max >= (*network).err_threshold) && (i <= LEARN_FAIL));
    free(priority_proto); /* fix: was leaked in the original */
    if (err_max >= (*network).err_threshold)
        i = -i; /* budget exhausted before convergence: flag failure */
    return (i); /* number of learning turns, < 0 on failure */
}
int learn_norm(NETWORKS *network, int list_lots[], int size_list, float inputs[], float outputs[])
{ /* repeats full-list learning passes until the mean error drops to the
     threshold; returns the number of passes */
    int passes = 0;
    float mean_err;
    do
    {
        mean_err = learn_list(network, list_lots, size_list, inputs, outputs);
        printf("\nError: %4.3f", mean_err);
        passes++;
    }
    while (mean_err > network->err_threshold);
    return passes;
}
/* Checks that the inputs/outputs buffers are big enough to hold all the
   prototypes.  Returns 1 if valid, 0 otherwise.
   Fix: nb_n_layer[] stores the HIGHEST neuron index (every loop in this
   module runs n<=nb_n_layer, and learn_prototype indexes with (NCin+1)),
   so a level holds nb_n_layer+1 floats per prototype; the original check
   omitted the +1 and could approve undersized buffers. */
int learn_test(NETWORKS *network, int nb_prototypes, long sizein, long sizeout)
{
    long need_in = (long)((*network).nb_n_layer[(*network).layer_order[0]] + 1)
                   * (nb_prototypes + 1);
    long need_out = (long)((*network).nb_n_layer[(*network).layer_order[(*network).nb_levels]] + 1)
                    * (nb_prototypes + 1);
    if (need_in > (long)(sizein / (long)sizeof(float))
        || need_out > (long)(sizeout / (long)sizeof(float)))
        return 0;
    return 1;
}
/* ------------------------------------------------------------ */
/* Initialization of networks */
/* ------------------------------------------------------------ */
/* Initializes the network with random weights: biases and intra-layer
   weights in [-0.25, 0.25] (where the relevant flag is set, else 0),
   potentials in [-2, 2] with the matching states, and inter-level weights
   in [-0.25, 0.25].  The exact order of the Rnd() calls defines the
   random stream, so it must not be reordered. */
void init_alea_network(NETWORKS *network)
{ /* initialize a random network */
int neuron,
layer,
level,
n,
lay_sup,nb_lev,nb_neuron,nb_neuron_sup;
float s;
srand((unsigned)clock()); /* used because it is portable */
/* seed the random generator with the clock
   NOTE(review): clock() is often 0 this early in a process on some
   platforms, giving the same seed every run — confirm acceptable */
for (layer=0; layer<=(*network).nb_layers; layer++)
for (neuron=0; neuron<=(*network).nb_n_layer[layer]; neuron++)
{
if ((*network).flag[layer].bias)
(*network).neuron[neuron][layer].bias=Rnd(0.5)-0.25;
else
(*network).neuron[neuron][layer].bias=0;
s=Rnd(4)-2; /* random potential in [-2, 2] */
(*network).neuron[neuron][layer].sum=s;
(*network).neuron[neuron][layer].state=neuron_state(network,s);
(*network).neuron[neuron][layer].loc_err=0;
for (n=0; n<=(*network).nb_n_layer[layer]; n++)
if ((*network).flag[layer].interconnection)
(*network).neuron[neuron][layer].intra[n]=Rnd(0.5)-0.25;
else
(*network).neuron[neuron][layer].intra[n]=0;
}
nb_lev=(*network).nb_levels-1; /* nb of levels except the last */
for (level=0; level<=nb_lev; level++)
{
layer=(*network).layer_order[level]; /* layer No level */
nb_neuron=(*network).nb_n_layer[layer]; /* nb of neuron in the level */
lay_sup=(*network).layer_order[level+1]; /* next level */
nb_neuron_sup=(*network).nb_n_layer[lay_sup];/* nb of neuron in lay_sup */
for (neuron=0; neuron<=nb_neuron;neuron++)
for (n=0; n<=nb_neuron_sup; n++)
(*network).inter[level][neuron][n]=Rnd(0.5)-0.25;
}
}
void init_0_network(NETWORKS *network)
{ /* reset every potential, state, local error, intra-layer and
     inter-level weight of the network to zero */
    int n, m, lay, lev, lay_up;
    for (lay = 0; lay <= network->nb_layers; lay++)
        for (n = 0; n <= network->nb_n_layer[lay]; n++)
        {
            network->neuron[n][lay].sum = 0;
            network->neuron[n][lay].state = 0;
            network->neuron[n][lay].loc_err = 0;
            for (m = 0; m <= network->nb_n_layer[lay]; m++)
                network->neuron[n][lay].intra[m] = 0;
        }
    for (lev = 0; lev < network->nb_levels; lev++) /* all levels except the last */
    {
        lay = network->layer_order[lev];
        lay_up = network->layer_order[lev + 1]; /* layer of the next level */
        for (n = 0; n <= network->nb_n_layer[lay]; n++)
            for (m = 0; m <= network->nb_n_layer[lay_up]; m++)
                network->inter[lev][n][m] = 0;
    }
}
void init_flag_network(NETWORKS *network, int activity, int interconnection, int bias, int tempering)
{ /* set the tempering mode, the per-layer flags, and the per-neuron
     activity flag; silences inactive neurons and clears biases if disabled */
    int n, lay;
    network->tempering = tempering;
    for (lay = 0; lay <= network->nb_layers; lay++)
    {
        network->flag[lay].interconnection = interconnection;
        network->flag[lay].bias = bias;
    }
    for (lay = 0; lay <= network->nb_layers; lay++)
        for (n = 0; n <= network->nb_n_layer[lay]; n++)
        {
            network->neuron[n][lay].flag.activity = activity;
            if (activity == 0)
                network->neuron[n][lay].state = 0; /* inactive neurons are silenced */
            if (bias == 0)
                network->neuron[n][lay].bias = 0;
        }
}
void init_var_network(NETWORKS *network,
                      float err_threshold,
                      float beta,
                      float coefficient,
                      float coef_out,
                      float high_threshold,
                      float low_threshold,
                      float weight_threshold,
                      float threshold,
                      float noise,
                      float high_state,
                      float low_state,
                      float t_max,
                      float t_min,
                      float cte)
{ /* loads every tunable parameter of the network and resets its energy */
    /* the error threshold scales with the size of the output level */
    network->err_threshold = err_threshold * network->nb_n_layer[network->layer_order[network->nb_levels]];
    network->beta = beta;
    network->coefficient = coefficient;
    network->coef_out = coef_out;
    /* NOTE(review): beta must be non-zero here, otherwise the two
       threshold computations below divide by zero — confirm at call sites */
    network->high_threshold = threshold + high_threshold / 2 / beta;
    network->low_threshold = threshold + low_threshold / 2 / beta;
    network->weight_threshold = weight_threshold;
    network->threshold = threshold;
    network->noise = noise;
    network->high_state = high_state;
    network->low_state = low_state;
    network->t_tempering_max = t_max;
    network->t_tempering_min = t_min;
    network->cte_tempering = cte;
    network->energy = 0;
}
/* ------------------------------------------------------------ */
/* infos */
/* ------------------------------------------------------------ */
float compare_output(NETWORKS *network, float output[])
{ /* half squared-error between the output layer's states and the target */
    int n;
    int out_layer = network->layer_order[network->nb_levels];
    float total = 0;
    for (n = 0; n <= network->nb_n_layer[out_layer]; n++)
    {
        float d = network->neuron[n][out_layer].state - output[n];
        total += 0.5 * d * d;
    }
    return total;
}
float learn_verify(NETWORKS *network, int list_prototypes[], int size_list, float inputs[], float outputs[])
{ /* replays every prototype through the net, prints each final error, and
     returns the worst one */
    int k, j, n_in, n_out;
    float worst = 0, err;
    float input[NB_N_LAYER_MAXI],
          output[NB_N_LAYER_MAXI];
    /* highest neuron index in the first and last level */
    n_in = network->nb_n_layer[network->layer_order[0]];
    n_out = network->nb_n_layer[network->layer_order[network->nb_levels]];
    for (k = 0; k <= size_list; k++)
    {
        for (j = 0; j <= n_in; j++) /* unpack the k-th prototype */
            input[j] = inputs[list_prototypes[k] * (n_in + 1) + j];
        for (j = 0; j <= n_out; j++)
            output[j] = outputs[list_prototypes[k] * (n_out + 1) + j];
        network_state(network, input);
        err = compare_output(network, output);
        printf("\nFinal error lot %2i : %4.2f (%%)", k, err / n_out * 100);
        if (worst < err)
            worst = err;
    }
    return worst;
}
float sqr_sum(NETWORKS *network)
{ /* sum of the squares of every bias, intra-layer and inter-level weight */
    int n, m, lay, lev, lay_prev;
    float total = 0;
    for (lay = 0; lay <= network->nb_layers; lay++)
        for (n = 0; n <= network->nb_n_layer[lay]; n++)
        {
            total += sqr(network->neuron[n][lay].bias);
            for (m = 0; m <= network->nb_n_layer[lay]; m++)
                total += sqr(network->neuron[n][lay].intra[m]);
        }
    for (lev = 1; lev <= network->nb_levels; lev++)
    {
        lay = network->layer_order[lev];
        lay_prev = network->layer_order[lev - 1];
        for (n = 0; n <= network->nb_n_layer[lay]; n++)
            for (m = 0; m <= network->nb_n_layer[lay_prev]; m++)
                total += sqr(network->inter[lev - 1][m][n]);
    }
    return total;
}
/* Prints one row of per-neuron values for a layer; 'what' selects the
   field: 0=sum, 1=state, 2=loc_err, 3=bias.  Helper extracted to remove
   the four near-identical loops of the original. */
static void print_layer_row(NETWORKS *network, int layer, int what)
{
    int neuron;
    float v;
    for (neuron = 0; neuron <= (*network).nb_n_layer[layer]; neuron++)
    {
        switch (what)
        {
        case 0:  v = (*network).neuron[neuron][layer].sum;     break;
        case 1:  v = (*network).neuron[neuron][layer].state;   break;
        case 2:  v = (*network).neuron[neuron][layer].loc_err; break;
        default: v = (*network).neuron[neuron][layer].bias;    break;
        }
        printf(" %4.2f ", v);
    }
}
/* Dumps the sum, state, loc_err and bias of every neuron, level by level,
   then waits for RETURN.  Output is identical to the original version. */
void print_network(NETWORKS *network)
{
    int level, layer;
    printf("\nNetwork state: sum, state, loc_err, bias");
    for (level = 0; level <= (*network).nb_levels; level++)
    {
        layer = (*network).layer_order[level];
        printf("\n----------------------------------------------------\n");
        print_layer_row(network, layer, 0);
        printf("\n");
        print_layer_row(network, layer, 1);
        printf("\n");
        print_layer_row(network, layer, 2);
        printf("\n");
        print_layer_row(network, layer, 3);
    }
    printf("\n----------------------------------------------------\n\n");
    printf("\n <RETURN>");
    getchar();
}
void info_struct_network(NETWORKS *network)
{ /* prints the static structure of the network: memory footprint, layer
     and level counts, per-layer size and model, and the level->layer map;
     waits for RETURN */
    int i;
    printf("\n----------------- INFO STRUCTURE NETWORK -------------------\n\n");
    printf("Memory size: %7.1f o\n", (float)sizeof(NETWORKS));
    printf("Number of layers: %i\n", network->nb_layers + 1);
    printf("Number of levels: %i\n", network->nb_levels + 1);
    printf("\n");
    for (i = 0; i <= network->nb_layers; i++)
    {
        printf(" Layer %2i, %3i neurons. Model ", i, network->nb_n_layer[i] + 1);
        if (network->flag[i].interconnection)
            printf("Hopfield.\n");
        else
            printf("Perceptron.\n");
    }
    printf("\n");
    printf("The levels of the network are the layers:\n -> ");
    for (i = 0; i <= network->nb_levels; i++)
        printf(" %i ", network->layer_order[i]);
    printf("\n\n");
    printf("\n <RETURN>");
    getchar();
}
/* Prints the network's tunable parameters and link statistics: mean and
   sum-of-squares of the intra-layer links/biases (accumulated in s1/sqr1
   over i terms) and of the inter-level links (s2/sqr2 over j terms),
   then the energy; waits for RETURN.  i >= 1 whenever a layer exists
   (every neuron contributes its bias), and the j-division is guarded by
   the nb_levels > 0 test, so the means are well defined. */
void info_vars_network(NETWORKS *network)
{
float s,
s1=0, /* sum of biases + intra-layer weights */
s2=0, /* sum of inter-level weights */
sqr1=0, /* sum of squares, intra side */
sqr2=0; /* sum of squares, inter side */
int i=0,j=0,neuron,layer,n,level,layer_1; /* i,j count the accumulated terms */
printf("\n---------------- INFO VARIABLES NETWORK -------------------\n");
printf("\n");
printf("Temperature: %5.2f\n",1/(*network).beta);
printf("Learning improving coefficient: %4.2f\n",(*network).coefficient);
printf("Threshold error on learning end: %4.3f (%3.1f %%)\n",(*network).err_threshold,
(*network).err_threshold/(*network).nb_n_layer[(*network).layer_order[(*network).nb_levels]]*100);
printf("High neuron state: %4.2f\n",(*network).high_state);
printf("Low neuron state : %4.2f\n",(*network).low_state);
printf("High threshold neuron potential: %4.2f\n",(*network).high_threshold);
printf("Low threshold neuron potential : %4.2f\n",(*network).low_threshold);
printf("Link threshold : %4.2f\n",(*network).weight_threshold);
printf("Bascule neuron threshold: %4.2f\n",(*network).threshold);
printf("Mean intra-layer link (Hopfield):");
for (layer=0; layer<=(*network).nb_layers; layer++)
for (neuron=0; neuron<=(*network).nb_n_layer[layer]; neuron++)
{
s=(*network).neuron[neuron][layer].bias; /* biases count in the intra statistics */
s1+=s;
sqr1+=sqr(s);
i++;
if ((*network).flag[layer].interconnection)
for (n=0; n<=(*network).nb_n_layer[layer]; n++)
{
s=(*network).neuron[neuron][layer].intra[n];
s1+=s;
sqr1+=sqr(s);
i++;
}
}
printf(" %5.3f\n\n",s1/i);
if ((*network).nb_levels>0)
{
printf("Mean inter-layer link (Perceptron):");
for (level=1; level<=(*network).nb_levels; level++)
{
layer=(*network).layer_order[level];
layer_1=(*network).layer_order[level-1];
for (neuron=0; neuron<=(*network).nb_n_layer[layer]; neuron++)
for (n=0; n<=(*network).nb_n_layer[layer_1]; n++)
{
s=(*network).inter[level-1][n][neuron];
s2+=s;
sqr2+=sqr(s);
j++;
}
}
printf("%5.3f\n\n",s2/j);
}
printf("Mean global link: %5.3f\n",(s1+s2)/(i+j));
printf("Sum of intra-layer link square: %5.3f\n",sqr1);
printf("Sum of inter-layer link square: %5.3f\n",sqr2);
printf("\n");
printf("Network energy: %5.3f\n",(*network).energy);
printf("\n\n");
printf("\n <RETURN>");
getchar();
}
/* ------------------------------------------------------------ */
/* END of module */
/* ------------------------------------------------------------ */