ANN toolbox

ANN toolbox Commit Details

Date: 2011-11-24 12:54:24 (6 years 7 months ago)
Author: Allan Cornet
Branch: master
Commit: 4ba827c4053aa1e3db69226ddf65616e2f105768
Parents: b775b1431e23029420ac078e785309f40b1a925d
Message: update 0.4.2.5 for Scilab 5.4

Changes:
D ANN_toolbox.iss
M builder.sce (3 diffs)
M changelog.txt (1 diff)
M demos/ANN.dem.gateway.sce (1 diff)
M demos/enc838_m.sce (1 diff)
M demos/enc848_m_nb.sce (1 diff)
M demos/enc848_ssab.sce (1 diff)
M demos/enc858_ssab_nb.sce (1 diff)
M demos/encoder.sce (1 diff)
M demos/encoder_cc.sce (1 diff)
M demos/encoder_m.sce (1 diff)
M demos/encoder_m_nb.sce (1 diff)
M demos/encoder_nb.sce (1 diff)
M etc/ANN_toolbox.quit (1 diff)
M etc/ANN_toolbox.start (4 diffs)
M help/builder_help.sce (1 diff)
M help/en_US/ANN.xml (2 diffs)
M help/en_US/ANN_FF.xml (15 diffs)
M help/en_US/ann_FF_ConjugGrad.xml (1 diff)
M help/en_US/ann_FF_Hess.xml (1 diff)
M help/en_US/ann_FF_INT.xml (1 diff)
M help/en_US/ann_FF_Jacobian.xml (1 diff)
M help/en_US/ann_FF_Jacobian_BP.xml (1 diff)
M help/en_US/ann_FF_Mom_online.xml (1 diff)
M help/en_US/ann_FF_Mom_online_nb.xml (1 diff)
M help/en_US/ann_FF_SSAB_online.xml (1 diff)
M help/en_US/ann_FF_SSAB_online_nb.xml (1 diff)
M help/en_US/ann_FF_Std_online.xml (1 diff)
M help/en_US/ann_FF_Std_online_nb.xml (1 diff)
M help/en_US/ann_FF_VHess.xml (1 diff)
M help/en_US/ann_FF_grad.xml (1 diff)
M help/en_US/ann_FF_grad_BP.xml (1 diff)
M help/en_US/ann_FF_grad_BP_nb.xml (1 diff)
M help/en_US/ann_FF_grad_nb.xml (1 diff)
M help/en_US/ann_FF_init.xml (2 diffs)
M help/en_US/ann_FF_init_nb.xml (2 diffs)
M help/en_US/ann_FF_run.xml (1 diff)
M help/en_US/ann_FF_run_nb.xml (1 diff)
M help/en_US/ann_d_log_activ.xml (2 diffs)
M help/en_US/ann_d_sum_of_sqr.xml (1 diff)
M help/en_US/ann_log_activ.xml (1 diff)
M help/en_US/ann_pat_shuffle.xml (1 diff)
M help/en_US/ann_sum_of_sqr.xml (1 diff)
M license.txt
M macros/buildmacros.sce (1 diff)
M macros/cleanmacros.sce (1 diff)
M readme.txt (1 diff)

File differences

builder.sce
11
22
3
3
44
55
66
77
88
9
10
911
1012
1113
......
1618
1719
1820
19
21
2022
2123
22
23
24
24
25
26
2527
2628
2729
2830
2931
30
3132
3233
3334
......
4041
4142
4243
43
44
45
46
47
4448
45
46
47
// =============================================================================
// Copyright INRIA 2008
// Copyright DIGITEO 2010
// Copyright DIGITEO 2010 - 2011
// Allan CORNET
// =============================================================================
mode(-1);
lines(0);
function builder_main()
TOOLBOX_NAME = "ANN_toolbox";
TOOLBOX_TITLE = "ANN toolbox";
toolbox_dir = get_absolute_file_path("builder.sce");
try
v = getversion("scilab");
catch
error(gettext("Scilab 5.3 or more is required."));
error(gettext("Scilab 5.4 or more is required."));
end
if v(2) < 3 then
// new API in scilab 5.3
error(gettext('Scilab 5.3 or more is required.'));
if v(2) < 4 then
// new API in scilab 5.4
error(gettext('Scilab 5.4 or more is required.'));
end
clear v;
// Check modules_manager module availability
// =============================================================================
if ~isdef('tbx_build_loader') then
error(msprintf(gettext('%s module not installed."), 'modules_manager'));
end
tbx_build_loader(TOOLBOX_NAME, toolbox_dir);
tbx_build_cleaner(TOOLBOX_NAME, toolbox_dir);
// Clean variables
endfunction
// =============================================================================
builder_main()
clear builder_main;
// =============================================================================
clear toolbox_dir TOOLBOX_NAME TOOLBOX_TITLE;
changelog.txt
22
33
44
5
6
7
8
9
510
611
712
==========================
=====================================================================
From 0.4.2.4 -> 0.4.2.5 :
- compatibility with Scilab 5.4.0
(Allan CORNET , DIGITEO , 2011)
=====================================================================
From 0.4.2.3 -> 0.4.2.4 :
- compatibility with Scilab 5.3.0
demos/ANN.dem.gateway.sce
11
22
3
34
45
6
7
58
69
7
8
9
10
11
12
13
14
15
10
11
12
13
14
15
16
17
18
1619
1720
1821
22
23
24
25
1926
// ====================================================================
// Copyright INRIA 2008
// Copyright DIGITEO 2011
// Allan CORNET
// ====================================================================
function subdemolist = demo_ANN_gw()
demopath = get_absolute_file_path("ANN.dem.gateway.sce");
subdemolist = [ "encoder 4-3-4 on ANN without biases", "encoder_nb.sce" ; ..
"tight encoder 4-2-4 on ANN with biases", "encoder.sce" ; ..
"encoder 4-3-4 on ANN without biases compare with encoder_nb.sce", "encoder_m_nb.sce" ; ..
"tight encoder 4-2-4 on ANN with biases compare with encoder.sce", "encoder_m.sce" ; ..
"encoder 8-4-8 on ANN without biases", "enc848_m_nb.sce" ; ..
"encoder 8-3-8 on ANN with biases", "enc838_m.sce" ; ..
"encoder 8-5-8 on ANN without biases", "enc858_ssab_nb.sce" ; ..
"encoder 8-4-8 on ANN with biases", "enc848_ssab.sce" ; ..
"tight encoder 4-2-4 on ANN with biases uses a mixed standard/conjugate gradients method", "encoder_cc.sce" ..
subdemolist = [ "encoder 4-3-4 on ANN without biases", "encoder_nb.sce" ; ..
"tight encoder 4-2-4 on ANN with biases", "encoder.sce" ; ..
"encoder 4-3-4 on ANN without biases compare with encoder_nb.sce", "encoder_m_nb.sce" ; ..
"tight encoder 4-2-4 on ANN with biases compare with encoder.sce", "encoder_m.sce" ; ..
"encoder 8-4-8 on ANN without biases", "enc848_m_nb.sce" ; ..
"encoder 8-3-8 on ANN with biases", "enc838_m.sce" ; ..
"encoder 8-5-8 on ANN without biases", "enc858_ssab_nb.sce" ; ..
"encoder 8-4-8 on ANN with biases", "enc848_ssab.sce" ; ..
"tight encoder 4-2-4 on ANN with biases uses a mixed standard/conjugate gradients method", "encoder_cc.sce" ..
];
subdemolist(:,2) = demopath + subdemolist(:,2);
endfunction
// ====================================================================
subdemolist = demo_ANN_gw();
clear demo_ANN_gw;
// ====================================================================
demos/enc838_m.sce
33
44
55
6
76
87
98
// on a backpropagation ANN with biases and momentum
// ==================================================
FILENAMEDEM = "enc838_m";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/enc848_m_nb.sce
44
55
66
7
87
98
109
// (Note that the tight 8-4-8 encoder will not work without biases)
// ==================================================
FILENAMEDEM = "enc848_m_nb";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/enc848_ssab.sce
55
66
77
8
98
109
1110
// (The 8-4-8 encoder have proven very difficult to train on SuperSAB)
// ==================================================
FILENAMEDEM = "enc848_ssab";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/enc858_ssab_nb.sce
55
66
77
8
98
109
1110
// (The 8-4-8 encoder have proven very difficult to train on SuperSAB)
// ==================================================
FILENAMEDEM = "enc858_ssab_nb";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/encoder.sce
22
33
44
5
65
76
87
// Tight 4-2-4 encoder on a backpropagation ANN
// ==================================================
FILENAMEDEM = "encoder";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/encoder_cc.sce
22
33
44
5
65
76
87
// Tight 4-2-4 encoder using a mixed standard/conjugate gradients algorithm
// ==================================================
FILENAMEDEM = "encoder_cc";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/encoder_m.sce
33
44
55
6
76
87
98
// on a backpropagation ANN with biases and momentum
// ==================================================
FILENAMEDEM = "encoder_m";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/encoder_m_nb.sce
44
55
66
7
87
98
109
// (Note that the tight 4-2-4 encoder will not work without biases)
// ==================================================
FILENAMEDEM = "encoder_m_nb";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
demos/encoder_nb.sce
33
44
55
6
76
87
98
// (Note that the tight 4-2-4 encoder will not work without biases)
// ==================================================
FILENAMEDEM = "encoder_nb";
lines(0);
scepath = get_absolute_file_path(FILENAMEDEM+".sce");
exec(scepath+FILENAMEDEM+".sci",1);
clear scepath;
etc/ANN_toolbox.quit
11
22
33
4
45
// ====================================================================
// Allan CORNET
// Copyright INRIA 2008
// Copyright DIGITEO 2011
// ====================================================================
etc/ANN_toolbox.start
11
22
3
3
44
55
6
67
7
8
89
910
10
11
1112
1213
1314
......
2021
2122
2223
23
24
2524
2625
2726
......
2928
3029
3130
32
33
31
3432
35
3633
3734
3835
......
4037
4138
4239
43
44
40
4541
42
43
44
45
4646
47
48
// =============================================================================
// Allan CORNET
// Copyright DIGITEO 2010
// Copyright DIGITEO 2010 - 2011
// Copyright INRIA 2008
// =============================================================================
function ANN_toolboxlib = startModule()
mprintf("Start ANN Toolbox 0.4.2.4\n");
mprintf("Start ANN Toolbox 0.4.2.5\n");
if isdef("ANN_toolboxlib") then
warning("ANN Toolbox 0.4.2.4 library is already loaded");
warning("ANN Toolbox 0.4.2.5 library is already loaded");
return;
end
mprintf("\tLoad macros\n");
pathmacros = pathconvert( root_tlbx ) + "macros" + filesep();
ANN_toolboxlib = lib(pathmacros);
clear pathmacros;
// Load and add help chapter
// =============================================================================
mprintf("\tLoad help\n");
path_addchapter = pathconvert(root_tlbx+"/jar");
if ( isdir(path_addchapter) <> [] ) then
add_help_chapter("ANN Toolbox 0.4.2.4", path_addchapter, %F);
clear add_help_chapter;
add_help_chapter("ANN Toolbox 0.4.2.5", path_addchapter, %F);
end
clear path_addchapter;
end
// Load demos
if or(getscilabmode() == ["NW";"STD"]) then
mprintf("\tLoad demos\n");
pathdemos = pathconvert(root_tlbx+"/demos/ANN.dem.gateway.sce",%F,%T);
add_demo("ANN Toolbox 0.4.2.4", pathdemos);
clear pathdemos add_demo;
add_demo("ANN Toolbox 0.4.2.5", pathdemos);
end
// =============================================================================
ANN_toolboxlib = startModule();
clear startModule;
// =============================================================================
clear root_tlbx;
clear etc_tlbx;
help/builder_help.sce
11
2
2
3
34
45
5
66
77
8
9
10
// ====================================================================
// Copyright INRIA 2008
// Copyright INRIA 2008
// Copyright DIGITEO 2011
// Allan CORNET
// ====================================================================
tbx_builder_help_lang(["en_US"], ..
get_absolute_file_path("builder_help.sce"));
clear tbx_builder_help_lang;
// ====================================================================
help/en_US/ANN.xml
3636
3737
3838
39
39
4040
4141
4242
......
4545
4646
4747
48
48
4949
5050
5151
<title>COMPONENTS</title>
<para>General support functions See ANN_GEN for support functions for ANN.
Feedforward networks See ANN_FF for detailed description.</para>
Feedforward networks See ann_FF for detailed description.</para>
</refsection>
<refsection>
<simplelist type="inline">
<member><link linkend="ANN_GEN">ANN_GEN</link></member>
<member><link linkend="ANN_FF">ANN_FF</link></member>
<member><link linkend="ann_FF">ann_FF</link></member>
</simplelist>
</refsection>
</refentry>
help/en_US/ANN_FF.xml
11
2
2
33
44
55
......
1212
1313
1414
15
15
1616
1717
1818
......
2121
2222
2323
24
24
2525
2626
2727
......
3131
3232
3333
34
34
3535
3636
3737
......
4141
4242
4343
44
44
4545
4646
4747
4848
4949
50
50
5151
52
52
5353
5454
55
55
5656
57
57
5858
5959
6060
61
61
6262
6363
6464
6565
66
66
6767
6868
69
69
7070
7171
7272
......
110110
111111
112112
113
113
114114
115115
116116
......
120120
121121
122122
123
123
124124
125125
126126
127127
128
129
128
129
130130
131131
132132
......
149149
150150
151151
152
152
153153
154154
155155
......
158158
159159
160160
161
161
162162
163163
164
164
165165
166
166
167167
168168
169169
170170
171171
172
172
173173
174174
175175
......
205205
206206
207207
208
208
209209
210210
211211
212212
213213
214
214
215215
216
216
217217
218218
219
219
220220
221221
222
222
223223
224224
225225
......
228228
229229
230230
231
231
232232
233233
234234
235235
236236
237
237
238238
239239
240240
......
247247
248248
249249
250
250
251251
252252
253253
......
258258
259259
260260
261
261
262262
263263
264264
......
272272
273273
274274
275
275
276276
277277
278278
......
291291
292292
293293
294
295
296294
297295
298
296
<?xml version="1.0" encoding="UTF-8"?>
<refentry version="5.0-subset Scilab" xml:id="ANN_FF" xml:lang="en"
<refentry version="5.0-subset Scilab" xml:id="ann_FF" xml:lang="en"
xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:svg="http://www.w3.org/2000/svg"
</info>
<refnamediv>
<refname>ANN_FF</refname>
<refname>ann_FF</refname>
<refpurpose>Algorithms for feedforward nets.</refpurpose>
</refnamediv>
<title>OBJECTIVE</title>
<para>To provide engines for feedforward ANN exploration, testing and
rapid prototyping. </para>
rapid prototyping.</para>
<para>Some flexibility is provided (e.g. the possibility to change the
activation or error functions).</para>
<title>ARCHITECTURE DESCRIPTION</title>
<para>(a) The network is visualized as follows: inputs at the left and
data (signals) propagating to the right. </para>
data (signals) propagating to the right.</para>
<para>(b) N is a row vector containing the number of neurons per layer,
input included.</para>
<para>Layer no. 1 2 ... size(N,'c') . -- o o -&gt; \/ /\ i . -- o o -&gt;
o n \ |\ u p \ =====| &gt; t u |/ p t u s / t / s . - o o -&gt; input
first output hidden </para>
first output hidden</para>
<para>Note that connections do not jump over layers, they are only between
adjacent layers (fully interconnected).</para>
<para>(d) The dimension of N is size(N,'c') so: - input layer have N(1)
neurons - first hidden layer have N(2) neurons, ... </para>
neurons - first hidden layer have N(2) neurons, ...</para>
<para>- the output layer L have N(size(N,'c')) neurons </para>
<para>- the output layer L have N(size(N,'c')) neurons</para>
<para>(e) The input vector/matrix is x, each pattern is represented by one
column. </para>
column.</para>
<para>Only constant size input patterns are accepted. </para>
<para>Only constant size input patterns are accepted.</para>
<para>NOTE: Internally the patterns will be worked with, individually, as
column vectors, i.e. each pattern vector is a column of the form: x(:,p),
(p being the pattern order number). </para>
(p being the pattern order number).</para>
<para>(f) Each neuron on first hidden layer have N(1) inputs, ... for
layer l in [2, ..., size(N,'c')] each neuron have N(l-1) inputs from
previous layer plus one simulating the bias (where applicable, most
algorithms assume existence of bias). </para>
algorithms assume existence of bias).</para>
<para>(g) The network is fully connected but a connection can be canceled
by zeroing the corresponding weight </para>
by zeroing the corresponding weight</para>
<para>(note that a training procedure may turn it back to a non-zero
value, this is one reason for which some "hooks" are provided, see "ex"
y) and note the ".*" operator.</para>
<para>Delta_W_old The quantity by which W was changed on previous training
pattern. </para>
pattern.</para>
<para>dW, dW2 the amount of variations of each W element for calculating
the error derivatives trough a finite difference approach (see ann_FF_grad
the sum-of-squares (already defined within this toolbox). err_deriv_y the
error derivative with respect to network outputs. Returns a matrix each
column containing the error derivative corresponding to the appropriate
pattern. </para>
pattern.</para>
<para>This parameter is optional, default value is "ann_d_sum_of_sqr"
(already defined within this toolbox), i.e. the derivative of
sum-of-squares error function. ex is a Scilab program sequence, executed
after the weight hypermatrix for each training pattern have been updated.
</para>
after the weight hypermatrix for each training pattern have been
updated.</para>
<para>Its main purpose is to provide hooks in order to change the learning
function without having to rewrite it. Typical usages would be: checking
layer.</para>
<para>lp represents the learning parameters, is a row vector [lp(1),
lp(2), ...] </para>
lp(2), ...]</para>
<para>The actual significance of each component may vary, see the
respective man pages for representation and typical values.</para>
N(l) represents the number of neurons on layer l. E.g.: N(1) is the size
of input vector, N(size(N),'c') is the size of output vector r range of
random numbers based on which the connection weights (not biases) are
initialized. </para>
initialized.</para>
<para>Is a two component row vector: r(1) gives the lower limit r(2) gives
the upper limit </para>
the upper limit</para>
<para>This parameter is optional, default value is [-1,1]. </para>
<para>This parameter is optional, default value is [-1,1].</para>
<para>rb range of random numbers based on which the biases (not other
weihts) are initialized.</para>
<para>Is a two component row vector: rb(1) gives the lower limit rb(2)
gives the upper limit </para>
gives the upper limit</para>
<para>This parameter is optional, default value is [0,0], i.e. biases are
initialized with 0.</para>
<title>TIPS AND TRICKS</title>
<para>- Do not use the no-bias networks unless you know what you are
doing. </para>
doing.</para>
<para>- The most efficient (by far) algorithm is the "Conjugate Gradient",
however it may require bootstrapping with another algorithm (see the
examples).</para>
<para> - Reduce as much is possible the number of loops and the number of
<para>- Reduce as much is possible the number of loops and the number of
function calls, use instead as much is possible the matrix manipulation
capabilities of Scilab. </para>
capabilities of Scilab.</para>
<para>- You can do a shuffling of training patterns between two calls to
the training procedure, use the "ex" hooks provided. </para>
the training procedure, use the "ex" hooks provided.</para>
<para>- Be very careful when defining new activation and error functions
and test them to make sure they do what are supposed to do. </para>
and test them to make sure they do what are supposed to do.</para>
<para>- don't use sparse matrices unless they are really sparse (&lt;
5%).</para>
<refsection>
<title>IMPLEMENTATION DETAILS</title>
<para>- Each layer have associated a hypermatrix of weights. </para>
<para>- Each layer have associated a hypermatrix of weights.</para>
<para>NOTE: Most algorithms assume existence of bias by default. For each
layer l, except l=1, the weight matrix associated with connections from
l-1 to l is W(1:N(l),1:N(l-1),l-1) for networks without biases and
W(1:N(l),1:N(l-1)+1,l-1) for networks with biases, i.e. biases are stored
in first column: W(1:N(l),1,l-1). </para>
in first column: W(1:N(l),1,l-1).</para>
<para>The total input to a layer l is: =
W(1:N(l),1:N(l-1),l-1)*z(1:N(l-1)) for network without biases =
networks with biases; the unused entries from W are initialized to zero
and left untouched.</para>
<para> - Pattern vectors are passed as columns in a matrix representing a
<para>- Pattern vectors are passed as columns in a matrix representing a
set (of patterns).</para>
</refsection>
is assumed that (as least to some extent) you know what you are doing ;-)
You can implement them yourself if you wish.</para>
<para> The following conditions have to be met: + targets have to have the
<para>The following conditions have to be met: + targets have to have the
same size as output layer, i.e. size(target,'r') = N(size(N,'c')) + inputs
have to have the same size as input layer, i.e. size(input,'r') = N(1) +
all N(i) have to be positive integers of course (am I paranoid here ? :-)
what you would expect when initializing the weights. Warning: In some
particular cases this may lead to very subtle errors (e.g. your program
may even run without generating any Scilab errors but the results may be
meaningless). </para>
meaningless).</para>
<para>- The algorithms themselfs are not described here, there are many
books which describes them (e.g. get mine "Matrix ANN" wherever you may
<member><link linkend="ANN">ANN</link></member>
<member><link linkend="ANN_GEN">ANN_GEN</link></member>
<member><link linkend="ANN_FF_INT">ANN_FFT_INT</link></member>
</simplelist>
</refsection>
</refentry>
</refentry>
help/en_US/ann_FF_ConjugGrad.xml
7777
7878
7979
80
80
8181
82
82
8383
84
84
8585
86
86
8787
88
88
8989
9090
9191
<member><link linkend="ANN_GEN">ANN_GEN</link></member>
<member><link linkend="ANN_FF">ANN_FF</link></member>
<member><link linkend="ann_FF">ann_FF</link></member>
<member><link linkend="ANN_FF_init">ANN_FF_init</link></member>
<member><link linkend="ann_FF_init">ann_FF_init</link></member>
<member><link linkend="ANN_FF_run">ANN_FF_run</link></member>
<member><link linkend="ann_FF_run">ann_FF_run</link></member>
<member><link linkend="ANN_FF_grad_BP">ANN_FF_grad_BP</link></member>
<member><link linkend="ann_FF_grad_BP">ann_FF_grad_BP</link></member>
<member><link linkend="ANN_FF_VHess">ANN_FF_VHess</link></member>
<member><link linkend="ann_FF_VHess">ann_FF_VHess</link></member>
</simplelist>
</refsection>
</refentry>
help/en_US/ann_FF_Hess.xml
6767
6868
6969
70
70
7171
7272
7373
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_INT.xml
8181
8282
8383
84
84
8585
8686
8787
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_Jacobian.xml
5252
5353
5454
55
55
5656
5757
5858
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_Jacobian_BP.xml
5252
5353
5454
55
55
5656
5757
5858
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_Mom_online.xml
137137
138138
139139
140
140
141141
142142
143143
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
<member>
<link linkend="ann_FF_init">ann_FF_init</link>
help/en_US/ann_FF_Mom_online_nb.xml
130130
131131
132132
133
133
134134
135135
136136
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
<member>
<link linkend="ann_FF_init_nb">ann_FF_init_nb</link>
help/en_US/ann_FF_SSAB_online.xml
121121
122122
123123
124
124
125125
126126
127127
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
<member>
<link linkend="ann_FF_init">ann_FF_init</link>
help/en_US/ann_FF_SSAB_online_nb.xml
125125
126126
127127
128
128
129129
130130
131131
132132
133
134
135
136133
137134
138135
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
<member>
<link linkend="ann_FF_init_nb">ann_FF_init_nb</link>
</member>
<member>
<link linkend="ann_BP_run_nb">ann_BP_run_nb</link>
</member>
</simplelist>
</refsection>
</refentry>
help/en_US/ann_FF_Std_online.xml
9696
9797
9898
99
99
100100
101101
102102
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend=" ANN_FF"> ANN_FF</link>
<link linkend="ann_FF"> ann_FF</link>
</member>
<member>
<link linkend="ann_FF_init">ann_FF_init</link>
help/en_US/ann_FF_Std_online_nb.xml
9797
9898
9999
100
100
101101
102102
103103
<link linkend="ANN_GEN">ANN_GEN</link>
</member>
<member>
<link linkend=" ANN_FF"> ANN_FF</link>
<link linkend="ann_FF"> ann_FF</link>
</member>
<member>
<link linkend="ann_FF_init_nb">ann_FF_init_nb</link>
help/en_US/ann_FF_VHess.xml
6060
6161
6262
63
63
6464
6565
6666
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend=" ANN_FF"> ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_grad.xml
7070
7171
7272
73
73
7474
7575
7676
<simplelist type="inline">
<member><link linkend="ANN">ANN</link></member>
<member><link linkend="ANN_FF">ANN_FF</link></member>
<member><link linkend="ann_FF">ann_FF</link></member>
</simplelist>
</refsection>
</refentry>
help/en_US/ann_FF_grad_BP.xml
7878
7979
8080
81
81
8282
83
83
8484
8585
8686
<simplelist type="inline">
<member><link linkend="ANN">ANN</link></member>
<member><link linkend="ANN_FF">ANN_FF</link></member>
<member><link linkend="ann_FF">ann_FF</link></member>
<member><link linkend="ANN_FF_init">ANN_FF_init</link></member>
<member><link linkend="ann_FF_init">ann_FF_init</link></member>
</simplelist>
</refsection>
</refentry>
help/en_US/ann_FF_grad_BP_nb.xml
7979
8080
8181
82
82
8383
8484
85
85
8686
8787
8888
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
<member>
<link linkend="ANN_FF_init_nb">ANN_FF_init_nb</link>
<link linkend="ann_FF_init_nb">ann_FF_init_nb</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_grad_nb.xml
5959
6060
6161
62
62
6363
6464
6565
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_init.xml
4141
4242
4343
44
44
4545
4646
4747
......
5252
5353
5454
55
55
5656
5757
5858
<refsection>
<title>Description</title>
<para> This function builds the weight hypermatrix according to network
description N. Its format is detailed in ANN_FF.
description N. Its format is detailed in ann_FF.
</para>
</refsection>
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_init_nb.xml
3939
4040
4141
42
42
4343
4444
4545
......
5151
5252
5353
54
54
5555
5656
5757
<refsection>
<title>Description</title>
<para> This function builds the weight hypermatrix according to network
description N. The format of it is detailed in ANN_FF. This function is
description N. The format of it is detailed in ann_FF. This function is
to be used on networks without biases.
</para>
</refsection>
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_run.xml
4848
4949
5050
51
51
5252
5353
5454
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_FF_run_nb.xml
5555
5656
5757
58
58
5959
6060
6161
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_d_log_activ.xml
5353
5454
5555
56
56
5757
5858
5959
......
6262
6363
6464
65
65
6666
6767
6868
<title>COMPONENTS</title>
<para>General support functions See ANN_GEN for support functions for ANN.
Feedforward networks See ANN_FF for detailed description.</para>
Feedforward networks See ann_FF for detailed description.</para>
</refsection>
<refsection>
<simplelist type="inline">
<member><link linkend="ANN">ANN</link></member>
<member><link linkend="ANN_FF">ANN_FF</link></member>
<member><link linkend="ann_FF">ann_FF</link></member>
</simplelist>
</refsection>
</refentry>
help/en_US/ann_d_sum_of_sqr.xml
5858
5959
6060
61
61
6262
6363
6464
<simplelist type="inline">
<member><link linkend="ANN">ANN</link></member>
<member><link linkend="ANN_FF">ANN_FF</link></member>
<member><link linkend="ann_FF">ann_FF</link></member>
</simplelist>
</refsection>
</refentry>
help/en_US/ann_log_activ.xml
4343
4444
4545
46
46
4747
4848
4949
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_pat_shuffle.xml
4343
4444
4545
46
46
4747
4848
4949
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
help/en_US/ann_sum_of_sqr.xml
4141
4242
4343
44
44
4545
4646
4747
<link linkend="ANN">ANN</link>
</member>
<member>
<link linkend="ANN_FF">ANN_FF</link>
<link linkend="ann_FF">ann_FF</link>
</member>
</simplelist>
</refsection>
macros/buildmacros.sce
44
55
66
7
7
8
// INRIA 2008
// ANN Toolbox
// ====================================================================
tbx_build_macros(TOOLBOX_NAME,get_absolute_file_path("buildmacros.sce"));
tbx_build_macros(TOOLBOX_NAME,get_absolute_file_path("buildmacros.sce"));
clear tbx_build_macros;
macros/cleanmacros.sce
11
22
3
4
3
54
6
5
6
77
8
9
10
11
12
13
14
8
9
10
11
1512
13
14
15
16
17
18
1619
// ====================================================================
// Allan CORNET
// DIGITEO 2009
// This file is released into the public domain
// DIGITEO 2009 - 2011
// ====================================================================
libpath = get_absolute_file_path('cleanmacros.sce');
function clean_macros()
libpath = get_absolute_file_path('cleanmacros.sce');
binfiles = ls(libpath+'/*.bin');
for i = 1:size(binfiles,'*')
mdelete(binfiles(i));
end
mdelete(libpath+'/names');
mdelete(libpath+'/lib');
binfiles = ls(libpath+'/*.bin');
for i = 1:size(binfiles,'*')
mdelete(binfiles(i));
end
mdelete(libpath+'/names');
mdelete(libpath+'/lib');
endfunction
// ====================================================================
clean_macros()
clear clean_macros;
// ====================================================================
readme.txt
1
1
22
33
44
55
66
77
8
89
910
1011
ANN Toolbox ver. 0.4.2.4 for Scilab 5.3
ANN Toolbox ver. 0.4.2.5 for Scilab 5.4
=======================================
This represents a toolbox for artificial neural networks,
based on my developments described in "Matrix ANN" book,
under development, if interested send me an email at
r.hristev@phys.canterbury.ac.nz
allan.cornet@scilab.org
Current features:
- Only layered feedforward networks are supported *directly* at the moment

Archive Download the corresponding diff file

Branches