-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathReadMe.cpp
More file actions
177 lines (144 loc) · 6.23 KB
/
ReadMe.cpp
File metadata and controls
177 lines (144 loc) · 6.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
#include <EasyFactorGraph/factor/Factor.h>
#include <EasyFactorGraph/factor/FactorExponential.h>
#include <EasyFactorGraph/io/TrainSetImport.h>
#include <EasyFactorGraph/io/json/Importer.h>
#include <EasyFactorGraph/io/xml/Importer.h>
#include <EasyFactorGraph/model/RandomField.h>
#include <EasyFactorGraph/trainable/ModelTrainer.h>
#include <TrainingTools/iterative/solvers/QuasiNewton.h>
using namespace EFG;
using namespace EFG::categoric;
using namespace EFG::factor;
using namespace EFG::model;
using namespace EFG::io;
using namespace EFG::train;
using namespace EFG::strct;
/// Walk-through of the EasyFactorGraph public API.
/// Each scoped section below is an independent, self-contained example:
/// factor construction, model assembly, inference (queries), training,
/// and Gibbs sampling.
int main() {
  {
    // FACTORS CONSTRUCTION
    // define a couple of variables, with the same size
    VariablePtr A = make_variable(3, "A"); // size is 3
    VariablePtr B = make_variable(3, "B"); // size is 3
    // build a simply correlating factor connecting the 2 variables
    Factor factor_AB(VariablesSoup{B, A}, // the order in the specified
                                          // group matters: B is assumed
                                          // as the first variable, A
                                          // will be the second
                     Factor::SimplyCorrelatedTag{});
    // build an exponential factor using as base `factor_AB`: values of the
    // images are assumed as exp(weight * images_factor_AB)
    FactorExponential factor_AB_exponential(
        factor_AB,
        1.5f // this will be the value assumed for the weight
    );
    // define another variable
    VariablePtr C = make_variable(2, "C"); // size is 2
    // define a factor connecting C to B
    // we start building an empty factor, having all images equal to 0
    Factor factor_BC(VariablesSoup{B, C});
    // set some individual images of factor_BC
    // set for <0,1> -> 2
    factor_BC.set(std::vector<std::size_t>{0, 1}, 2.f);
    // set for <2,0> -> 1.3f
    factor_BC.set(std::vector<std::size_t>{2, 0}, 1.3f);
  }
  {
    // MODELS CONSTRUCTION
    // start building an empty random field
    RandomField model;
    // define some variables, which will be later connected
    auto A = make_variable(4, "varA");
    auto B = make_variable(4, "varB");
    auto C = make_variable(4, "varC");
    // without loss of generality, add to the model some simply correlating
    // factors
    model.addConstFactor(std::make_shared<Factor>(
        VariablesSoup{A, B},
        Factor::SimplyCorrelatedTag{})); // the generated smart
                                         // pointer is shallow
                                         // copied
    model.copyConstFactor(
        Factor{VariablesSoup{A, C},
               Factor::SimplyCorrelatedTag{}}); // the passed factor is
                                                // deep-copied into the
                                                // model
    // build some additional tunable exponential factors that will be too added
    auto factor_exp_BC = std::make_shared<FactorExponential>(
        Factor{VariablesSoup{B, C}, Factor::SimplyCorrelatedTag{}}, 1.f);
    model.addTunableFactor(factor_exp_BC);
    auto D = make_variable(4, "varD");
    auto factor_exp_CD = std::make_shared<FactorExponential>(
        Factor{VariablesSoup{C, D}, Factor::SimplyCorrelatedTag{}}, 1.5f);
    model.addTunableFactor(factor_exp_CD);
    // insert another tunable factor, this time specifying that it needs to
    // share the weight with already inserted exponential factor that connects B
    // and C
    model.addTunableFactor(
        std::make_shared<FactorExponential>(
            Factor{VariablesSoup{C, D}, Factor::SimplyCorrelatedTag{}},
            2.f // actually this value is irrelevant, as the weight of
                // factor_exp_BC will be assumed from now on
            ),
        VariablesSet{B, C}
        // this additional input is to specify that this exponential factor
        // needs to share the weight with the one connecting B and C
    );
    // absorb the structure defined in an xml file
    xml::Importer::importFromFile(model, std::string{"file_name.xml"});
    // absorb the structure encoded in a json: default-construct it here,
    // then fill it with the actual content, e.g. via nlohmann::json::parse(...)
    // or by reading it from a stream
    nlohmann::json json_defining_a_structure;
    json::Importer::importFromJson(model, json_defining_a_structure);
  }
  {
    // QUERY THE MODEL
    RandomField model;
    // set some evidences
    model.setEvidence("variable_1", 0); // setting variable_1 = 0
    model.setEvidence("variable_2", 2); // setting variable_2 = 2
    // get the marginal conditioned distribution of a hidden variable
    std::vector<float> conditioned_marginals =
        model.getMarginalDistribution("var_A");
    // get maximum a posteriori estimation of the entire hidden set
    std::vector<std::size_t> MAP_hidden_set = model.getHiddenSetMAP();
    // set some new evidences
    model.removeAllEvidences();
    model.setEvidence("evid_1", 1);
    // compute new conditioned marginals: they should be different as the
    // evidences were changed
    conditioned_marginals = model.getMarginalDistribution("var_A");
  }
  {
    // TUNE THE MODEL
    RandomField tunable_model;
    // assume we have a training set for the model stored in a file
    TrainSet training_set = import_train_set("file_name.txt");
    // we can train the model using one of the ready to use gradient based
    // approaches; note the leading `::`: this is the `train` namespace of
    // the TrainingTools library, not EFG::train
    ::train::QuasiNewton ready_to_use_trainer;
    ready_to_use_trainer.setMaxIterations(50);
    // some definitions to control the training process
    TrainInfo info = TrainInfo{
        4,  // threads to use
        1.f // stochasticity. When set different from 1, the stochastic
            // gradient descent approaches are actually used
    };
    train_model(tunable_model, ready_to_use_trainer, training_set, info);
  }
  {
    // GIBBS SAMPLING
    RandomField model;
    // some definitions to control the samples generation process
    GibbsSampler::SamplesGenerationContext info =
        GibbsSampler::SamplesGenerationContext{
            1000, // samples number
            0,    // seed used by random engines
            500   // number of iterations to discard at the beginning (burn-in)
        };
    // get samples from the model using Gibbs sampler
    std::vector<std::vector<std::size_t>> samples =
        model.makeSamples(info,
                          4 // threads to use
        );
  }
  return EXIT_SUCCESS;
}