-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy path example_cuda_inference.cpp
More file actions
36 lines (26 loc) · 1.32 KB
/
example_cuda_inference.cpp
File metadata and controls
36 lines (26 loc) · 1.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
//
// Created by paolo on 28/10/21.
//
#include <baylib/inference/cuda/likelihood_weighting_cuda.hpp>
#include <baylib/smile_utils/smile_utils.hpp>
#include <iostream>
/**
* Baylib implements several algorithms exploiting gpgpu parallelization.
*/
int main(int argc, char** argv){
using namespace baylib;
using namespace baylib::inference;
baylib::xdsl_parser parser;
// We use the Hailfinder network for this example
auto bn = parser.deserialize("../../examples/xdsl/Hailfinder2.5.xdsl");
// Gpu algorithms use montecarlo simulations to approximate inference results, all simulations are made
// simultaneously, for this reason we have to take into account memory usage
// For all gpu algorithms the first attribute will be the network, the second one will be the number of samples
// to be generated and the third one will be the amount of memory on the opencl device available
likelihood_weighting_cuda ls(bn, 10000);
// Gpu algorithms offer the same external interface as all other baylib algorithms
auto result = ls.make_inference();
// The main advantage to using this kind of parallelization is that for high number of samples the
// computation time raises very slowly in respect to classical algorithms (as long as enough memory is provided)
std::cout << result << '\n';
}