|
18 | 18 |
|
19 | 19 | namespace eicrecon { |
20 | 20 |
|
21 | | -static std::string print_shape(const std::vector<std::int64_t>& v) { |
22 | | - std::stringstream ss(""); |
23 | | - for (std::size_t i = 0; i < v.size() - 1; i++) { |
24 | | - ss << v[i] << " x "; |
25 | | - } |
26 | | - ss << v[v.size() - 1]; |
27 | | - return ss.str(); |
28 | | -} |
| 21 | +#ifndef UNITY_BUILD_UNIQUE_ID |
| 22 | +#define UNITY_BUILD_UNIQUE_ID |
| 23 | +#endif |
29 | 24 |
|
30 | | -static bool check_shape_consistency(const std::vector<std::int64_t>& shape1, |
31 | | - const std::vector<std::int64_t>& shape2) { |
32 | | - if (shape2.size() != shape1.size()) { |
33 | | - return false; |
34 | | - } |
35 | | - for (std::size_t ix = 0; ix < shape1.size(); ix++) { |
36 | | - if ((shape1[ix] != -1) && (shape2[ix] != -1) && (shape1[ix] != shape2[ix])) { |
37 | | - return false; |
| 25 | +namespace { |
| 26 | + namespace UNITY_BUILD_UNIQUE_ID { |
| 27 | + |
/// Render a tensor shape as a human-readable string, e.g. "2 x 3 x 4".
/// @param v shape dimensions (may contain -1 for dynamic axes)
/// @return the dimensions joined with " x "; empty string for an empty shape
static std::string print_shape(const std::vector<std::int64_t>& v) {
  // Bug fix: the previous `v.size() - 1` loop bound underflows for an empty
  // vector (std::size_t wrap-around) and `v[v.size() - 1]` then reads out of
  // range. Emitting the separator before every element after the first
  // handles the empty and single-element cases without special indexing.
  std::stringstream ss;
  for (std::size_t i = 0; i < v.size(); i++) {
    if (i > 0) {
      ss << " x ";
    }
    ss << v[i];
  }
  return ss.str();
}
39 | | - } |
40 | | - return true; |
41 | | -} |
42 | 36 |
|
43 | | -template <typename T> |
44 | | -static Ort::Value iters_to_tensor(typename std::vector<T>::const_iterator data_begin, |
45 | | - typename std::vector<T>::const_iterator data_end, |
46 | | - std::vector<int64_t>::const_iterator shape_begin, |
47 | | - std::vector<int64_t>::const_iterator shape_end) { |
48 | | - Ort::MemoryInfo mem_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, |
49 | | - OrtMemType::OrtMemTypeDefault); |
50 | | - auto tensor = |
51 | | - Ort::Value::CreateTensor<T>(mem_info, const_cast<T*>(&*data_begin), data_end - data_begin, |
52 | | - &*shape_begin, shape_end - shape_begin); |
53 | | - return tensor; |
54 | | -} |
/// Check whether two tensor shapes are compatible.
/// Shapes are consistent when they have the same rank and every pair of
/// corresponding dimensions either matches exactly or at least one of the
/// two is the wildcard value -1 (dynamic axis).
static bool check_shape_consistency(const std::vector<std::int64_t>& shape1,
                                    const std::vector<std::int64_t>& shape2) {
  const auto dims_compatible = [](std::int64_t lhs, std::int64_t rhs) {
    return lhs == -1 || rhs == -1 || lhs == rhs;
  };
  return shape1.size() == shape2.size() &&
         std::equal(shape1.begin(), shape1.end(), shape2.begin(), dims_compatible);
}
| 49 | + |
| 50 | + template <typename T> |
| 51 | + static Ort::Value iters_to_tensor(typename std::vector<T>::const_iterator data_begin, |
| 52 | + typename std::vector<T>::const_iterator data_end, |
| 53 | + std::vector<int64_t>::const_iterator shape_begin, |
| 54 | + std::vector<int64_t>::const_iterator shape_end) { |
| 55 | + Ort::MemoryInfo mem_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, |
| 56 | + OrtMemType::OrtMemTypeDefault); |
| 57 | + auto tensor = |
| 58 | + Ort::Value::CreateTensor<T>(mem_info, const_cast<T*>(&*data_begin), data_end - data_begin, |
| 59 | + &*shape_begin, shape_end - shape_begin); |
| 60 | + return tensor; |
| 61 | + } |
| 62 | + |
| 63 | + } // namespace UNITY_BUILD_UNIQUE_ID |
| 64 | +} // namespace |
55 | 65 |
|
56 | 66 | void ONNXInference::init() { |
| 67 | + using namespace UNITY_BUILD_UNIQUE_ID; |
| 68 | + |
57 | 69 | // onnxruntime setup |
58 | 70 | m_env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, name().data()); |
59 | 71 | Ort::SessionOptions session_options; |
@@ -106,6 +118,8 @@ void ONNXInference::init() { |
106 | 118 | void ONNXInference::process(const ONNXInference::Input& input, |
107 | 119 | const ONNXInference::Output& output) const { |
108 | 120 |
|
| 121 | + using namespace UNITY_BUILD_UNIQUE_ID; |
| 122 | + |
109 | 123 | const auto [in_tensors] = input; |
110 | 124 | auto [out_tensors] = output; |
111 | 125 |
|
|
0 commit comments