Analysis Software
Documentation for sPHENIX simulation software
TorchMetricLearning.cpp
Go to the documentation of this file, or view the newest version of TorchMetricLearning.cpp in the sPHENIX GitHub repository.
// This file is part of the Acts project.
//
// Copyright (C) 2023 CERN for the benefit of the Acts project
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "Acts/Plugins/ExaTrkX/TorchMetricLearning.hpp"

// Detail helpers used below: vectorToTensor2D and buildEdges
#include "Acts/Plugins/ExaTrkX/detail/TensorVectorConversion.hpp"
#include "Acts/Plugins/ExaTrkX/detail/buildEdges.hpp"

#include <torch/script.h>
#include <torch/torch.h>

#include "printCudaMemInfo.hpp"

using namespace torch::indexing;

namespace Acts {

TorchMetricLearning::TorchMetricLearning(const Config &cfg,
                                         std::unique_ptr<const Logger> _logger)
    : m_logger(std::move(_logger)), m_cfg(cfg) {
  c10::InferenceMode guard(true);
  m_deviceType = torch::cuda::is_available() ? torch::kCUDA : torch::kCPU;
  ACTS_DEBUG("Using torch version " << TORCH_VERSION_MAJOR << "."
                                    << TORCH_VERSION_MINOR << "."
                                    << TORCH_VERSION_PATCH);
#ifndef ACTS_EXATRKX_CPUONLY
  if (not torch::cuda::is_available()) {
    ACTS_INFO("CUDA not available, falling back to CPU");
  }
#endif

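  // Load the TorchScript module from m_cfg.modelPath directly onto the chosen
  // device and switch it to evaluation mode; any TorchScript loading error is
  // rethrown as std::invalid_argument.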
  try {
    m_model = std::make_unique<torch::jit::Module>();
    *m_model = torch::jit::load(m_cfg.modelPath, m_deviceType);
    m_model->eval();
  } catch (const c10::Error &e) {
    throw std::invalid_argument("Failed to load models: " + e.msg());
  }
}

TorchMetricLearning::~TorchMetricLearning() {}

std::tuple<std::any, std::any> TorchMetricLearning::operator()(
    std::vector<float> &inputValues, std::size_t numNodes, int deviceHint) {
  ACTS_DEBUG("Start graph construction");
  c10::InferenceMode guard(true);
  const torch::Device device(m_deviceType, deviceHint);

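  // inputValues is a flat, row-major buffer: one row of features per node
  // (spacepoint), so the per-node feature count is total size / numNodes.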
  const int64_t numAllFeatures = inputValues.size() / numNodes;

  // Print the r, phi, z of the first spacepoint
  ACTS_VERBOSE("First spacepoint information: " << [&]() {
    std::stringstream ss;
    for (int i = 0; i < numAllFeatures; ++i) {
      ss << inputValues[i] << " ";
    }
    return ss.str();
  }());
  printCudaMemInfo(logger());

  auto inputTensor = detail::vectorToTensor2D(inputValues, numAllFeatures);

  // If we are on CPU, clone to get ownership (is this necessary?), else bring
  // to device.
  if (inputTensor.options().device() == device) {
    inputTensor = inputTensor.clone();
  } else {
    inputTensor = inputTensor.to(device);
  }

  // **********
  // Embedding
  // **********

  if (m_cfg.numFeatures > numAllFeatures) {
    throw std::runtime_error("requested more features than available");
  }

  // Clone models (solve memory leak? members can be const...)
  auto model = m_model->clone();
  model.to(device);

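  // Only the first numFeatures columns are fed to the network; any extra
  // input columns are dropped here.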
  std::vector<torch::jit::IValue> inputTensors;
  inputTensors.push_back(
      m_cfg.numFeatures < numAllFeatures
          ? inputTensor.index({Slice{}, Slice{None, m_cfg.numFeatures}})
          : std::move(inputTensor));

  ACTS_DEBUG("embedding input tensor shape "
             << inputTensors[0].toTensor().size(0) << ", "
             << inputTensors[0].toTensor().size(1));

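  // Run the embedding network: the output holds one embedding vector per
  // input node.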
  auto output = model.forward(inputTensors).toTensor();

  ACTS_VERBOSE("Embedding space of the first SP:\n"
               << output.slice(/*dim=*/0, /*start=*/0, /*end=*/1));
  printCudaMemInfo(logger());

  // ****************
  // Building Edges
  // ****************

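  // Connect nodes that lie close together in the embedding space: rVal is the
  // search radius and knnVal caps the number of neighbours per node.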
  auto edgeList = detail::buildEdges(output, m_cfg.rVal, m_cfg.knnVal,
                                     m_cfg.shuffleDirections);

  ACTS_VERBOSE("Shape of built edges: (" << edgeList.size(0) << ", "
                                         << edgeList.size(1) << ")");
  ACTS_VERBOSE("Slice of edgelist:\n" << edgeList.slice(1, 0, 5));
  printCudaMemInfo(logger());

  return {std::move(inputTensors[0]).toTensor(), std::move(edgeList)};
}
} // namespace Acts
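
For orientation, below is a minimal sketch of how this graph-construction stage might be invoked from client code. The function name, the three-feature layout (r, phi, z per spacepoint), the include path, and the device hint are illustrative assumptions; only TorchMetricLearning, its operator() signature, and the std::any return values come from the listing above.

#include <any>
#include <cstddef>
#include <vector>

#include <torch/torch.h>

#include "Acts/Plugins/ExaTrkX/TorchMetricLearning.hpp"

// Illustrative caller: feed flattened spacepoint features to the metric
// learning stage and recover the edge index tensor for the next stage.
torch::Tensor buildGraphSketch(Acts::TorchMetricLearning &metricLearning) {
  // Three features per spacepoint (e.g. r, phi, z), flattened row by row.
  std::vector<float> features = {
      30.f, 0.1f, -5.f,   // spacepoint 0
      55.f, 0.2f, 10.f,   // spacepoint 1
      80.f, 0.3f, 25.f};  // spacepoint 2
  const std::size_t numNodes = 3;

  // Returns {node feature tensor, edge index tensor}, both type-erased.
  auto [nodeTensor, edgeTensor] = metricLearning(features, numNodes,
                                                 /*deviceHint=*/0);
  (void)nodeTensor;  // node features would go to the edge classification stage

  // Downstream stages cast the std::any back to a tensor.
  return std::any_cast<torch::Tensor>(edgeTensor);
}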