LArSoft  v09_90_00
Liquid Argon Software toolkit - https://larsoft.org/
NuGraphInference_module.cc
Go to the documentation of this file.
1 // Class: NuGraphInference
3 // Plugin Type: producer (Unknown Unknown)
4 // File: NuGraphInference_module.cc
5 //
6 // Generated at Tue Nov 14 14:41:30 2023 by Giuseppe Cerati using cetskelgen
7 // from version .
9 
18 #include "fhiclcpp/ParameterSet.h"
20 
21 #include <array>
22 #include <limits>
23 #include <memory>
24 
25 #include "delaunator.hpp"
26 #include <torch/script.h>
27 
31 #include "lardataobj/RecoBase/Vertex.h" //this creates a conflict with torch script if included before it...
32 
33 class NuGraphInference;
34 
37 using recob::Hit;
38 using recob::SpacePoint;
39 using std::array;
40 using std::vector;
41 
42 namespace {
// Returns the index of the largest element of vec (first occurrence on ties).
// NOTE(review): an empty vector yields 0, which is not a valid index; callers
// must ensure vec is non-empty.
template <typename T, typename A>
int arg_max(std::vector<T, A> const& vec)
{
  // std::max_element is qualified explicitly: the original unqualified call
  // relied on ADL via the iterator type, which is not portable (it breaks on
  // implementations where vector iterators are raw pointers).
  return static_cast<int>(std::distance(vec.begin(), std::max_element(vec.begin(), vec.end())));
}
48 
// In-place, numerically stable softmax over a fixed-size array, using the
// log-sum-exp trick: shifting by the maximum before exponentiating avoids
// overflow, and exp(a[i] - (m + log(sum))) normalizes in one pass.
// Fix: the original called the float-only C functions expf/logf inside a
// template over T, silently truncating to float precision for T = double.
// std::exp/std::log pick the overload matching T (identical results for the
// std::array<float, 5> instantiation used by this module).
template <typename T, size_t N>
void softmax(std::array<T, N>& arr)
{
  // Find the maximum element (shift that keeps exp() in range).
  T m = -std::numeric_limits<T>::max();
  for (size_t i = 0; i < arr.size(); i++) {
    if (arr[i] > m) { m = arr[i]; }
  }
  // Sum of shifted exponentials.
  T sum = 0.0;
  for (size_t i = 0; i < arr.size(); i++) {
    sum += std::exp(arr[i] - m);
  }
  // offset = log of the (unshifted) normalization constant.
  T offset = m + std::log(sum);
  for (size_t i = 0; i < arr.size(); i++) {
    arr[i] = std::exp(arr[i] - offset);
  }
  return;
}
66 }
67 
// Class body of NuGraphInference, an art::EDProducer that runs a TorchScript
// GNN ("NuGraph") over reconstructed hits (see produce() below).
// NOTE(review): this is a partial scrape — the class-header line and several
// member declarations (the input tags and the decoder flags referenced by the
// constructor: hitInput, spsInput, filterDecoder, semanticDecoder,
// vertexDecoder) are missing from this view; confirm against the real file.
69 public:
70  explicit NuGraphInference(fhicl::ParameterSet const& p);
71 
72  // Plugins should not be copied or assigned.
73  NuGraphInference(NuGraphInference const&) = delete;
77 
78  // Required functions.
79  void produce(art::Event& e) override;
80 
81 private:
 // Wire-plane names (e.g. from FHiCL "planes"); used as keys in the
 // per-plane tensor dictionaries passed to the model.
82  vector<std::string> planes;
 // Minimum number of hits required before inference is attempted.
85  size_t minHits;
 // When true, produce() prints verbose diagnostics to std::cout.
86  bool debug;
 // Per-plane feature means and standard deviations used to standardize the
 // four node features (must match the model's training preprocessing).
87  vector<vector<float>> avgs;
88  vector<vector<float>> devs;
 // TorchScript model loaded in the constructor and run in produce().
92  torch::jit::script::Module model;
93 };
94 
// Constructor (signature line missing from this scrape): reads all module
// configuration from the FHiCL parameter set, declares the data products
// enabled by the decoder flags, and loads the TorchScript model from disk.
96  : EDProducer{p}
97  , planes(p.get<vector<std::string>>("planes"))
98  , hitInput(p.get<art::InputTag>("hitInput"))
99  , spsInput(p.get<art::InputTag>("spsInput"))
100  , minHits(p.get<size_t>("minHits"))
101  , debug(p.get<bool>("debug"))
102  , filterDecoder(p.get<bool>("filterDecoder"))
103  , semanticDecoder(p.get<bool>("semanticDecoder"))
104  , vertexDecoder(p.get<bool>("vertexDecoder"))
105 {
106 
 // Per-plane standardization constants, keyed by plane name
 // (e.g. "avgs_u" / "devs_u"); order follows the "planes" list.
107  for (size_t ip = 0; ip < planes.size(); ++ip) {
108  avgs.push_back(p.get<vector<float>>("avgs_" + planes[ip]));
109  devs.push_back(p.get<vector<float>>("devs_" + planes[ip]));
110  }
111 
 // Declare only the products for the decoders that are switched on;
 // produce() must put exactly this set (it does, on both exit paths).
112  if (filterDecoder) { produces<vector<FeatureVector<1>>>("filter"); }
113  //
114  if (semanticDecoder) {
115  produces<vector<FeatureVector<5>>>("semantic");
116  produces<MVADescription<5>>("semantic");
117  }
118  //
119  if (vertexDecoder) { produces<vector<recob::Vertex>>("vertex"); }
120 
 // Locate the serialized TorchScript model on FW_SEARCH_PATH and load it
 // once at construction; torch::jit::load throws if the file is unreadable.
121  cet::search_path sp("FW_SEARCH_PATH");
122  model = torch::jit::load(sp.find_file(p.get<std::string>("modelFileName")));
123 }
124 
// produce() body (signature line missing from this scrape): builds a
// heterogeneous graph from the event's hits and spacepoints, runs the
// TorchScript model, and stores the enabled decoder outputs.
126 {
127 
 // Read the input hit collection; hitlist stays empty if the label is absent.
128  art::Handle<vector<Hit>> hitListHandle;
129  vector<art::Ptr<Hit>> hitlist;
130  if (e.getByLabel(hitInput, hitListHandle)) { art::fill_ptr_vector(hitlist, hitListHandle); }
131 
 // Output products, pre-sized to one entry per hit, filled with -1 sentinels
 // so hits skipped by inference still have a well-defined value.
132  std::unique_ptr<vector<FeatureVector<1>>> filtcol(
133  new vector<FeatureVector<1>>(hitlist.size(), FeatureVector<1>(std::array<float, 1>({-1.}))));
134 
135  std::unique_ptr<vector<FeatureVector<5>>> semtcol(new vector<FeatureVector<5>>(
136  hitlist.size(), FeatureVector<5>(std::array<float, 5>({-1., -1., -1., -1., -1.}))));
 // NOTE(review): provenance() is dereferenced even if getByLabel() above
 // failed, in which case the handle is invalid — confirm hitInput is
 // guaranteed present, or guard this.
137  std::unique_ptr<MVADescription<5>> semtdes(
138  new MVADescription<5>(hitListHandle.provenance()->moduleLabel(),
139  "semantic",
140  {"MIP", "HIP", "shower", "michel", "diffuse"}));
141 
142  std::unique_ptr<vector<recob::Vertex>> vertcol(new vector<recob::Vertex>());
143 
 // Below the hit threshold: store the sentinel-filled products and bail out
 // without running the model.
144  if (debug) std::cout << "Hits size=" << hitlist.size() << std::endl;
145  if (hitlist.size() < minHits) {
146  if (filterDecoder) { e.put(std::move(filtcol), "filter"); }
147  if (semanticDecoder) {
148  e.put(std::move(semtcol), "semantic");
149  e.put(std::move(semtdes), "semantic");
150  }
151  if (vertexDecoder) { e.put(std::move(vertcol), "vertex"); }
152  return;
153  }
154 
 // Per-plane node features and coordinates. idsmap maps
 // (plane, node index) -> hit key; idsmapRev is the inverse (initialized to
 // the out-of-range sentinel hitlist.size()).
 // NOTE(review): h->View() is used directly as the plane index — this assumes
 // view enum values run 0..planes.size()-1 in the same order as "planes".
155  vector<vector<float>> nodeft_bare(planes.size(), vector<float>());
156  vector<vector<float>> nodeft(planes.size(), vector<float>());
157  vector<vector<double>> coords(planes.size(), vector<double>());
158  vector<vector<size_t>> idsmap(planes.size(), vector<size_t>());
159  vector<size_t> idsmapRev(hitlist.size(), hitlist.size());
 // The 0.055 (per time tick) and 0.3 (per wire) factors put time and wire on
 // a common distance scale — presumably cm (drift velocity x tick, wire
 // pitch); they must match the model's training preprocessing. nodeft holds
 // the four features standardized with avgs/devs; nodeft_bare keeps the raw
 // values for the debug printout only.
160  for (auto h : hitlist) {
161  idsmap[h->View()].push_back(h.key());
162  idsmapRev[h.key()] = idsmap[h->View()].size() - 1;
163  coords[h->View()].push_back(h->PeakTime() * 0.055);
164  coords[h->View()].push_back(h->WireID().Wire * 0.3);
165  nodeft[h->View()].push_back((h->WireID().Wire * 0.3 - avgs[h->View()][0]) / devs[h->View()][0]);
166  nodeft[h->View()].push_back((h->PeakTime() * 0.055 - avgs[h->View()][1]) / devs[h->View()][1]);
167  nodeft[h->View()].push_back((h->Integral() - avgs[h->View()][2]) / devs[h->View()][2]);
168  nodeft[h->View()].push_back((h->RMS() - avgs[h->View()][3]) / devs[h->View()][3]);
169  nodeft_bare[h->View()].push_back(h->WireID().Wire * 0.3);
170  nodeft_bare[h->View()].push_back(h->PeakTime() * 0.055);
171  nodeft_bare[h->View()].push_back(h->Integral());
172  nodeft_bare[h->View()].push_back(h->RMS());
173  }
174 
 // Directed edge between two graph-node indices; equality (used by
 // std::unique below) compares both endpoints.
175  struct Edge {
176  size_t n1;
177  size_t n2;
178  bool operator==(const Edge& other) const
179  {
180  if (this->n1 == other.n1 && this->n2 == other.n2)
181  return true;
182  else
183  return false;
184  };
185  };
 // 2D (in-plane) edges from a Delaunay triangulation of the (time, wire)
 // coordinates; each triangle side is added in both directions. Planes with
 // fewer than 3 hits are skipped (no triangulation possible) and get no edges.
186  vector<vector<Edge>> edge2d(planes.size(), vector<Edge>());
187  for (size_t p = 0; p < planes.size(); p++) {
188  if (debug) std::cout << "Plane " << p << " has N hits=" << coords[p].size() / 2 << std::endl;
189  if (coords[p].size() / 2 < 3) continue;
190  delaunator::Delaunator d(coords[p]);
191  if (debug) std::cout << "Found N triangles=" << d.triangles.size() / 3 << std::endl;
192  for (std::size_t i = 0; i < d.triangles.size(); i += 3) {
193  //create edges in both directions
194  Edge e;
195  e.n1 = d.triangles[i];
196  e.n2 = d.triangles[i + 1];
197  edge2d[p].push_back(e);
198  e.n1 = d.triangles[i + 1];
199  e.n2 = d.triangles[i];
200  edge2d[p].push_back(e);
201  //
202  e.n1 = d.triangles[i];
203  e.n2 = d.triangles[i + 2];
204  edge2d[p].push_back(e);
205  e.n1 = d.triangles[i + 2];
206  e.n2 = d.triangles[i];
207  edge2d[p].push_back(e);
208  //
209  e.n1 = d.triangles[i + 1];
210  e.n2 = d.triangles[i + 2];
211  edge2d[p].push_back(e);
212  e.n1 = d.triangles[i + 2];
213  e.n2 = d.triangles[i + 1];
214  edge2d[p].push_back(e);
215  //
216  }
217  //sort and cleanup duplicate edges
 // Sort lexicographically by (n1, n2) so std::unique can drop duplicates
 // (sides shared by adjacent triangles appear twice).
218  std::sort(edge2d[p].begin(), edge2d[p].end(), [](const auto& i, const auto& j) {
219  return (i.n1 != j.n1 ? i.n1 < j.n1 : i.n2 < j.n2);
220  });
221  if (debug) {
222  for (auto& e : edge2d[p]) {
223  std::cout << "sorted plane=" << p << " e1=" << e.n1 << " e2=" << e.n2 << std::endl;
224  }
225  }
226  edge2d[p].erase(std::unique(edge2d[p].begin(), edge2d[p].end()), edge2d[p].end());
227  }
228 
229  if (debug) {
230  for (size_t p = 0; p < planes.size(); p++) {
231  for (auto& e : edge2d[p]) {
232  std::cout << " plane=" << p << " e1=" << e.n1 << " e2=" << e.n2 << std::endl;
233  }
234  }
235  }
236 
237  // Get spacepoints from the event record
238  art::Handle<vector<SpacePoint>> spListHandle;
239  vector<art::Ptr<SpacePoint>> splist;
240  if (e.getByLabel(spsInput, spListHandle)) { art::fill_ptr_vector(splist, spListHandle); }
241  // Get assocations from spacepoints to hits
 // NOTE(review): the association label "sps" is hard-coded here while the
 // spacepoint input tag (spsInput) is configurable — confirm these should not
 // use the same tag.
242  vector<vector<art::Ptr<Hit>>> sp2Hit(splist.size());
243  if (splist.size() > 0) {
244  art::FindManyP<Hit> fmp(spListHandle, e, "sps");
245  for (size_t spIdx = 0; spIdx < sp2Hit.size(); ++spIdx) {
246  sp2Hit[spIdx] = fmp.at(spIdx);
247  }
248  }
249 
250  //Edges are the same as in pyg, but order is not identical.
251  //It should not matter but better verify that output is indeed the same.
 // 3D "nexus" edges: one edge per (hit, spacepoint) association, from the
 // hit's in-plane node index (n1) to the spacepoint index (n2), binned by
 // the hit's plane.
252  vector<vector<Edge>> edge3d(planes.size(), vector<Edge>());
253  for (size_t i = 0; i < splist.size(); ++i) {
254  for (size_t j = 0; j < sp2Hit[i].size(); ++j) {
255  Edge e;
256  e.n1 = idsmapRev[sp2Hit[i][j].key()];
257  e.n2 = i;
258  edge3d[sp2Hit[i][j]->View()].push_back(e);
259  }
260  }
261 
 // Node-feature tensors: one [n_nodes, 4] float32 tensor per plane, keyed by
 // plane name, plus an all-zero batch-index tensor (single-event batch).
262  auto x = torch::Dict<std::string, torch::Tensor>();
263  auto batch = torch::Dict<std::string, torch::Tensor>();
264  for (size_t p = 0; p < planes.size(); p++) {
265  long int dim = nodeft[p].size() / 4;
266  torch::Tensor ix = torch::zeros({dim, 4}, torch::dtype(torch::kFloat32));
267  if (debug) {
268  std::cout << "plane=" << p << std::endl;
269  std::cout << std::fixed;
270  std::cout << std::setprecision(4);
271  std::cout << "before, plane=" << planes[p] << std::endl;
272  for (size_t n = 0; n < nodeft_bare[p].size(); n = n + 4) {
273  std::cout << nodeft_bare[p][n] << " " << nodeft_bare[p][n + 1] << " "
274  << nodeft_bare[p][n + 2] << " " << nodeft_bare[p][n + 3] << " " << std::endl;
275  }
276  std::cout << std::scientific;
277  std::cout << "after, plane=" << planes[p] << std::endl;
278  for (size_t n = 0; n < nodeft[p].size(); n = n + 4) {
279  std::cout << nodeft[p][n] << " " << nodeft[p][n + 1] << " " << nodeft[p][n + 2] << " "
280  << nodeft[p][n + 3] << " " << std::endl;
281  }
282  }
 // Copy the flat feature vector into the 2D tensor, element by element.
283  for (size_t n = 0; n < nodeft[p].size(); n = n + 4) {
284  ix[n / 4][0] = nodeft[p][n];
285  ix[n / 4][1] = nodeft[p][n + 1];
286  ix[n / 4][2] = nodeft[p][n + 2];
287  ix[n / 4][3] = nodeft[p][n + 3];
288  }
289  x.insert(planes[p], ix);
290  torch::Tensor ib = torch::zeros({dim}, torch::dtype(torch::kInt64));
291  batch.insert(planes[p], ib);
292  }
293 
 // In-plane edge indices: one [2, n_edges] int64 tensor per plane
 // (row 0 = source node, row 1 = target node).
294  auto edge_index_plane = torch::Dict<std::string, torch::Tensor>();
295  for (size_t p = 0; p < planes.size(); p++) {
296  long int dim = edge2d[p].size();
297  torch::Tensor ix = torch::zeros({2, dim}, torch::dtype(torch::kInt64));
298  for (size_t n = 0; n < edge2d[p].size(); n++) {
299  ix[0][n] = int(edge2d[p][n].n1);
300  ix[1][n] = int(edge2d[p][n].n2);
301  }
302  edge_index_plane.insert(planes[p], ix);
303  if (debug) {
304  std::cout << "plane=" << p << std::endl;
305  std::cout << "2d edge size=" << edge2d[p].size() << std::endl;
306  for (size_t n = 0; n < edge2d[p].size(); n++) {
307  std::cout << edge2d[p][n].n1 << " ";
308  }
309  std::cout << std::endl;
310  for (size_t n = 0; n < edge2d[p].size(); n++) {
311  std::cout << edge2d[p][n].n2 << " ";
312  }
313  std::cout << std::endl;
314  }
315  }
316 
 // Hit-to-spacepoint ("nexus") edge indices, same [2, n_edges] layout.
317  auto edge_index_nexus = torch::Dict<std::string, torch::Tensor>();
318  for (size_t p = 0; p < planes.size(); p++) {
319  long int dim = edge3d[p].size();
320  torch::Tensor ix = torch::zeros({2, dim}, torch::dtype(torch::kInt64));
321  for (size_t n = 0; n < edge3d[p].size(); n++) {
322  ix[0][n] = int(edge3d[p][n].n1);
323  ix[1][n] = int(edge3d[p][n].n2);
324  }
325  edge_index_nexus.insert(planes[p], ix);
326  if (debug) {
327  std::cout << "plane=" << p << std::endl;
328  std::cout << "3d edge size=" << edge3d[p].size() << std::endl;
329  for (size_t n = 0; n < edge3d[p].size(); n++) {
330  std::cout << edge3d[p][n].n1 << " ";
331  }
332  std::cout << std::endl;
333  for (size_t n = 0; n < edge3d[p].size(); n++) {
334  std::cout << edge3d[p][n].n2 << " ";
335  }
336  std::cout << std::endl;
337  }
338  }
339 
 // Spacepoint ("nexus") nodes carry no features: [n_spacepoints, 0] tensor.
340  long int spdim = splist.size();
341  auto nexus = torch::empty({spdim, 0}, torch::dtype(torch::kFloat32));
342 
 // Assemble the model's positional arguments, in the order the TorchScript
 // forward() expects, and run inference. The output is a dict keyed by
 // decoder name ("x_semantic", "x_filter", "x_vtx").
343  std::vector<torch::jit::IValue> inputs;
344  inputs.push_back(x);
345  inputs.push_back(edge_index_plane);
346  inputs.push_back(edge_index_nexus);
347  inputs.push_back(nexus);
348  inputs.push_back(batch);
349  if (debug) std::cout << "FORWARD!" << std::endl;
350  auto outputs = model.forward(inputs).toGenericDict();
351  if (debug) std::cout << "output =" << outputs << std::endl;
 // Semantic decoder: per-plane [n_nodes, 5] class scores; softmax is applied
 // here (raw model scores are presumably logits — matches the training
 // setup). Each node's result is written back at its original hit index via
 // idsmap.
352  if (semanticDecoder) {
353  for (size_t p = 0; p < planes.size(); p++) {
354  torch::Tensor s = outputs.at("x_semantic").toGenericDict().at(planes[p]).toTensor();
355  for (int i = 0; i < s.sizes()[0]; ++i) {
356  size_t idx = idsmap[p][i];
357  std::array<float, 5> input({s[i][0].item<float>(),
358  s[i][1].item<float>(),
359  s[i][2].item<float>(),
360  s[i][3].item<float>(),
361  s[i][4].item<float>()});
362  softmax(input);
363  FeatureVector<5> semt = FeatureVector<5>(input);
364  (*semtcol)[idx] = semt;
365  }
366  if (debug) {
367  for (int j = 0; j < 5; j++) {
368  std::cout << "x_semantic category=" << j << " : ";
369  for (size_t p = 0; p < planes.size(); p++) {
370  torch::Tensor s = outputs.at("x_semantic").toGenericDict().at(planes[p]).toTensor();
371  for (int i = 0; i < s.sizes()[0]; ++i)
372  std::cout << s[i][j].item<float>() << ", ";
373  }
374  std::cout << std::endl;
375  }
376  }
377  }
378  }
 // Filter decoder: one score per node, stored as-is (no softmax).
379  if (filterDecoder) {
380  for (size_t p = 0; p < planes.size(); p++) {
381  torch::Tensor f = outputs.at("x_filter").toGenericDict().at(planes[p]).toTensor();
382  for (int i = 0; i < f.numel(); ++i) {
383  size_t idx = idsmap[p][i];
384  std::array<float, 1> input({f[i].item<float>()});
385  (*filtcol)[idx] = FeatureVector<1>(input);
386  }
387  }
388  if (debug) {
389  std::cout << "x_filter : ";
390  for (size_t p = 0; p < planes.size(); p++) {
391  torch::Tensor f = outputs.at("x_filter").toGenericDict().at(planes[p]).toTensor();
392  for (int i = 0; i < f.numel(); ++i)
393  std::cout << f[i].item<float>() << ", ";
394  }
395  std::cout << std::endl;
396  }
397  }
 // Vertex decoder: first row of the event-level "x_vtx" output is the 3D
 // vertex position.
398  if (vertexDecoder) {
399  torch::Tensor v = outputs.at("x_vtx").toGenericDict().at("evt").toTensor()[0];
400  double vpos[3];
401  vpos[0] = v[0].item<float>();
402  vpos[1] = v[1].item<float>();
403  vpos[2] = v[2].item<float>();
404  vertcol->push_back(recob::Vertex(vpos));
405  }
406 
 // Store exactly the products declared in the constructor.
407  if (filterDecoder) { e.put(std::move(filtcol), "filter"); }
408  if (semanticDecoder) {
409  e.put(std::move(semtcol), "semantic");
410  e.put(std::move(semtdes), "semantic");
411  }
412  if (vertexDecoder) { e.put(std::move(vertcol), "vertex"); }
413 }
414 
Float_t x
Definition: compare.C:6
NuGraphInference & operator=(NuGraphInference const &)=delete
torch::jit::script::Module model
Declaration of signal hit object.
EDProducer(fhicl::ParameterSet const &pset)
Definition: EDProducer.cc:6
vector< std::string > planes
boost::graph_traits< ModuleGraph >::edge_descriptor Edge
Definition: ModuleGraph.h:24
void produce(art::Event &e) override
Definition of vertex object for LArSoft.
Definition: Vertex.h:35
TFile f
Definition: plotHisto.C:6
PutHandle< PROD > put(std::unique_ptr< PROD > &&edp, std::string const &instance={})
Definition: Event.h:77
decltype(auto) constexpr end(T &&obj)
ADL-aware version of std::end.
Definition: StdUtils.h:77
decltype(auto) constexpr size(T &&obj)
ADL-aware version of std::size.
Definition: StdUtils.h:101
auto vector(Vector const &v)
Returns a manipulator which will print the specified array.
Definition: DumpUtils.h:289
auto array(Array const &a)
Returns a manipulator which will print the specified array.
Definition: DumpUtils.h:250
#define DEFINE_ART_MODULE(klass)
Definition: ModuleMacros.h:65
Provenance const * provenance() const
Definition: Handle.h:217
vector< vector< float > > devs
Float_t d
Definition: plot.C:235
vector< vector< float > > avgs
bool getByLabel(std::string const &label, std::string const &instance, Handle< PROD > &result) const
float at(size_t index) const
Definition: MVAOutput.h:94
Char_t n[5]
decltype(auto) constexpr begin(T &&obj)
ADL-aware version of std::begin.
Definition: StdUtils.h:69
NuGraphInference(fhicl::ParameterSet const &p)
2D representation of charge deposited in the TDC/wire plane
Definition: Hit.h:46
void fill_ptr_vector(std::vector< Ptr< T >> &ptrs, H const &h)
Definition: Ptr.h:306
Double_t sum
Definition: plot.C:31
Float_t e
Definition: plot.C:35
bool operator==(infinite_endcount_iterator< T > const &, count_iterator< T > const &)
Definition: counter.h:278
decltype(auto) constexpr empty(T &&obj)
ADL-aware version of std::empty.
Definition: StdUtils.h:109