[tmva][sofie-gnn] Vectorization in SOFIE-GraphIndependent
sanjibansg committed Sep 1, 2023
1 parent c21af28 commit 071b0d8
Showing 7 changed files with 870 additions and 874 deletions.
4 changes: 4 additions & 0 deletions bindings/pyroot/pythonizations/test/sofie_gnn.py
@@ -3,6 +3,10 @@

import numpy as np
from numpy.testing import assert_almost_equal

if np.__version__ > "1.19":
    raise RuntimeError(f"This test requires NumPy version 1.19 or lower")

import graph_nets as gn
from graph_nets import utils_tf
import sonnet as snt
239 changes: 105 additions & 134 deletions tmva/sofie/inc/TMVA/RModel.hxx
@@ -4,149 +4,120 @@
#include "TMVA/RModel_Base.hxx"
#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TBuffer.h"

namespace TMVA{
namespace Experimental{
namespace SOFIE{

<<<<<<< HEAD
enum class Options {
kDefault = 0x0,
kNoSession = 0x1,
kNoWeightFile = 0x2,
kRootBinaryWeightFile = 0x4,
kGNN = 0x8,
kGNNComponent = 0x10,
};

std::underlying_type_t<Options> operator|(Options opA, Options opB);
std::underlying_type_t<Options> operator|(std::underlying_type_t<Options> opA, Options opB);

enum class WeightFileType {None, RootBinary, Text};
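
For orientation (not part of the commit): a minimal sketch of how these bit-flag options are meant to be combined. It assumes the Options enum and the operator| overloads end up in RModel_Base.hxx after this refactor; everything not declared above is hypothetical.

#include <type_traits>
#include "TMVA/RModel_Base.hxx"   // assumed location of Options after the split from RModel.hxx

using namespace TMVA::Experimental::SOFIE;

// Each option occupies its own bit (0x1, 0x2, 0x4, 0x8, 0x10), so flags combine
// with | and can be tested independently. operator| returns the underlying
// integer type, which is what Generate(std::underlying_type_t<Options>, ...) accepts.
auto opts = Options::kNoSession | Options::kNoWeightFile;   // 0x1 | 0x2 == 0x3
static_assert(std::is_same_v<decltype(opts), std::underlying_type_t<Options>>,
              "combined flags are plain integers, not Options");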
namespace TMVA {
namespace Experimental {
namespace SOFIE {

class RModel: public TObject{
=======
class RModel: public RModel_Base{
>>>>>>> 3b618fbcad (feat: modify RModel architecture)
class RModel: public RModel_Base {

private:
WeightFileType fWeightFile = WeightFileType::Text;

std::unordered_map<std::string, InputTensorInfo> fInputTensorInfos; //graph input only; not including operator input (intermediate tensors)
std::unordered_map<std::string, TensorInfo> fReadyInputTensorInfos;
std::unordered_map<std::string, InitializedTensor> fInitializedTensors;
std::unordered_map<std::string, TensorInfo> fIntermediateTensorInfos;
std::vector<std::string> fOutputTensorNames;
std::vector<std::string> fInputTensorNames; //input tensor names using ONNX order
std::unordered_map<std::string, InputTensorInfo> fInputTensorInfos; //graph input only; not including operator input (intermediate tensors)
std::unordered_map<std::string, TensorInfo> fReadyInputTensorInfos;
std::unordered_map<std::string, InitializedTensor> fInitializedTensors;
std::unordered_map<std::string, TensorInfo> fIntermediateTensorInfos;
std::vector<std::string> fOutputTensorNames;
std::vector<std::string> fInputTensorNames; //input tensor names using ONNX order

std::vector<std::unique_ptr<ROperator>> fOperators;
std::vector<std::unique_ptr<ROperator>> fOperators;

public:

//explicit move ctor/assn
RModel(RModel&& other);

RModel& operator=(RModel&& other);

//disallow copy
RModel(const RModel& other) = delete;
RModel& operator=(const RModel& other) = delete;

RModel(){}
RModel(std::string name, std::string parsedtime): RModel_Base(name, parsedtime){}

// For GNN Functions usage
RModel(std::string function_name):RModel_Base(function_name){}

const std::vector<size_t>& GetTensorShape(std::string name);
const ETensorType& GetTensorType(std::string name);

bool CheckIfTensorAlreadyExist(std::string tensor_name);
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape);
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape);
void AddOperator(std::unique_ptr<ROperator> op, int order_execution = -1);
void AddOperatorReference(ROperator* op, int order_execution = -1){
std::unique_ptr<ROperator> tmp(op);
AddOperator(std::move(tmp), order_execution);
}
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data);

template <typename T>
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, T* raw_data){
int size=1;
for(auto item:shape){
size*=(int)item;
}
std::shared_ptr<void> data(malloc(size * sizeof(T)), free);
std::memcpy(data.get(), raw_data, size * sizeof(T));
AddInitializedTensor(tensor_name, type, shape, data);
}

// Check if a tensor is initialized
bool IsInitializedTensor(const std::string& name) const;
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape);

void AddInputTensorName(std::string name);
void AddOutputTensorNameList(std::vector<std::string> outputtensornames);
void UpdateOutputTensorList(std::vector<std::string> curr_output_tensor, std::vector<std::string> modify_output_tensor);
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data);
std::shared_ptr<void> GetInitializedTensorData(std::string tensor_name);


void Initialize(int batchSize=1);
void GenerateInitializedTensorInfo();
void GenerateIntermediateTensorInfo();
void GenerateOutput();
void Generate(std::underlying_type_t<Options> options, int batchSize = 1, long pos = 0);
void Generate(Options options = Options::kDefault, int batchSize = 1, int pos = 0) {
Generate(static_cast<std::underlying_type_t<Options>>(options), batchSize, pos);
}

void ReadInitializedTensorsFromFile(long);
long WriteInitializedTensorsToFile(std::string filename = "");

<<<<<<< HEAD
void PrintGenerated() {
std::cout << fGC;
}

std::string ReturnGenerated(){
return fGC;
}
=======
>>>>>>> 3b618fbcad (feat: modify RModel architecture)
void PrintIntermediateTensors();
void OutputGenerated(std::string filename = "", bool append = false);
std::vector<std::string> GetOutputTensorNames(){
return fOutputTensorNames;
}
void SetFilename(std::string filename){
fName = filename;
}

/*
template <typename T>
void AddInitializedTensor(std::string tensor_name, RTensor<T> new_tensor){
//a view only
T obj;
if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()){
throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
}
InitializedTensor new_tensor_ {GetTemplatedType(obj), new_tensor.GetShape() , static_cast<void>(new_tensor.GetData())};
fInitializedTensors[tensor_name] = new_tensor_;
}
*/

void PrintRequiredInputTensors();
void PrintInitializedTensors();
void HeadInitializedTensors(std::string name, int n_print = 50);

bool UseSession() const { return fUseSession;}

~RModel() {}

ClassDef(RModel,1);
//explicit move ctor/assn
RModel(RModel&& other);

RModel& operator=(RModel&& other);

//disallow copy
RModel(const RModel& other) = delete;
RModel& operator=(const RModel& other) = delete;

RModel() {}
RModel(std::string name, std::string parsedtime): RModel_Base(name, parsedtime) {}

// For GNN Functions usage
RModel(std::string function_name):RModel_Base(function_name) {}

const std::vector<size_t>& GetTensorShape(std::string name);
const ETensorType& GetTensorType(std::string name);

bool CheckIfTensorAlreadyExist(std::string tensor_name);
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape);
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape);
void AddOperator(std::unique_ptr<ROperator> op, int order_execution = -1);
void AddOperatorReference(ROperator* op, int order_execution = -1) {
std::unique_ptr<ROperator> tmp(op);
AddOperator(std::move(tmp), order_execution);
}
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data);

template <typename T>
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, T* raw_data) {
int size=1;
for(auto item:shape) {
size*=(int)item;
}
std::shared_ptr<void> data(malloc(size * sizeof(T)), free);
std::memcpy(data.get(), raw_data, size * sizeof(T));
AddInitializedTensor(tensor_name, type, shape, data);
}

// Check if a tensor is initialized
bool IsInitializedTensor(const std::string& name) const;
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape);

void AddInputTensorName(std::string name);
void AddOutputTensorNameList(std::vector<std::string> outputtensornames);
void UpdateOutputTensorList(std::vector<std::string> curr_output_tensor, std::vector<std::string> modify_output_tensor);
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data);
std::shared_ptr<void> GetInitializedTensorData(std::string tensor_name);

void Initialize(int batchSize=1);
void GenerateInitializedTensorInfo();
void GenerateIntermediateTensorInfo();
void GenerateOutput();
void Generate(std::underlying_type_t<Options> options, int batchSize = 1, long pos = 0);
void Generate(Options options = Options::kDefault, int batchSize = 1, int pos = 0) {
Generate(static_cast<std::underlying_type_t<Options>>(options), batchSize, pos);
}

void ReadInitializedTensorsFromFile(long);
long WriteInitializedTensorsToFile(std::string filename = "");

void PrintIntermediateTensors();
void OutputGenerated(std::string filename = "", bool append = false);
std::vector<std::string> GetOutputTensorNames() {
return fOutputTensorNames;
}
void SetFilename(std::string filename) {
fName = filename;
}

/*
template <typename T>
void AddInitializedTensor(std::string tensor_name, RTensor<T> new_tensor){
//a view only
T obj;
if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()){
throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
}
InitializedTensor new_tensor_ {GetTemplatedType(obj), new_tensor.GetShape() , static_cast<void>(new_tensor.GetData())};
fInitializedTensors[tensor_name] = new_tensor_;
}
*/

void PrintRequiredInputTensors();
void PrintInitializedTensors();
void HeadInitializedTensors(std::string name, int n_print = 50);

bool UseSession() const {
return fUseSession;
}

~RModel() {}

ClassDef(RModel,1);
};
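
To make the interface declared above concrete, a minimal usage sketch follows. It is illustrative only and not taken from this commit: the model name, tensor names, shapes, and weight values are hypothetical, and a real model would also register concrete ROperator instances before generating code.

#include "TMVA/RModel.hxx"

using namespace TMVA::Experimental::SOFIE;

void BuildToyModel() {
   RModel model("ToyModel", "2023-09-01");   // model name + parse-time string, as in the ctor above

   // Declare the graph input with a fixed shape.
   model.AddInputTensorInfo("input", ETensorType::FLOAT, std::vector<std::size_t>{1, 3});
   model.AddInputTensorName("input");

   // The templated AddInitializedTensor deep-copies raw_data into a buffer owned by a
   // shared_ptr, so passing a stack array is safe here.
   float weights[6] = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};
   model.AddInitializedTensor<float>("dense_kernel", ETensorType::FLOAT, {3, 2}, weights);

   // ... model.AddOperator(std::make_unique<SomeROperator>(...)) calls would register the graph here ...

   model.AddOutputTensorNameList({"output"});

   // Combine the bit-flag options and emit the standalone inference code.
   model.Generate(Options::kNoSession | Options::kNoWeightFile, /*batchSize=*/1);
   model.OutputGenerated("ToyModel.hxx");
}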

}//SOFIE
