Apollo 10.0
自动驾驶开放平台
apollo::prediction::SemanticLstmPedestrianCpuTorch类 参考

#include <semantic_lstm_pedestrian_torch_model.h>

类 apollo::prediction::SemanticLstmPedestrianCpuTorch 继承关系图:
apollo::prediction::SemanticLstmPedestrianCpuTorch 的协作图:

Public 成员函数

 SemanticLstmPedestrianCpuTorch ()
 
 ~SemanticLstmPedestrianCpuTorch ()
 
virtual bool Init ()
 parse model description class and load the model
 
virtual bool Inference (const std::vector< void * > &input_buffer, unsigned int input_size, std::vector< void * > *output_buffer, unsigned int output_size)
 performing network inference
 
virtual bool LoadModel ()
 load the model from file
 
virtual void Destory ()
 free all memory requested, gpu or cpu
 
- Public 成员函数 继承自 apollo::prediction::ModelBase
 ModelBase ()
 
 ~ModelBase ()
 

额外继承的成员函数

- Public 属性 继承自 apollo::prediction::ModelBase
std::string model_path_
 
uint8_t init_ = 0
 

详细描述

在文件 semantic_lstm_pedestrian_torch_model.h 第 30 行定义.

构造及析构函数说明

◆ SemanticLstmPedestrianCpuTorch()

apollo::prediction::SemanticLstmPedestrianCpuTorch::SemanticLstmPedestrianCpuTorch ( )
inline

在文件 semantic_lstm_pedestrian_torch_model.h 第 32 行定义.

32{}

◆ ~SemanticLstmPedestrianCpuTorch()

apollo::prediction::SemanticLstmPedestrianCpuTorch::~SemanticLstmPedestrianCpuTorch ( )
inline

在文件 semantic_lstm_pedestrian_torch_model.h 第 33 行定义.

33{Destory();}
virtual void Destory()
free all memory requested, gpu or cpu

成员函数说明

◆ Destory()

void apollo::prediction::SemanticLstmPedestrianCpuTorch::Destory ( )
virtual

free all memory requested, gpu or cpu

返回
memory release result, true for success（注：审校备注——上方函数签名为 void，实际并无返回值，此处为原始文档遗留，请与源码核对）

实现了 apollo::prediction::ModelBase.

在文件 semantic_lstm_pedestrian_torch_model.cc 第 116 行定义.

116{}

◆ Inference()

bool apollo::prediction::SemanticLstmPedestrianCpuTorch::Inference ( const std::vector< void * > &  input_buffer,
unsigned int  input_size,
std::vector< void * > *  output_buffer,
unsigned int  output_size 
)
virtual

performing network inference

参数
input_buffer  vector of input tensor
input_size  size of input_buffer
output_buffer  vector of output tensor
output_size  size of output_buffer
返回
inference result, true for success

实现了 apollo::prediction::ModelBase.

在文件 semantic_lstm_pedestrian_torch_model.cc 第 82 行定义.

84 {
85 ACHECK(input_size == input_buffer.size() && input_size == 3);
86 ACHECK(output_size == output_buffer->size() && output_size == 1);
87
88 if (init_ == 0) {
89 Init();
90 }
91
92 auto device = torch::Device(torch::kCPU);
93 if (torch::cuda::is_available()) {
94 device = torch::Device(torch::kCUDA);
95 }
96 torch::Tensor img_tensor =
97 torch::from_blob(input_buffer[0], {1, 3, 224, 224});
98 torch::Tensor obstacle_pos = torch::from_blob(input_buffer[1], {1, 20, 2});
99 torch::Tensor obstacle_pos_step =
100 torch::from_blob(input_buffer[2], {1, 20, 2});
101
102 std::vector<torch::jit::IValue> torch_inputs;
103
104 torch_inputs.push_back(c10::ivalue::Tuple::create(
105 {std::move(img_tensor.to(device)), std::move(obstacle_pos.to(device)),
106 std::move(obstacle_pos_step.to(device))}));
107
108 torch::Tensor torch_output_tensor =
109 model_instance_.forward(torch_inputs).toTensor().to(torch::kCPU);
110 memcpy((*output_buffer)[0], torch_output_tensor.data_ptr<float>(),
111 1 * 30 * 2 * sizeof(float));
112
113 return true;
114}
virtual bool Init()
parse model description class and load the model
first check imutoantoffset saved in device
Definition readme.txt:2
#define ACHECK(cond)
Definition log.h:80

◆ Init()

bool apollo::prediction::SemanticLstmPedestrianCpuTorch::Init ( )
virtual

parse model description class and load the model

参数
config_path  model config path（注：审校备注——Init() 签名无参数，配置路径实际由 PluginManager 在函数内部解析，此参数说明为文档遗留）
返回
init result, true for success

实现了 apollo::prediction::ModelBase.

在文件 semantic_lstm_pedestrian_torch_model.cc 第 31 行定义.

31 {
32 ModelConf model_config;
33 int status;
34
35 if (init_ != 0) {
36 return true;
37 }
38
39 std::string class_name =
40 abi::__cxa_demangle(typeid(*this).name(), 0, 0, &status);
41
42 std::string default_config_path =
44 ->GetPluginConfPath<ModelBase>(class_name,
45 "conf/default_conf.pb.txt");
46
47 if (!cyber::common::GetProtoFromFile(default_config_path, &model_config)) {
48 AERROR << "Unable to load model conf file: " << default_config_path;
49 return false;
50 }
51 model_path_ = model_config.model_path();
52 init_ = 1;
53
54 return LoadModel();
55}
std::string GetPluginConfPath(const std::string &class_name, const std::string &conf_name)
get plugin configuration file location
static PluginManager * Instance()
get singleton instance of PluginManager
#define AERROR
Definition log.h:44
bool GetProtoFromFile(const std::string &file_name, google::protobuf::Message *message)
Parses the content of the file specified by the file_name as a representation of protobufs,...
Definition file.cc:132

◆ LoadModel()

bool apollo::prediction::SemanticLstmPedestrianCpuTorch::LoadModel ( )
virtual

load the model from file

返回
loading result, true for success

实现了 apollo::prediction::ModelBase.

在文件 semantic_lstm_pedestrian_torch_model.cc 第 57 行定义.

57 {
58 auto device = torch::Device(torch::kCPU);
59 if (torch::cuda::is_available()) {
60 device = torch::Device(torch::kCUDA);
61 }
62
63 model_instance_ = torch::jit::load(model_path_, device);
64 torch::set_num_threads(1);
65
66 // Fake intput for the first frame
67 torch::Tensor img_tensor = torch::randn({1, 3, 224, 224});
68 torch::Tensor obstacle_pos = torch::randn({1, 20, 2});
69 torch::Tensor obstacle_pos_step = torch::randn({1, 20, 2});
70 std::vector<torch::jit::IValue> torch_inputs;
71 torch::Tensor torch_default_output_tensor;
72
73 torch_inputs.push_back(c10::ivalue::Tuple::create(
74 {std::move(img_tensor.to(device)), std::move(obstacle_pos.to(device)),
75 std::move(obstacle_pos_step.to(device))}));
76
77 // warm up to avoid very slow first inference later
78 WarmUp(torch_inputs, &model_instance_, &torch_default_output_tensor);
79 return true;
80}
void WarmUp(const std::vector< torch::jit::IValue > &torch_inputs, torch::jit::script::Module *model, at::Tensor *default_output_ptr)
warm up function to avoid slowly inference of torch model
Definition warm_up.cc:28

该类的文档由以下文件生成: