GstOnnxClient::~GstOnnxClient ()
{
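+ // Free the output node name strings (Ort::AllocatedStringPtr) owned by outputNames.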
+ outputNames.clear();
delete session;
delete[]dest;
}
case GST_ML_OUTPUT_NODE_FUNCTION_CLASS:
return "label";
break;
+ case GST_ML_OUTPUT_NODE_NUMBER_OF:
+ GST_WARNING("Invalid parameter");
+ g_assert_not_reached();
+ break;
};
return "";
return inputImageFormat;
}
-std::vector < const char *>GstOnnxClient::getOutputNodeNames (void)
+std::vector< const char *> GstOnnxClient::getOutputNodeNames (void)
{
- return outputNames;
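+ // outputNames owns the strings as Ort::AllocatedStringPtr; Run() needs plain
+ // const char *, so cache a borrowed raw-pointer view in outputNamesRaw.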
+ if (!outputNames.empty() && outputNamesRaw.size() != outputNames.size()) {
+ outputNamesRaw.resize(outputNames.size());
+ for (size_t i = 0; i < outputNamesRaw.size(); i++) {
+ outputNamesRaw[i] = outputNames[i].get();
+ }
+ }
+
+ return outputNamesRaw;
}
void GstOnnxClient::setOutputNodeIndex (GstMlOutputNodeFunction node,
GST_DEBUG ("Number of Output Nodes: %d", (gint) session->GetOutputCount ());
Ort::AllocatorWithDefaultOptions allocator;
- GST_DEBUG ("Input name: %s", session->GetInputName (0, allocator));
+ auto input_name = session->GetInputNameAllocated (0, allocator);
+ GST_DEBUG ("Input name: %s", input_name.get());
for (size_t i = 0; i < session->GetOutputCount (); ++i) {
- auto output_name = session->GetOutputName (i, allocator);
- outputNames.push_back (output_name);
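+ // GetOutputNameAllocated() returns an owning Ort::AllocatedStringPtr; keep
+ // the string alive by moving it into outputNames.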
+ auto output_name = session->GetOutputNameAllocated (i, allocator);
+ GST_DEBUG("Output name %lu:%s", i, output_name.get());
+ outputNames.push_back (std::move(output_name));
auto type_info = session->GetOutputTypeInfo (i);
auto tensor_info = type_info.GetTensorTypeAndShapeInfo ();
parseDimensions (vmeta);
Ort::AllocatorWithDefaultOptions allocator;
- auto inputName = session->GetInputName (0, allocator);
+ auto inputName = session->GetInputNameAllocated (0, allocator);
auto inputTypeInfo = session->GetInputTypeInfo (0);
std::vector < int64_t > inputDims =
inputTypeInfo.GetTensorTypeAndShapeInfo ().GetShape ();
std::vector < Ort::Value > inputTensors;
inputTensors.push_back (Ort::Value::CreateTensor < uint8_t > (memoryInfo,
dest, inputTensorSize, inputDims.data (), inputDims.size ()));
- std::vector < const char *>inputNames { inputName };
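+ // inputName owns the string; it must stay alive until Run() completes.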
+ std::vector < const char *>inputNames { inputName.get () };
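+ // outputNamesRaw is the raw-pointer cache built by getOutputNodeNames();
+ // it is assumed to have been populated before this call.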
std::vector < Ort::Value > modelOutput = session->Run (Ort::RunOptions { nullptr},
inputNames.data (),
- inputTensors.data (), 1, outputNames.data (), outputNames.size ());
+ inputTensors.data (), 1, outputNamesRaw.data (), outputNamesRaw.size ());
auto numDetections =
modelOutput[getOutputNodeIndex
*
* ## Example launch command:
*
- * (note: an object detection model has 3 or 4 output nodes, but there is no naming convention
- * to indicate which node outputs the bounding box, which node outputs the label, etc.
- * So, the `onnxobjectdetector` element has properties to map each node's functionality to its
- * respective node index in the specified model )
+ * (note: an object detection model has 3 or 4 output nodes, but there is no
+ * naming convention to indicate which node outputs the bounding box, which
+ * node outputs the label, etc. So, the `onnxobjectdetector` element has
+ * properties to map each node's functionality to its respective node index in
 * the specified model. The image resolution also needs to be adapted to the
 * model. The videoscale element in the pipeline below scales the image, adding
 * padding if required, to the 640x383 resolution the model expects.)
+ *
+ * model.onnx can be found here:
+ * https://github.com/zoq/onnx-runtime-examples/raw/main/data/models/model.onnx
*
* ```
* GST_DEBUG=objectdetector:5 gst-launch-1.0 multifilesrc \
* location=000000088462.jpg caps=image/jpeg,framerate=\(fraction\)30/1 ! jpegdec ! \
* videoconvert ! \
- * onnxobjectdetector \
+ * videoscale ! \
+ * 'video/x-raw,width=640,height=383' ! \
+ * onnxobjectdetector \
* box-node-index=0 \
* class-node-index=1 \
* score-node-index=2 \