Basic Object Detection
This example loads a complete object detection pipeline exported from the Hub.
- Python
- C++
from denkflow import Pipeline, ImageTensor
# --- Configuration ---
pat = "YOUR-PAT-TOKEN" # Replace with your actual Personal Access Token
denkflow_path = "path/to/your_exported_model.denkflow"
# For other ways to configure licensing (e.g., OneTimeLicenseSource, specific license IDs),
# refer to the "Configuration Options" section.
# --- Pipeline Creation ---
# The simplest way to load a .denkflow pipeline using a PAT:
pipeline = Pipeline.from_denkflow(denkflow_path, pat=pat)
# --- Inspection ---
print("Pipeline Structure:")
print(pipeline) # Inspect the graph structure and topic names
# --- Initialization ---
pipeline.initialize()
# --- Subscribe to Outputs (Replace with actual topic names from print(pipeline)) ---
# Example topic name, adjust based on your model's export
detection_topic = "bounding_box_filter_node/filtered_bounding_boxes"
detection_receiver = pipeline.subscribe_bounding_box_tensor(detection_topic)
# --- Publish Input Image (Replace with actual topic name from print(pipeline)) ---
# Example topic name, adjust based on your model's export
input_image_topic = "camera/image"
image_path = "path/to/your/image.jpg" # Replace with the path to your input image
image_tensor = ImageTensor.from_file(image_path)
pipeline.publish_image_tensor(input_image_topic, image_tensor)
# --- Run Pipeline ---
pipeline.run()
# --- Receive and Process Results ---
detection_tensor = detection_receiver.receive()
# Convert the tensor to a list of BoundingBox objects,
# keeping only detections with confidence above the 0.5 threshold
objects = detection_tensor.to_objects(0.5)
print(f"\nDetected {len(objects)} objects:")
for obj in objects:
    print(f"- Class: {obj.class_label.name}, Confidence: {obj.confidence:.2f}")
    print(f"  BBox: ({obj.x1}, {obj.y1}), ({obj.x2}, {obj.y2})")
#include <iostream>
#include <string>
#include <vector>

#include "denkflow.h"
// --- Helper Function to Interpret Error Codes ---
void process_return(DenkflowResult return_value, std::string function_name) {
    std::string error_message = get_last_error();
    std::cout << function_name << " returned " << (int32_t)return_value;
    if (error_message.size() > 0) {
        std::cout << " [" << error_message << "]";
    }
    std::cout << std::endl;
}
int main() {
    const char NULL_BYTE[1] = {'\0'};

    DenkflowResult r;
    HubLicenseSource* hub_license_source = nullptr;
    Pipeline* pipeline = nullptr;
    InitializedPipeline* initialized_pipeline = nullptr;
    ImageTensor* image_tensor = nullptr;
    Receiver<BoundingBoxTensor>* receiver = nullptr;
    BoundingBoxTensor* bounding_box_tensor = nullptr;
    BoundingBoxResults* bounding_box_results = nullptr;

    // Fill these with custom values
    std::string model_file = "path/to/model/file.denkflow";
    std::string pat = "personal_access_token";
    std::string image_path = "path/to/an/image.jpg";
    float confidence_threshold = 0.9f;

    // Default topic names for object detection
    std::string input_topic = "camera/image";
    std::string output_topic = "bounding_box_filter_node/filtered_bounding_boxes";

    // --- Create License Source ---
    r = hub_license_source_from_pat(&hub_license_source, pat.c_str(), NULL_BYTE, NULL_BYTE);
    process_return(r, "hub_license_source_from_pat");

    // --- Read Model File ---
    r = pipeline_from_denkflow(&pipeline, model_file.c_str(), (void**)&hub_license_source);
    process_return(r, "pipeline_from_denkflow");

    // --- Initialization ---
    r = initialize_pipeline(&initialized_pipeline, &pipeline);
    process_return(r, "initialize_pipeline");

    // --- Subscribe to Outputs ---
    r = initialized_pipeline_subscribe_bounding_box_tensor(&receiver, initialized_pipeline, output_topic.c_str());
    process_return(r, "initialized_pipeline_subscribe_bounding_box_tensor");

    // --- Send Image into Pipeline ---
    r = image_tensor_from_file(&image_tensor, image_path.c_str());
    process_return(r, "image_tensor_from_file");

    r = initialized_pipeline_publish_image_tensor(initialized_pipeline, input_topic.c_str(), &image_tensor);
    process_return(r, "initialized_pipeline_publish_image_tensor");

    // --- Run Pipeline ---
    r = initialized_pipeline_run(initialized_pipeline, 3000);
    process_return(r, "initialized_pipeline_run");

    // --- Receive and Process Results ---
    r = receiver_receive_bounding_box_tensor(&bounding_box_tensor, receiver);
    process_return(r, "receiver_receive_bounding_box_tensor");

    r = bounding_box_tensor_to_objects(&bounding_box_results, bounding_box_tensor, confidence_threshold);
    process_return(r, "bounding_box_tensor_to_objects");

    if (r == DenkflowResult::Ok) {
        for (int i = 0; i < bounding_box_results->bounding_boxes_length; i++) {
            std::cout
                << "Box "
                << i
                << " ["
                << bounding_box_results->bounding_boxes[i].class_label.name
                << "]: "
                << bounding_box_results->bounding_boxes[i].confidence
                << std::endl;
        }
    }

    // --- Free Allocated Objects ---
    r = free_all_objects();
    process_return(r, "free_all_objects");

    return 0;
}