Basic Instance Segmentation
This example loads a complete instance segmentation pipeline file (.denkflow) exported from the Hub. An instance segmentation network returns both a bounding box tensor and a segmentation mask tensor; these are combined into per-object results by the to_objects function.
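The combination step itself is a single call. Shown here in isolation as a sketch (variable names match the full examples below; note that the corresponding C function, instance_segmentation_mask_tensor_to_objects, takes the segmentation threshold before the bounding box tensor):

# Sketch of the combination step in isolation; the two tensors are
# received from the pipeline as shown in the full examples below.
results_per_image = segmentation_tensor.to_objects(
    bounding_box_tensor,     # boxes from the bounding box filter node
    confidence_threshold,    # e.g. 0.5
    segmentation_threshold   # e.g. 0.5
)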
- Python
- C
import denkflow
pat = "YOUR-PAT"
denkflow_path = "path/to/model/file.denkflow"
image_path = "path/to/an/image.jpg"
input_topic = "camera/image"
bounding_box_output_topic = "bounding_box_filter_node/filtered_bounding_boxes"
segmentation_output_topic = "instance_segmentation_node/output_segmentations"
confidence_threshold = 0.5
segmentation_threshold = 0.5
pipeline = denkflow.Pipeline.from_denkflow(
    denkflow_path,
    pat=pat
)
pipeline.initialize()
# Instance segmentation returns both a bounding box tensor and an instance segmentation mask tensor
bounding_box_results_receiver = pipeline.subscribe(bounding_box_output_topic)
segmentation_results_receiver = pipeline.subscribe(segmentation_output_topic)
image_tensor = denkflow.ImageTensor.from_file(image_path)
pipeline.publish_image_tensor(input_topic, image_tensor)
pipeline.run()
bounding_box_tensor = bounding_box_results_receiver.receive_bounding_box_tensor()
segmentation_tensor = segmentation_results_receiver.receive_instance_segmentation_mask_tensor()
results_per_image = segmentation_tensor.to_objects(bounding_box_tensor, confidence_threshold, segmentation_threshold)
print("Instance Segmentation Results:")
for results_per_class_label in results_per_image:
    for result_for_class_label in results_per_class_label:
        print(f"{result_for_class_label.class_label.name}:")
        for obj in result_for_class_label.objects:
            print(f" {obj.confidence}")
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "denkflow.h"
void handle_error(enum DenkflowResult error_code, const char* function_name) {
printf("%s: %d", function_name, (int)error_code);
if (error_code != DenkflowResult_Ok) {
char* error_buffer = malloc(ERROR_BUFFER_SIZE);
get_last_error(error_buffer);
printf(" (%s)\n", error_buffer);
free(error_buffer);
exit(EXIT_FAILURE);
}
printf("\n");
}
int main() {
    Pipeline* pipeline = NULL;
    InitializedPipeline* initialized_pipeline = NULL;
    ImageTensor* image_tensor = NULL;
    Receiver* bounding_box_receiver = NULL;
    Receiver* segmentation_receiver = NULL;
    BoundingBoxTensor* bounding_box_tensor = NULL;
    InstanceSegmentationMaskTensor* segmentation_tensor = NULL;
    SegmentationResults* results = NULL;
    HubLicenseSource* hub_license_source = NULL;
    enum DenkflowResult r;
    const uint64_t timeout_ms = 3000;
    const float confidence_threshold = 0.5;
    const float segmentation_threshold = 0.5;
    const char* pat = "YOUR-PAT";
    const char* input_topic = "camera/image";
    const char* bounding_box_output_topic = "bounding_box_filter_node/filtered_bounding_boxes";
    const char* segmentation_output_topic = "instance_segmentation_node/output_segmentations";
    const char* denkflow_path = "path/to/model/file.denkflow";
    const char* image_path = "path/to/an/image.jpg";
    // Authenticate against the Hub with a personal access token and load the pipeline
    r = hub_license_source_from_pat(&hub_license_source, pat, NULL_BYTE, NULL_BYTE);
    handle_error(r, "hub_license_source_from_pat");
    r = pipeline_from_denkflow(&pipeline, denkflow_path, (void**)&hub_license_source);
    handle_error(r, "pipeline_from_denkflow");
    r = initialize_pipeline(&initialized_pipeline, &pipeline);
    handle_error(r, "initialize_pipeline");

    // Instance segmentation returns both a bounding box tensor and an instance segmentation mask tensor
    r = initialized_pipeline_subscribe(&bounding_box_receiver, initialized_pipeline, bounding_box_output_topic);
    handle_error(r, "initialized_pipeline_subscribe");
    r = initialized_pipeline_subscribe(&segmentation_receiver, initialized_pipeline, segmentation_output_topic);
    handle_error(r, "initialized_pipeline_subscribe");

    // Publish the input image, run the pipeline, and receive both result tensors
    r = image_tensor_from_file(&image_tensor, image_path);
    handle_error(r, "image_tensor_from_file");
    r = initialized_pipeline_publish_image_tensor(initialized_pipeline, input_topic, &image_tensor);
    handle_error(r, "initialized_pipeline_publish_image_tensor");
    r = initialized_pipeline_run(initialized_pipeline, timeout_ms);
    handle_error(r, "initialized_pipeline_run");
    r = receiver_receive_bounding_box_tensor(&bounding_box_tensor, bounding_box_receiver);
    handle_error(r, "receiver_receive_bounding_box_tensor");
    r = receiver_receive_instance_segmentation_mask_tensor(&segmentation_tensor, segmentation_receiver);
    handle_error(r, "receiver_receive_instance_segmentation_mask_tensor");

    // Combine bounding boxes and masks into per-object results
    r = instance_segmentation_mask_tensor_to_objects(&results, segmentation_tensor, segmentation_threshold, bounding_box_tensor, confidence_threshold);
    handle_error(r, "instance_segmentation_mask_tensor_to_objects");
    if (r == DenkflowResult_Ok) {
        for (int b = 0; b < results->segmentation_batch_elements_length; b++) {
            printf("Image %d\n", b);
            for (int c = 0; c < results->segmentation_batch_elements[b].segmentation_classes_length; c++) {
                printf(" Class %s\n",
                    results->segmentation_batch_elements[b].segmentation_classes[c].class_label.name
                );
                for (int o = 0; o < results->segmentation_batch_elements[b].segmentation_classes[c].segmentation_objects_length; o++) {
                    printf(" - %f\n",
                        results->segmentation_batch_elements[b].segmentation_classes[c].segmentation_objects[o].confidence
                    );
                }
            }
        }
    }

    r = free_object((void**)&initialized_pipeline);
    handle_error(r, "free_object");
    r = free_object((void**)&bounding_box_receiver);
    handle_error(r, "free_object");
    r = free_object((void**)&segmentation_receiver);
    handle_error(r, "free_object");
    r = free_object((void**)&bounding_box_tensor);
    handle_error(r, "free_object");
    r = free_object((void**)&segmentation_tensor);
    handle_error(r, "free_object");
    r = free_object((void**)&results);
    handle_error(r, "free_object");
    return 0;
}
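If several images need to be processed, the publish, run, and receive steps can be repeated on the same pipeline. The following is a minimal Python sketch that reuses only the calls shown above; the image paths are placeholders, and whether a pipeline can be re-run this way depends on its configuration.

# Minimal sketch: repeated inference with the already-initialized pipeline.
# Assumes the pipeline, receivers, and thresholds from the Python example above.
for current_image_path in ["path/to/image_1.jpg", "path/to/image_2.jpg"]:
    image_tensor = denkflow.ImageTensor.from_file(current_image_path)
    pipeline.publish_image_tensor(input_topic, image_tensor)
    pipeline.run()
    bounding_box_tensor = bounding_box_results_receiver.receive_bounding_box_tensor()
    segmentation_tensor = segmentation_results_receiver.receive_instance_segmentation_mask_tensor()
    results_per_image = segmentation_tensor.to_objects(
        bounding_box_tensor, confidence_threshold, segmentation_threshold
    )
    # ... iterate over results_per_image as in the example above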