Basic Classification
This example loads a complete classification pipeline file (.denkflow) exported from the Hub, runs it on a single image, and prints the classification results.
- Python
- C
import denkflow

# Configuration: Hub personal access token, pipeline topics, and file paths.
ACCESS_TOKEN = "YOUR-PAT"
INPUT_TOPIC = "camera/image"
OUTPUT_TOPIC = "classification_node/output"
MODEL_FILE = "path/to/model/file.denkflow"
IMAGE_FILE = "path/to/an/image.jpg"

# Load the exported pipeline file and prepare it for execution.
flow = denkflow.Pipeline.from_denkflow(
    MODEL_FILE,
    pat=ACCESS_TOKEN,
)
flow.initialize()

# Publish the input image, then register a receiver for the output topic.
# Classification results are delivered as scalar tensors.
flow.publish_image_tensor(INPUT_TOPIC, denkflow.ImageTensor.from_file(IMAGE_FILE))
receiver = flow.subscribe_scalar_tensor(OUTPUT_TOPIC)

# Blocks until the pipeline has finished running.
flow.run()

# Fetch the result tensor and print the per-image classification scores.
scalar_tensor = receiver.receive()
print("Classification Results:")
for per_image in scalar_tensor.to_objects():
    for result in per_image:
        print(f"{result.class_label}: {result.value:.2f}")
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "denkflow.h"
/*
 * Report the result of a denkflow API call. The result code is always
 * printed; on any non-Ok code the library's last error message is fetched
 * and printed as well, and the process terminates with EXIT_FAILURE.
 *
 * error_code    - status code returned by the denkflow call
 * function_name - name of the call, used to label the output
 */
void handle_error(enum DenkflowResult error_code, const char* function_name) {
    printf("%s: %d", function_name, (int)error_code);
    if (error_code != DenkflowResult_Ok) {
        char* error_buffer = malloc(ERROR_BUFFER_SIZE);
        /* Fix: malloc can fail; only query the error text if the buffer
         * was actually allocated, otherwise get_last_error would write
         * through a NULL pointer. */
        if (error_buffer != NULL) {
            get_last_error(error_buffer);
            printf(" (%s)\n", error_buffer);
            free(error_buffer);
        } else {
            printf("\n");
        }
        exit(EXIT_FAILURE);
    }
    printf("\n");
}
/*
 * Load a .denkflow classification pipeline, run it on one image, and
 * print the per-image class scores.
 *
 * NOTE(review): the API takes double pointers for arguments whose handle
 * appears to be consumed by the callee (license source, pipeline, image
 * tensor) — presumably why those are not freed explicitly below; confirm
 * against the denkflow C API documentation.
 */
int main() {
    Pipeline* pipeline = NULL;
    InitializedPipeline* initialized_pipeline = NULL;
    ImageTensor* image_tensor = NULL;
    Receiver_ScalarTensor* receiver = NULL;
    ScalarTensor* tensor = NULL;
    ScalarResults* results = NULL;
    HubLicenseSource* hub_license_source = NULL;
    enum DenkflowResult r;

    const uint64_t timeout_ms = 3000;
    const char* pat = "YOUR-PAT";
    const char* input_topic = "camera/image";
    const char* output_topic = "classification_node/output";
    const char* denkflow_path = "path/to/model/file.denkflow";
    const char* image_path = "path/to/an/image.jpg";

    /* Authenticate against the Hub, load the pipeline, initialize it. */
    r = hub_license_source_from_pat(&hub_license_source, pat, NULL_BYTE, NULL_BYTE);
    handle_error(r, "hub_license_source_from_pat");
    r = pipeline_from_denkflow(&pipeline, denkflow_path, (void**)&hub_license_source);
    handle_error(r, "pipeline_from_denkflow");
    r = initialize_pipeline(&initialized_pipeline, &pipeline);
    handle_error(r, "initialize_pipeline");

    /* Classification results arrive as scalar tensors on the output topic. */
    r = initialized_pipeline_subscribe_scalar_tensor(&receiver, initialized_pipeline, output_topic);
    handle_error(r, "initialized_pipeline_subscribe_scalar_tensor");

    /* Load the input image and publish it to the pipeline's input topic. */
    r = image_tensor_from_file(&image_tensor, image_path);
    handle_error(r, "image_tensor_from_file");
    r = initialized_pipeline_publish_image_tensor(initialized_pipeline, input_topic, &image_tensor);
    handle_error(r, "initialized_pipeline_publish_image_tensor");

    /* Run the pipeline (blocking, bounded by timeout_ms). */
    r = initialized_pipeline_run(initialized_pipeline, timeout_ms);
    handle_error(r, "initialized_pipeline_run");

    /* Receive the result tensor and convert it to per-image objects. */
    r = receiver_receive_scalar_tensor(&tensor, receiver);
    handle_error(r, "receiver_receive_scalar_tensor");
    r = scalar_tensor_to_objects(&results, tensor);
    handle_error(r, "scalar_tensor_to_objects");

    /* handle_error() exits the process on any failure, so reaching this
     * point means every call above returned Ok — the former
     * `if (r == DenkflowResult_Ok)` guard was dead code and is removed. */
    for (int b = 0; b < results->scalar_batch_elements_length; b++) {
        printf("Image %d\n", b);
        for (int c = 0; c < results->scalar_batch_elements[b].scalars_length; c++) {
            printf("  %s: %f\n",
                results->scalar_batch_elements[b].scalars[c].class_label.name,
                results->scalar_batch_elements[b].scalars[c].value
            );
        }
    }

    /* Release the handles still owned by this function. */
    r = free_object((void**)&initialized_pipeline);
    handle_error(r, "free_object");
    r = free_object((void**)&receiver);
    handle_error(r, "free_object");
    r = free_object((void**)&tensor);
    handle_error(r, "free_object");
    r = free_object((void**)&results);
    handle_error(r, "free_object");
    return 0;
}