273 changes: 234 additions & 39 deletions README.md

Large diffs are not rendered by default.

21 changes: 21 additions & 0 deletions build.rs
@@ -1,3 +1,24 @@
fn main() {
// Set PKG_CONFIG_PATH to include all necessary directories for macOS
if cfg!(target_os = "macos") {
let homebrew_prefix = "/opt/homebrew";
let pkg_config_paths = vec![
format!("{}/opt/libxml2/lib/pkgconfig", homebrew_prefix),
format!("{}/lib/pkgconfig", homebrew_prefix),
format!("{}/share/pkgconfig", homebrew_prefix),
];

let existing_path = std::env::var("PKG_CONFIG_PATH").unwrap_or_default();
let new_path = if existing_path.is_empty() {
pkg_config_paths.join(":")
} else {
format!("{}:{}", pkg_config_paths.join(":"), existing_path)
};

println!("cargo:warning=Setting PKG_CONFIG_PATH to: {}", new_path);
std::env::set_var("PKG_CONFIG_PATH", &new_path);
println!("cargo:rerun-if-env-changed=PKG_CONFIG_PATH");
}

gst_plugin_version_helper::info()
}
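Note: the plugin's native dependencies are resolved through pkg-config at build time, which is why the search path above matters on Homebrew-based macOS installs. Below is a minimal standalone sketch, not part of this PR, of how the same PKG_CONFIG_PATH composition can be exercised with the `pkg-config` crate; the libxml2 probe name and the /opt/homebrew prefix are illustrative assumptions.

// Hypothetical standalone check (not part of the PR): compose PKG_CONFIG_PATH
// the same way build.rs does, then probe libxml2 via the `pkg-config` crate.
fn main() {
    let homebrew_prefix = "/opt/homebrew";
    let pkg_config_paths = [
        format!("{homebrew_prefix}/opt/libxml2/lib/pkgconfig"),
        format!("{homebrew_prefix}/lib/pkgconfig"),
        format!("{homebrew_prefix}/share/pkgconfig"),
    ];
    std::env::set_var("PKG_CONFIG_PATH", pkg_config_paths.join(":"));

    // "libxml-2.0" is the pkg-config name shipped with libxml2.
    match pkg_config::probe_library("libxml-2.0") {
        Ok(lib) => println!("found libxml2 {} (link paths: {:?})", lib.version, lib.link_paths),
        Err(err) => eprintln!("pkg-config probe failed: {err}"),
    }
}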
4 changes: 2 additions & 2 deletions examples/audio_inference.rs
@@ -8,7 +8,7 @@
//! # FFI mode (default, no model path needed):
//! cargo run --example audio_inference [OPTIONS]
//!
//! # EIM mode (legacy, requires model path):
//! # EIM mode (requires model path):
//! cargo run --example audio_inference -- --model <path_to_model> [OPTIONS]
//!
//! Optional arguments:
@@ -37,7 +37,7 @@ use std::path::Path;
/// Command line parameters for the real-time audio classification example
#[derive(Parser, Debug)]
struct AudioClassifyParams {
/// Path to the Edge Impulse model file (.eim) - EIM mode only (legacy)
/// Path to the Edge Impulse model file (.eim) - EIM mode only
#[clap(short, long)]
model: Option<String>,

15 changes: 4 additions & 11 deletions examples/image_inference.rs
@@ -5,13 +5,17 @@
//! This example demonstrates how to use the Edge Impulse GStreamer plugin to perform
//! image classification using a trained model on a single image file.
//!
//! The edgeimpulsevideoinfer element automatically handles frame resizing to match model
//! input requirements and scales detection results back to the original resolution.
//!
//! Usage:
//! # EIM mode (requires model path):
//! cargo run --example image_inference -- --model <path_to_model> --image <path_to_image>
//!
//! # FFI mode (no model path needed):
//! cargo run --example image_inference -- --image <path_to_image>
//!
//!
//! Environment setup:
//! export GST_PLUGIN_PATH="target/debug:$GST_PLUGIN_PATH"

@@ -42,14 +42,6 @@ struct ImageClassifyParams {
#[arg(short, long, default_value = "RGB")]
format: String,

/// Input width
#[arg(short = 'W', long, default_value = "96")]
width: i32,

/// Input height
#[arg(short = 'H', long, default_value = "96")]
height: i32,

/// Enable debug output
#[arg(short, long)]
debug: bool,
@@ -103,8 +99,6 @@ fn create_pipeline(args: &ImageClassifyParams) -> Result<gst::Pipeline, Box<dyn
"caps",
&gst::Caps::builder("video/x-raw")
.field("format", &args.format)
.field("width", &args.width)
.field("height", &args.height)
.build(),
)
.build()?;
@@ -222,7 +216,6 @@ fn example_main() -> Result<(), Box<dyn Error>> {
if let Some(output) = &args.output {
println!("💾 Output image: {}", output);
}
println!("📐 Image dimensions: {}x{}", args.width, args.height);
println!("🎨 Format: {}", args.format);
println!("🔧 Debug mode: {}", args.debug);

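Note: since the updated doc comment states that edgeimpulsevideoinfer resizes frames to the model's input size and scales detections back, the example's capsfilter only needs to pin the pixel format. A minimal sketch of that simplified caps construction, assuming the gstreamer crate is in scope and gst::init() has already been called:

use gstreamer as gst;
use gst::prelude::*;

// Sketch: pin only the pixel format; width/height are left unset so that
// edgeimpulsevideoinfer can resize frames to the model's input dimensions.
fn build_capsfilter(format: &str) -> Result<gst::Element, Box<dyn std::error::Error>> {
    let caps = gst::Caps::builder("video/x-raw")
        .field("format", format)
        .build();
    Ok(gst::ElementFactory::make("capsfilter")
        .property("caps", &caps)
        .build()?)
}

Leaving width and height out of the caps lets upstream negotiation pass the source resolution through unchanged.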
18 changes: 3 additions & 15 deletions examples/image_slideshow.rs
@@ -13,6 +13,7 @@ use image::io::Reader as ImageReader;
use image::ImageFormat;

/// A GStreamer-based image slideshow that runs inference on images from a folder.
/// The edgeimpulsevideoinfer element automatically handles frame resizing to match model requirements.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
@@ -28,14 +29,6 @@ struct Args {
#[arg(short = 'n', long, default_value = "100")]
max_images: usize,

/// Input width
#[arg(short = 'W', long)]
width: i32,

/// Input height
#[arg(short = 'H', long)]
height: i32,

/// Slideshow framerate (images per second, default: 1)
#[arg(long, default_value = "1")]
framerate: i32,
@@ -187,7 +180,7 @@ fn example_main() -> Result<()> {
.property("max-size-buffers", 1u32)
.property("max-size-time", 30000000000u64)
.build()?;
let videoscale = ElementFactory::make("videoscale").build()?;
// videoscale removed - edgeimpulsevideoinfer now handles resizing
let videorate = ElementFactory::make("videorate")
.property("max-rate", args.framerate)
.build()?;
@@ -203,18 +196,15 @@
.build()?;

// Set caps for GRAY8 before inference, including framerate to control slideshow speed
// The edgeimpulsevideoinfer element will automatically resize to match model requirements
let caps_gray = gstreamer::Caps::builder("video/x-raw")
.field("format", "GRAY8")
.field("width", args.width)
.field("height", args.height)
.field("framerate", &gstreamer::Fraction::new(args.framerate, 1))
.build();
capsfilter_gray.set_property("caps", &caps_gray);

let caps_rgb = gstreamer::Caps::builder("video/x-raw")
.field("format", "RGB")
.field("width", args.width)
.field("height", args.height)
.build();
capsfilter_rgb.set_property("caps", &caps_rgb);

@@ -223,7 +213,6 @@
&decodebin,
&videoconvert1,
&queue,
&videoscale,
&videorate,
&capsfilter_gray,
&edgeimpulse,
@@ -259,7 +248,6 @@ fn example_main() -> Result<()> {
Element::link_many([
&videoconvert1,
&queue,
&videoscale,
&videorate,
&capsfilter_gray,
&edgeimpulse,
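Note: with videoscale dropped from the slideshow pipeline, the inference chain links straight from the rate/caps stage into edgeimpulsevideoinfer. A rough sketch of the trimmed link order, assuming the elements are built as in the example above and omitting the downstream sink side:

use gstreamer as gst;
use gst::prelude::*;

// Sketch: the slideshow chain without videoscale; edgeimpulsevideoinfer is
// expected to resize frames to the model's input size internally.
fn link_inference_chain(
    videoconvert: &gst::Element,
    queue: &gst::Element,
    videorate: &gst::Element,
    capsfilter_gray: &gst::Element,
    edgeimpulse: &gst::Element,
) -> Result<(), gst::glib::BoolError> {
    gst::Element::link_many([videoconvert, queue, videorate, capsfilter_gray, edgeimpulse])
}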