Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 42 additions & 0 deletions report_bundler/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# Report Bundler

Hi! I'm Rachana.

This is my submission for DreamLayer's Open Source Challenge. (Task #5 - Report Bundle)

## Why this option?

As someone who has worked on intelligent data pipelines and NLP automation tools, this challenge was a fun way to apply my real-world experience to a compact, useful OSS tool.

This task generates a reproducible `report.zip` containing:
- Metadata (`results.csv`)
- Generation config (`config.json`)
- Final grid images
- Schema validation + README

---

## CSV Columns

| Column | Description |
|---------------|-----------------------------------------------------------|
| image_path | Relative path to the grid image |
| sampler | Sampling algorithm used |
| steps | Number of inference steps |
| cfg | Classifier-Free Guidance scale |
| preset | Style or visual preset used |
| seed | Random seed for deterministic generation |
| width | Grid width in pixels (added for visual clarity) |
| height | Grid height in pixels (added for visual clarity) |
| grid_label | Custom label for the image (used in overlay or UX tags) |
| notes | Any human-readable notes about generation intent |



The output is deterministic, simple to trace, and easy to integrate into DreamLayer workflows.


## How to Run

```bash
cd report_bundler
python bundler.py
```
83 changes: 83 additions & 0 deletions report_bundler/bundler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import csv
import json
import zipfile
from pathlib import Path

# These are the columns we expect in the results.csv
REQUIRED_COLUMNS = {"image_path", "sampler", "steps", "cfg", "preset", "seed"}

def validate_csv_schema(csv_path):
    """
    Read the CSV at ``csv_path`` and verify it has all required columns.

    Args:
        csv_path: Path to the results CSV file.

    Returns:
        list[dict]: Every data row, as produced by ``csv.DictReader``.

    Raises:
        ValueError: If the file has no header row, or the header lacks
            one or more of ``REQUIRED_COLUMNS``.
    """
    # Explicit encoding so parsing doesn't depend on the platform default.
    with open(csv_path, newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)

        if reader.fieldnames is None:
            raise ValueError("CSV file is empty or missing a header row.")

        missing = REQUIRED_COLUMNS - set(reader.fieldnames)
        if missing:
            # Sort the names so the error message is deterministic
            # (set iteration order is not).
            raise ValueError(f"Missing required columns: {sorted(missing)}")

        return list(reader)

def collect_files(csv_rows):
    """
    Extract the set of usable image paths from parsed CSV rows.

    Rows whose 'image_path' value is empty or whitespace-only are skipped
    with a console notice; a row lacking the key entirely is an error.

    Args:
        csv_rows: Sequence of dicts, one per CSV data row.

    Returns:
        set[str]: Unique, non-blank image paths.

    Raises:
        ValueError: If any row has no 'image_path' key at all.
    """
    collected = set()
    for row_num, entry in enumerate(csv_rows):
        try:
            candidate = entry["image_path"]
        except KeyError:
            raise ValueError(f"Row {row_num} missing 'image_path' key: {entry}") from None
        if not (candidate and candidate.strip()):
            print(f"Skipping row {row_num} due to empty image_path.")
            continue
        collected.add(candidate)
    return collected

def create_report_zip(output_path="report.zip", base_dir=None):
    """
    Build the reproducible report bundle.

    Steps:
      1. Validate the results.csv schema.
      2. Collect the image paths it references.
      3. Package the CSV, config, README, and images into a zip.

    Args:
        output_path: Name of the zip file to create, relative to base_dir.
        base_dir: Directory containing results.csv, config.json, README.md
            and the images. Defaults to this script's directory, preserving
            the original behavior.

    Raises:
        ValueError: If the CSV schema is invalid, or an image path escapes
            base_dir (path traversal).
        FileNotFoundError: If a referenced image does not exist.
    """
    base_dir = Path(__file__).parent if base_dir is None else Path(base_dir)
    csv_path = base_dir / "results.csv"
    config_path = base_dir / "config.json"
    readme_path = base_dir / "README.md"
    zip_path = base_dir / output_path

    # Step 1: Validate CSV
    csv_rows = validate_csv_schema(csv_path)

    # Step 2: Collect all valid image paths
    image_paths = collect_files(csv_rows)

    base_resolved = base_dir.resolve()

    # Step 3: Zip all files
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(csv_path, arcname="results.csv")
        zipf.write(config_path, arcname="config.json")
        zipf.write(readme_path, arcname="README.md")

        # Sort for a deterministic archive order (sets iterate unordered).
        for path in sorted(image_paths):
            # Resolve relative to base_dir, NOT the CWD. The previous check
            # resolved `path` against the current working directory and used
            # a raw startswith() comparison, which both anchors to the wrong
            # directory and matches sibling dirs like "<base_dir>-evil".
            full_path = (base_dir / path).resolve()
            # Path-traversal guard: the resolved target must be inside base_dir.
            try:
                full_path.relative_to(base_resolved)
            except ValueError:
                raise ValueError(f"🚨 Invalid image path: {path}") from None

            if not full_path.exists():
                raise FileNotFoundError(f"Image not found: {full_path}")

            zipf.write(full_path, arcname=path)

    print(f"Done! Report created at: {zip_path}")

# Script entry point: bundle the files next to this script into report.zip.
if __name__ == "__main__":
    create_report_zip()
14 changes: 14 additions & 0 deletions report_bundler/config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
{
"model": "SDXL",
"vae": "AutoencoderKL",
"loras": ["AnimeStyleV2"],
"controlnets": ["CannyEdgeDetector"],
"prompt": "a serene futuristic cityscape at sunset, ultra realistic, 8k",
"negative_prompt": "blurry, low quality, distorted",
"seed": 123456,
"sampler": "Euler",
"steps": 25,
"cfg": 7.5,
"workflow": "txt2img",
"version": "1.0.0"
}
Binary file added report_bundler/grids/image1.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added report_bundler/grids/image2.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added report_bundler/report.zip
Binary file not shown.
3 changes: 3 additions & 0 deletions report_bundler/results.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
image_path,sampler,steps,cfg,preset,seed,width,height,grid_label,notes
grids/image1.png,Euler,30,8.5,cyber-neural,108234,1024,1024,digital-consciousness,"Futuristic brain core visualization in a neon tech chamber"
grids/image2.png,DPM++ 2M Karras,28,7.8,ui-dreamscape,786512,1024,768,holographic-devices,"Concept art of interconnected digital devices in a glowy metaverse style"
15 changes: 15 additions & 0 deletions report_bundler/test_schema.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import csv
from pathlib import Path

def test_csv_schema():
    """
    Ensure results.csv contains all required metadata columns.

    The CSV is located relative to this test file rather than the CWD, so
    the test passes no matter which directory pytest is invoked from (the
    original hard-coded "results.csv", which breaks when running pytest
    from the repository root).
    """
    required = {"image_path", "sampler", "steps", "cfg", "preset", "seed"}
    csv_path = Path(__file__).parent / "results.csv"

    with open(csv_path, newline='') as f:
        reader = csv.DictReader(f)
        assert reader.fieldnames is not None, "Missing header row in CSV"
        header = set(reader.fieldnames)

    assert required.issubset(header), f"Missing columns: {required - header}"