Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.11.9
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name
1754671400.091321,0.0126,0,,CPU,N/A,
1754683081.648606,301.4291,0,,CPU,N/A,
1754683236.368768,301.5447,0,,CPU,N/A,
98 changes: 96 additions & 2 deletions dream_layer_backend/img2img_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
import json
import logging
import os
import csv
import pynvml
import requests
from PIL import Image
import io
Expand Down Expand Up @@ -34,7 +36,7 @@


# Get the absolute path to the ComfyUI root directory (parent of our backend directory)
COMFY_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
COMFY_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

# ComfyUI's input directory should be inside the ComfyUI directory
COMFY_UI_DIR = os.path.join(COMFY_ROOT, "ComfyUI")
Expand Down Expand Up @@ -153,16 +155,108 @@ def handle_img2img():
'status': 'error',
'message': f'Invalid input image: {str(e)}'
}), 400


# Get checkpoint
ckpt_name = data.get("ckpt_name", "unknown")

# List of allowed checkpoints
CHECKPOINTS_DIR = os.path.join(COMFY_ROOT, "ComfyUI", "models", "checkpoints")

# Inline function to list allowed checkpoints dynamically
def get_allowed_checkpoints(directory=None):
    """Return a sorted list of checkpoint filenames available on the server.

    Args:
        directory: Optional directory to scan; defaults to CHECKPOINTS_DIR.

    Returns:
        Sorted list of filenames ending in ``.safetensors`` or ``.ckpt``,
        or an empty list if the directory cannot be read.
    """
    search_dir = CHECKPOINTS_DIR if directory is None else directory
    try:
        # Sorting makes the fallback choice (ALLOWED_CKPTS[0]) deterministic
        # instead of depending on filesystem enumeration order.
        return sorted(
            fname for fname in os.listdir(search_dir)
            if fname.endswith(('.safetensors', '.ckpt'))
        )
    except OSError as e:  # narrower than Exception: only I/O failures are expected here
        logger.error(f"Failed to list checkpoints: {e}")
        return []

ALLOWED_CKPTS = get_allowed_checkpoints()

# Validate checkpoint
if not ckpt_name or ckpt_name not in ALLOWED_CKPTS:
if ALLOWED_CKPTS:
chosen_ckpt = ALLOWED_CKPTS[0]
print(f"Checkpoint '{ckpt_name}' invalid or missing, falling back to '{chosen_ckpt}'")
ckpt_name = chosen_ckpt
else:
return jsonify({"error": "No checkpoints available on server"}), 500

# Insert ckpt_name into data
data['ckpt_name'] = ckpt_name

# Transform data to ComfyUI workflow
workflow = transform_to_img2img_workflow(data)

# workflow = transform_to_img2img_workflow(data, ckpt_name=ckpt_name)

# Log the workflow for debugging
logger.info("Generated workflow:")
logger.info(json.dumps(workflow, indent=2))

try:
pynvml.nvmlInit()
gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(0)
gpu_name = pynvml.nvmlDeviceGetName(gpu_handle).decode()
driver_version = pynvml.nvmlSystemGetDriverVersion().decode()
except Exception:
gpu_name = "CPU"
driver_version = "N/A"

Comment on lines +199 to +207
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Add NVML cleanup to prevent resource leaks

NVML should be properly shut down after use to prevent resource leaks in long-running servers.

         try:
             pynvml.nvmlInit()
             gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(0)
             gpu_name = pynvml.nvmlDeviceGetName(gpu_handle).decode()
             driver_version = pynvml.nvmlSystemGetDriverVersion().decode()
+            pynvml.nvmlShutdown()
         except Exception:
             gpu_name = "CPU"
             driver_version = "N/A"

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In dream_layer_backend/img2img_server.py around lines 200 to 208, NVML is
initialized but not shut down which can leak resources; wrap the nvmlInit and
subsequent NVML calls in a try/finally (or try/except/finally) and call
pynvml.nvmlShutdown() in the finally block, using a local flag to track
successful initialization so shutdown is only called if init succeeded, and
ensure exceptions during NVML calls still set gpu_name/driver_version fallback
values before shutdown.

# Start Time
start_time = time.perf_counter()

# Send to ComfyUI
comfy_response = send_to_comfyui(workflow)

# End Time
elapsed = time.perf_counter() - start_time

# Calculate images generated
images_generated = len(comfy_response.get("all_images", []))
time_per_image = elapsed / images_generated if images_generated > 0 else None

# Log info to console and logger
time_per_image_str = f"{time_per_image:.2f}s/img" if time_per_image else "N/A"
logger.info(f"⏱ {elapsed:.2f}s total · {time_per_image_str} · GPU: {gpu_name} · Driver: {driver_version}")
print(f"⏱ {elapsed:.2f}s total · {time_per_image_str} · GPU: {gpu_name} · Driver: {driver_version}")

# Log info into CSV

# Path for CSV log file
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INFERENCE_TRACES_DIR = os.path.join(BASE_DIR, "inference_traces")
os.makedirs(INFERENCE_TRACES_DIR, exist_ok=True) # create folder if it doesn't exist
TRACE_CSV = os.path.join(INFERENCE_TRACES_DIR, "inference_trace_img2img.csv")

# Ensure CSV file exists and has header
if not os.path.exists(TRACE_CSV):
with open(TRACE_CSV, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["timestamp", "total_time_s", "images_generated", "time_per_image_s", "gpu_name", "driver_version","ckpt_name"])

# Append new row to CSV
with open(TRACE_CSV, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow([
time.time(),
round(elapsed, 4),
images_generated,
round(time_per_image, 4) if time_per_image is not None else "",
gpu_name,
driver_version,
ckpt_name
])

Comment on lines +227 to +252
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Consider adding error handling for CSV operations

The CSV writing operations could fail due to permissions or disk space issues, which would crash the request handling.

         # Log info into CSV
         
         # Path for CSV log file
         BASE_DIR = os.path.dirname(os.path.abspath(__file__))
         INFERENCE_TRACES_DIR = os.path.join(BASE_DIR, "inference_traces")
-        os.makedirs(INFERENCE_TRACES_DIR, exist_ok=True)  # create folder if it doesn't exist
-        TRACE_CSV = os.path.join(INFERENCE_TRACES_DIR, "inference_trace_img2img.csv")
-
-        # Ensure CSV file exists and has header
-        if not os.path.exists(TRACE_CSV):
-            with open(TRACE_CSV, "w", newline="") as f:
-                writer = csv.writer(f)
-                writer.writerow(["timestamp", "total_time_s", "images_generated", "time_per_image_s", "gpu_name", "driver_version","ckpt_name"])
-
-        # Append new row to CSV
-        with open(TRACE_CSV, "a", newline="") as f:
-            writer = csv.writer(f)
-            writer.writerow([
-                time.time(),
-                round(elapsed, 4),
-                images_generated,
-                round(time_per_image, 4) if time_per_image is not None else "",
-                gpu_name,
-                driver_version,
-                ckpt_name
-            ])
+        try:
+            os.makedirs(INFERENCE_TRACES_DIR, exist_ok=True)  # create folder if it doesn't exist
+            TRACE_CSV = os.path.join(INFERENCE_TRACES_DIR, "inference_trace_img2img.csv")
+    
+            # Ensure CSV file exists and has header
+            if not os.path.exists(TRACE_CSV):
+                with open(TRACE_CSV, "w", newline="") as f:
+                    writer = csv.writer(f)
+                    writer.writerow(["timestamp", "total_time_s", "images_generated", "time_per_image_s", "gpu_name", "driver_version","ckpt_name"])
+    
+            # Append new row to CSV
+            with open(TRACE_CSV, "a", newline="") as f:
+                writer = csv.writer(f)
+                writer.writerow([
+                    time.time(),
+                    round(elapsed, 4),
+                    images_generated,
+                    round(time_per_image, 4) if time_per_image is not None else "",
+                    gpu_name,
+                    driver_version,
+                    ckpt_name
+                ])
+        except Exception as e:
+            logger.warning(f"Failed to log inference trace to CSV: {e}")
+            # Continue execution as this is non-critical
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Path for CSV log file
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INFERENCE_TRACES_DIR = os.path.join(BASE_DIR, "inference_traces")
os.makedirs(INFERENCE_TRACES_DIR, exist_ok=True) # create folder if it doesn't exist
TRACE_CSV = os.path.join(INFERENCE_TRACES_DIR, "inference_trace_img2img.csv")
# Ensure CSV file exists and has header
if not os.path.exists(TRACE_CSV):
with open(TRACE_CSV, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["timestamp", "total_time_s", "images_generated", "time_per_image_s", "gpu_name", "driver_version","ckpt_name"])
# Append new row to CSV
with open(TRACE_CSV, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow([
time.time(),
round(elapsed, 4),
images_generated,
round(time_per_image, 4) if time_per_image is not None else "",
gpu_name,
driver_version,
ckpt_name
])
# Path for CSV log file
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INFERENCE_TRACES_DIR = os.path.join(BASE_DIR, "inference_traces")
try:
os.makedirs(INFERENCE_TRACES_DIR, exist_ok=True) # create folder if it doesn't exist
TRACE_CSV = os.path.join(INFERENCE_TRACES_DIR, "inference_trace_img2img.csv")
# Ensure CSV file exists and has header
if not os.path.exists(TRACE_CSV):
with open(TRACE_CSV, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow([
"timestamp",
"total_time_s",
"images_generated",
"time_per_image_s",
"gpu_name",
"driver_version",
"ckpt_name"
])
# Append new row to CSV
with open(TRACE_CSV, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow([
time.time(),
round(elapsed, 4),
images_generated,
round(time_per_image, 4) if time_per_image is not None else "",
gpu_name,
driver_version,
ckpt_name
])
except Exception as e:
logger.warning(f"Failed to log inference trace to CSV: {e}")
# Continue execution as this is non-critical

# Include information into JSON response
comfy_response["metrics"] = {
"elapsed_time_sec": elapsed,
"time_per_image_sec": time_per_image,
"gpu": gpu_name,
"driver_version": driver_version
}

if "error" in comfy_response:
return jsonify({
Expand Down
53 changes: 47 additions & 6 deletions dream_layer_backend/img2img_workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,18 +18,47 @@
import logging
from dream_layer import get_directories
from extras import COMFY_INPUT_DIR
from pathlib import Path


# Initialize logger
logger = logging.getLogger(__name__)

def get_available_checkpoints():
    """Return the sorted list of checkpoint model filenames.

    Scans ``<repo root>/ComfyUI/models/checkpoints`` for ``*.safetensors``
    and ``*.ckpt`` files. Returns an empty list when the directory is
    missing so callers can decide how to handle the absence of models.
    """
    root_dir = Path(__file__).resolve().parent.parent
    checkpoints_dir = root_dir / "ComfyUI" / "models" / "checkpoints"
    # Path-resolution details are debug noise, not per-request information.
    logger.debug(f"Looking for checkpoints in: {checkpoints_dir}")

    if not checkpoints_dir.exists():
        logger.error(f"Checkpoints directory does not exist: {checkpoints_dir}")
        return []

    # Sort so that fallback selection (first item) is deterministic across
    # filesystems, which enumerate directory entries in arbitrary order.
    models = sorted(
        f.name for f in checkpoints_dir.iterdir()
        if f.suffix in ('.safetensors', '.ckpt')
    )
    logger.info(f"Found checkpoint files: {models}")
    return models

def transform_to_img2img_workflow(data):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

issue (code-quality): We've found these issues:


Explanation
The quality score for this function is below the quality threshold of 25%.
This score is a combination of the method length, cognitive complexity and working memory.

How can you solve this?

It might be worth refactoring this function to make it shorter and more readable.

  • Reduce the function length by extracting pieces of functionality out into
    their own functions. This is the most important thing you can do - ideally a
    function should be less than 10 lines.
  • Reduce nesting, perhaps by introducing guard clauses to return early.
  • Ensure that variables are tightly scoped, so that code using related concepts
    sits together within the function rather than being scattered.

"""
Transform frontend request data into ComfyUI workflow format for img2img
"""

# Determine model type and features
model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors')
# Dynamically determine the model name that's being used and validate
requested_model = data.get("model_name")
available_models = get_available_checkpoints()
if not available_models:
raise FileNotFoundError("No checkpoint models found in ComfyUI models/checkpoints directory")

# Use requested model if valid, else fallback to detected
if requested_model and requested_model in available_models:
model_name = requested_model
else:
# fallback to first available checkpoint and log the fallback
model_name = available_models[0]
logger.warning(f"Requested model '{requested_model}' not found. Falling back to '{model_name}'.")

#model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors') # was hardcoded

use_controlnet = bool(data.get('controlnet'))
use_lora = bool(data.get('lora'))

Expand Down Expand Up @@ -80,7 +109,7 @@ def transform_to_img2img_workflow(data):
denoising_strength = max(
0.0, min(1.0, float(data.get('denoising_strength', 0.75))))
input_image = data.get('input_image', '')
model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors')
#model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors')
sampler_name = data.get('sampler_name', 'euler')
scheduler = data.get('scheduler', 'normal')

Expand Down Expand Up @@ -226,7 +255,19 @@ def transform_to_img2img_workflow(data):
if refiner_data['refiner_enabled']:
logger.info("Injecting Refiner parameters...")
workflow = inject_refiner_parameters(workflow, refiner_data)


print(f"✅ Workflow transformation complete")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Remove extraneous f-string prefix

The print statement uses an f-string prefix but doesn't contain any placeholders.

-    print(f"✅ Workflow transformation complete")
+    print("✅ Workflow transformation complete")
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
print(f"✅ Workflow transformation complete")
print("✅ Workflow transformation complete")
🧰 Tools
🪛 Ruff (0.12.2)

249-249: f-string without any placeholders

Remove extraneous f prefix

(F541)

🤖 Prompt for AI Agents
In dream_layer_backend/img2img_workflow.py around line 249, the print statement
uses an unnecessary f-string prefix for a static message; remove the leading "f"
so the call becomes a normal string literal (print("✅ Workflow transformation
complete")) to avoid misleading use of f-strings.

# Ensure dump directory exists
dump_dir = os.path.join(os.path.dirname(__file__), "workflow_dumps")
os.makedirs(dump_dir, exist_ok=True)

# Save the workflow JSON
output_path = os.path.join(dump_dir, "last_workflow.json")
with open(output_path, "w") as f:
json.dump(workflow, f, indent=2)

print(f"📋 Generated workflow JSON: {json.dumps(workflow, indent=2)}")
print(f"🚀 Workflow JSON saved to {output_path}")
return workflow
Comment on lines +259 to 271
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

suggestion: Console print statements may clutter logs in production.

Use the logger for these messages to ensure consistent log management and prevent unnecessary output in production.

Suggested change
print(f"✅ Workflow transformation complete")
# Ensure dump directory exists
dump_dir = os.path.join(os.path.dirname(__file__), "workflow_dumps")
os.makedirs(dump_dir, exist_ok=True)
# Save the workflow JSON
output_path = os.path.join(dump_dir, "last_workflow.json")
with open(output_path, "w") as f:
json.dump(workflow, f, indent=2)
print(f"📋 Generated workflow JSON: {json.dumps(workflow, indent=2)}")
print(f"🚀 Workflow JSON saved to {output_path}")
return workflow
logger.info("✅ Workflow transformation complete")
# Ensure dump directory exists
dump_dir = os.path.join(os.path.dirname(__file__), "workflow_dumps")
os.makedirs(dump_dir, exist_ok=True)
# Save the workflow JSON
output_path = os.path.join(dump_dir, "last_workflow.json")
with open(output_path, "w") as f:
json.dump(workflow, f, indent=2)
logger.info(f"📋 Generated workflow JSON: {json.dumps(workflow, indent=2)}")
logger.info(f"🚀 Workflow JSON saved to {output_path}")
return workflow



Expand Down
11 changes: 11 additions & 0 deletions dream_layer_backend/inference_traces/inference_trace_img2img.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name
1754796517.274257,0.1072,0,,CPU,N/A,
1754799939.5604439,0.0121,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754800554.222678,0.0363,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754803186.83248,0.0754,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754833176.129069,0.0829,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754841283.544258,0.0107,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846601.354629,0.0118,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846604.915974,0.0045,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754847081.994686,301.2454,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754924526.325828,301.6547,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version
1754796516.524883,301.3008,0,,CPU,N/A
36 changes: 36 additions & 0 deletions dream_layer_backend/inference_traces/inference_trace_txt2img.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name
1754803186.3610072,301.3461,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754833175.356676,301.1881,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754841283.3543549,0.0102,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754844427.844616,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754844771.521109,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754844897.207525,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754844928.5717719,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754845078.4543412,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754845256.875231,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754845318.64412,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754845646.243946,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754845676.958377,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846119.498097,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846290.427422,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846316.227009,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846425.6457322,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846462.971039,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846601.165691,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846604.743495,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754846780.546242,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754924224.424215,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754926591.025748,301.3167,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1754948761.507086,301.2014,0,,CPU,N/A,unknown
1754953737.084278,301.574,0,,CPU,N/A,unknown
1755031482.6354072,301.4959,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1755031704.523446,80.554,1,80.554,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1755032728.004299,0.4533,1,0.4533,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1755033409.806452,301.5118,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1755033868.0322208,301.699,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1755039709.030485,301.1955,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors
1755041998.9101171,300.3344,0,,CPU,N/A,unknown
1755042012.7522612,301.2818,0,,CPU,N/A,unknown
1755072753.412419,300.5095,0,,CPU,N/A,unknown
1755100109.236161,301.1258,0,,CPU,N/A,unknown
1755275022.702453,301.1851,0,,CPU,N/A,unknown
1 change: 1 addition & 0 deletions dream_layer_backend/test_image/base64_txt_test_image.txt

Large diffs are not rendered by default.

42 changes: 42 additions & 0 deletions dream_layer_backend/test_image/base64conversion.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
"""Round-trip the first image in this folder through base64.

Encodes the first image file found next to this script into a base64 text
file, then decodes it back to bytes and writes the result as
``test_image.png``, demonstrating that base64 round-tripping is lossless
for test fixtures.
"""
import base64
from pathlib import Path

# Image extensions treated as convertible input
IMG_EXTENSIONS = {".png", ".jpg", ".jpeg", ".bmp", ".gif"}


def convert_first_image(folder: Path) -> Path:
    """Encode the first image in *folder* to base64 and decode it back.

    Writes two files into *folder*:
      - ``base64_txt_test_image.txt``: the base64-encoded image text
      - ``test_image.png``: the bytes decoded back from that text

    Returns:
        Path of the decoded output image.

    Raises:
        FileNotFoundError: if *folder* contains no image files.
    """
    # Sort for a deterministic choice; iterdir() order is filesystem-dependent.
    image_files = sorted(
        f for f in folder.iterdir() if f.suffix.lower() in IMG_EXTENSIONS
    )
    if not image_files:
        raise FileNotFoundError(f"No image files found in {folder}")

    input_img_path = image_files[0]
    base64_txt_path = folder / "base64_txt_test_image.txt"
    output_img_path = folder / "test_image.png"

    print(f"Using input image: {input_img_path.name}")

    # Encode the image bytes to a base64 string and persist it as text.
    base64_img = base64.b64encode(input_img_path.read_bytes()).decode("utf-8")
    base64_txt_path.write_text(base64_img)
    print(f"Saved base64 string to {base64_txt_path}")

    # Decode the string back to bytes and save as a PNG to confirm the round trip.
    output_img_path.write_bytes(base64.b64decode(base64_img))
    print(f"Saved decoded image as {output_img_path}")
    return output_img_path


if __name__ == "__main__":
    # Assumes this script lives inside the test_image folder.
    convert_first_image(Path(__file__).parent)
Binary file added dream_layer_backend/test_image/examjam.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dream_layer_backend/test_image/test_image.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading