v0.1 - Merry Christmas!

r000t 2024-12-25 09:30:53 -05:00
commit 1965706667
29 changed files with 2068 additions and 0 deletions

94
.dockerignore Normal file

@ -0,0 +1,94 @@
# Local exclusions
old/
.aider*
# Git
.git
.gitignore
.gitattributes
# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml
# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Virtual environment
.env
.venv/
venv/
# PyCharm
.idea
# Python mode for VIM
.ropeproject
**/.ropeproject
# Vim swap files
**/*.swp
# VS Code
.vscode/

128
.gitignore vendored Normal file

@ -0,0 +1,128 @@
.aider*
# Ripped from https://github.com/github/gitignore/blob/main/Python.gitignore
# Some things removed because I know they're unused
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
.idea/
# PyPI configuration file
.pypirc

300
comfykiosk/__init__.py Normal file

@ -0,0 +1,300 @@
import asyncio
import json
import os
import warnings
import logging
from more_itertools import always_iterable
from fastapi import Response
from typing import List, Tuple, Optional, Union, TypedDict
# Get the module logger
logger = logging.getLogger("comfykiosk")
# Adjust third-party loggers
logging.getLogger('httpx').setLevel(logging.WARNING)
from .image_sources import ImageSource
from .randomness import generate_seed
from .comfy_networking import execute_comfyui_prompt, get_image
from .generator import ComfyGenerator
from .workflow import Workflow
from .workflow.loaders import SimpleWorkflowLoader
from .image_sources.pregenerate import PreparedGenPool
from .image_sources.filesystem import FileSystemImageSource
from .image_sources.fake_i_fier import FakeIFier
class ImageResponse(TypedDict):
image_data: bytes
seed: int
media_type: str
class BaseComfyKiosk:
"""Base class for image generation services with configurable defaults.
Configuration can be provided in four ways, in order of precedence:
1. Instance parameters passed to __init__
2. Environment variables
3. Subclass overrides of class attributes
4. Class-level defaults defined below
"""
from .config import (
DEFAULT_LOCAL_SALT,
DEFAULT_COMFYUI_URL,
DEFAULT_WORKFLOW_PATH,
DEFAULT_WORKFLOW_DIR,
DEFAULT_OUTPUT_DIR,
)
    def __init__(self, *, generator=None, comfyui_url=None, output_dir=None,
                 local_salt=None, workflow: Workflow = None, loader: SimpleWorkflowLoader = None):
        self.registered_workflows = {}
        self.generator = generator
        self.workflow = workflow
        self.loader = loader
        self.comfyui_url = comfyui_url or self.DEFAULT_COMFYUI_URL
        self.output_dir = output_dir or self.DEFAULT_OUTPUT_DIR
        self.local_salt = local_salt or self.DEFAULT_LOCAL_SALT
async def get_image(self, seed=None, workflow_id: int=None):
"""
Attempts to identify and/or select a workflow, and calls _get_image() with the result
"""
if workflow_id is None and self.workflow is None:
raise ValueError("workflow_id must be specified if no default workflow is provided during initialization.")
        if workflow_id is not None:
            if not isinstance(workflow_id, int):
                raise TypeError("workflow_id must be an integer")
            workflow = self.registered_workflows[workflow_id]
        else:
            workflow = self.workflow
media_type = None
requested_seed = seed
if seed is None:
seed = generate_seed()
assert(isinstance(workflow, Workflow))
assert(isinstance(seed, int))
        return await self._get_image(seed, workflow, seed_is_explicit=requested_seed is not None, workflow_is_explicit=workflow_id is not None)
async def _get_image(self, seed: int, workflow: Workflow, **flags) -> Union[ImageResponse, Response]:
"""
Actual implementation of get_image(). get_image() will handle seed and workflow selection,
so your subclass is guaranteed to get fully-instantiated objects.
You are also given additional signals via flags about how the parameters were selected.
It may be helpful to know that if the workflow was randomly selected,
this selection is stable given the same seed and possible workflows.
Your subclass should override this method, as it currently does nothing.
Your subclass may override the seed, but this behavior may be later restricted to allow
you to override the seed only when it was not explicitly defined by the user.
If this happens, the future behavior will be to discard the seed you return.
Eventually, it will step through all image sources registered with the class instance.
:param seed: The seed to use for generation
:param workflow: The workflow to use for generation
:param flags: Additional flags indicating parameter selection:
- seed_is_explicit: Whether the user explicitly requested this seed
- workflow_is_explicit: Whether the user explicitly requested this workflow
:return: Implementation specific
"""
pass
async def list_workflows(self) -> List[Tuple[int, Workflow]]:
"""
Lists all registered workflows.
Returns:
A list of tuples, where each tuple contains a workflow ID and the corresponding Workflow object.
"""
return list(self.registered_workflows.items())
def register_workflow(self, workflow: Workflow, id: int=None) -> int:
"""
Registers a workflow.
Args:
workflow: The workflow to register.
            id: An optional ID for the workflow. If not provided, an ID will be generated. Generated IDs start at 101.
Returns:
The ID of the registered workflow.
"""
assert(isinstance(workflow, Workflow))
if workflow in self.registered_workflows.values():
return [k for k, v in self.registered_workflows.items() if v == workflow][0]
if id is None:
id = 101
while id in self.registered_workflows:
id += 1
self.registered_workflows[id] = workflow
return id
async def on_app_startup(self):
"""Hook that runs when the FastAPI app starts up."""
pass
class ComfyKiosk(BaseComfyKiosk):
"""
The "standard" ComfyKiosk that tries to provide sensible default behavior to cover most use cases.
Accepts and loads any combination of workflows, workflow loaders, and image sources, in that order.
If only one workflow is provided, it will be registered with ID 1.
When .get_image() is called, image sources will be tried in order, raising 404 if none return an image.
"""
def __init__(self,
workflows: Union[Workflow, List[Workflow]] = None,
loaders: Union[SimpleWorkflowLoader, List[SimpleWorkflowLoader]] = None,
image_sources: Union[ImageSource, List[ImageSource]] = None,
generator=None, *,
strict_loading: bool = False):
super().__init__()
def _register_workflow(workflow, **kwargs):
try:
self.register_workflow(workflow, **kwargs)
except Exception as e:
if strict_loading:
raise RuntimeError(f"Failed to register single workflow: {e}")
warnings.warn(f"Failed to register single workflow: {e}")
self.generator = generator
# If only one workflow was provided, give it ID 1
if isinstance(workflows, Workflow):
            if not loaders:
_register_workflow(workflows, id=1)
for workflow in always_iterable(workflows):
_register_workflow(workflow)
# Load and register workflows from loaders
for loader in always_iterable(loaders):
try:
for workflow in loader.load():
_register_workflow(workflow)
except Exception as e:
if strict_loading:
raise RuntimeError(f"Failed to load workflows from loader: {e}")
warnings.warn(f"Failed to load workflows from loader: {e}")
if not self.registered_workflows:
warnings.warn("ComfyKiosk instantiated with no workflows. Please register at least one workflow or loader before trying to generate images.")
self.image_sources = []
for source in always_iterable(image_sources):
if not isinstance(source, ImageSource):
warnings.warn(f"Received an invalid image source: {source}. Skipping.")
continue
if isinstance(source, PreparedGenPool):
source.registered_workflows = self.registered_workflows
if not source.generator:
if self.generator:
source.generator = self.generator
else:
warnings.warn("PreparedGenPool passed in with no generator, and no generator provided to ComfyKiosk. Not adding this PreparedGenPool.")
continue
self.image_sources.append(source)
if not self.image_sources:
raise ValueError("ComfyKiosk instantiated with no image sources. Please make a new instance with at least one valid image source.")
async def _get_image(self, seed: int, workflow: Workflow, **flags) -> Union[ImageResponse, Response]:
"""
Try each image source in order until we get a successful result.
Image sources should raise FileNotFoundError to indicate we should try the next source.
Any other exception will be propagated up.
"""
for source in self.image_sources:
try:
image_data, media_type, seed = await source.get_image(seed, workflow=workflow)
return {"image_data": image_data, "seed": seed, "media_type": media_type or "image/jpeg"}
except FileNotFoundError as e:
continue
return Response(status_code=404, content="Exhausted all image sources. Please try again later.")
async def on_app_startup(self):
"""Run startup hooks for all registered image sources"""
for source in self.image_sources:
await source.on_app_startup()
class EasyComfyKiosk(BaseComfyKiosk):
"""
A simple implementation of ComfyKiosk suitable for demonstration.
Given a path, loads the single workflow if it's a file, and loads all workflows in the directory if it's a directory.
With no path provided, first tries to load a directory of workflows at DEFAULT_WORKFLOW_DIR,
then tries to load a single workflow at DEFAULT_WORKFLOW_PATH. Raises an error if no valid workflow is produced from either.
Assumes a ComfyUI instance is at DEFAULT_COMFYUI_URL, but this can be overridden.
Requests will be passed through to the ComfyUI instance, which will handle queueing.
This simple implementation does not cache or pre-generate images.
"""
def __init__(self, path: Union[str, os.PathLike]=None, comfyui_url: str=None):
# Initialize with base configuration
super().__init__()
# Set up the generator
generator = ComfyGenerator(comfyui_url=comfyui_url or self.DEFAULT_COMFYUI_URL)
# Handle workflow loading
workflow = None
loader = None
if path is None:
# Try directory first, then single file
if os.path.isdir(self.DEFAULT_WORKFLOW_DIR):
loader = SimpleWorkflowLoader(self.DEFAULT_WORKFLOW_DIR)
else:
try:
workflow = Workflow.from_file(self.DEFAULT_WORKFLOW_PATH)
except (FileNotFoundError, json.JSONDecodeError) as e:
raise ValueError(f"No workflows found in {self.DEFAULT_WORKFLOW_DIR} or {self.DEFAULT_WORKFLOW_PATH}")
else:
# User provided a path - check if it's a directory or file
if os.path.isdir(path):
loader = SimpleWorkflowLoader(path)
elif os.path.isfile(path):
workflow = Workflow.from_file(path)
else:
raise FileNotFoundError(f"Path not found: {path}")
# Update instance with configured components
self.generator = generator
self.workflow = workflow
self.loader = loader
# Register workflows
if self.loader:
# Load multiple workflows from the loader
workflows = loader.load()
for w in workflows:
try:
self.register_workflow(w)
except Exception as e:
print(f"Failed to load workflow {w} for reason: {e}")
elif workflow:
# Register single workflow with ID 1
self.register_workflow(workflow, id=1)
async def _get_image(self, seed: int, workflow: Workflow, **flags) -> ImageResponse:
"""Generate an image directly through ComfyUI"""
image_data, media_type = await self.generator.generate_image(seed, workflow)
return {"image_data": image_data, "seed": seed, "media_type": media_type or "image/jpeg"}

comfykiosk/comfy_networking.py Normal file

@ -0,0 +1,165 @@
from typing import List
import logging
import websockets
import urllib.request
import urllib.parse
import urllib.error
import json
import uuid
import http.cookiejar
import httpx
class DowngradeInfoFilter(logging.Filter):
def filter(self, record):
if record.levelno == logging.INFO:
record.levelno = logging.DEBUG
record.levelname = 'DEBUG'
return True
# Configure httpx logger to downgrade INFO to DEBUG
httpx_logger = logging.getLogger("httpx")
httpx_logger.addFilter(DowngradeInfoFilter())
# Function to handle redirects and store cookies
async def open_websocket_connection(comfyui_url):
client_id = str(uuid.uuid4())
cookie_jar = http.cookiejar.CookieJar() # Initialize a cookie jar
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
urllib.request.install_opener(opener) # Install the opener to handle cookies globally
try:
ws = await websockets.connect(f"ws://{comfyui_url}/ws?clientId={client_id}")
return ws, client_id
except websockets.InvalidStatusCode as e:
if e.status_code in (301, 302, 307, 308): # Check for redirect status codes
print(f"Redirect detected: {e.status_code}")
location = e.headers.get("Location")
if location:
print(f"Following redirect to: {location}")
# Make a request to the redirect URL to store cookies
try:
urllib.request.urlopen(location)
except Exception as redirect_request_error:
print(f"Error following redirect: {redirect_request_error}")
raise
print(f"Retrying websocket connection to original URL: {comfyui_url}")
return await open_websocket_connection(comfyui_url) # Retry with original URL and stored cookies
else:
print("Redirect location not found.")
raise
else:
print(f"Failed to open websocket connection: {e}")
raise
except Exception as e:
print(f"Failed to open websocket connection: {e}")
raise
def queue_prompt(comfyui_url, prompt, client_id):
p = {"prompt": prompt, "client_id": client_id}
headers = {'Content-Type': 'application/json'}
data = json.dumps(p).encode('utf-8')
req = urllib.request.Request(f"http://{comfyui_url}/prompt", data=data, headers=headers)
try:
response = urllib.request.urlopen(req)
response_data = json.loads(response.read())
return response_data
except urllib.error.HTTPError as e:
error_body = e.read().decode('utf-8')
print(f"Failed to queue prompt. HTTPError: {e.code} - {e.reason}. Response body: {error_body}")
raise
except Exception as e:
print(f"Failed to queue prompt. Unexpected error: {e}")
raise
async def track_progress(prompt, ws, prompt_id):
node_ids = list(prompt.keys())
finished_nodes = []
while True:
try:
out = await ws.recv()
if isinstance(out, str):
message = json.loads(out)
if message['type'] == 'progress':
data = message['data']
current_step = data['value']
if message['type'] == 'execution_cached':
data = message['data']
for itm in data['nodes']:
if itm not in finished_nodes:
finished_nodes.append(itm)
if message['type'] == 'executing':
data = message['data']
if data['node'] not in finished_nodes:
finished_nodes.append(data['node'])
if data['node'] is None and data['prompt_id'] == prompt_id:
break # Execution is done
else:
continue
except (websockets.exceptions.ConnectionClosedError, websockets.exceptions.ConnectionClosedOK, websockets.WebSocketException) as e: # Catch correct exception
print(f"Websocket connection closed: {e}")
break
return
async def get_history(prompt_id, comfyui_url):
async with httpx.AsyncClient() as client:
try:
response = await client.get(f"http://{comfyui_url}/history/{prompt_id}")
response.raise_for_status()
comfyui_status: dict = response.json()[prompt_id]["status"]
if comfyui_status["status_str"] == "error":
for message in comfyui_status["messages"]:
if message[0] == "execution_error":
                        print(f"ComfyUI threw an exception: {message[1]['exception_message']}")
                        raise RuntimeError(f"ComfyUI reported an execution error: {message[1]['exception_message']}")
return response.json()[prompt_id]
except httpx.HTTPError as e:
print(f"Failed to get image. HTTPError: {e}")
raise
async def get_image(filename, subfolder, folder_type, comfyui_url) -> bytes:
data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
url_values = urllib.parse.urlencode(data)
async with httpx.AsyncClient() as client:
try:
response = await client.get(f"http://{comfyui_url}/view?{url_values}")
response.raise_for_status()
return response.content
except httpx.HTTPError as e:
print(f"Failed to get image. HTTPError: {e}")
raise
async def get_images(prompt_id, server_address, allow_preview = False) -> List[dict]:
output_images = []
history = await get_history(prompt_id, server_address)
for node_id in history['outputs']:
node_output = history['outputs'][node_id]
output_data = {}
if 'images' in node_output:
for image in node_output['images']:
if allow_preview and image['type'] == 'temp':
preview_data = await get_image(image['filename'], image['subfolder'], image['type'], server_address)
output_data['image_data'] = preview_data
if image['type'] == 'output':
image_data = await get_image(image['filename'], image['subfolder'], image['type'], server_address)
output_data['image_data'] = image_data
output_data['file_name'] = image['filename']
output_data['type'] = image['type']
output_images.append(output_data)
return output_images
async def execute_comfyui_prompt(comfyui_url, prompt):
ws, client_id = await open_websocket_connection(comfyui_url)
queued_prompt = queue_prompt(comfyui_url, prompt, client_id)
prompt_id = queued_prompt['prompt_id']
await track_progress(prompt, ws, prompt_id)
await ws.close()
images = await get_images(prompt_id, comfyui_url)
return images

13
comfykiosk/config.py Normal file

@ -0,0 +1,13 @@
import os
def _get_env_default(env_var: str, default_value: str) -> str:
"""Helper method to get value from environment variable or fall back to default"""
value = os.environ.get(env_var, default_value)
return value
# Define defaults using environment variables with fallbacks
DEFAULT_LOCAL_SALT = _get_env_default("LOCAL_SALT", "__SERVERSIDE_SALT__")
DEFAULT_COMFYUI_URL = _get_env_default("COMFYUI_URL", "127.0.0.1:8188")
DEFAULT_WORKFLOW_PATH = _get_env_default("WORKFLOW_PATH", "workflow.json")
DEFAULT_WORKFLOW_DIR = _get_env_default("WORKFLOW_DIR", "workflows")
DEFAULT_OUTPUT_DIR = _get_env_default("OUTPUT_DIR", "output")

81
comfykiosk/fastapi.py Normal file

@ -0,0 +1,81 @@
from fastapi import FastAPI, Response, Path
from starlette.responses import RedirectResponse
from comfykiosk import BaseComfyKiosk, EasyComfyKiosk
from comfykiosk.pydantic_models import WorkflowDTO
from typing import List
def create_app(comfy_wrapper: BaseComfyKiosk) -> FastAPI:
"""Create a FastAPI application that provides HTTP access to a ComfyKiosk instance"""
import logging
# Configure logging before creating the FastAPI app
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
force=True # Overrides any existing configuration
)
app = FastAPI(title="ComfyKiosk")
async def startup():
logger = logging.getLogger("comfykiosk")
logger.info("Handling startup hook...")
await comfy_wrapper.on_app_startup()
app.add_event_handler("startup", startup)
@app.get("/workflows", response_model=List[WorkflowDTO])
async def list_workflows():
workflows = await comfy_wrapper.list_workflows()
workflow_dtos = [WorkflowDTO(id=id, handle=workflow.handle, description=workflow.description, hash=workflow.hash) for id, workflow in workflows]
return workflow_dtos
@app.get("/workflows/by-handle/{handle}", response_model=WorkflowDTO)
async def get_workflow_information_by_handle(handle: str):
for workflow_id, workflow in comfy_wrapper.registered_workflows.items():
if workflow.handle == handle:
return WorkflowDTO(id=workflow_id, handle=workflow.handle, description=workflow.description, hash=workflow.hash)
return Response(status_code=404, content=f"Workflow with handle '{handle}' not found.")
@app.get("/workflows/by-handle", response_model=List[WorkflowDTO])
async def list_workflows_by_handle():
workflows = await comfy_wrapper.list_workflows()
workflow_dtos = [WorkflowDTO(id=id, handle=workflow.handle, description=workflow.description, hash=workflow.hash) for id, workflow in workflows]
        workflow_dtos.sort(key=lambda x: x.handle or "")  # Sort by handle
return workflow_dtos
@app.get("/workflows/{workflow_id}", response_model=WorkflowDTO)
    async def get_workflow_information(workflow_id: int):
        try:
            workflow = comfy_wrapper.registered_workflows[workflow_id]
        except KeyError:
            return Response(status_code=404, content=f"Workflow with id '{workflow_id}' not found.")
        return WorkflowDTO(id=workflow_id, handle=workflow.handle, description=workflow.description, hash=workflow.hash)
@app.get("/workflows/{workflow_id}/image")
async def generate_by_workflow(workflow_id: int, redirect: bool = True):
try:
result = await comfy_wrapper.get_image(workflow_id=workflow_id)
except KeyError:
            return Response(status_code=404, content=f"Workflow with id '{workflow_id}' not found.")
if type(result) is Response:
return result
if redirect:
return RedirectResponse(url=f"/workflows/{workflow_id}/image/{result['seed']}", status_code=303)
else:
return Response(content=result["image_data"], media_type=result["media_type"])
@app.get("/workflows/by-handle/{handle}/image")
async def generate_by_workflow_handle(handle: str, redirect: bool = True):
for workflow_id, workflow in comfy_wrapper.registered_workflows.items():
if workflow.handle == handle:
return await generate_by_workflow(workflow_id, redirect)
return Response(status_code=404, content=f"Workflow with handle '{handle}' not found.")
@app.get("/workflows/{workflow_id}/image/{seed}")
    async def generate_by_workflow_with_seed(workflow_id: int, seed: int = Path(..., title="Seed for image generation")):
        try:
            result = await comfy_wrapper.get_image(workflow_id=workflow_id, seed=seed)
        except KeyError:
            return Response(status_code=404, content=f"Workflow with id '{workflow_id}' not found.")
        if type(result) is Response:
            return result
        return Response(content=result["image_data"], media_type=result["media_type"])
return app

103
comfykiosk/generator.py Normal file

@ -0,0 +1,103 @@
import json
import os
from . import ImageSource
from .workflow import Workflow
from .comfy_networking import execute_comfyui_prompt
from .config import DEFAULT_COMFYUI_URL
from io import BytesIO
from PIL import Image
import traceback
class ImageGenerator(ImageSource):
"""Base class for image generators that defines the common interface."""
async def get_image(self, seed: int, workflow: Workflow) -> tuple[bytes, str, int]:
# Temporary shortcut to allow it to be a subclass of ImageSource
if workflow is None or not isinstance(workflow, Workflow):
raise ValueError(f"workflow must be a Workflow instance, got {type(workflow).__name__ if workflow is not None else 'None'}")
result = await self.generate_image(seed, workflow)
return (result[0], result[1], seed)
async def generate_image(self, seed: int, workflow: Workflow) -> tuple[bytes, str]:
"""
Generate an image using the given seed and workflow.
Args:
seed: Random seed for image generation
workflow: Workflow object containing the generation parameters
Returns:
Tuple of (image_data: bytes, media_type: str)
"""
raise NotImplementedError("Subclasses must implement generate_image()")
async def postprocess_image(self, image_data: bytes) -> tuple[bytes, str]:
"""
Post-process generated image data.
Args:
image_data: Raw image data in bytes
Returns:
Tuple of (processed_image_data: bytes, media_type: str)
"""
raise NotImplementedError("Subclasses must implement postprocess_image()")
class ComfyGenerator(ImageGenerator):
def __init__(self, *, comfyui_url=None):
self.comfyui_url = comfyui_url or DEFAULT_COMFYUI_URL
    async def generate_image(self, seed, workflow) -> tuple[bytes, str]:
try:
prompt = json.loads(workflow.json)
id_to_class_type = {id: details['class_type'] for id, details in prompt.items()}
# Temporary workaround to allow use of SwarmUI-generated workflows
k_sampler = next((key for key, value in id_to_class_type.items() if value == 'KSampler'), None)
if k_sampler:
prompt[k_sampler]['inputs']['seed'] = seed
else:
k_sampler_advanced = next((key for key, value in id_to_class_type.items() if value == 'KSamplerAdvanced'), None)
if k_sampler_advanced:
prompt[k_sampler_advanced]['inputs']['noise_seed'] = seed
images = await execute_comfyui_prompt(self.comfyui_url, prompt)
if images:
# Workaround: Order doesn't seem predictable across workflows
# TODO: Remove preview nodes during parsing
# TODO: Replace save node with websockets save node
for image in images:
image_data: bytes = image.get('image_data')
if image_data:
return await self.postprocess_image(image_data)
else:
raise RuntimeError("No image data received from ComfyUI")
else:
raise RuntimeError("No images received from ComfyUI")
        except Exception as e:
            print(f"An unexpected error occurred: {e}")
            traceback.print_exc()
            raise RuntimeError(f"An unexpected error occurred: {e}") from e
    async def postprocess_image(self, image_data: bytes) -> tuple[bytes, str]:
try:
image = Image.open(BytesIO(image_data))
if image.mode != "RGB":
image = image.convert("RGB")
output_buffer = BytesIO()
image.save(output_buffer, "JPEG", quality=90)
final_image_data: bytes = output_buffer.getvalue()
media_type = "image/jpeg"
return final_image_data, media_type
except Exception as e:
print(f"Error during postprocessing: {e}")
traceback.print_exc()
raise

comfykiosk/image_sources/__init__.py Normal file

@ -0,0 +1,51 @@
from typing import Optional
class ImageSource:
"""
Base class for image sources.
Subclasses should raise FileNotFoundError if nothing has specifically gone "wrong",
but the app should silently move on to the next source.
All other error types are subject to handling by the app.
"""
def __init__(self, generator=None, saver=None, workflow=None):
self.generator = generator
self.saver: ImageSink = saver
self.workflow = workflow
    async def get_image(self, seed, workflow=None) -> tuple[bytes, str, int]:
raise NotImplementedError()
    async def save_image(self, seed, image_data: bytes) -> Optional[str]:
        if self.saver:
            return await self.saver.save_image(seed, image_data)
        raise NotImplementedError()
async def on_app_startup(self):
"""Hook that runs when the FastAPI app starts up. Override in subclasses that need startup initialization."""
pass
class ImageSink(ImageSource):
"""
An ImageSink acts as both an ImageSource and a storage location for images. Subclasses implement
`save_image()` to define how images are stored, and `get_image()` to retrieve them later using the same seed.
"""
async def save_image(self, seed, image_data: bytes) -> Optional[str]:
'''
Saves the provided image data, associating it with the given seed. The implementation should ensure that
calling `get_image()` with the same seed will retrieve the saved image.
:param seed: A value used to identify the image. Implementations may use this value to determine how the image is stored,
for example, as part of a filename or as a key in a database.
:param image_data: The image data to be saved, represented as bytes.
:return: An optional string that represents the location or identifier of the saved image. This could be a URL,
a file path, or another suitable identifier. Returns None if the implementation does not use such identifiers.
'''
raise NotImplementedError()

comfykiosk/image_sources/fake_i_fier.py Normal file

@ -0,0 +1,86 @@
import os
import random
import mimetypes
from . import ImageSource
class FakeIFier(ImageSource):
"""
Allows us to map a seed to an existing image on disk,
gaslighting the user into thinking they're seeing a new image when specifying a seed.
We lampshade it in the source comments tho so it's all good.
At the moment, this is done by hardlinking the target filename to a randomly-selected image on disk.
This ImageSource tests for the ability to create hardlinks in the target directory when it is instantiated.
"""
def __init__(self, directory, saver, **kwargs):
super().__init__(**kwargs)
self.directory = directory
self.saver = saver
if not os.path.exists(self.directory):
raise FileNotFoundError(f"Directory not found: {self.directory}")
FakeIFier.test_hardlinks(self.directory)
@staticmethod
def test_hardlinks(directory):
# Check if hardlinking is supported. Not a comprehensive check, but better than nothing.
try:
testlink_source = os.path.join(directory, "testlink_source")
testlink_target = os.path.join(directory, "testlink_target")
# Create a temporary file for the source
with open(testlink_source, "w") as f:
f.write("hey how's it going?")
os.link(testlink_source, testlink_target)
os.remove(testlink_target)
os.remove(testlink_source)
except OSError as e:
raise OSError(f"Hardlinking not supported in this directory: {e}")
async def get_image(self, seed, workflow=None):
if workflow:
workflow_hash = workflow.hash
directory = os.path.join(self.directory, workflow_hash)
else:
directory = self.directory
if not os.path.exists(directory):
raise FileNotFoundError(f"Directory not found: {directory}")
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
if not files:
raise FileNotFoundError(f"No files found in directory: {directory}")
# Filter for JPG and PNG files
allowed_extensions = ('.jpg', '.jpeg', '.png')
files = [f for f in files if f.lower().endswith(allowed_extensions)]
if not files:
raise FileNotFoundError(f"No JPG or PNG files found in directory: {directory}")
chosen_file = random.choice(files)
filepath = os.path.join(directory, chosen_file)
target_filename = self.saver.generate_filename(seed, workflow=workflow)
target_filepath = os.path.join(self.directory, target_filename)
try:
# For Windows compatibility, use absolute paths
os.link(os.path.abspath(filepath), os.path.abspath(target_filepath))
except OSError as e:
raise OSError(f"Failed to create hardlink: {e}")
with open(target_filepath, "rb") as f:
image_data = f.read()
mime_type = mimetypes.guess_type(target_filepath)[0]
return image_data, mime_type, seed

comfykiosk/image_sources/filesystem.py Normal file

@ -0,0 +1,67 @@
import hashlib
import os
import mimetypes
import base64
from comfykiosk.image_sources import ImageSource, ImageSink
class FileSystemImageSource(ImageSink):
def __init__(self, directory, *, local_salt=None, **kwargs):
super().__init__(**kwargs)
self.directory = directory
self.local_salt = local_salt
# Move the directory check/creation and write permissions check to here
if not os.path.exists(self.directory):
try:
os.makedirs(self.directory)
print(f"Created output directory: {self.directory}")
except OSError as e:
raise OSError(f"Error creating output directory: {e}")
# Check write permissions
if not os.access(self.directory, os.W_OK):
raise OSError(f"No write permissions in output directory: {self.directory}")
async def get_image(self, seed, workflow=None):
filename = self.generate_filename(seed, workflow)
filepath = os.path.join(self.directory, filename)
if os.path.exists(filepath):
with open(filepath, "rb") as f:
image_data = f.read()
media_type: str = mimetypes.guess_type(filepath)[0]
return image_data, media_type, seed
raise FileNotFoundError
async def save_image(self, seed, image_data, workflow=None):
filename = self.generate_filename(seed, workflow)
filepath = os.path.join(self.directory, filename)
with open(filepath, "wb") as f:
f.write(image_data)
def generate_filename(self, seed, workflow=None):
salted_seed = f"{seed}{self.local_salt}"
seed_hash = hashlib.sha256(salted_seed.encode()).digest()
b64_hash = base64.urlsafe_b64encode(seed_hash).decode('utf-8')[:16]
filename = b64_hash + ".jpg"
if workflow:
workflow_hash = workflow.hash
filepath = os.path.join(self.directory, workflow_hash, filename)
dirpath = os.path.join(self.directory, workflow_hash)
if not os.path.exists(dirpath):
try:
os.makedirs(dirpath)
except OSError as e:
raise OSError(f"Error creating workflow subdirectory: {e}")
return os.path.join(workflow_hash, filename)
else:
filepath = os.path.join(self.directory, filename)
return filename

comfykiosk/image_sources/pregenerate.py Normal file

@ -0,0 +1,112 @@
import asyncio
import logging
import warnings
from typing import Dict, List, Tuple # Import List and Tuple
from weakref import WeakKeyDictionary
from comfykiosk import generate_seed
from comfykiosk.image_sources import ImageSource
from comfykiosk.workflow import Workflow # Import Workflow
class PreparedGenPool(ImageSource):
def __init__(self, bucket_max: int = 10, batch_size: int = 5, registered_workflows: Dict=None,
max_retries: int = 10, initial_delay: float = 1.0, max_delay: float = 60.0, **kwargs):
super().__init__()
self.generator = kwargs.get('generator')
self.saver = kwargs.get('saver')
self.bucket_max = bucket_max
self.replenish_batch_size = batch_size
self.image_queues = WeakKeyDictionary()
self.replenish_task = None
self.registered_workflows = registered_workflows or {}
# Retry configuration
self.max_retries = max_retries
self.initial_delay = initial_delay
self.max_delay = max_delay
        if self.generator is None:
            raise ValueError("The 'generator' argument is required for PreparedGenPool.")
        if self.saver is None:
            raise ValueError("The 'saver' argument is required for PreparedGenPool.")
async def replenish(self):
# Check for no assigned variable, not an empty list
if self.registered_workflows is None:
warnings.warn("Assign `registered_workflows` to the `PreparedGenPool` instance, or pass it to a ComfyKiosk instance.", UserWarning)
logging.info("Replenishing image queue...")
while True: # Keep at it until there's nothing left.
# Find the workflow with the smallest queue
items: List[Tuple[int, Workflow]] = [(len(self.image_queues.get(workflow, [])), workflow) for workflow in self.registered_workflows.values()]
if not items:
break # No workflows registered
min_queue_size, target_workflow = min(items, key=lambda item: item[0], default=(float('inf'), None)) # Compare by queue size
if target_workflow is None:
break
if min_queue_size >= self.bucket_max:
break # Target queue is full
# Generate images for the target workflow with exponential backoff
# (The delay resets with the next batch)
delay = self.initial_delay
# Done in batches because switching between workflows likely means loading new models into memory
# This can more than triple the time it takes to generate an image. Batching images amortizes that cost.
# Note that only one request is made to the backend at any time, so we must wait for any queue at the
# ComfyUI server to be clear before our request is serviced, for each image.
for _ in range(self.replenish_batch_size):
retry_count = 0
while retry_count < self.max_retries:
try:
seed = generate_seed()
image_data, media_type = await self.generator.generate_image(seed=seed, workflow=target_workflow)
await self.saver.save_image(seed, image_data, workflow=target_workflow)
self.image_queues.setdefault(target_workflow, []).append(seed)
break # Success - continue to next image
except Exception as e:
retry_count += 1
if retry_count >= self.max_retries:
logging.error(f"Failed to generate image after {self.max_retries} attempts: {e}")
break
logging.warning(f"Error generating image (attempt {retry_count}/{self.max_retries}): {e}")
await asyncio.sleep(min(delay, self.max_delay))
delay *= 2 # Exponential backoff
# Log buffer status
if not self.image_queues:
logging.info("Buffer Status: All queues are empty")
else:
status = ["Buffer Status:"]
for workflow, queue in self.image_queues.items():
status.append(f" {workflow.handle}: {len(queue)} images")
logging.info("\n".join(status))
def start_replenish(self):
if self.replenish_task is None or self.replenish_task.done(): # Start only if not already running
self.replenish_task = asyncio.create_task(self.replenish())
self.replenish_task.add_done_callback(self._replenish_finished)
async def get_image(self, seed=None, workflow: Workflow=None):
if workflow is None:
raise ValueError("The 'workflow' argument is required for PreparedGenPool.get_image().")
self.start_replenish()
image_queue = self.image_queues.get(workflow)
if image_queue:
seed = image_queue.pop(0)
return await self.saver.get_image(seed, workflow=workflow)
        else:
            raise FileNotFoundError("No prepared images are available for this workflow yet.")
def _replenish_finished(self, task):
self.replenish_task = None # Reset the task when finished
async def on_app_startup(self):
"""Start the replenishment loop when the FastAPI app starts"""
self.start_replenish()

comfykiosk/pydantic_models.py Normal file

@ -0,0 +1,10 @@
from typing import Optional
from pydantic import BaseModel, Field
class WorkflowDTO(BaseModel):
id: int
hash: str = Field(..., min_length=8, max_length=8, pattern=r"^[a-zA-Z0-9_-]+$")
handle: Optional[str] = Field(None, min_length=3, max_length=30, description="Optional name for the workflow to be displayed in lists")
description: Optional[str] = Field(None, max_length=255, description="Optional description of the workflow to be displayed in lists")

6
comfykiosk/randomness.py Normal file

@ -0,0 +1,6 @@
# *holds up spork*
import random
def generate_seed():
seed = random.randint(10**14, 10**15 - 1)
return seed

comfykiosk/workflow/__init__.py Normal file

@ -0,0 +1,64 @@
import json
import base64
import hashlib
class Workflow:
"""
Represents a single workflow or workflow template.
Attributes:
json (str): The ComfyUI-compatible JSON blob, with or without tagging for the randomizer.
handle (str): An optional name for the workflow, shown in lists.
description (str): An optional description of the workflow, shown in lists.
hash (str): A unique 8-character base64 identifier derived from the workflow JSON.
Used to consistently identify workflows with the same content.
"""
def __init__(self, json_data: str, handle: str = None, description: str = None):
try:
data = json.loads(json_data) # Validate and parse JSON data
minified_json = json.dumps(data, separators=(',', ':')) # Minify JSON
except json.JSONDecodeError as e:
raise ValueError(f"Invalid JSON data: {e}")
self._json = minified_json
self.handle = handle
self.description = description
# Calculate and store the hash
json_hash = hashlib.sha256(self._json.encode('utf-8')).digest()
self.hash = base64.urlsafe_b64encode(json_hash).decode('utf-8')[:8]
@property
def json(self):
return self._json
@classmethod
def from_file(cls, filepath: str, *args, **kwargs):
"""
Constructs a Workflow object from a JSON file on disk.
Args:
filepath (str): The path to the JSON file.
*args: Variable length argument list to pass to the constructor.
**kwargs: Arbitrary keyword arguments to pass to the constructor.
"""
try:
with open(filepath, 'r') as f:
json_data = f.read()
except FileNotFoundError:
raise FileNotFoundError(f"File not found: {filepath}")
except Exception as e: # Catching potential read errors
raise IOError(f"Error reading file: {e}")
return cls(json_data, *args, **kwargs)
def __eq__(self, other):
if not isinstance(other, Workflow):
return False
return self._json == other._json
def __hash__(self):
return hash(self._json)
def __str__(self):
return self._json

comfykiosk/workflow/loaders.py Normal file

@ -0,0 +1,21 @@
from typing import List
from . import Workflow
import os
import json
class SimpleWorkflowLoader:
def __init__(self, directory: str):
self.directory = directory
def load(self) -> List[Workflow]:
workflows = []
for filename in os.listdir(self.directory):
if filename.endswith(".json"):
filepath = os.path.join(self.directory, filename)
try:
workflow = Workflow.from_file(filepath, handle=filename[:-5]) # Use filename as handle
except (ValueError, FileNotFoundError, IOError) as e:
print(f"Error loading workflow from {filepath}: {e}")
continue # Skip to the next file
workflows.append(workflow)
return workflows

15
docker/.env.template Normal file

@ -0,0 +1,15 @@
# Copy this file to .env and modify as needed:
# cp .env.template .env
# Server-side salt for consistent image naming
LOCAL_SALT=__SERVERSIDE_SALT__
# ComfyUI server URL
COMFYUI_URL=127.0.0.1:8188
# Workflow file/directory paths
WORKFLOW_PATH=workflow.json
WORKFLOW_DIR=workflows
# Output directory for generated images
OUTPUT_DIR=output

19
docker/Dockerfile Normal file

@ -0,0 +1,19 @@
FROM python:3.12-slim
WORKDIR /app
RUN useradd -ms /bin/bash appuser && mkdir -p /app/output && chown -R appuser:appuser /app
USER appuser
# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Expose the FastAPI port
EXPOSE 8000
COPY comfykiosk/ ./comfykiosk/
COPY sample.py sample-advanced.py workflow.json ./
COPY sample-workflows ./sample-workflows
CMD ["python", "/app/sample.py"]

docker/advanced-sample-docker-compose.yml Normal file

@ -0,0 +1,15 @@
services:
comfykiosk:
build:
context: ..
dockerfile: ./docker/Dockerfile
command: ["python", "/app/sample-advanced.py"]
env_file:
- .env
volumes:
- comfykiosk-output:/app/output
ports:
- "18000:8000"
volumes:
comfykiosk-output:

docker/sample-docker-compose.yml Normal file

@ -0,0 +1,9 @@
services:
comfykiosk:
build:
context: ..
dockerfile: ./docker/Dockerfile
env_file:
- .env
ports:
- "8000:8000"

146
readme.md Normal file
View File

@ -0,0 +1,146 @@
# ComfyKiosk
A "kiosk-mode" interface for ComfyUI workflows,
with built-in image caching and workflow management capabilities.
In short, it lets you expose a ComfyUI server with very fine-grained control over what can be generated with it.
## Features
- (Optional) FastAPI-based HTTP interface for image generation
- Image caching and storage management
- Seed-based deterministic image generation
## Coming in v0.2
- Tags - Group workflows, generate randomly from a single endpoint
- Templating - Mark parameters in the workflow to be randomized with each request
## Installation
### Docker
`docker build -t comfykiosk -f docker/Dockerfile .`
should create an image with the library and `sample.py`
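To run the resulting image directly (a sketch, assuming you've copied `docker/.env.template` to `docker/.env` and pointed `COMFYUI_URL` at your ComfyUI server):
`docker run --rm -p 8000:8000 --env-file docker/.env comfykiosk`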
### Not Docker
Create a `venv` with Python 3.12 and install all required packages
`python3.12 -m venv .venv && source .venv/bin/activate && pip install -r requirements.txt`
## Quick Start
### Docker
- Copy `docker/.env.template` to `docker/.env`
- You will almost certainly need to change `COMFYUI_URL`
- Run `docker-compose -f ./docker/sample-docker-compose.yml up`
- Assuming the defaults and the sample `docker-compose.yml`,
you should be able to generate an image at `http://localhost:8000/workflows/1/image`
### Not Docker
A helper function is provided for making a dead-simple application that hosts a single workflow or directory of workflows.
Requests are proxied directly to the ComfyUI server, with no caching.
This is likely unsuitable for public use, but is the absolute fastest way to get started.
It's also almost exactly what `sample-docker-compose.yml` is doing.
```python
from comfykiosk import EasyComfyKiosk
from comfykiosk.fastapi import create_app
# Create the image generation service
comfy = EasyComfyKiosk()
# Attach FastAPI routes to it
app = create_app(comfy)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=8000)
```
By default, it will look for workflows in a directory called `./workflows`,
or a single workflow at `./workflow.json`, but you can specify the path yourself...
```python
comfy = EasyComfyKiosk(path="./custom_workflows")
```
When a directory of workflows is loaded, their `handle` is based on the filename.
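For example, a file named `cat.json` gets the handle `cat`, so it can be requested at `http://127.0.0.1:8000/workflows/by-handle/cat/image`.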
Run the app:
`python ./sample.py`
Optionally, specify where to find the ComfyUI server:
`COMFYUI_URL=127.0.0.1:8188 python ./sample.py`
Open a browser and navigate to `http://127.0.0.1:8000/workflows` to see the available workflows.
To actually generate images, navigate to `http://127.0.0.1:8000/workflows/{workflow_id}/image` or `http://127.0.0.1:8000/workflows/by-handle/{handle}/image`
## Advanced Usage
This example can be found in `sample-advanced.py` and `docker/advanced-sample-docker-compose.yml`
Let's create a webapp with a folder of workflows, and let users generate from any of them.
So they don't need to wait, we'll generate a few images from each, ahead of time.
When someone asks for a "new" image, it will be served from that buffer.
We'll also use a filesystem cache to avoid re-generating images that have already been generated.
```python
from comfykiosk import ComfyKiosk, ComfyGenerator, SimpleWorkflowLoader
from comfykiosk.image_sources.filesystem import FileSystemImageSource
from comfykiosk.image_sources.pregenerate import PreparedGenPool
from comfykiosk.fastapi import create_app
# You can compose ComfyKiosk as an alternate way to override defaults
filesystem_source = FileSystemImageSource(directory="./output")
comfyui_backend = ComfyGenerator()
# You can also use the default workflow loader
loader = SimpleWorkflowLoader(directory="./sample-workflows")
# In this example, each workflow can have up to 10 prepared images.
# The generator will make 5 images before switching workflows.
# This is to amortize the time spent loading models into VRAM.
pregen_pool = PreparedGenPool(bucket_max=10, batch_size=5, generator=comfyui_backend, saver=filesystem_source)
# When a request comes in, each image source will be tried in this order.
comfy = ComfyKiosk(loaders=loader,
image_sources=[filesystem_source, pregen_pool])
# Attach FastAPI routes to it
app = create_app(comfy)
if __name__ == "__main__":
import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info", log_config=None)  # Use our logging config instead of uvicorn's
```
## Creating Workflows
From ComfyUI, use the "Export (API)" option in the Workflow menu.
There must be exactly one "Save Image" node in the graph.
At the moment ComfyKiosk only changes the seed.
The eventual goal is to allow you to specify which parameters can be changed, and the allowed range of those choices.
In the absence of user input, any changeable parameters will be deterministically selected based on the request seed.
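As a quick sanity check on an exported workflow, you can locate the seed that ComfyKiosk will overwrite. This is a minimal sketch mirroring the KSampler lookup in `comfykiosk/generator.py`, assuming the export was saved as `workflow.json`:
```python
import json

# Load a workflow exported with "Export (API)"
with open("workflow.json") as f:
    prompt = json.load(f)

# Find the KSampler node; its "seed" input is what ComfyKiosk replaces on each request.
k_sampler = next((k for k, v in prompt.items() if v["class_type"] == "KSampler"), None)
if k_sampler:
    print("KSampler seed in the export:", prompt[k_sampler]["inputs"]["seed"])
else:
    print("No KSampler node found; a KSamplerAdvanced node uses 'noise_seed' instead.")
```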
## API Endpoints
Given that this is a FastAPI app, you can also view the API docs as generated by Swagger at `/docs`.
- `GET /workflows` - List all available workflows
- `GET /workflows/by-handle` - List workflows sorted by handle
- `GET /workflows/{workflow_id}` - Get workflow information
- `GET /workflows/{workflow_id}/image` - Generate image using specified workflow
- `GET /workflows/by-handle/{handle}/image` - Generate image using workflow handle
- `GET /workflows/{workflow_id}/image/{seed}` - Generate image with specific seed
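For example, a small client using `httpx` (already in `requirements.txt`) can exercise these endpoints. This is a sketch, assuming the app is running locally on port 8000 and a workflow is registered with ID 1:
```python
import httpx

BASE = "http://127.0.0.1:8000"

# List the registered workflows
print(httpx.get(f"{BASE}/workflows").json())

# Fetch an image from workflow 1 with a fixed seed; repeat requests with the same seed should return the same image
resp = httpx.get(f"{BASE}/workflows/1/image/123456789012345")
with open("example.jpg", "wb") as f:
    f.write(resp.content)
```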

8
requirements.txt Normal file

@ -0,0 +1,8 @@
fastapi~=0.115.6
httpx[http2]~=0.27.2
requests
uvicorn~=0.32.1
starlette~=0.41.3
pillow~=10.4.0
websockets~=14.1
more-itertools

27
sample-advanced.py Normal file

@ -0,0 +1,27 @@
from comfykiosk import ComfyKiosk, ComfyGenerator, SimpleWorkflowLoader
from comfykiosk.image_sources.filesystem import FileSystemImageSource
from comfykiosk.image_sources.pregenerate import PreparedGenPool
from comfykiosk.fastapi import create_app
# You can compose ComfyKiosk as an alternate way to override defaults
filesystem_source = FileSystemImageSource(directory="./output")
comfyui_backend = ComfyGenerator()
# You can also use the default workflow loader
loader = SimpleWorkflowLoader(directory="./sample-workflows")
# In this example, each workflow can have up to 10 prepared images.
# The generator will make 5 images before switching workflows.
# This is to amortize the time spent loading models into VRAM.
pregen_pool = PreparedGenPool(bucket_max=10, batch_size=5, generator=comfyui_backend, saver=filesystem_source)
# When a request comes in, each image source will be tried in this order.
comfy = ComfyKiosk(loaders=loader,
image_sources=[filesystem_source, pregen_pool])
# Attach FastAPI routes to it
app = create_app(comfy)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info", log_config=None)


@ -0,0 +1,86 @@
{
"3": {
"inputs": {
"seed": 165993852826701,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "beautiful scenery, photograph of a bird wearing a baseball cap\n",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
}
}

86
sample-workflows/cat.json Normal file

@ -0,0 +1,86 @@
{
"3": {
"inputs": {
"seed": 541083977696651,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "beautiful scenery, photograph of a cat wearing a sombrero",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
}
}

86
sample-workflows/dog.json Normal file

@ -0,0 +1,86 @@
{
"3": {
"inputs": {
"seed": 200242354553354,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "beautiful scenery, photograph of a dog wearing a party hat",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
}
}


@ -0,0 +1,86 @@
{
"3": {
"inputs": {
"seed": 475216039665249,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "beautiful scenery, photograph of a ferret wearing a (cowboy hat)",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
}
}


@ -0,0 +1,86 @@
{
"3": {
"inputs": {
"seed": 333804592922943,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "beautiful scenery, photograph of a hamster wearing a football (helmet:1.2)\n",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
}
}

12
sample.py Normal file

@ -0,0 +1,12 @@
from comfykiosk import EasyComfyKiosk
from comfykiosk.fastapi import create_app
# Create the image generation service
comfy = EasyComfyKiosk()
# Attach FastAPI routes to it
app = create_app(comfy)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")

86
workflow.json Normal file

@ -0,0 +1,86 @@
{
"3": {
"inputs": {
"seed": 310210910957456,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "sd-v1-4.ckpt"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
}
}