VYPR
Medium severity · CVSS 4.3 · NVD Advisory · Published Apr 20, 2026 · Updated Apr 29, 2026

CVE-2026-6598

CVE-2026-6598

Description

A security vulnerability has been detected in langflow-ai langflow up to 1.8.3. The affected element is the function create_project/encrypt_auth_settings of the file src/backend/base/langflow/api/v1/projects.py of the component Project Creation Endpoint. Such manipulation of the argument auth_settings leads to cleartext storage in a file or on disk. The attack can be launched remotely. The exploit has been disclosed publicly and may be used. The vendor was contacted early about this disclosure but did not respond in any way.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package: langflow (PyPI)
Affected versions: < 1.9.1
Patched versions: 1.9.1

Affected products

1

Patches

1
45325f637630

feat: Langflow SDK and Flow DevOps API Toolkit (#12245)

https://github.com/langflow-ai/langflow · Eric Hare · Apr 2, 2026 · via GHSA
98 files changed · +20361 −1559
  • docker/build_and_push_backend.Dockerfile · +2 −1 · modified
    @@ -29,6 +29,7 @@ RUN apt-get update \
     # Copy only backend source (excludes frontend)
     COPY ./src/backend ./src/backend
     COPY ./src/lfx ./src/lfx
    +COPY ./src/sdk ./src/sdk
     
     # Create venv and install langflow-base with dependencies
     # Using uv pip instead of uv sync to avoid workspace complexities
    @@ -37,7 +38,7 @@ ENV PATH="/app/.venv/bin:$PATH"
     ENV VIRTUAL_ENV="/app/.venv"
     
     RUN --mount=type=cache,target=/root/.cache/uv \
    -    uv pip install ./src/lfx "./src/backend/base[complete,postgresql]"
    +    uv pip install ./src/sdk ./src/lfx "./src/backend/base[complete,postgresql]"
     
     ################################
     # RUNTIME
    
  • docker/build_and_push.Dockerfile · +2 −0 · modified
    @@ -48,6 +48,8 @@ COPY ./src/backend/base/uv.lock /app/src/backend/base/uv.lock
     COPY ./src/backend/base/pyproject.toml /app/src/backend/base/pyproject.toml
     COPY ./src/lfx/README.md /app/src/lfx/README.md
     COPY ./src/lfx/pyproject.toml /app/src/lfx/pyproject.toml
    +COPY ./src/sdk/README.md /app/src/sdk/README.md
    +COPY ./src/sdk/pyproject.toml /app/src/sdk/pyproject.toml
     
     RUN --mount=type=cache,target=/root/.cache/uv \
         RUSTFLAGS='--cfg reqwest_unstable' \
    
  • pyproject.toml · +42 −0 · modified
    @@ -69,6 +69,7 @@ explicit = true
     langflow-base = { workspace = true }
     langflow = { workspace = true }
     lfx = { workspace = true }
    +langflow-sdk = { workspace = true }
     torch = { index = "pytorch-cpu" }
     torchvision = { index = "pytorch-cpu" }
     
    @@ -77,6 +78,7 @@ members = [
         "src/backend/base",
         ".",
         "src/lfx",
    +    "src/sdk",
     ]
     
     [tool.hatch.build.targets.wheel]
    @@ -313,6 +315,46 @@ external = ["RUF027"]
     "src/lfx/src/lfx/inputs/input_mixin.py" = [
         "S105",  # False positive: PASSWORD is a type constant
     ]
    +"src/lfx/src/lfx/__main__.py" = [
    +    "B008",   # Typer CLI requires function calls in argument defaults
    +    "FBT001", # Bool flags are the standard typer pattern
    +    "FBT003", # Boolean positional values in typer.Option() calls
    +]
    +"src/lfx/src/lfx/cli/_setup_commands.py" = [
    +    "B008",   # Typer CLI requires function calls in argument defaults
    +    "FBT001", # Bool flags are the standard typer pattern
    +    "FBT003", # Boolean positional values in typer.Option() calls
    +]
    +"src/lfx/src/lfx/cli/_authoring_commands.py" = [
    +    "B008",   # Typer CLI requires function calls in argument defaults
    +    "FBT001", # Bool flags are the standard typer pattern
    +    "FBT003", # Boolean positional values in typer.Option() calls
    +]
    +"src/lfx/src/lfx/cli/_running_commands.py" = [
    +    "B008",   # Typer CLI requires function calls in argument defaults
    +    "FBT001", # Bool flags are the standard typer pattern
    +    "FBT003", # Boolean positional values in typer.Option() calls
    +]
    +"src/lfx/src/lfx/cli/_remote_commands.py" = [
    +    "B008",   # Typer CLI requires function calls in argument defaults
    +    "FBT001", # Bool flags are the standard typer pattern
    +    "FBT003", # Boolean positional values in typer.Option() calls
    +]
    +"src/backend/base/langflow/api/utils/*" = [
    +    "TCH",  # Imports are used at runtime (FastAPI deps, SQLAlchemy queries, model constructors)
    +]
    +"src/sdk/src/langflow_sdk/_http.py" = [
    +    "TCH",  # httpx is used at runtime (response object methods)
    +]
    +"src/sdk/src/langflow_sdk/environments.py" = [
    +    "TRY003", # Contextual error messages are necessary here
    +    "EM102",  # f-string messages assigned before raise where possible
    +]
    +"src/sdk/tests/*" = [
    +    "S101",   # assert is the standard pytest assertion style
    +    "INP001", # Not a package namespace issue in tests
    +    "TC003",  # pytest fixture type hints are evaluated at runtime
    +]
     "src/lfx/src/lfx/schema/table.py" = [
         "S105",  # False positive: PASSWORD is a formatter type
     ]
    
  • .secrets.baseline · +1 −11 · modified
    @@ -1643,7 +1643,7 @@
             "filename": "src/backend/base/langflow/api/utils/core.py",
             "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
             "is_verified": false,
    -        "line_number": 440,
    +        "line_number": 413,
             "is_secret": false
           }
         ],
    @@ -7147,16 +7147,6 @@
             "is_secret": false
           }
         ],
    -    "src/lfx/tests/unit/cli/test_serve_simple.py": [
    -      {
    -        "type": "Secret Keyword",
    -        "filename": "src/lfx/tests/unit/cli/test_serve_simple.py",
    -        "hashed_secret": "3acfb2c2b433c0ea7ff107e33df91b18e52f960f",
    -        "is_verified": false,
    -        "line_number": 67,
    -        "is_secret": false
    -      }
    -    ],
         "src/lfx/tests/unit/components/langchain_utilities/test_csv_agent.py": [
           {
             "type": "Secret Keyword",
    
  • src/backend/base/langflow/api/utils/core.py · +127 −167 · modified
    @@ -1,32 +1,24 @@
     from __future__ import annotations
     
    -import uuid
    -from ast import literal_eval
    +import json as _json
     from datetime import timedelta
     from enum import Enum
     from typing import TYPE_CHECKING, Annotated, Any
     
     from fastapi import Depends, HTTPException, Path, Query
     from fastapi_pagination import Params
    -from lfx.graph.graph.base import Graph
     from lfx.log.logger import logger
    -from lfx.services.deps import injectable_session_scope, injectable_session_scope_readonly, session_scope
    +from lfx.services.deps import injectable_session_scope, injectable_session_scope_readonly
     from lfx.utils.validate_cloud import raise_error_if_astra_cloud_disable_component
    -from sqlalchemy import delete
     from sqlmodel.ext.asyncio.session import AsyncSession
     
     from langflow.services.auth.utils import get_current_active_user, get_current_active_user_mcp
     from langflow.services.database.models.flow.model import Flow
    -from langflow.services.database.models.flow_version.model import FlowVersion
    -from langflow.services.database.models.message.model import MessageTable
    -from langflow.services.database.models.transactions.model import TransactionTable
     from langflow.services.database.models.user.model import User
    -from langflow.services.database.models.vertex_builds.model import VertexBuildTable
     from langflow.services.store.utils import get_lf_version_from_pypi
     from langflow.utils.constants import LANGFLOW_GLOBAL_VAR_HEADER_PREFIX
     
     if TYPE_CHECKING:
    -    from langflow.services.chat.service import ChatService
         from langflow.services.store.schema import StoreComponentCreate
     
     
    @@ -43,29 +35,25 @@
     DbSessionReadOnly = Annotated[AsyncSession, Depends(injectable_session_scope_readonly)]
     
     
    -def _get_validated_file_name(file_name: str = Path()) -> str:
    -    """Validate file_name path parameter to prevent path traversal attacks."""
    -    if ".." in file_name or "/" in file_name or "\\" in file_name:
    +def _get_validated_path_segment(value: str, *, label: str = "name") -> str:
    +    """Validate a path segment to prevent path traversal attacks."""
    +    if ".." in value or "/" in value or "\\" in value:
             raise HTTPException(
                 status_code=400,
    -            detail="Invalid file name. Use a simple file name without directory paths or '..'.",
    +            detail=f"Invalid {label}. Use a simple {label} without directory paths or '..'.",
             )
    -    return file_name
    +    return value
     
     
    -ValidatedFileName = Annotated[str, Depends(_get_validated_file_name)]
    +def _get_validated_file_name(file_name: str = Path()) -> str:
    +    return _get_validated_path_segment(file_name, label="file name")
     
     
     def _get_validated_folder_name(folder_name: str = Path()) -> str:
    -    """Validate folder_name path parameter to prevent path traversal attacks."""
    -    if ".." in folder_name or "/" in folder_name or "\\" in folder_name:
    -        raise HTTPException(
    -            status_code=400,
    -            detail="Invalid folder name. Use a simple folder name without directory paths or '..'.",
    -        )
    -    return folder_name
    +    return _get_validated_path_segment(folder_name, label="folder name")
     
     
    +ValidatedFileName = Annotated[str, Depends(_get_validated_file_name)]
     ValidatedFolderName = Annotated[str, Depends(_get_validated_folder_name)]
     
     # Message to raise if we're in an Astra cloud environment and a component or endpoint is not supported
    @@ -112,6 +100,112 @@ def remove_api_keys(flow: dict):
         return flow
     
     
    +# ---------------------------------------------------------------------------
    +# Export normalisation
    +# ---------------------------------------------------------------------------
    +
    +# Top-level fields that vary between instances / users without changing logic.
    +_VOLATILE_TOP_LEVEL: frozenset[str] = frozenset(
    +    {"updated_at", "created_at", "user_id", "folder_id", "access_type", "gradient"}
    +)
    +
    +# Node-level fields that track UI interaction state (position, drag, selection).
    +_VOLATILE_NODE_FIELDS: frozenset[str] = frozenset({"positionAbsolute", "dragging", "selected"})
    +
    +
    +def _split_code_to_lines(flow: dict) -> None:
    +    """In-place: split code template field values from strings to line arrays.
    +
    +    Converts ``template.<field>.value`` from a single string to a
    +    ``list[str]`` (one element per line) when the field type is ``"code"``.
    +    This gives git line-level diffs instead of a single opaque blob.
    +    """
    +    for node in flow.get("data", {}).get("nodes", []):
    +        template = node.get("data", {}).get("node", {}).get("template", {})
    +        if not isinstance(template, dict):
    +            continue
    +        for field_data in template.values():
    +            if not isinstance(field_data, dict):
    +                continue
    +            if field_data.get("type") == "code":
    +                value = field_data.get("value")
    +                if isinstance(value, str):
    +                    # split("\n") — not splitlines() — so that the trailing newline
    +                    # is preserved as a final empty string, keeping the round-trip
    +                    # lossless: "\n".join(s.split("\n")) == s for any string s.
    +                    field_data["value"] = value.split("\n")
    +
    +
    +def _join_code_from_lines(flow: dict) -> None:
    +    """In-place: rejoin code template line arrays back to strings.
    +
    +    Inverse of :func:`_split_code_to_lines`.  Safe to call on flows that
    +    already use the string format — ``isinstance`` guard means it's a no-op.
    +    """
    +    for node in flow.get("data", {}).get("nodes", []):
    +        template = node.get("data", {}).get("node", {}).get("template", {})
    +        if not isinstance(template, dict):
    +            continue
    +        for field_data in template.values():
    +            if not isinstance(field_data, dict):
    +                continue
    +            if field_data.get("type") == "code":
    +                value = field_data.get("value")
    +                if isinstance(value, list):
    +                    field_data["value"] = "\n".join(value)
    +
    +
    +def normalize_flow_for_export(flow: dict) -> dict:
    +    """Return a git-friendly, deterministic copy of a flow dict.
    +
    +    Applied to every flow before it is written into a download ZIP.
    +
    +    Transformations
    +    ---------------
    +    * Strips volatile top-level fields (``updated_at``, ``created_at``,
    +      ``user_id``, ``folder_id``, ``access_type``, ``gradient``) — these
    +      change between instances / users without affecting flow logic.
    +    * Strips node UI-state fields (``positionAbsolute``, ``dragging``,
    +      ``selected``) — these change on every canvas interaction.
    +    * Converts ``template.<field>.value`` strings to ``list[str]`` for
    +      ``type == "code"`` fields, enabling line-level git diffs.
    +
    +    Key sorting is handled at serialisation time via
    +    ``orjson_dumps(sort_keys=True)``.
    +    """
    +    import copy
    +
    +    flow = copy.deepcopy(flow)
    +
    +    # Strip volatile top-level metadata
    +    for key in _VOLATILE_TOP_LEVEL:
    +        flow.pop(key, None)
    +
    +    # Strip node UI state
    +    for node in flow.get("data", {}).get("nodes", []):
    +        for key in _VOLATILE_NODE_FIELDS:
    +            node.pop(key, None)
    +
    +    # Code → line arrays
    +    _split_code_to_lines(flow)
    +
    +    return flow
    +
    +
    +def normalize_code_for_import(flow: dict) -> dict:
    +    """Rejoin code-as-lines back to strings for backward-compatible import.
    +
    +    Accepts both the list format produced by :func:`normalize_flow_for_export`
    +    and the legacy single-string format, so this function is safe to call
    +    unconditionally on every uploaded flow.
    +    """
    +    import copy
    +
    +    flow = copy.deepcopy(flow)
    +    _join_code_from_lines(flow)
    +    return flow
    +
    +
     def build_input_keys_response(langchain_object, artifacts):
         """Build the input keys response."""
         input_keys_response = {
    @@ -142,7 +236,13 @@ def build_input_keys_response(langchain_object, artifacts):
         return input_keys_response
     
     
    -def validate_is_component(flows: list[Flow]):
    +def validate_is_component(flows: list[Flow]) -> list[Flow]:
    +    """Return flows with ``is_component`` inferred from flow data when unset.
    +
    +    Note: mutates the ORM instances in-place because SQLAlchemy requires
    +    mutation for dirty-tracking.  This is an intentional exception to the
    +    immutability guideline — creating copies would detach them from the session.
    +    """
         for flow in flows:
             if not flow.data or flow.is_component is not None:
                 continue
    @@ -202,68 +302,6 @@ def format_elapsed_time(elapsed_time: float) -> str:
         return f"{minutes} {minutes_unit}, {seconds} {seconds_unit}"
     
     
    -async def _get_flow_name(flow_id: uuid.UUID) -> str:
    -    async with session_scope() as session:
    -        flow = await session.get(Flow, flow_id)
    -        if flow is None:
    -            msg = f"Flow {flow_id} not found"
    -            raise ValueError(msg)
    -    return flow.name
    -
    -
    -async def build_graph_from_data(flow_id: uuid.UUID | str, payload: dict, **kwargs):
    -    """Build and cache the graph."""
    -    # Get flow name
    -    if "flow_name" not in kwargs:
    -        flow_name = await _get_flow_name(flow_id if isinstance(flow_id, uuid.UUID) else uuid.UUID(flow_id))
    -    else:
    -        flow_name = kwargs["flow_name"]
    -    str_flow_id = str(flow_id)
    -    session_id = kwargs.get("session_id") or str_flow_id
    -
    -    graph = Graph.from_payload(payload, str_flow_id, flow_name, kwargs.get("user_id"))
    -    for vertex_id in graph.has_session_id_vertices:
    -        vertex = graph.get_vertex(vertex_id)
    -        if vertex is None:
    -            msg = f"Vertex {vertex_id} not found"
    -            raise ValueError(msg)
    -        if not vertex.raw_params.get("session_id"):
    -            vertex.update_raw_params({"session_id": session_id}, overwrite=True)
    -
    -    graph.session_id = session_id
    -    await graph.initialize_run()
    -    return graph
    -
    -
    -async def build_graph_from_db_no_cache(flow_id: uuid.UUID, session: AsyncSession, **kwargs):
    -    """Build and cache the graph."""
    -    flow: Flow | None = await session.get(Flow, flow_id)
    -    if not flow or not flow.data:
    -        msg = "Invalid flow ID"
    -        raise ValueError(msg)
    -    kwargs["user_id"] = kwargs.get("user_id") or str(flow.user_id)
    -    return await build_graph_from_data(flow_id, flow.data, flow_name=flow.name, **kwargs)
    -
    -
    -async def build_graph_from_db(flow_id: uuid.UUID, session: AsyncSession, chat_service: ChatService, **kwargs):
    -    graph = await build_graph_from_db_no_cache(flow_id=flow_id, session=session, **kwargs)
    -    await chat_service.set_cache(str(flow_id), graph)
    -    return graph
    -
    -
    -async def build_and_cache_graph_from_data(
    -    flow_id: uuid.UUID | str,
    -    chat_service: ChatService,
    -    graph_data: dict,
    -):  # -> Graph | Any:
    -    """Build and cache the graph."""
    -    # Convert flow_id to str if it's UUID
    -    str_flow_id = str(flow_id) if isinstance(flow_id, uuid.UUID) else flow_id
    -    graph = Graph.from_payload(graph_data, str_flow_id)
    -    await chat_service.set_cache(str_flow_id, graph)
    -    return graph
    -
    -
     def format_syntax_error_message(exc: SyntaxError) -> str:
         """Format a SyntaxError message for returning to the frontend."""
         if exc.text is None:
    @@ -344,31 +382,13 @@ def parse_value(value: Any, input_type: str) -> Any:
             if isinstance(value, dict):
                 return value
             try:
    -            return literal_eval(value) if value is not None else {}
    -        except (ValueError, SyntaxError):
    +            parsed = _json.loads(value) if value is not None else {}
    +            return parsed if isinstance(parsed, dict) else {}
    +        except (ValueError, TypeError):
                 return {}
         return value
     
     
    -async def cascade_delete_flow(session: AsyncSession, flow_id: uuid.UUID) -> None:
    -    try:
    -        # TODO: Verify if deleting messages is safe in terms of session id relevance
    -        # If we delete messages directly, rather than setting flow_id to null,
    -        # it might cause unexpected behaviors because the session id could still be
    -        # used elsewhere to search for these messages.
    -        await session.exec(delete(MessageTable).where(MessageTable.flow_id == flow_id))
    -        await session.exec(delete(TransactionTable).where(TransactionTable.flow_id == flow_id))
    -        await session.exec(delete(VertexBuildTable).where(VertexBuildTable.flow_id == flow_id))
    -        # Explicit delete despite FK CASCADE — SQLite doesn't enforce FK cascades
    -        # by default (requires PRAGMA foreign_keys = ON), and this function follows
    -        # the existing pattern of explicitly deleting all child records.
    -        await session.exec(delete(FlowVersion).where(FlowVersion.flow_id == flow_id))
    -        await session.exec(delete(Flow).where(Flow.id == flow_id))
    -    except Exception as e:
    -        msg = f"Unable to cascade delete flow: {flow_id}"
    -        raise RuntimeError(msg, e) from e
    -
    -
     def custom_params(
         page: int | None = Query(None),
         size: int | None = Query(None),
    @@ -378,66 +398,6 @@ def custom_params(
         return Params(page=page or MIN_PAGE_SIZE, size=size or MAX_PAGE_SIZE)
     
     
    -async def verify_public_flow_and_get_user(flow_id: uuid.UUID, client_id: str | None) -> tuple[User, uuid.UUID]:
    -    """Verify a public flow request and generate a deterministic flow ID.
    -
    -    This utility function:
    -    1. Checks that a client_id cookie is provided
    -    2. Verifies the flow exists and is marked as PUBLIC
    -    3. Creates a deterministic UUID based on client_id and original flow_id
    -    4. Retrieves the flow owner user for permission purposes
    -
    -    This function is used to support public flow endpoints that don't require
    -    authentication but still need to operate within the permission model.
    -
    -    Args:
    -        flow_id: The original flow ID to verify
    -        client_id: The client ID from the request cookie
    -
    -    Returns:
    -        tuple: (flow owner user, deterministic flow ID for tracking)
    -
    -    Raises:
    -        HTTPException:
    -            - 400 if no client_id is provided
    -            - 403 if flow doesn't exist or isn't public
    -            - 403 if unable to retrieve the flow owner user
    -            - 403 if user is not found for public flow
    -    """
    -    if not client_id:
    -        raise HTTPException(status_code=400, detail="No client_id cookie found")
    -
    -    # Check if the flow is public
    -    async with session_scope() as session:
    -        from sqlmodel import select
    -
    -        from langflow.services.database.models.flow.model import AccessTypeEnum, Flow
    -
    -        flow = (await session.exec(select(Flow).where(Flow.id == flow_id))).first()
    -        if not flow or flow.access_type is not AccessTypeEnum.PUBLIC:
    -            raise HTTPException(status_code=403, detail="Flow is not public")
    -
    -    # Create a new flow ID using the client_id and flow_id
    -    new_id = f"{client_id}_{flow_id}"
    -    new_flow_id = uuid.uuid5(uuid.NAMESPACE_DNS, new_id)
    -
    -    # Get the user associated with the flow
    -    try:
    -        from langflow.helpers.user import get_user_by_flow_id_or_endpoint_name
    -
    -        user = await get_user_by_flow_id_or_endpoint_name(str(flow_id))
    -
    -    except Exception as exc:
    -        await logger.aexception(f"Error getting user for public flow {flow_id}")
    -        raise HTTPException(status_code=403, detail="Flow is not accessible") from exc
    -
    -    if not user:
    -        msg = f"User not found for public flow {flow_id}"
    -        raise HTTPException(status_code=403, detail=msg)
    -
    -    return user, new_flow_id
    -
    -
     def extract_global_variables_from_headers(headers) -> dict[str, str]:
         """Extract global variables from HTTP headers with prefix X-LANGFLOW-GLOBAL-VAR-*.
     
    
  • src/backend/base/langflow/api/utils/flow_utils.py · +163 −0 · added
    @@ -0,0 +1,163 @@
    +"""Flow graph building, cascade deletion, and public flow verification utilities."""
    +
    +from __future__ import annotations
    +
    +import uuid
    +from typing import TYPE_CHECKING
    +
    +from fastapi import HTTPException
    +from lfx.graph.graph.base import Graph
    +from lfx.log.logger import logger
    +from lfx.services.deps import session_scope
    +from sqlalchemy import delete
    +from sqlmodel.ext.asyncio.session import AsyncSession
    +
    +from langflow.services.database.models.flow.model import Flow
    +from langflow.services.database.models.flow_version.model import FlowVersion
    +from langflow.services.database.models.message.model import MessageTable
    +from langflow.services.database.models.transactions.model import TransactionTable
    +from langflow.services.database.models.user.model import User
    +from langflow.services.database.models.vertex_builds.model import VertexBuildTable
    +
    +if TYPE_CHECKING:
    +    from langflow.services.chat.service import ChatService
    +
    +
    +async def _get_flow_name(flow_id: uuid.UUID) -> str:
    +    async with session_scope() as session:
    +        flow = await session.get(Flow, flow_id)
    +        if flow is None:
    +            msg = f"Flow {flow_id} not found"
    +            raise ValueError(msg)
    +    return flow.name
    +
    +
    +async def build_graph_from_data(flow_id: uuid.UUID | str, payload: dict, **kwargs):
    +    """Build and cache the graph."""
    +    # Get flow name
    +    if "flow_name" not in kwargs:
    +        flow_name = await _get_flow_name(flow_id if isinstance(flow_id, uuid.UUID) else uuid.UUID(flow_id))
    +    else:
    +        flow_name = kwargs["flow_name"]
    +    str_flow_id = str(flow_id)
    +    session_id = kwargs.get("session_id") or str_flow_id
    +
    +    graph = Graph.from_payload(payload, str_flow_id, flow_name, kwargs.get("user_id"))
    +    for vertex_id in graph.has_session_id_vertices:
    +        vertex = graph.get_vertex(vertex_id)
    +        if vertex is None:
    +            msg = f"Vertex {vertex_id} not found"
    +            raise ValueError(msg)
    +        if not vertex.raw_params.get("session_id"):
    +            vertex.update_raw_params({"session_id": session_id}, overwrite=True)
    +
    +    graph.session_id = session_id
    +    await graph.initialize_run()
    +    return graph
    +
    +
    +async def build_graph_from_db_no_cache(flow_id: uuid.UUID, session: AsyncSession, **kwargs):
    +    """Build and cache the graph."""
    +    flow: Flow | None = await session.get(Flow, flow_id)
    +    if not flow or not flow.data:
    +        msg = "Invalid flow ID"
    +        raise ValueError(msg)
    +    kwargs["user_id"] = kwargs.get("user_id") or str(flow.user_id)
    +    return await build_graph_from_data(flow_id, flow.data, flow_name=flow.name, **kwargs)
    +
    +
    +async def build_graph_from_db(flow_id: uuid.UUID, session: AsyncSession, chat_service: ChatService, **kwargs):
    +    graph = await build_graph_from_db_no_cache(flow_id=flow_id, session=session, **kwargs)
    +    await chat_service.set_cache(str(flow_id), graph)
    +    return graph
    +
    +
    +async def build_and_cache_graph_from_data(
    +    flow_id: uuid.UUID | str,
    +    chat_service: ChatService,
    +    graph_data: dict,
    +):  # -> Graph | Any:
    +    """Build and cache the graph."""
    +    # Convert flow_id to str if it's UUID
    +    str_flow_id = str(flow_id) if isinstance(flow_id, uuid.UUID) else flow_id
    +    graph = Graph.from_payload(graph_data, str_flow_id)
    +    await chat_service.set_cache(str_flow_id, graph)
    +    return graph
    +
    +
    +async def cascade_delete_flow(session: AsyncSession, flow_id: uuid.UUID) -> None:
    +    try:
    +        # TODO: Verify if deleting messages is safe in terms of session id relevance
    +        # If we delete messages directly, rather than setting flow_id to null,
    +        # it might cause unexpected behaviors because the session id could still be
    +        # used elsewhere to search for these messages.
    +        await session.exec(delete(MessageTable).where(MessageTable.flow_id == flow_id))
    +        await session.exec(delete(TransactionTable).where(TransactionTable.flow_id == flow_id))
    +        await session.exec(delete(VertexBuildTable).where(VertexBuildTable.flow_id == flow_id))
    +        # Explicit delete despite FK CASCADE -- SQLite doesn't enforce FK cascades
    +        # by default (requires PRAGMA foreign_keys = ON), and this function follows
    +        # the existing pattern of explicitly deleting all child records.
    +        await session.exec(delete(FlowVersion).where(FlowVersion.flow_id == flow_id))
    +        await session.exec(delete(Flow).where(Flow.id == flow_id))
    +    except Exception as e:
    +        msg = f"Unable to cascade delete flow: {flow_id}"
    +        raise RuntimeError(msg, e) from e
    +
    +
    +async def verify_public_flow_and_get_user(flow_id: uuid.UUID, client_id: str | None) -> tuple[User, uuid.UUID]:
    +    """Verify a public flow request and generate a deterministic flow ID.
    +
    +    This utility function:
    +    1. Checks that a client_id cookie is provided
    +    2. Verifies the flow exists and is marked as PUBLIC
    +    3. Creates a deterministic UUID based on client_id and original flow_id
    +    4. Retrieves the flow owner user for permission purposes
    +
    +    This function is used to support public flow endpoints that don't require
    +    authentication but still need to operate within the permission model.
    +
    +    Args:
    +        flow_id: The original flow ID to verify
    +        client_id: The client ID from the request cookie
    +
    +    Returns:
    +        tuple: (flow owner user, deterministic flow ID for tracking)
    +
    +    Raises:
    +        HTTPException:
    +            - 400 if no client_id is provided
    +            - 403 if flow doesn't exist or isn't public
    +            - 403 if unable to retrieve the flow owner user
    +            - 403 if user is not found for public flow
    +    """
    +    if not client_id:
    +        raise HTTPException(status_code=400, detail="No client_id cookie found")
    +
    +    # Check if the flow is public
    +    async with session_scope() as session:
    +        from sqlmodel import select
    +
    +        from langflow.services.database.models.flow.model import AccessTypeEnum, Flow
    +
    +        flow = (await session.exec(select(Flow).where(Flow.id == flow_id))).first()
    +        if not flow or flow.access_type is not AccessTypeEnum.PUBLIC:
    +            raise HTTPException(status_code=403, detail="Flow is not public")
    +
    +    # Create a new flow ID using the client_id and flow_id
    +    new_id = f"{client_id}_{flow_id}"
    +    new_flow_id = uuid.uuid5(uuid.NAMESPACE_DNS, new_id)
    +
    +    # Get the user associated with the flow
    +    try:
    +        from langflow.helpers.user import get_user_by_flow_id_or_endpoint_name
    +
    +        user = await get_user_by_flow_id_or_endpoint_name(str(flow_id))
    +
    +    except Exception as exc:
    +        await logger.aexception("Error getting user for public flow %s", flow_id)
    +        raise HTTPException(status_code=403, detail="Flow is not accessible") from exc
    +
    +    if not user:
    +        raise HTTPException(status_code=403, detail="Flow is not accessible")
    +
    +    return user, new_flow_id
    
  • src/backend/base/langflow/api/utils/__init__.py · +12 −6 · modified
    @@ -5,7 +5,7 @@
     allowing for better code organization.
     """
     
    -# Re-export everything from core module to maintain backward compatibility
    +# Re-export everything from core and flow_utils modules to maintain backward compatibility
     from langflow.api.utils.core import (
         API_WORDS,
         MAX_PAGE_SIZE,
    @@ -17,12 +17,7 @@
         EventDeliveryType,
         ValidatedFileName,
         ValidatedFolderName,
    -    build_and_cache_graph_from_data,
    -    build_graph_from_data,
    -    build_graph_from_db,
    -    build_graph_from_db_no_cache,
         build_input_keys_response,
    -    cascade_delete_flow,
         check_langflow_version,
         custom_params,
         extract_global_variables_from_headers,
    @@ -34,11 +29,20 @@
         get_suggestion_message,
         get_top_level_vertices,
         has_api_terms,
    +    normalize_code_for_import,
    +    normalize_flow_for_export,
         parse_exception,
         parse_value,
         raise_error_if_astra_cloud_env,
         remove_api_keys,
         validate_is_component,
    +)
    +from langflow.api.utils.flow_utils import (
    +    build_and_cache_graph_from_data,
    +    build_graph_from_data,
    +    build_graph_from_db,
    +    build_graph_from_db_no_cache,
    +    cascade_delete_flow,
         verify_public_flow_and_get_user,
     )
     
    @@ -75,6 +79,8 @@
         "get_top_level_vertices",
         # Functions
         "has_api_terms",
    +    "normalize_code_for_import",
    +    "normalize_flow_for_export",
         "parse_exception",
         "parse_value",
         "raise_error_if_astra_cloud_env",
    
  • src/backend/base/langflow/api/v1/flows_helpers.py+572 0 added
    @@ -0,0 +1,572 @@
    +"""Helper functions for flow CRUD and filesystem operations.
    +
    +Extracted from flows.py to keep the route-handler module concise.
    +"""
    +
    +from __future__ import annotations
    +
    +import io
    +import re
    +import zipfile
    +from datetime import datetime, timezone
    +from pathlib import Path as StdlibPath
    +from typing import TYPE_CHECKING, Any
    +from uuid import UUID
    +
    +from aiofile import async_open
    +from anyio import Path
    +from fastapi import HTTPException
    +from fastapi.responses import StreamingResponse
    +from lfx.log import logger
    +from sqlmodel import select
    +from sqlmodel.ext.asyncio.session import AsyncSession
    +
    +from langflow.api.utils import normalize_flow_for_export, remove_api_keys
    +from langflow.services.database.models.base import orjson_dumps
    +from langflow.services.database.models.flow.model import (
    +    Flow,
    +    FlowCreate,
    +    FlowRead,
    +    FlowUpdate,
    +)
    +from langflow.services.database.models.flow.utils import get_webhook_component_in_flow
    +from langflow.services.database.models.folder.model import Folder
    +from langflow.services.database.models.folder.utils import get_default_folder_id
    +from langflow.services.deps import get_settings_service
    +from langflow.services.storage.service import StorageService
    +
    +if TYPE_CHECKING:
    +    from langflow.services.database.models.user.model import User
    +
    +
    +def _get_safe_flow_path(fs_path: str, user_id: UUID, storage_service: StorageService) -> Path:
    +    """Get a safe filesystem path for flow storage, restricted to user's flows directory.
    +
    +    Allows both absolute and relative paths, but ensures they're within the user's flows directory.
    +    """
    +    if not fs_path:
    +        raise HTTPException(status_code=400, detail="fs_path cannot be empty")
    +
    +    # Normalize path separators first (before security checks to prevent backslash bypass)
    +    normalized_path = fs_path.replace("\\", "/")
    +
    +    # Reject directory traversal and null bytes (check normalized path)
    +    if ".." in normalized_path:
    +        raise HTTPException(
    +            status_code=400,
    +            detail="Invalid fs_path: directory traversal (..) is not allowed",
    +        )
    +    if "\x00" in normalized_path:
    +        raise HTTPException(
    +            status_code=400,
    +            detail="Invalid fs_path: null bytes are not allowed",
    +        )
    +
    +    # Build the safe base directory path
    +    base_dir = storage_service.data_dir / "flows" / str(user_id)
    +    base_dir_str = str(base_dir)
    +
    +    # Normalize base directory path (resolve to absolute, handle symlinks)
    +    # resolve() doesn't require the path to exist, it just resolves symlinks
    +    try:
    +        base_dir_stdlib = StdlibPath(base_dir_str).resolve()
    +        base_dir_resolved = str(base_dir_stdlib)
    +    except (OSError, ValueError) as e:
    +        raise HTTPException(status_code=400, detail=f"Invalid base directory: {e}") from e
    +
    +    # Determine if path is absolute (Unix or Windows style)
    +    is_absolute = normalized_path.startswith("/") or (len(normalized_path) > 1 and normalized_path[1] == ":")
    +
    +    if is_absolute:
    +        # Absolute path - resolve and validate it's within base directory
    +        try:
    +            requested_path = StdlibPath(normalized_path).resolve()
    +            requested_resolved = str(requested_path)
    +            try:
    +                # Ensure it's a subpath of the base directory
    +                requested_path.relative_to(base_dir_stdlib)
    +            except ValueError:
    +                raise HTTPException(
    +                    status_code=400,
    +                    detail=(f"Absolute path must be within your flows directory: {base_dir_resolved}"),
    +                ) from None
    +            return Path(requested_resolved)
    +        except (OSError, ValueError) as e:
    +            raise HTTPException(
    +                status_code=400,
    +                detail=(
    +                    f"Invalid file save path: {e}. "
    +                    f"Verify that the path is within your flows directory: {base_dir_resolved}"
    +                ),
    +            ) from e
    +    else:
    +        # Relative path - validate that it's within the base directory
    +        relative_part = normalized_path.lstrip("/")
    +        safe_path = base_dir / relative_part if relative_part else base_dir
    +        safe_path_stdlib = base_dir_stdlib / relative_part if relative_part else base_dir_stdlib
    +        try:
    +            final_resolved_str = str(safe_path_stdlib.resolve())
    +
    +            # Ensure resolved path stays within base (prevent symlink attacks)
    +            if not final_resolved_str.startswith(base_dir_resolved):
    +                raise HTTPException(
    +                    status_code=400,
    +                    detail="Invalid path: resolves outside allowed directory",
    +                )
    +        except (OSError, ValueError) as e:
    +            raise HTTPException(status_code=400, detail=f"Invalid path: {e}") from e
    +
    +        return safe_path
    +
    +
    +# Fields that may be updated via setattr on a Flow ORM instance.
    +# Any key not in this set is silently dropped to prevent callers from
    +# overwriting internal fields (e.g. ``id``, ``user_id``).
    +_UPDATABLE_FLOW_FIELDS: frozenset[str] = frozenset(
    +    {
    +        "name",
    +        "description",
    +        "data",
    +        "is_component",
    +        "endpoint_name",
    +        "tags",
    +        "folder_id",
    +        "icon",
    +        "icon_bg_color",
    +        "gradient",
    +        "locked",
    +        "mcp_enabled",
    +        "action_name",
    +        "action_description",
    +        "access_type",
    +        "fs_path",
    +    }
    +)
    +
    +
    +def _apply_update_data(target: Flow, update_data: dict[str, Any]) -> None:
    +    """Apply *update_data* to the ORM *target*, restricted to the allowlist."""
    +    for key, value in update_data.items():
    +        if key in _UPDATABLE_FLOW_FIELDS:
    +            setattr(target, key, value)
    +
    +
    +def _endpoint_name_was_explicitly_cleared(flow: FlowCreate | FlowUpdate) -> bool:
    +    """Return whether the request explicitly asked to clear the endpoint name."""
    +    return "endpoint_name" in flow.model_fields_set and flow.endpoint_name in (None, "")
    +
    +
    +async def _verify_fs_path(path: str | None, user_id: UUID, storage_service: StorageService) -> None:
    +    """Verify and prepare the filesystem path for flow storage."""
    +    if path is not None:
    +        # Empty strings should be rejected (None is allowed, empty string is not)
    +        if path == "":
    +            raise HTTPException(status_code=400, detail="fs_path cannot be empty")
    +        safe_path = _get_safe_flow_path(path, user_id, storage_service)
    +        await safe_path.parent.mkdir(parents=True, exist_ok=True)
    +        if not await safe_path.exists():
    +            await safe_path.touch()
    +
    +
    +async def _save_flow_to_fs(flow: Flow, user_id: UUID, storage_service: StorageService) -> None:
    +    """Save flow data to the filesystem at the validated path."""
    +    if not flow.fs_path:
    +        return
    +
    +    try:
    +        safe_path = _get_safe_flow_path(flow.fs_path, user_id, storage_service)
    +        await safe_path.parent.mkdir(parents=True, exist_ok=True)
    +        # async_open expects a string path, not a Path object
    +        async with async_open(str(safe_path), "w") as f:
    +            await f.write(flow.model_dump_json())
    +    except HTTPException:
    +        raise
    +    except OSError as e:
    +        await logger.aexception("Failed to write flow %s to path %s", flow.name, flow.fs_path)
    +        raise HTTPException(status_code=500, detail=f"Failed to write flow to filesystem: {e}") from e
    +
    +
    +async def _deduplicate_flow_name(session: AsyncSession, name: str, user_id: UUID) -> str:
    +    """Return a unique flow name for *user_id*, appending ``(N)`` if needed."""
    +    if not (await session.exec(select(Flow).where(Flow.name == name).where(Flow.user_id == user_id))).first():
    +        return name
    +
    +    flows = (
    +        await session.exec(
    +            select(Flow).where(Flow.name.like(f"{name} (%")).where(Flow.user_id == user_id)  # type: ignore[attr-defined]
    +        )
    +    ).all()
    +
    +    # Extract copy-number suffixes: "MyFlow (2)" → 2
    +    extract_number = re.compile(rf"^{re.escape(name)} \((\d+)\)$")
    +    numbers = [int(m.group(1)) for f in flows if (m := extract_number.search(f.name))]
    +
    +    return f"{name} ({max(numbers) + 1})" if numbers else f"{name} (1)"
    +
    +
    +async def _deduplicate_endpoint_name(
    +    session: AsyncSession,
    +    endpoint_name: str,
    +    user_id: UUID,
    +    *,
    +    fail_on_conflict: bool = False,
    +) -> str:
    +    """Return a unique endpoint name for *user_id*, appending ``-N`` if needed.
    +
    +    Raises :class:`HTTPException` 409 when *fail_on_conflict* is ``True`` and
    +    the name already exists.
    +    """
    +    existing = (
    +        await session.exec(select(Flow).where(Flow.endpoint_name == endpoint_name).where(Flow.user_id == user_id))
    +    ).first()
    +    if not existing:
    +        return endpoint_name
    +
    +    if fail_on_conflict:
    +        raise HTTPException(status_code=409, detail="Endpoint name must be unique")
    +
    +    flows = (
    +        await session.exec(
    +            select(Flow)
    +            .where(Flow.endpoint_name.like(f"{endpoint_name}-%"))  # type: ignore[union-attr]
    +            .where(Flow.user_id == user_id)
    +        )
    +    ).all()
    +
    +    numbers: list[int] = []
    +    for f in flows:
    +        try:
    +            numbers.append(int(f.endpoint_name.split("-")[-1]))
    +        except ValueError:
    +            continue
    +
    +    next_num = (max(numbers) + 1) if numbers else 1
    +    return f"{endpoint_name}-{next_num}"
    +
    +
    +async def _validate_and_assign_folder(
    +    session: AsyncSession,
    +    db_flow: Flow,
    +    user_id: UUID,
    +) -> None:
    +    """Ensure *db_flow* has a valid ``folder_id`` belonging to *user_id*.
    +
    +    Falls back to the default folder when the current ``folder_id`` is
    +    ``None`` or references a non-existent / other-user's folder.
    +    """
    +    if db_flow.folder_id is not None:
    +        folder_exists = (
    +            await session.exec(select(Folder).where(Folder.id == db_flow.folder_id, Folder.user_id == user_id))
    +        ).first()
    +        if not folder_exists:
    +            db_flow.folder_id = None
    +
    +    if db_flow.folder_id is None:
    +        db_flow.folder_id = await get_default_folder_id(session, user_id)
    +
    +
    +async def _new_flow(
    +    *,
    +    session: AsyncSession,
    +    flow: FlowCreate,
    +    user_id: UUID,
    +    storage_service: StorageService,
    +    flow_id: UUID | None = None,
    +    fail_on_endpoint_conflict: bool = False,
    +    validate_folder: bool = False,
    +):
    +    """Create or upsert a flow.
    +
    +    Args:
    +        session: Database session.
    +        flow: Flow creation data.
    +        user_id: Owner of the new flow.
    +        storage_service: Service for filesystem operations.
    +        flow_id: Allows PUT upsert to create flows with a specific ID for syncing between instances.
    +        fail_on_endpoint_conflict: PUT should fail predictably on conflicts rather than silently renaming.
    +        validate_folder: Validates folder_id exists and belongs to user when upserting from external sources.
    +    """
    +    try:
    +        await _verify_fs_path(flow.fs_path, user_id, storage_service)
    +
    +        if validate_folder and flow.folder_id is not None:
    +            folder = (
    +                await session.exec(select(Folder).where(Folder.id == flow.folder_id, Folder.user_id == user_id))
    +            ).first()
    +            if not folder:
    +                raise HTTPException(status_code=400, detail="Folder not found")
    +
    +        # Set user_id (ignore any user_id from body for security)
    +        flow.user_id = user_id
    +        flow.name = await _deduplicate_flow_name(session, flow.name, user_id)
    +
    +        if flow.endpoint_name:
    +            flow.endpoint_name = await _deduplicate_endpoint_name(
    +                session, flow.endpoint_name, user_id, fail_on_conflict=fail_on_endpoint_conflict
    +            )
    +
    +        # Exclude the id field from FlowCreate so that Flow.id (UUID, non-optional)
    +        # always gets its default_factory uuid4 unless we explicitly override it below.
    +        db_flow = Flow.model_validate(flow.model_dump(exclude={"id"}))
    +
    +        # Apply the stable ID: explicit flow_id param (PUT upsert) takes precedence,
    +        # then flow.id (stable import from FlowCreate), then the uuid4 default.
    +        effective_id = flow_id if flow_id is not None else flow.id
    +        if effective_id is not None:
    +            db_flow.id = effective_id
    +
    +        db_flow.updated_at = datetime.now(timezone.utc)
    +        await _validate_and_assign_folder(session, db_flow, user_id)
    +
    +        session.add(db_flow)
    +        await session.flush()
    +        await session.refresh(db_flow)
    +        await _save_flow_to_fs(db_flow, user_id, storage_service)
    +
    +        return FlowRead.model_validate(db_flow, from_attributes=True)
    +    except Exception as e:
    +        if hasattr(e, "errors"):
    +            raise HTTPException(status_code=400, detail=str(e)) from e
    +        if isinstance(e, HTTPException):
    +            raise
    +        logger.exception("Error creating flow")
    +        raise HTTPException(status_code=500, detail="An internal error occurred while creating the flow.") from e
    +
    +
    +async def _read_flow(
    +    session: AsyncSession,
    +    flow_id: UUID,
    +    user_id: UUID,
    +):
    +    """Read a flow."""
    +    stmt = select(Flow).where(Flow.id == flow_id).where(Flow.user_id == user_id)
    +
    +    return (await session.exec(stmt)).first()
    +
    +
    +async def _update_existing_flow(
    +    *,
    +    session: AsyncSession,
    +    existing_flow: Flow,
    +    flow: FlowCreate,
    +    current_user: User,
    +    storage_service: StorageService,
    +) -> FlowRead:
    +    """Update an existing flow (PUT update path).
    +
    +    Similar to update_flow but:
    +    - Fails on name/endpoint_name conflict with OTHER flows (409)
    +    - Keeps existing folder_id if not provided in request
    +    """
    +    settings_service = get_settings_service()
    +    user_id = current_user.id
    +
    +    # Validate fs_path if provided (use `is not None` to catch empty strings)
    +    if flow.fs_path is not None:
    +        await _verify_fs_path(flow.fs_path, user_id, storage_service)
    +
    +    # Validate folder_id if provided
    +    if flow.folder_id is not None:
    +        folder = (
    +            await session.exec(select(Folder).where(Folder.id == flow.folder_id, Folder.user_id == user_id))
    +        ).first()
    +        if not folder:
    +            raise HTTPException(status_code=400, detail="Folder not found")
    +
    +    # Check name uniqueness (excluding current flow)
    +    if flow.name and flow.name != existing_flow.name:
    +        name_conflict = (
    +            await session.exec(
    +                select(Flow).where(
    +                    Flow.name == flow.name,
    +                    Flow.user_id == user_id,
    +                    Flow.id != existing_flow.id,
    +                )
    +            )
    +        ).first()
    +        if name_conflict:
    +            raise HTTPException(status_code=409, detail="Name must be unique")
    +
    +    # Check endpoint_name uniqueness (excluding current flow)
    +    if flow.endpoint_name and flow.endpoint_name != existing_flow.endpoint_name:
    +        endpoint_conflict = (
    +            await session.exec(
    +                select(Flow).where(
    +                    Flow.endpoint_name == flow.endpoint_name,
    +                    Flow.user_id == user_id,
    +                    Flow.id != existing_flow.id,
    +                )
    +            )
    +        ).first()
    +        if endpoint_conflict:
    +            raise HTTPException(status_code=409, detail="Endpoint name must be unique")
    +
    +    # Build update data
    +    update_data = flow.model_dump(exclude_unset=True, exclude_none=True)
    +
    +    # Preserve the existing endpoint unless the request explicitly clears it.
    +    if _endpoint_name_was_explicitly_cleared(flow):
    +        update_data["endpoint_name"] = None
    +
    +    # Remove id and user_id from update data (security)
    +    update_data.pop("id", None)
    +    update_data.pop("user_id", None)
    +
    +    # If folder_id not provided, keep existing
    +    if "folder_id" not in update_data or update_data.get("folder_id") is None:
    +        update_data.pop("folder_id", None)
    +
    +    if settings_service.settings.remove_api_keys:
    +        update_data = remove_api_keys(update_data)
    +
    +    _apply_update_data(existing_flow, update_data)
    +
    +    webhook_component = get_webhook_component_in_flow(existing_flow.data or {})
    +    existing_flow.webhook = webhook_component is not None
    +    existing_flow.updated_at = datetime.now(timezone.utc)
    +
    +    session.add(existing_flow)
    +    await session.flush()
    +    await session.refresh(existing_flow)
    +    await _save_flow_to_fs(existing_flow, user_id, storage_service)
    +
    +    return FlowRead.model_validate(existing_flow, from_attributes=True)
    +
    +
    +async def _patch_flow(
    +    *,
    +    session: AsyncSession,
    +    db_flow: Flow,
    +    flow: FlowUpdate,
    +    user_id: UUID,
    +    storage_service: StorageService,
    +) -> FlowRead:
    +    """Apply a partial update (PATCH) to an existing flow and return a FlowRead."""
    +    settings_service = get_settings_service()
    +
    +    update_data = flow.model_dump(exclude_unset=True, exclude_none=True)
    +
    +    # Preserve the existing endpoint unless the request explicitly clears it.
    +    if _endpoint_name_was_explicitly_cleared(flow):
    +        update_data["endpoint_name"] = None
    +
    +    if settings_service.settings.remove_api_keys:
    +        update_data = remove_api_keys(update_data)
    +
    +    _apply_update_data(db_flow, update_data)
    +
    +    # Validate fs_path if it was changed (will raise HTTPException if invalid)
    +    if "fs_path" in update_data:
    +        await _verify_fs_path(db_flow.fs_path, user_id, storage_service)
    +
    +    webhook_component = get_webhook_component_in_flow(db_flow.data) if db_flow.data else None
    +    db_flow.webhook = webhook_component is not None
    +    db_flow.updated_at = datetime.now(timezone.utc)
    +
    +    await _validate_and_assign_folder(session, db_flow, user_id)
    +
    +    session.add(db_flow)
    +    await session.flush()
    +    await session.refresh(db_flow)
    +    await _save_flow_to_fs(db_flow, user_id, storage_service)
    +
    +    return FlowRead.model_validate(db_flow, from_attributes=True)
    +
    +
    +async def _upsert_flow_list(
    +    *,
    +    session: AsyncSession,
    +    flows: list[FlowCreate],
    +    current_user: User,
    +    storage_service: StorageService,
    +    folder_id: UUID | None = None,
    +) -> list[FlowRead]:
    +    """Import a list of flows with upsert semantics (used by the upload endpoint).
    +
    +    For each flow:
    +    - If it has an ID matching an existing flow owned by the user, update in place.
    +    - If it has an ID claimed by another user, mint a fresh UUID.
    +    - Otherwise create with the provided or generated ID.
    +    """
    +    flow_reads: list[FlowRead] = []
    +    for flow in flows:
    +        flow.user_id = current_user.id
    +        if folder_id:
    +            flow.folder_id = folder_id
    +
    +        if flow.id is not None:
    +            existing = (await session.exec(select(Flow).where(Flow.id == flow.id))).first()
    +
    +            if existing is not None and existing.user_id == current_user.id:
    +                flow_read = await _update_existing_flow(
    +                    session=session,
    +                    existing_flow=existing,
    +                    flow=flow,
    +                    current_user=current_user,
    +                    storage_service=storage_service,
    +                )
    +            elif existing is not None:
    +                flow.id = None
    +                flow_read = await _new_flow(
    +                    session=session, flow=flow, user_id=current_user.id, storage_service=storage_service
    +                )
    +            else:
    +                flow_read = await _new_flow(
    +                    session=session,
    +                    flow=flow,
    +                    user_id=current_user.id,
    +                    storage_service=storage_service,
    +                    flow_id=flow.id,
    +                )
    +        else:
    +            flow_read = await _new_flow(
    +                session=session, flow=flow, user_id=current_user.id, storage_service=storage_service
    +            )
    +
    +        flow_reads.append(flow_read)
    +    return flow_reads
    +
    +
    +def _sanitize_flow_filename(raw_name: str, fallback_id: str = "flow") -> str:
    +    """Return a filesystem-safe filename from a flow name.
    +
    +    Strips directory separators, null bytes, and Windows reserved device names.
    +    """
    +    name = str(raw_name).replace("/", "_").replace("\\", "_")
    +    name = name.replace("\x00", "").replace("..", "_").strip()
    +    # Reject Windows reserved device names (CON, PRN, AUX, NUL, COM1-9, LPT1-9)
    +    import re as _re
    +
    +    if _re.match(r"^(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])(\..+)?$", name, _re.IGNORECASE):
    +        name = f"_{name}"
    +    return name or fallback_id
    +
    +
    +def _build_flows_download_response(
    +    flows: list[Flow],
    +) -> StreamingResponse | dict:
    +    """Build a download response (ZIP or single JSON) for the given flows.
    +
    +    Strips API keys and normalises for git-friendly export before packaging.
    +    """
    +    normalised_flows = [normalize_flow_for_export(remove_api_keys(flow.model_dump())) for flow in flows]
    +
    +    if len(normalised_flows) > 1:
    +        zip_stream = io.BytesIO()
    +        with zipfile.ZipFile(zip_stream, "w") as zip_file:
    +            for flow_dict in normalised_flows:
    +                flow_json = orjson_dumps(flow_dict, sort_keys=True)
    +                raw_name = str(flow_dict.get("name", "flow"))
    +                safe_name = _sanitize_flow_filename(raw_name, str(flow_dict.get("id", "flow")))
    +                zip_file.writestr(f"{safe_name}.json", flow_json)
    +
    +        zip_stream.seek(0)
    +        current_time = datetime.now(tz=timezone.utc).astimezone().strftime("%Y%m%d_%H%M%S")
    +        filename = f"{current_time}_langflow_flows.zip"
    +
    +        return StreamingResponse(
    +            zip_stream,
    +            media_type="application/x-zip-compressed",
     +            headers={"Content-Disposition": f"attachment; filename={filename}"},
    +        )
    +    return normalised_flows[0]
    
  • src/backend/base/langflow/api/v1/flows.py+136 592 modified
    @@ -1,32 +1,44 @@
     from __future__ import annotations
     
    +import asyncio
     import io
    -import json
    -import re
    +import threading
     import zipfile
    -from datetime import datetime, timezone
    -from pathlib import Path as StdlibPath
     from typing import Annotated
     from uuid import UUID
     
     import orjson
    -from aiofile import async_open
    -from anyio import Path
    -from fastapi import APIRouter, Depends, File, HTTPException, Response, UploadFile
    +from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
     from fastapi.encoders import jsonable_encoder
    -from fastapi.responses import StreamingResponse
     from fastapi_pagination import Page, Params
     from fastapi_pagination.ext.sqlmodel import apaginate
    -from lfx.log import logger
    +from lfx.services.cache.utils import CACHE_MISS
     from sqlmodel import and_, col, select
    -from sqlmodel.ext.asyncio.session import AsyncSession
     
    -from langflow.api.utils import CurrentActiveUser, DbSession, cascade_delete_flow, remove_api_keys, validate_is_component
    +from langflow.api.utils import (
    +    CurrentActiveUser,
    +    DbSession,
    +    cascade_delete_flow,
    +    normalize_code_for_import,
    +    validate_is_component,
    +)
     from langflow.api.utils.zip_utils import extract_flows_from_zip
    +from langflow.api.v1.flows_helpers import (
    +    _build_flows_download_response,
    +    _get_safe_flow_path,
    +    _new_flow,
    +    _patch_flow,
    +    _read_flow,
    +    _save_flow_to_fs,
    +    _update_existing_flow,
    +    _upsert_flow_list,
    +    _verify_fs_path,
    +)
     from langflow.api.v1.schemas import FlowListCreate
     from langflow.helpers.user import get_user_by_flow_id_or_endpoint_name
     from langflow.initial_setup.constants import STARTER_FOLDER_NAME
     from langflow.services.auth.utils import get_current_active_user
    +from langflow.services.cache.service import ThreadingInMemoryCache
     from langflow.services.database.models.flow.model import (
         AccessTypeEnum,
         Flow,
    @@ -35,267 +47,39 @@
         FlowRead,
         FlowUpdate,
     )
    -from langflow.services.database.models.flow.utils import get_webhook_component_in_flow
     
     # TODO: Full-version import/export is planned as a follow-up feature. When implemented,
     # re-add imports for create_flow_version_entry, get_flow_version_list, strip_version_data,
     # and FlowVersionError from the flow_version modules.
     from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
     from langflow.services.database.models.folder.model import Folder
    -from langflow.services.database.models.folder.utils import get_default_folder_id
     from langflow.services.deps import get_settings_service, get_storage_service
     from langflow.services.storage.service import StorageService
     from langflow.utils.compression import compress_response
     
    -# build router
    -router = APIRouter(prefix="/flows", tags=["Flows"])
    -
    +# Re-export helpers so existing ``from langflow.api.v1.flows import ...`` still works.
    +__all__ = [
    +    "_get_safe_flow_path",
    +    "_new_flow",
    +    "_read_flow",
    +    "_save_flow_to_fs",
    +    "_update_existing_flow",
    +    "_verify_fs_path",
    +]
     
    -def _get_safe_flow_path(fs_path: str, user_id: UUID, storage_service: StorageService) -> Path:
    -    """Get a safe filesystem path for flow storage, restricted to user's flows directory.
     
    -    Allows both absolute and relative paths, but ensures they're within the user's flows directory.
    -    """
    -    if not fs_path:
    -        raise HTTPException(status_code=400, detail="fs_path cannot be empty")
    -
    -    # Normalize path separators first (before security checks to prevent backslash bypass)
    -    normalized_path = fs_path.replace("\\", "/")
    -
    -    # Reject directory traversal and null bytes (check normalized path)
    -    if ".." in normalized_path:
    -        raise HTTPException(
    -            status_code=400,
    -            detail="Invalid fs_path: directory traversal (..) is not allowed",
    -        )
    -    if "\x00" in normalized_path:
    -        raise HTTPException(
    -            status_code=400,
    -            detail="Invalid fs_path: null bytes are not allowed",
    -        )
    -
    -    # Build the safe base directory path
    -    base_dir = storage_service.data_dir / "flows" / str(user_id)
    -    base_dir_str = str(base_dir)
    -
    -    # Normalize base directory path (resolve to absolute, handle symlinks)
    -    # resolve() doesn't require the path to exist, it just resolves symlinks
    -    try:
    -        base_dir_stdlib = StdlibPath(base_dir_str).resolve()
    -        base_dir_resolved = str(base_dir_stdlib)
    -    except (OSError, ValueError) as e:
    -        raise HTTPException(status_code=400, detail=f"Invalid base directory: {e}") from e
    -
    -    # Determine if path is absolute (Unix or Windows style)
    -    is_absolute = normalized_path.startswith("/") or (len(normalized_path) > 1 and normalized_path[1] == ":")
    -
    -    if is_absolute:
    -        # Absolute path - resolve and validate it's within base directory
    -        try:
    -            requested_path = StdlibPath(normalized_path).resolve()
    -            requested_resolved = str(requested_path)
    -            try:
    -                # Ensure it's a subpath of the base directory
    -                requested_path.relative_to(base_dir_stdlib)
    -            except ValueError:
    -                raise HTTPException(
    -                    status_code=400,
    -                    detail=(f"Absolute path must be within your flows directory: {base_dir_resolved}"),
    -                ) from None
    -            return Path(requested_resolved)
    -        except (OSError, ValueError) as e:
    -            raise HTTPException(
    -                status_code=400,
    -                detail=(
    -                    f"Invalid file save path: {e}. "
    -                    f"Verify that the path is within your flows directory: {base_dir_resolved}"
    -                ),
    -            ) from e
    -    else:
    -        # Relative path - validate that it's within the base directory
    -        relative_part = normalized_path.lstrip("/")
    -        safe_path = base_dir / relative_part if relative_part else base_dir
    -        safe_path_stdlib = base_dir_stdlib / relative_part if relative_part else base_dir_stdlib
    -        try:
    -            final_resolved_str = str(safe_path_stdlib.resolve())
    +def _handle_unique_constraint_error(exc: Exception, *, status_code: int = 400) -> HTTPException:
    +    """Parse a UNIQUE constraint error and return an appropriate HTTPException."""
    +    msg = str(exc)
    +    if "UNIQUE constraint failed" not in msg:
    +        return HTTPException(status_code=500, detail=msg)
    +    columns = msg.split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
    +    column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
    +    return HTTPException(status_code=status_code, detail=f"{column.capitalize().replace('_', ' ')} must be unique")
     
    -            # Ensure resolved path stays within base (prevent symlink attacks)
    -            if not final_resolved_str.startswith(base_dir_resolved):
    -                raise HTTPException(
    -                    status_code=400,
    -                    detail="Invalid path: resolves outside allowed directory",
    -                )
    -        except (OSError, ValueError) as e:
    -            raise HTTPException(status_code=400, detail=f"Invalid path: {e}") from e
    -
    -        return safe_path
    -
    -
    -async def _verify_fs_path(path: str | None, user_id: UUID, storage_service: StorageService) -> None:
    -    """Verify and prepare the filesystem path for flow storage."""
    -    if path is not None:
    -        # Empty strings should be rejected (None is allowed, empty string is not)
    -        if path == "":
    -            raise HTTPException(status_code=400, detail="fs_path cannot be empty")
    -        safe_path = _get_safe_flow_path(path, user_id, storage_service)
    -        await safe_path.parent.mkdir(parents=True, exist_ok=True)
    -        if not await safe_path.exists():
    -            await safe_path.touch()
    -
    -
    -async def _save_flow_to_fs(flow: Flow, user_id: UUID, storage_service: StorageService) -> None:
    -    """Save flow data to the filesystem at the validated path."""
    -    if not flow.fs_path:
    -        return
    -
    -    try:
    -        safe_path = _get_safe_flow_path(flow.fs_path, user_id, storage_service)
    -        await safe_path.parent.mkdir(parents=True, exist_ok=True)
    -        # async_open expects a string path, not a Path object
    -        async with async_open(str(safe_path), "w") as f:
    -            await f.write(flow.model_dump_json())
    -    except HTTPException:
    -        raise
    -    except OSError as e:
    -        await logger.aexception("Failed to write flow %s to path %s", flow.name, flow.fs_path)
    -        raise HTTPException(status_code=500, detail=f"Failed to write flow to filesystem: {e}") from e
    -
    -
    -async def _new_flow(
    -    *,
    -    session: AsyncSession,
    -    flow: FlowCreate,
    -    user_id: UUID,
    -    storage_service: StorageService,
    -    flow_id: UUID | None = None,
    -    fail_on_endpoint_conflict: bool = False,
    -    validate_folder: bool = False,
    -):
    -    """Create or upsert a flow.
    -
    -    Args:
    -        session: Database session.
    -        flow: Flow creation data.
    -        user_id: Owner of the new flow.
    -        storage_service: Service for filesystem operations.
    -        flow_id: Allows PUT upsert to create flows with a specific ID for syncing between instances.
    -        fail_on_endpoint_conflict: PUT should fail predictably on conflicts rather than silently renaming.
    -        validate_folder: Validates folder_id exists and belongs to user when upserting from external sources.
    -    """
    -    try:
    -        # Validate fs_path if provided (will raise HTTPException if invalid)
    -        await _verify_fs_path(flow.fs_path, user_id, storage_service)
    -
    -        # Validate folder_id if requested
    -        if validate_folder and flow.folder_id is not None:
    -            folder = (
    -                await session.exec(select(Folder).where(Folder.id == flow.folder_id, Folder.user_id == user_id))
    -            ).first()
    -            if not folder:
    -                raise HTTPException(status_code=400, detail="Folder not found")
    -
    -        # Set user_id (ignore any user_id from body for security)
    -        flow.user_id = user_id
    -
    -        # Check if the flow.name is unique
    -        # there might be flows with name like: "MyFlow", "MyFlow (1)", "MyFlow (2)"
    -        # so we need to check if the name is unique with `like` operator
    -        # if we find a flow with the same name, we add a number to the end of the name
    -        # based on the highest number found
    -        if (await session.exec(select(Flow).where(Flow.name == flow.name).where(Flow.user_id == user_id))).first():
    -            flows = (
    -                await session.exec(
    -                    select(Flow).where(Flow.name.like(f"{flow.name} (%")).where(Flow.user_id == user_id)  # type: ignore[attr-defined]
    -                )
    -            ).all()
    -            if flows:
    -                # Use regex to extract numbers only from flows that follow the copy naming pattern:
    -                # "{original_name} ({number})"
    -                # This avoids extracting numbers from the original flow name if it naturally contains parentheses
    -                #
    -                # Examples:
    -                # - For flow "My Flow": matches "My Flow (1)", "My Flow (2)" → extracts 1, 2
    -                # - For flow "Analytics (Q1)": matches "Analytics (Q1) (1)" → extracts 1
    -                #   but does NOT match "Analytics (Q1)" → avoids extracting the original "1"
    -                extract_number = re.compile(rf"^{re.escape(flow.name)} \((\d+)\)$")
    -                numbers = []
    -                for _flow in flows:
    -                    result = extract_number.search(_flow.name)
    -                    if result:
    -                        numbers.append(int(result.groups(1)[0]))
    -                if numbers:
    -                    flow.name = f"{flow.name} ({max(numbers) + 1})"
    -                else:
    -                    flow.name = f"{flow.name} (1)"
    -            else:
    -                flow.name = f"{flow.name} (1)"
    -
    -        # Check if the endpoint is unique
    -        if (
    -            flow.endpoint_name
    -            and (
    -                await session.exec(
    -                    select(Flow).where(Flow.endpoint_name == flow.endpoint_name).where(Flow.user_id == user_id)
    -                )
    -            ).first()
    -        ):
    -            if fail_on_endpoint_conflict:
    -                raise HTTPException(status_code=409, detail="Endpoint name must be unique")
    -
    -            # Auto-rename endpoint
    -            flows = (
    -                await session.exec(
    -                    select(Flow)
    -                    .where(Flow.endpoint_name.like(f"{flow.endpoint_name}-%"))  # type: ignore[union-attr]
    -                    .where(Flow.user_id == user_id)
    -                )
    -            ).all()
    -            if flows:
    -                # The endpoint name is like "my-endpoint","my-endpoint-1", "my-endpoint-2"
    -                # so we need to get the highest number and add 1
    -                # we need to get the last part of the endpoint name
    -                numbers = [int(flow.endpoint_name.split("-")[-1]) for flow in flows]
    -                flow.endpoint_name = f"{flow.endpoint_name}-{max(numbers) + 1}"
    -            else:
    -                flow.endpoint_name = f"{flow.endpoint_name}-1"
    -
    -        db_flow = Flow.model_validate(flow, from_attributes=True)
    -
    -        # Use specified ID if provided (for PUT upsert)
    -        if flow_id is not None:
    -            db_flow.id = flow_id
    -
    -        db_flow.updated_at = datetime.now(timezone.utc)
    -
    -        # Validate folder_id exists, or fall back to default folder
    -        if db_flow.folder_id is not None:
    -            folder_exists = (
    -                await session.exec(select(Folder).where(Folder.id == db_flow.folder_id, Folder.user_id == user_id))
    -            ).first()
    -            if not folder_exists:
    -                # Folder doesn't exist or doesn't belong to user, use default
    -                db_flow.folder_id = None
    -
    -        if db_flow.folder_id is None:
    -            # Make sure flows always have a folder (auto-create default folder if needed)
    -            db_flow.folder_id = await get_default_folder_id(session, user_id)
     
    -        session.add(db_flow)
    -
    -        # Persist and refresh
    -        await session.flush()
    -        await session.refresh(db_flow)
    -        await _save_flow_to_fs(db_flow, user_id, storage_service)
    -
    -        # Convert to FlowRead while session is still active
    -        return FlowRead.model_validate(db_flow, from_attributes=True)
    -    except Exception as e:
    -        # If it is a validation error, return the error message
    -        if hasattr(e, "errors"):
    -            raise HTTPException(status_code=400, detail=str(e)) from e
    -        if isinstance(e, HTTPException):
    -            raise
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    +# build router
    +router = APIRouter(prefix="/flows", tags=["Flows"])
     
     
     @router.post("/", response_model=FlowRead, status_code=201)
    @@ -308,21 +92,10 @@ async def create_flow(
     ):
         try:
             return await _new_flow(session=session, flow=flow, user_id=current_user.id, storage_service=storage_service)
    +    except HTTPException:
    +        raise
         except Exception as e:
    -        if "UNIQUE constraint failed" in str(e):
    -            # Get the name of the column that failed
    -            columns = str(e).split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
    -            # UNIQUE constraint failed: flow.user_id, flow.name
    -            # or UNIQUE constraint failed: flow.name
    -            # if the column has id in it, we want the other column
    -            column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
    -
    -            raise HTTPException(
    -                status_code=400, detail=f"{column.capitalize().replace('_', ' ')} must be unique"
    -            ) from e
    -        if isinstance(e, HTTPException):
    -            raise
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    +        raise _handle_unique_constraint_error(e) from e
     
     
     @router.get("/", response_model=list[FlowRead] | Page[FlowRead] | list[FlowHeader], status_code=200)
    @@ -337,26 +110,7 @@ async def read_flows(
         params: Annotated[Params, Depends()],
         header_flows: bool = False,
     ):
    -    """Retrieve a list of flows with pagination support.
    -
    -    Args:
    -        current_user (User): The current authenticated user.
    -        session (Session): The database session.
    -        settings_service (SettingsService): The settings service.
    -        components_only (bool, optional): Whether to return only components. Defaults to False.
    -
    -        get_all (bool, optional): Whether to return all flows without pagination. Defaults to True.
    -        **This field must be True because of backward compatibility with the frontend - Release: 1.0.20**
    -
    -        folder_id (UUID, optional): The project ID. Defaults to None.
    -        params (Params): Pagination parameters.
    -        remove_example_flows (bool, optional): Whether to remove example flows. Defaults to False.
    -        header_flows (bool, optional): Whether to return only specific headers of the flows. Defaults to False.
    -
    -    Returns:
    -        list[FlowRead] | Page[FlowRead] | list[FlowHeader]
    -        A list of flows or a paginated response containing the list of flows or a list of flow headers.
    -    """
    +    """Retrieve a list of flows with optional pagination, filtering, and header-only mode."""
         try:
             auth_settings = get_settings_service().auth_settings
     
    @@ -415,18 +169,10 @@ async def read_flows(
                 return await apaginate(session, stmt, params=params)
     
         except Exception as e:
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    +        import logging as _logging
     
    -
    -async def _read_flow(
    -    session: AsyncSession,
    -    flow_id: UUID,
    -    user_id: UUID,
    -):
    -    """Read a flow."""
    -    stmt = select(Flow).where(Flow.id == flow_id).where(Flow.user_id == user_id)
    -
    -    return (await session.exec(stmt)).first()
    +        _logging.getLogger(__name__).exception("Error listing flows")
    +        raise HTTPException(status_code=500, detail="An internal error occurred while listing flows.") from e
     
     
     @router.get("/{flow_id}", response_model=FlowRead, status_code=200)
    @@ -468,80 +214,25 @@ async def update_flow(
         storage_service: Annotated[StorageService, Depends(get_storage_service)],
     ):
         """Update a flow."""
    -    settings_service = get_settings_service()
         try:
    -        db_flow = await _read_flow(
    -            session=session,
    -            flow_id=flow_id,
    -            user_id=current_user.id,
    -        )
    -
    +        db_flow = await _read_flow(session=session, flow_id=flow_id, user_id=current_user.id)
             if not db_flow:
                 raise HTTPException(status_code=404, detail="Flow not found")
     
    -        update_data = flow.model_dump(exclude_unset=True, exclude_none=True)
    -
    -        # Specifically handle endpoint_name when it's explicitly set to null or empty string
    -        if flow.endpoint_name is None or flow.endpoint_name == "":
    -            update_data["endpoint_name"] = None
    -
    -        if settings_service.settings.remove_api_keys:
    -            update_data = remove_api_keys(update_data)
    -
    -        for key, value in update_data.items():
    -            setattr(db_flow, key, value)
    -
    -        # Validate fs_path if it was changed (will raise HTTPException if invalid)
    -        if "fs_path" in update_data:
    -            await _verify_fs_path(db_flow.fs_path, current_user.id, storage_service)
    -
    -        webhook_component = get_webhook_component_in_flow(db_flow.data)
    -        db_flow.webhook = webhook_component is not None
    -        db_flow.updated_at = datetime.now(timezone.utc)
    -
    -        # Validate folder_id exists, or fall back to default folder
    -        if db_flow.folder_id is not None:
    -            folder_exists = (
    -                await session.exec(
    -                    select(Folder).where(Folder.id == db_flow.folder_id, Folder.user_id == current_user.id)
    -                )
    -            ).first()
    -            if not folder_exists:
    -                # Folder doesn't exist or doesn't belong to user, use default
    -                db_flow.folder_id = None
    -
    -        if db_flow.folder_id is None:
    -            # Make sure flows always have a folder (auto-create default folder if needed)
    -            db_flow.folder_id = await get_default_folder_id(session, current_user.id)
    -
    -        session.add(db_flow)
    -        await session.flush()
    -        await session.refresh(db_flow)
    -        await _save_flow_to_fs(db_flow, current_user.id, storage_service)
    -
    -        # Convert to FlowRead while session is still active to avoid detached instance errors
    -        flow_read = FlowRead.model_validate(db_flow, from_attributes=True)
    -
    +        return await _patch_flow(
    +            session=session,
    +            db_flow=db_flow,
    +            flow=flow,
    +            user_id=current_user.id,
    +            storage_service=storage_service,
    +        )
    +    except HTTPException:
    +        raise
         except Exception as e:
    -        if "UNIQUE constraint failed" in str(e):
    -            # Get the name of the column that failed
    -            columns = str(e).split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
    -            # UNIQUE constraint failed: flow.user_id, flow.name
    -            # or UNIQUE constraint failed: flow.name
    -            # if the column has id in it, we want the other column
    -            column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
    -            raise HTTPException(
    -                status_code=400, detail=f"{column.capitalize().replace('_', ' ')} must be unique"
    -            ) from e
    -
    -        if hasattr(e, "status_code"):
    -            raise HTTPException(status_code=e.status_code, detail=str(e)) from e
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    -
    -    return flow_read
    +        raise _handle_unique_constraint_error(e) from e
     
     
    -@router.put("/{flow_id}", response_model=FlowRead, include_in_schema=False)
    +@router.put("/{flow_id}", response_model=FlowRead)
     async def upsert_flow(
         *,
         session: DbSession,
    @@ -552,11 +243,7 @@ async def upsert_flow(
     ):
         """Create or update a flow with a specific ID (upsert).
     
    -    - If the flow doesn't exist: creates it with the specified ID
    -    - If the flow exists and belongs to the current user: updates it
    -    - If the flow exists but belongs to another user: returns 404
    -
    -    Returns 201 for creation, 200 for update.
    +    Returns 201 for creation, 200 for update.  Returns 404 if owned by another user.
         """
         from fastapi.responses import JSONResponse
     
    @@ -596,103 +283,7 @@ async def upsert_flow(
         except HTTPException:
             raise
         except Exception as e:
    -        if "UNIQUE constraint failed" in str(e):
    -            columns = str(e).split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
    -            column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
    -            raise HTTPException(
    -                status_code=409, detail=f"{column.capitalize().replace('_', ' ')} must be unique"
    -            ) from e
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    -
    -
    -async def _update_existing_flow(
    -    *,
    -    session: AsyncSession,
    -    existing_flow: Flow,
    -    flow: FlowCreate,
    -    current_user,
    -    storage_service: StorageService,
    -) -> FlowRead:
    -    """Update an existing flow (PUT update path).
    -
    -    Similar to update_flow but:
    -    - Fails on name/endpoint_name conflict with OTHER flows (409)
    -    - Keeps existing folder_id if not provided in request
    -    """
    -    settings_service = get_settings_service()
    -    user_id = current_user.id
    -
    -    # Validate fs_path if provided (use `is not None` to catch empty strings)
    -    if flow.fs_path is not None:
    -        await _verify_fs_path(flow.fs_path, user_id, storage_service)
    -
    -    # Validate folder_id if provided
    -    if flow.folder_id is not None:
    -        folder = (
    -            await session.exec(select(Folder).where(Folder.id == flow.folder_id, Folder.user_id == user_id))
    -        ).first()
    -        if not folder:
    -            raise HTTPException(status_code=400, detail="Folder not found")
    -
    -    # Check name uniqueness (excluding current flow)
    -    if flow.name and flow.name != existing_flow.name:
    -        name_conflict = (
    -            await session.exec(
    -                select(Flow).where(
    -                    Flow.name == flow.name,
    -                    Flow.user_id == user_id,
    -                    Flow.id != existing_flow.id,
    -                )
    -            )
    -        ).first()
    -        if name_conflict:
    -            raise HTTPException(status_code=409, detail="Name must be unique")
    -
    -    # Check endpoint_name uniqueness (excluding current flow)
    -    if flow.endpoint_name and flow.endpoint_name != existing_flow.endpoint_name:
    -        endpoint_conflict = (
    -            await session.exec(
    -                select(Flow).where(
    -                    Flow.endpoint_name == flow.endpoint_name,
    -                    Flow.user_id == user_id,
    -                    Flow.id != existing_flow.id,
    -                )
    -            )
    -        ).first()
    -        if endpoint_conflict:
    -            raise HTTPException(status_code=409, detail="Endpoint name must be unique")
    -
    -    # Build update data
    -    update_data = flow.model_dump(exclude_unset=True, exclude_none=True)
    -
    -    # Handle endpoint_name explicitly set to null or empty string (allow clearing)
    -    if flow.endpoint_name is None or flow.endpoint_name == "":
    -        update_data["endpoint_name"] = None
    -
    -    # Remove id and user_id from update data (security)
    -    update_data.pop("id", None)
    -    update_data.pop("user_id", None)
    -
    -    # If folder_id not provided, keep existing
    -    if "folder_id" not in update_data or update_data.get("folder_id") is None:
    -        update_data.pop("folder_id", None)
    -
    -    if settings_service.settings.remove_api_keys:
    -        update_data = remove_api_keys(update_data)
    -
    -    for key, value in update_data.items():
    -        setattr(existing_flow, key, value)
    -
    -    webhook_component = get_webhook_component_in_flow(existing_flow.data or {})
    -    existing_flow.webhook = webhook_component is not None
    -    existing_flow.updated_at = datetime.now(timezone.utc)
    -
    -    session.add(existing_flow)
    -    await session.flush()
    -    await session.refresh(existing_flow)
    -    await _save_flow_to_fs(existing_flow, user_id, storage_service)
    -
    -    return FlowRead.model_validate(existing_flow, from_attributes=True)
    +        raise _handle_unique_constraint_error(e, status_code=409) from e
     
     
     @router.delete("/{flow_id}", status_code=200)
    @@ -722,10 +313,26 @@ async def create_flows(
         current_user: CurrentActiveUser,
     ):
         """Create multiple new flows."""
    +    # Guard against duplicate IDs up-front so callers get a clean 422 instead
    +    # of an unhandled DB IntegrityError.  Use upload_file() for upsert semantics.
    +    requested_ids = [f.id for f in flow_list.flows if f.id is not None]
    +    if requested_ids:
    +        existing_ids = (await session.exec(select(Flow.id).where(col(Flow.id).in_(requested_ids)))).all()
    +        if existing_ids:
    +            conflict = ", ".join(str(i) for i in existing_ids)
    +            msg = (
    +                f"Flow(s) with the following IDs already exist: {conflict}. "
    +                "Use the update endpoint or upload_file() for upsert semantics."
    +            )
    +            raise HTTPException(status_code=422, detail=msg)
    +
         db_flows = []
         for flow in flow_list.flows:
             flow.user_id = current_user.id
    -        db_flow = Flow.model_validate(flow, from_attributes=True)
    +        # Exclude id from model_validate (same reasoning as _new_flow) and apply separately.
    +        db_flow = Flow.model_validate(flow.model_dump(exclude={"id"}))
    +        if flow.id is not None:
    +            db_flow.id = flow.id
             session.add(db_flow)
             db_flows.append(db_flow)
     
    @@ -745,11 +352,7 @@ async def upload_file(
         folder_id: UUID | None = None,
         storage_service: Annotated[StorageService, Depends(get_storage_service)],
     ):
    -    """Upload flows from a file.
    -
    -    Accepts either a JSON file (single flow or multi-flow format) or a ZIP file
    -    containing individual flow JSON files (as produced by the download endpoint).
    -    """
    +    """Upload flows from a JSON or ZIP file (upsert semantics for flows with stable IDs)."""
         if file is None:
             raise HTTPException(status_code=400, detail="No file provided")
     
    @@ -772,39 +375,30 @@ async def upload_file(
             except orjson.JSONDecodeError as e:
                 raise HTTPException(status_code=400, detail=f"Invalid JSON file: {e}") from e
     
    -    flow_list = FlowListCreate(**data) if "flows" in data else FlowListCreate(flows=[FlowCreate(**data)])
    +    # Normalise code fields: if exported with code-as-lines format, rejoin to
    +    # strings before creating the Pydantic models so the DB always stores strings.
    +    if "flows" in data:
    +        data = {**data, "flows": [normalize_code_for_import(f) for f in data["flows"]]}
    +        flow_list = FlowListCreate(**data)
    +    else:
    +        flow_list = FlowListCreate(flows=[FlowCreate(**normalize_code_for_import(data))])
     
         # TODO: Full-version import is planned as a follow-up feature.
         # When implemented, extract raw flow dicts here to read embedded "version"
         # arrays and create FlowVersion entries for each imported flow.
     
         try:
    -        flow_reads = []
    -        for flow in flow_list.flows:
    -            flow.user_id = current_user.id
    -            if folder_id:
    -                flow.folder_id = folder_id
    -            flow_read = await _new_flow(
    -                session=session, flow=flow, user_id=current_user.id, storage_service=storage_service
    -            )
    -            flow_reads.append(flow_read)
    +        return await _upsert_flow_list(
    +            session=session,
    +            flows=flow_list.flows,
    +            current_user=current_user,
    +            storage_service=storage_service,
    +            folder_id=folder_id,
    +        )
    +    except HTTPException:
    +        raise
         except Exception as e:
    -        if "UNIQUE constraint failed" in str(e):
    -            # Get the name of the column that failed
    -            columns = str(e).split("UNIQUE constraint failed: ")[1].split(".")[1].split("\n")[0]
    -            # UNIQUE constraint failed: flow.user_id, flow.name
    -            # or UNIQUE constraint failed: flow.name
    -            # if the column has id in it, we want the other column
    -            column = columns.split(",")[1] if "id" in columns.split(",")[0] else columns.split(",")[0]
    -
    -            raise HTTPException(
    -                status_code=400, detail=f"{column.capitalize().replace('_', ' ')} must be unique"
    -            ) from e
    -        if isinstance(e, HTTPException):
    -            raise
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    -
    -    return flow_reads
    +        raise _handle_unique_constraint_error(e) from e
     
     
     @router.delete("/")
    @@ -813,17 +407,7 @@ async def delete_multiple_flows(
         user: CurrentActiveUser,
         db: DbSession,
     ):
    -    """Delete multiple flows by their IDs.
    -
    -    Args:
    -        flow_ids (List[str]): The list of flow IDs to delete.
    -        user (User, optional): The user making the request. Defaults to the current active user.
    -        db (Session, optional): The database session.
    -
    -    Returns:
    -        dict: A dictionary containing the number of flows deleted.
    -
    -    """
    +    """Delete multiple flows by their IDs."""
         try:
             flows_to_delete = (
                 await db.exec(select(Flow).where(col(Flow.id).in_(flow_ids)).where(Flow.user_id == user.id))
    @@ -834,7 +418,10 @@ async def delete_multiple_flows(
             await db.flush()
             return {"deleted": len(flows_to_delete)}
         except Exception as exc:
    -        raise HTTPException(status_code=500, detail=str(exc)) from exc
    +        import logging as _logging
    +
    +        _logging.getLogger(__name__).exception("Error deleting multiple flows")
    +        raise HTTPException(status_code=500, detail="An internal error occurred while deleting flows.") from exc
     
     
     @router.post("/download/", status_code=200)
    @@ -852,104 +439,61 @@ async def download_multiple_file(
         if not flows:
             raise HTTPException(status_code=404, detail="No flows found.")
     
    -    flows_without_api_keys = [remove_api_keys(flow.model_dump()) for flow in flows]
    +    return _build_flows_download_response(flows)
     
    -    if len(flows_without_api_keys) > 1:
    -        # Create a byte stream to hold the ZIP file
    -        zip_stream = io.BytesIO()
     
    -        # Create a ZIP file
    -        with zipfile.ZipFile(zip_stream, "w") as zip_file:
    -            for flow in flows_without_api_keys:
    -                # Convert the flow object to JSON
    -                flow_json = json.dumps(jsonable_encoder(flow))
    -
    -                # Write the JSON to the ZIP file
    -                zip_file.writestr(f"{flow['name']}.json", flow_json)
    -
    -        # Seek to the beginning of the byte stream
    -        zip_stream.seek(0)
    -
    -        # Generate the filename with the current datetime
    -        current_time = datetime.now(tz=timezone.utc).astimezone().strftime("%Y%m%d_%H%M%S")
    -        filename = f"{current_time}_langflow_flows.zip"
    -
    -        return StreamingResponse(
    -            zip_stream,
    -            media_type="application/x-zip-compressed",
    -            headers={"Content-Disposition": f"attachment; filename={filename}"},
    -        )
    -    return flows_without_api_keys[0]
    -
    -
    -all_starter_folder_flows_response: Response | None = None
    +# 5 minutes
    +_STARTER_FLOWS_TTL_SECONDS: float = 300.0
    +_starter_flows_cache: ThreadingInMemoryCache[threading.RLock] = ThreadingInMemoryCache(
    +    max_size=1,
    +    expiration_time=int(_STARTER_FLOWS_TTL_SECONDS),
    +)
    +_starter_flows_lock = asyncio.Lock()
     
     
     @router.get("/basic_examples/", response_model=list[FlowRead], status_code=200)
     async def read_basic_examples(
         *,
         session: DbSession,
     ):
    -    """Retrieve a list of basic example flows.
    -
    -    Args:
    -        session (Session): The database session.
    +    """Retrieve a list of basic example flows."""
    +    cached_response = _starter_flows_cache.get("starter_flows")
    +    if cached_response is not CACHE_MISS:
    +        return cached_response
     
    -    Returns:
    -        list[FlowRead]: A list of basic example flows.
    -    """
    -    try:
    -        global all_starter_folder_flows_response  # noqa: PLW0603
    +    async with _starter_flows_lock:
    +        cached_response = _starter_flows_cache.get("starter_flows")
    +        if cached_response is not CACHE_MISS:
    +            return cached_response
     
    -        if all_starter_folder_flows_response:
    -            return all_starter_folder_flows_response
    -        # Get the starter folder
    -        starter_folder = (await session.exec(select(Folder).where(Folder.name == STARTER_FOLDER_NAME))).first()
    +        try:
    +            starter_folder = (await session.exec(select(Folder).where(Folder.name == STARTER_FOLDER_NAME))).first()
     
    -        if not starter_folder:
    -            return []
    +            if not starter_folder:
    +                return []
     
    -        # Get all flows in the starter folder
    -        all_starter_folder_flows = (await session.exec(select(Flow).where(Flow.folder_id == starter_folder.id))).all()
    +            all_starter_folder_flows = (
    +                await session.exec(select(Flow).where(Flow.folder_id == starter_folder.id))
    +            ).all()
     
    -        flow_reads = [FlowRead.model_validate(flow, from_attributes=True) for flow in all_starter_folder_flows]
    -        all_starter_folder_flows_response = compress_response(flow_reads)
    +            flow_reads = [FlowRead.model_validate(flow, from_attributes=True) for flow in all_starter_folder_flows]
    +            response = compress_response(flow_reads)
    +            _starter_flows_cache.set("starter_flows", response)
     
    -        # Return compressed response using our utility function
    -        return all_starter_folder_flows_response  # noqa: TRY300
    +        except Exception as e:
    +            import logging as _logging
     
    -    except Exception as e:
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    +            _logging.getLogger(__name__).exception("Error loading basic examples")
    +            raise HTTPException(status_code=500, detail="An internal error occurred while loading examples.") from e
    +        else:
    +            return response
     
     
     @router.post("/expand/", status_code=200, dependencies=[Depends(get_current_active_user)], include_in_schema=False)
     async def expand_compact_flow_endpoint(
         compact_data: dict,
     ):
    -    """Expand a compact flow format to full flow format.
    -
    -    This endpoint takes a minimal flow representation (as generated by AI agents)
    -    and expands it to the full format expected by the Langflow UI.
    -
    -    The compact format only requires:
    -    - nodes: list of {id, type, values?}
    -    - edges: list of {source, source_output, target, target_input}
    -
    -    The endpoint returns the full flow data with complete component templates.
    -
    -    Example input:
    -    ```json
    -    {
    -        "nodes": [
    -            {"id": "1", "type": "ChatInput"},
    -            {"id": "2", "type": "OpenAIModel", "values": {"model_name": "gpt-4"}}
    -        ],
    -        "edges": [
    -            {"source": "1", "source_output": "message", "target": "2", "target_input": "input_value"}
    -        ]
    -    }
    -    ```
    -    """
    +    """Expand a compact flow format (minimal nodes/edges) to the full flow format."""
         from lfx.interface.components import component_cache, get_and_cache_all_types_dict
     
         from langflow.processing.expand_flow import expand_compact_flow
    
  • src/backend/base/langflow/api/v1/projects_files.py+211 0 added
    @@ -0,0 +1,211 @@
    +"""File upload and download handlers for projects.
    +
    +Extracted from projects.py to reduce file size and separate file I/O concerns.
    +"""
    +
    +import io
    +import zipfile
    +from datetime import datetime, timezone
    +from typing import Annotated
    +from urllib.parse import quote
    +from uuid import UUID
    +
    +import orjson
    +from fastapi import File, HTTPException, UploadFile
    +from fastapi.responses import StreamingResponse
    +from lfx.log.logger import logger
    +from sqlmodel import select
    +
    +from langflow.api.utils import (
    +    CurrentActiveUser,
    +    DbSession,
    +    normalize_code_for_import,
    +    normalize_flow_for_export,
    +    remove_api_keys,
    +)
    +from langflow.api.utils.zip_utils import extract_flows_from_zip
    +from langflow.api.v1.flows import create_flows
    +from langflow.api.v1.flows_helpers import _sanitize_flow_filename
    +from langflow.api.v1.schemas import FlowListCreate
    +from langflow.helpers.flow import generate_unique_flow_name
    +from langflow.helpers.folders import generate_unique_folder_name
    +from langflow.services.auth.mcp_encryption import encrypt_auth_settings
    +from langflow.services.database.models.base import orjson_dumps
    +from langflow.services.database.models.flow.model import Flow, FlowCreate, FlowRead
    +from langflow.services.database.models.folder.model import (
    +    Folder,
    +    FolderCreate,
    +)
    +from langflow.services.deps import get_settings_service
    +
    +
    +async def download_project_flows(
    +    *,
    +    session: DbSession,
    +    project_id: UUID,
    +    current_user: CurrentActiveUser,
    +) -> StreamingResponse:
    +    """Download all flows from project as a zip file."""
    +    try:
    +        query = select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id)
    +        result = await session.exec(query)
    +        project = result.first()
    +
    +        if not project:
    +            raise HTTPException(status_code=404, detail="Project not found")
    +
    +        flows_query = select(Flow).where(Flow.folder_id == project_id)
    +        flows_result = await session.exec(flows_query)
    +        flows = [FlowRead.model_validate(flow, from_attributes=True) for flow in flows_result.all()]
    +
    +        if not flows:
    +            raise HTTPException(status_code=404, detail="No flows found in project")
    +
    +        # Strip API keys then normalise for git-friendly export (sorted keys,
    +        # volatile fields removed, code fields as line arrays).
    +        normalised_flows = [normalize_flow_for_export(remove_api_keys(flow.model_dump())) for flow in flows]
    +        zip_stream = io.BytesIO()
    +
    +        with zipfile.ZipFile(zip_stream, "w") as zip_file:
    +            for flow in normalised_flows:
    +                safe_name = _sanitize_flow_filename(str(flow["name"]), str(flow.get("id", "flow")))
    +                # Serialise with sorted keys and 2-space indent for stable diffs.
    +                flow_json = orjson_dumps(flow, sort_keys=True)
    +                zip_file.writestr(f"{safe_name}.json", flow_json.encode("utf-8"))
    +
    +        zip_stream.seek(0)
    +
    +        current_time = datetime.now(tz=timezone.utc).astimezone().strftime("%Y%m%d_%H%M%S")
    +        filename = f"{current_time}_{project.name}_flows.zip"
    +
    +        # URL-encode the filename to handle non-ASCII characters (e.g. Cyrillic)
    +        encoded_filename = quote(filename)
    +
    +        return StreamingResponse(
    +            zip_stream,
    +            media_type="application/x-zip-compressed",
    +            headers={"Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"},
    +        )
    +
    +    except HTTPException:
    +        raise
    +    except Exception as e:
    +        if "No result found" in str(e):
    +            raise HTTPException(status_code=404, detail="Project not found") from e
    +        logger.exception("Error downloading project flows for project_id=%s", project_id)
    +        raise HTTPException(
    +            status_code=500, detail="An internal error occurred while downloading project flows."
    +        ) from e
    +
    +
    +async def upload_project_flows(
    +    *,
    +    session: DbSession,
    +    file: Annotated[UploadFile | None, File()] = None,
    +    current_user: CurrentActiveUser,
    +) -> list[FlowRead]:
    +    """Upload flows from a file.
    +
    +    Accepts either a JSON file with project metadata (folder_name, folder_description, flows)
    +    or a ZIP file containing individual flow JSON files (as produced by the download endpoint).
    +    """
    +    if file is None:
    +        raise HTTPException(status_code=400, detail="No file provided")
    +
    +    contents = await file.read()
    +
    +    if not contents:
    +        raise HTTPException(status_code=400, detail="The uploaded file is empty")
    +
    +    # Detect ZIP files and extract flow data
    +    if zipfile.is_zipfile(io.BytesIO(contents)):
    +        try:
    +            flows_data = await extract_flows_from_zip(contents)
    +        except ValueError as e:
    +            raise HTTPException(status_code=400, detail=str(e)) from e
    +        if not flows_data:
    +            raise HTTPException(status_code=400, detail="No valid flow JSON files found in the ZIP")
    +
    +        # Use the uploaded filename (without extension) as the project name
    +        project_name_base = file.filename.rsplit(".", 1)[0] if file.filename else "Imported Project"
    +        project_name_base = project_name_base or "Imported Project"
    +        data: dict = {
    +            "folder_name": project_name_base,
    +            "folder_description": "",
    +            "flows": flows_data,
    +        }
    +    else:
    +        try:
    +            data = orjson.loads(contents)
    +        except orjson.JSONDecodeError as e:
    +            raise HTTPException(status_code=400, detail=f"Invalid JSON file: {e}") from e
    +
    +    if not data:
    +        raise HTTPException(status_code=400, detail="No flows found in the file")
    +
    +    # Validate that the uploaded JSON has the required structure before accessing keys
    +    if not isinstance(data, dict):
    +        raise HTTPException(
    +            status_code=400,
    +            detail="Invalid project data: expected a JSON object with 'folder_name' and 'flows' fields",
    +        )
    +
    +    missing_keys = [key for key in ("folder_name", "flows") if key not in data]
    +    if missing_keys:
    +        raise HTTPException(
    +            status_code=400,
    +            detail=f"Missing required field(s): {', '.join(missing_keys)}",
    +        )
    +    project_name = await generate_unique_folder_name(data["folder_name"], current_user.id, session)
    +
    +    data["folder_name"] = project_name
    +
    +    project = FolderCreate(name=data["folder_name"], description=data.get("folder_description", ""))
    +
    +    new_project = Folder.model_validate(project, from_attributes=True)
    +    new_project.id = None
    +    new_project.user_id = current_user.id
    +
    +    settings_service = get_settings_service()
    +
    +    # If AUTO_LOGIN is false, automatically enable API key authentication
    +    if not settings_service.auth_settings.AUTO_LOGIN and not new_project.auth_settings:
    +        default_auth = {"auth_type": "apikey"}
    +        new_project.auth_settings = encrypt_auth_settings(default_auth)
    +        await logger.adebug(
    +            "Auto-enabled API key authentication for uploaded project %s (%s) due to AUTO_LOGIN=false",
    +            new_project.name,
    +            new_project.id,
    +        )
    +
    +    session.add(new_project)
    +    await session.flush()
    +    await session.refresh(new_project)
    +    del data["folder_name"]
    +    data.pop("folder_description", None)
    +
    +    if "flows" in data:
    +        # Normalise code fields: if exported with code-as-lines format, rejoin to
    +        # strings before creating Pydantic models so the DB always stores strings.
    +        flow_list = FlowListCreate(flows=[FlowCreate(**normalize_code_for_import(flow)) for flow in data["flows"]])
    +    else:
    +        raise HTTPException(status_code=400, detail="No flows found in the data")
    +    # Generate unique names, tracking names already assigned within this batch
    +    # to avoid collisions when multiple flows would get the same generated name
    +    used_names_in_batch: set[str] = set()
    +    for flow in flow_list.flows:
    +        flow_name = await generate_unique_flow_name(flow.name, current_user.id, session)
    +        # Ensure the name is also unique within the current batch;
    +        # generate suffixed candidates and verify each against DB
    +        base_name = flow_name
    +        n = 1
    +        while flow_name in used_names_in_batch:
    +            candidate = f"{base_name} ({n})"
    +            n += 1
    +            flow_name = await generate_unique_flow_name(candidate, current_user.id, session)
    +        used_names_in_batch.add(flow_name)
    +        flow.name = flow_name
    +        flow.user_id = current_user.id
    +        flow.folder_id = new_project.id
    +
    +    return await create_flows(session=session, flow_list=flow_list, current_user=current_user)
    
  • src/backend/base/langflow/api/v1/projects_mcp_helpers.py+246 0 added
    @@ -0,0 +1,246 @@
    +"""MCP server registration and management helpers for projects.
    +
    +Extracted from projects.py to reduce file size and isolate MCP concerns (SO1).
    +"""
    +
    +from typing import cast
    +from uuid import UUID
    +
    +from fastapi import HTTPException
    +from lfx.log.logger import logger
    +from lfx.services.mcp_composer.service import MCPComposerService
    +
    +from langflow.api.utils.mcp.config_utils import validate_mcp_server_for_project
    +from langflow.api.v1.mcp_projects import get_project_streamable_http_url
    +from langflow.api.v2.mcp import update_server
    +from langflow.services.database.models.api_key.crud import create_api_key
    +from langflow.services.database.models.api_key.model import ApiKeyCreate
    +from langflow.services.deps import get_service, get_settings_service, get_storage_service
    +from langflow.services.schema import ServiceType
    +
    +
    +async def register_mcp_servers_for_project(
    +    project,
    +    default_auth: dict,
    +    current_user,
    +    session,
    +) -> None:
    +    """Register MCP servers for a newly created project.
    +
    +    This handles the full MCP auto-registration flow: building the transport URL,
    +    creating API keys if needed, validating conflicts, and calling update_server.
    +
    +    Raises HTTPException on conflicts or unsupported auth types.
    +    """
    +    try:
    +        streamable_http_url = await get_project_streamable_http_url(project.id)
    +
    +        if default_auth.get("auth_type", "none") == "apikey":
    +            api_key_name = f"MCP Project {project.name} - default"
    +            unmasked_api_key = await create_api_key(session, ApiKeyCreate(name=api_key_name), current_user.id)
    +            command = "uvx"
    +            args = [
    +                "mcp-proxy",
    +                "--transport",
    +                "streamablehttp",
    +                "--headers",
    +                "x-api-key",
    +                unmasked_api_key.api_key,
    +                streamable_http_url,
    +            ]
    +        elif default_auth.get("auth_type", "none") == "oauth":
    +            msg = "OAuth authentication is not yet implemented for MCP server creation during project creation."
    +            await logger.awarning(msg)
    +            return
    +        else:
    +            command = "uvx"
    +            args = [
    +                "mcp-proxy",
    +                "--transport",
    +                "streamablehttp",
    +                streamable_http_url,
    +            ]
    +
    +        server_config = {"command": command, "args": args}
    +
    +        validation_result = await validate_mcp_server_for_project(
    +            project.id,
    +            project.name,
    +            current_user,
    +            session,
    +            get_storage_service(),
    +            get_settings_service(),
    +            operation="create",
    +        )
    +
    +        if validation_result.has_conflict:
    +            await logger.aerror(validation_result.conflict_message)
    +            raise HTTPException(
    +                status_code=409,
    +                detail=validation_result.conflict_message,
    +            )
    +
    +        if validation_result.should_skip:
    +            await logger.adebug(
    +                "MCP server '%s' already exists for project %s, updating",
    +                validation_result.server_name,
    +                project.id,
    +            )
    +
    +        server_name = validation_result.server_name
    +
    +        await update_server(
    +            server_name,
    +            server_config,
    +            current_user,
    +            session,
    +            get_storage_service(),
    +            get_settings_service(),
    +        )
    +    except HTTPException:
    +        raise
    +    except Exception as e:  # noqa: BLE001
    +        await logger.aexception("Failed to auto-register MCP server for project %s: %s", project.id, e)
    +
    +
    +async def handle_mcp_server_rename(
    +    existing_project,
    +    old_project_name: str,
    +    new_project_name: str,
    +    current_user,
    +    session,
    +) -> None:
    +    """Handle MCP server name update when a project is renamed.
    +
    +    Validates old and new server names, checks for conflicts, and performs
    +    the rename (delete old + create new) if needed.
    +
    +    Raises HTTPException on name conflicts.
    +    """
    +    try:
    +        old_validation = await validate_mcp_server_for_project(
    +            existing_project.id,
    +            old_project_name,
    +            current_user,
    +            session,
    +            get_storage_service(),
    +            get_settings_service(),
    +            operation="update",
    +        )
    +
    +        new_validation = await validate_mcp_server_for_project(
    +            existing_project.id,
    +            new_project_name,
    +            current_user,
    +            session,
    +            get_storage_service(),
    +            get_settings_service(),
    +            operation="update",
    +        )
    +
    +        if old_validation.server_name != new_validation.server_name:
    +            if new_validation.has_conflict:
    +                await logger.aerror(new_validation.conflict_message)
    +                raise HTTPException(
    +                    status_code=409,
    +                    detail=new_validation.conflict_message,
    +                )
    +
    +            if old_validation.server_exists and old_validation.project_id_matches:
    +                await update_server(
    +                    old_validation.server_name,
    +                    {},
    +                    current_user,
    +                    session,
    +                    get_storage_service(),
    +                    get_settings_service(),
    +                    delete=True,
    +                )
    +
    +                await update_server(
    +                    new_validation.server_name,
    +                    old_validation.existing_config or {},
    +                    current_user,
    +                    session,
    +                    get_storage_service(),
    +                    get_settings_service(),
    +                )
    +
    +                await logger.adebug(
    +                    "Updated MCP server name from %s to %s",
    +                    old_validation.server_name,
    +                    new_validation.server_name,
    +                )
    +            else:
    +                await logger.adebug(
    +                    "Old MCP server '%s' not found for this project, skipping rename",
    +                    old_validation.server_name,
    +                )
    +
    +    except HTTPException:
    +        raise
    +    except Exception as e:  # noqa: BLE001
    +        await logger.awarning("Failed to handle MCP server name update for project rename: %s", e)
    +
    +
    +async def cleanup_mcp_on_delete(
    +    project,
    +    project_id: UUID,
    +    current_user,
    +    session,
    +) -> None:
    +    """Clean up MCP resources when a project is deleted.
    +
    +    Stops the MCP Composer if the project uses OAuth, and removes the
    +    corresponding MCP server entry if auto-add was enabled.
    +    """
    +    # Stop MCP Composer if project used OAuth
    +    if project.auth_settings and project.auth_settings.get("auth_type") == "oauth":
    +        try:
    +            mcp_composer_service: MCPComposerService = cast(
    +                MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE)
    +            )
    +            await mcp_composer_service.stop_project_composer(str(project_id))
    +            await logger.adebug("Stopped MCP Composer for deleted OAuth project %s (%s)", project.name, project_id)
    +        except Exception as e:  # noqa: BLE001
    +            await logger.aerror("Failed to stop MCP Composer for deleted project %s: %s", project_id, e)
    +
    +    # Delete corresponding MCP server if auto-add was enabled
    +    if get_settings_service().settings.add_projects_to_mcp_servers:
    +        try:
    +            validation_result = await validate_mcp_server_for_project(
    +                project_id,
    +                project.name,
    +                current_user,
    +                session,
    +                get_storage_service(),
    +                get_settings_service(),
    +                operation="delete",
    +            )
    +
    +            if validation_result.server_exists and validation_result.project_id_matches:
    +                await update_server(
    +                    validation_result.server_name,
    +                    {},
    +                    current_user,
    +                    session,
    +                    get_storage_service(),
    +                    get_settings_service(),
    +                    delete=True,
    +                )
    +                await logger.adebug(
    +                    "Deleted MCP server %s for deleted project %s (%s)",
    +                    validation_result.server_name,
    +                    project.name,
    +                    project_id,
    +                )
    +            elif validation_result.server_exists and not validation_result.project_id_matches:
    +                await logger.adebug(
    +                    "MCP server '%s' exists but belongs to different project, skipping deletion",
    +                    validation_result.server_name,
    +                )
    +            else:
    +                await logger.adebug("No MCP server found for deleted project %s (%s)", project.name, project_id)
    +
    +        except Exception as e:  # noqa: BLE001
    +            await logger.awarning("Failed to handle MCP server cleanup for deleted project %s: %s", project_id, e)
    
  • src/backend/base/langflow/api/v1/projects.py+57 388 modified
    @@ -1,15 +1,8 @@
    -import io
    -import json
    -import zipfile
    -from datetime import datetime, timezone
    +import warnings
     from typing import Annotated, cast
    -from urllib.parse import quote
     from uuid import UUID
     
    -import orjson
     from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, Query, Response, UploadFile, status
    -from fastapi.encoders import jsonable_encoder
    -from fastapi.responses import StreamingResponse
     from fastapi_pagination import Params
     from fastapi_pagination.ext.sqlmodel import apaginate
     from lfx.log.logger import logger
    @@ -18,25 +11,23 @@
     from sqlalchemy.orm import selectinload
     from sqlmodel import select
     
    -from langflow.api.utils import CurrentActiveUser, DbSession, cascade_delete_flow, custom_params, remove_api_keys
    -from langflow.api.utils.mcp.config_utils import validate_mcp_server_for_project
    -from langflow.api.utils.zip_utils import extract_flows_from_zip
    +from langflow.api.utils import (
    +    CurrentActiveUser,
    +    DbSession,
    +    cascade_delete_flow,
    +    custom_params,
    +)
     from langflow.api.v1.auth_helpers import handle_auth_settings_update
    -from langflow.api.v1.flows import create_flows
    -from langflow.api.v1.mcp_projects import (
    -    get_project_sse_url,  # noqa: F401
    -    get_project_streamable_http_url,
    -    register_project_with_composer,
    +from langflow.api.v1.mcp_projects import register_project_with_composer
    +from langflow.api.v1.projects_files import download_project_flows, upload_project_flows
    +from langflow.api.v1.projects_mcp_helpers import (
    +    cleanup_mcp_on_delete,
    +    handle_mcp_server_rename,
    +    register_mcp_servers_for_project,
     )
    -from langflow.api.v1.schemas import FlowListCreate
    -from langflow.api.v2.mcp import update_server
    -from langflow.helpers.flow import generate_unique_flow_name
    -from langflow.helpers.folders import generate_unique_folder_name
     from langflow.initial_setup.constants import ASSISTANT_FOLDER_NAME, STARTER_FOLDER_NAME
     from langflow.services.auth.mcp_encryption import encrypt_auth_settings
    -from langflow.services.database.models.api_key.crud import create_api_key
    -from langflow.services.database.models.api_key.model import ApiKeyCreate
    -from langflow.services.database.models.flow.model import Flow, FlowCreate, FlowRead
    +from langflow.services.database.models.flow.model import Flow, FlowRead
     from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
     from langflow.services.database.models.folder.model import (
         Folder,
    @@ -46,12 +37,17 @@
         FolderUpdate,
     )
     from langflow.services.database.models.folder.pagination_model import FolderWithPaginatedFlows
    -from langflow.services.deps import get_service, get_settings_service, get_storage_service
    +from langflow.services.deps import get_service, get_settings_service
     from langflow.services.schema import ServiceType
     
     router = APIRouter(prefix="/projects", tags=["Projects"])
     
     
    +def _escape_like(value: str) -> str:
    +    """Escape LIKE wildcards and the escape character itself."""
    +    return value.replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
    +
    +
     @router.post("/", response_model=FolderRead, status_code=201)
     async def create_project(
         *,
    @@ -72,31 +68,42 @@ async def create_project(
                     statement=select(Folder).where(Folder.name == new_project.name).where(Folder.user_id == current_user.id)
                 )
             ).first():
    +            escaped_project_name = _escape_like(new_project.name)
                 project_results = await session.exec(
                     select(Folder).where(
    -                    Folder.name.like(f"{new_project.name}%"),  # type: ignore[attr-defined]
    +                    Folder.name.like(f"{escaped_project_name}%", escape="\\"),  # type: ignore[attr-defined]
                         Folder.user_id == current_user.id,
                     )
                 )
                 if project_results:
                     project_names = [project.name for project in project_results]
    -                # TODO: this throws an error if the name contains non-numeric content in parentheses
    -                project_numbers = [int(name.split("(")[-1].split(")")[0]) for name in project_names if "(" in name]
    +                project_numbers = []
    +                for name in project_names:
    +                    if "(" not in name:
    +                        continue
    +                    try:
    +                        project_numbers.append(int(name.split("(")[-1].split(")")[0]))
    +                    except ValueError:
    +                        continue
                     if project_numbers:
                         new_project.name = f"{new_project.name} ({max(project_numbers) + 1})"
                     else:
                         new_project.name = f"{new_project.name} (1)"
     
             settings_service = get_settings_service()
    +        mcp_auth: dict = {"auth_type": "none"}
     
    +        if project.auth_settings:
    +            mcp_auth = project.auth_settings.copy()
    +            new_project.auth_settings = encrypt_auth_settings(mcp_auth)
             # If AUTO_LOGIN is false, automatically enable API key authentication
    -        default_auth = {"auth_type": "none"}
    -        if not settings_service.auth_settings.AUTO_LOGIN and not new_project.auth_settings:
    -            default_auth = {"auth_type": "apikey"}
    -            new_project.auth_settings = encrypt_auth_settings(default_auth)
    +        elif not settings_service.auth_settings.AUTO_LOGIN:
    +            mcp_auth = {"auth_type": "apikey"}
    +            new_project.auth_settings = encrypt_auth_settings(mcp_auth)
                 await logger.adebug(
    -                f"Auto-enabled API key authentication for project {new_project.name} "
    -                f"({new_project.id}) due to AUTO_LOGIN=false"
    +                "Auto-enabled API key authentication for project %s (%s) due to AUTO_LOGIN=false",
    +                new_project.name,
    +                new_project.id,
                 )
     
             session.add(new_project)
    @@ -105,91 +112,7 @@ async def create_project(
     
             # Auto-register MCP server for this project with configured default auth
             if get_settings_service().settings.add_projects_to_mcp_servers:
    -            try:
    -                # Build Streamable HTTP URL (preferred transport) and legacy SSE URL (for docs/errors)
    -                streamable_http_url = await get_project_streamable_http_url(new_project.id)
    -                # legacy SSE URL
    -                # sse_url = await get_project_sse_url(new_project.id)
    -
    -                # Prepare server config based on auth type same as new project
    -                if default_auth.get("auth_type", "none") == "apikey":
    -                    # Create API key for API key authentication
    -                    api_key_name = f"MCP Project {new_project.name} - default"
    -                    unmasked_api_key = await create_api_key(session, ApiKeyCreate(name=api_key_name), current_user.id)
    -                    # Starting v>=1.7.1, we use Streamable HTTP transport by default
    -                    command = "uvx"
    -                    args = [
    -                        "mcp-proxy",
    -                        "--transport",
    -                        "streamablehttp",
    -                        "--headers",
    -                        "x-api-key",
    -                        unmasked_api_key.api_key,
    -                        streamable_http_url,
    -                    ]
    -                elif default_auth.get("auth_type", "none") == "oauth":
    -                    msg = "OAuth authentication is not yet implemented for MCP server creation during project creation."
    -                    logger.warning(msg)
    -                    raise HTTPException(status_code=501, detail=msg)
    -                else:  # default_auth_type == "none"
    -                    # No authentication - direct connection
    -                    command = "uvx"
    -                    args = [
    -                        "mcp-proxy",
    -                        "--transport",
    -                        "streamablehttp",
    -                        streamable_http_url,
    -                    ]
    -
    -                server_config = {"command": command, "args": args}
    -
    -                # Validate MCP server for this project
    -                validation_result = await validate_mcp_server_for_project(
    -                    new_project.id,
    -                    new_project.name,
    -                    current_user,
    -                    session,
    -                    get_storage_service(),
    -                    get_settings_service(),
    -                    operation="create",
    -                )
    -
    -                # Handle conflicts
    -                if validation_result.has_conflict:
    -                    await logger.aerror(validation_result.conflict_message)
    -                    raise HTTPException(
    -                        status_code=409,  # Conflict - semantically correct for name conflicts
    -                        detail=validation_result.conflict_message,
    -                    )
    -
    -                # Log if updating existing server
    -                if validation_result.should_skip:
    -                    msg = (
    -                        f"MCP server '{validation_result.server_name}' "
    -                        f"already exists for project {new_project.id}, updating"
    -                    )
    -                    await logger.adebug(msg)
    -
    -                server_name = validation_result.server_name
    -
    -                await update_server(
    -                    server_name,
    -                    server_config,
    -                    current_user,
    -                    session,
    -                    get_storage_service(),
    -                    get_settings_service(),
    -                )
    -            except HTTPException:
    -                # Re-raise HTTP validation errors (conflicts, etc.)
    -                raise
    -            except NotImplementedError:
    -                msg = "OAuth as default MCP authentication type is not yet implemented"
    -                await logger.aerror(msg)
    -                raise
    -            except Exception as e:  # noqa: BLE001
    -                msg = f"Failed to auto-register MCP server for project {new_project.id}: {e}"
    -                await logger.aexception(msg, exc_info=True)
    +            await register_mcp_servers_for_project(new_project, mcp_auth, current_user, session)
     
             if project.components_list:
                 update_statement_components = (
    @@ -278,9 +201,8 @@ async def read_project(
                 if is_flow:
                     stmt = stmt.where(Flow.is_component == False)  # noqa: E712
                 if search:
    -                stmt = stmt.where(Flow.name.like(f"%{search}%"))  # type: ignore[attr-defined]
    -
    -            import warnings
    +                _search = _escape_like(search)
    +                stmt = stmt.where(Flow.name.like(f"%{_search}%", escape="\\"))  # type: ignore[attr-defined]
     
                 with warnings.catch_warnings():
                     warnings.filterwarnings(
    @@ -343,88 +265,13 @@ async def update_project(
                 should_start_mcp_composer = auth_result["should_start_composer"]
                 should_stop_mcp_composer = auth_result["should_stop_composer"]
     
    -        # Handle other updates
    +        # Handle project rename and corresponding MCP server rename
             if project.name and project.name != existing_project.name:
                 old_project_name = existing_project.name
                 existing_project.name = project.name
     
    -            # Update corresponding MCP server name if auto-add is enabled
                 if get_settings_service().settings.add_projects_to_mcp_servers:
    -                try:
    -                    # Validate old server (for this specific project)
    -                    old_validation = await validate_mcp_server_for_project(
    -                        existing_project.id,
    -                        old_project_name,
    -                        current_user,
    -                        session,
    -                        get_storage_service(),
    -                        get_settings_service(),
    -                        operation="update",
    -                    )
    -
    -                    # Validate new server name (check for conflicts)
    -                    new_validation = await validate_mcp_server_for_project(
    -                        existing_project.id,
    -                        project.name,
    -                        current_user,
    -                        session,
    -                        get_storage_service(),
    -                        get_settings_service(),
    -                        operation="update",
    -                    )
    -
    -                    # Only proceed if server names would be different
    -                    if old_validation.server_name != new_validation.server_name:
    -                        # Check if new server name would conflict with different project
    -                        if new_validation.has_conflict:
    -                            await logger.aerror(new_validation.conflict_message)
    -                            raise HTTPException(
    -                                status_code=409,  # Conflict - semantically correct for name conflicts
    -                                detail=new_validation.conflict_message,
    -                            )
    -
    -                        # If old server exists and matches this project, proceed with rename
    -                        if old_validation.server_exists and old_validation.project_id_matches:
    -                            # Remove the old server
    -                            await update_server(
    -                                old_validation.server_name,
    -                                {},  # Empty config for deletion
    -                                current_user,
    -                                session,
    -                                get_storage_service(),
    -                                get_settings_service(),
    -                                delete=True,
    -                            )
    -
    -                            # Add server with new name and existing config
    -                            await update_server(
    -                                new_validation.server_name,
    -                                old_validation.existing_config or {},
    -                                current_user,
    -                                session,
    -                                get_storage_service(),
    -                                get_settings_service(),
    -                            )
    -
    -                            msg = (
    -                                f"Updated MCP server name from {old_validation.server_name} "
    -                                f"to {new_validation.server_name}"
    -                            )
    -                            await logger.adebug(msg)
    -                        else:
    -                            msg = (
    -                                f"Old MCP server '{old_validation.server_name}' "
    -                                "not found for this project, skipping rename"
    -                            )
    -                            await logger.adebug(msg)
    -
    -                except HTTPException:
    -                    # Re-raise HTTP validation errors (conflicts, etc.)
    -                    raise
    -                except Exception as e:  # noqa: BLE001
    -                    # Log but don't fail the project update if MCP server handling fails
    -                    msg = f"Failed to handle MCP server name update for project rename: {e}"
    -                    await logger.awarning(msg)
    +                await handle_mcp_server_rename(existing_project, old_project_name, project.name, current_user, session)
     
             if project.description is not None:
                 existing_project.description = project.description
    @@ -438,22 +285,21 @@ async def update_project(
     
             # Start MCP Composer if auth changed to OAuth
             if should_start_mcp_composer:
    -            msg = (
    -                f"Auth settings changed to OAuth for project {existing_project.name} ({existing_project.id}), "
    -                "starting MCP Composer"
    +            await logger.adebug(
    +                "Auth settings changed to OAuth for project %s (%s), starting MCP Composer",
    +                existing_project.name,
    +                existing_project.id,
                 )
    -            await logger.adebug(msg)
                 background_tasks.add_task(register_project_with_composer, existing_project)
     
             # Stop MCP Composer if auth changed FROM OAuth to something else
             elif should_stop_mcp_composer:
    -            msg = (
    -                f"Auth settings changed from OAuth for project {existing_project.name} ({existing_project.id}), "
    -                "stopping MCP Composer"
    +            await logger.ainfo(
    +                "Auth settings changed from OAuth for project %s (%s), stopping MCP Composer",
    +                existing_project.name,
    +                existing_project.id,
                 )
    -            await logger.ainfo(msg)
     
    -            # Get the MCP Composer service and stop the project's composer
                 mcp_composer_service: MCPComposerService = cast(
                     MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE)
                 )
    @@ -517,67 +363,13 @@ async def delete_project(
         # Prevent deletion of the Langflow Assistant folder
         if project.name == ASSISTANT_FOLDER_NAME:
             msg = f"Cannot delete the '{ASSISTANT_FOLDER_NAME}' folder, that contains pre-built flows."
    -        await logger.adebug(msg)
    +        await logger.adebug("Cannot delete the '%s' folder, that contains pre-built flows.", ASSISTANT_FOLDER_NAME)
             raise HTTPException(
                 status_code=403,
                 detail=msg,
             )
     
    -    # Check if project has OAuth authentication and stop MCP Composer if needed
    -    if project.auth_settings and project.auth_settings.get("auth_type") == "oauth":
    -        try:
    -            mcp_composer_service: MCPComposerService = cast(
    -                MCPComposerService, get_service(ServiceType.MCP_COMPOSER_SERVICE)
    -            )
    -            await mcp_composer_service.stop_project_composer(str(project_id))
    -            await logger.adebug(f"Stopped MCP Composer for deleted OAuth project {project.name} ({project_id})")
    -        except Exception as e:  # noqa: BLE001
    -            # Log but don't fail the deletion if MCP Composer cleanup fails
    -            await logger.aerror(f"Failed to stop MCP Composer for deleted project {project_id}: {e}")
    -
    -    # Delete corresponding MCP server if auto-add was enabled
    -    if get_settings_service().settings.add_projects_to_mcp_servers:
    -        try:
    -            # Validate MCP server for this specific project
    -            validation_result = await validate_mcp_server_for_project(
    -                project_id,
    -                project.name,
    -                current_user,
    -                session,
    -                get_storage_service(),
    -                get_settings_service(),
    -                operation="delete",
    -            )
    -
    -            # Only delete if server exists and matches this project ID
    -            if validation_result.server_exists and validation_result.project_id_matches:
    -                await update_server(
    -                    validation_result.server_name,
    -                    {},  # Empty config for deletion
    -                    current_user,
    -                    session,
    -                    get_storage_service(),
    -                    get_settings_service(),
    -                    delete=True,
    -                )
    -                msg = (
    -                    f"Deleted MCP server {validation_result.server_name} for "
    -                    f"deleted project {project.name} ({project_id})"
    -                )
    -                await logger.adebug(msg)
    -            elif validation_result.server_exists and not validation_result.project_id_matches:
    -                msg = (
    -                    f"MCP server '{validation_result.server_name}' exists but belongs to different project, "
    -                    "skipping deletion"
    -                )
    -                await logger.adebug(msg)
    -            else:
    -                msg = f"No MCP server found for deleted project {project.name} ({project_id})"
    -                await logger.adebug(msg)
    -
    -        except Exception as e:  # noqa: BLE001
    -            # Log but don't fail the project deletion if MCP server handling fails
    -            await logger.awarning(f"Failed to handle MCP server cleanup for deleted project {project_id}: {e}")
    +    await cleanup_mcp_on_delete(project, project_id, current_user, session)
     
         try:
             await session.delete(project)
    @@ -594,47 +386,7 @@ async def download_file(
         current_user: CurrentActiveUser,
     ):
         """Download all flows from project as a zip file."""
    -    try:
    -        query = select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id)
    -        result = await session.exec(query)
    -        project = result.first()
    -
    -        if not project:
    -            raise HTTPException(status_code=404, detail="Project not found")
    -
    -        flows_query = select(Flow).where(Flow.folder_id == project_id)
    -        flows_result = await session.exec(flows_query)
    -        flows = [FlowRead.model_validate(flow, from_attributes=True) for flow in flows_result.all()]
    -
    -        if not flows:
    -            raise HTTPException(status_code=404, detail="No flows found in project")
    -
    -        flows_without_api_keys = [remove_api_keys(flow.model_dump()) for flow in flows]
    -        zip_stream = io.BytesIO()
    -
    -        with zipfile.ZipFile(zip_stream, "w") as zip_file:
    -            for flow in flows_without_api_keys:
    -                flow_json = json.dumps(jsonable_encoder(flow))
    -                zip_file.writestr(f"{flow['name']}.json", flow_json.encode("utf-8"))
    -
    -        zip_stream.seek(0)
    -
    -        current_time = datetime.now(tz=timezone.utc).astimezone().strftime("%Y%m%d_%H%M%S")
    -        filename = f"{current_time}_{project.name}_flows.zip"
    -
    -        # URL encode filename handle non-ASCII (ex. Cyrillic)
    -        encoded_filename = quote(filename)
    -
    -        return StreamingResponse(
    -            zip_stream,
    -            media_type="application/x-zip-compressed",
    -            headers={"Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"},
    -        )
    -
    -    except Exception as e:
    -        if "No result found" in str(e):
    -            raise HTTPException(status_code=404, detail="Project not found") from e
    -        raise HTTPException(status_code=500, detail=str(e)) from e
    +    return await download_project_flows(session=session, project_id=project_id, current_user=current_user)
     
     
     @router.post("/upload/", response_model=list[FlowRead], status_code=201)
    @@ -649,87 +401,4 @@ async def upload_file(
         Accepts either a JSON file with project metadata (folder_name, folder_description, flows)
         or a ZIP file containing individual flow JSON files (as produced by the download endpoint).
         """
    -    if file is None:
    -        raise HTTPException(status_code=400, detail="No file provided")
    -
    -    contents = await file.read()
    -
    -    if not contents:
    -        raise HTTPException(status_code=400, detail="The uploaded file is empty")
    -
    -    # Detect ZIP files and extract flow data
    -    if zipfile.is_zipfile(io.BytesIO(contents)):
    -        try:
    -            flows_data = await extract_flows_from_zip(contents)
    -        except ValueError as e:
    -            raise HTTPException(status_code=400, detail=str(e)) from e
    -        if not flows_data:
    -            raise HTTPException(status_code=400, detail="No valid flow JSON files found in the ZIP")
    -
    -        # Use the uploaded filename (without extension) as the project name
    -        project_name_base = file.filename.rsplit(".", 1)[0] if file.filename else "Imported Project"
    -        project_name_base = project_name_base or "Imported Project"
    -        data: dict = {
    -            "folder_name": project_name_base,
    -            "folder_description": "",
    -            "flows": flows_data,
    -        }
    -    else:
    -        try:
    -            data = orjson.loads(contents)
    -        except orjson.JSONDecodeError as e:
    -            raise HTTPException(status_code=400, detail=f"Invalid JSON file: {e}") from e
    -
    -    if not data:
    -        raise HTTPException(status_code=400, detail="No flows found in the file")
    -
    -    project_name = await generate_unique_folder_name(data["folder_name"], current_user.id, session)
    -
    -    data["folder_name"] = project_name
    -
    -    project = FolderCreate(name=data["folder_name"], description=data.get("folder_description", ""))
    -
    -    new_project = Folder.model_validate(project, from_attributes=True)
    -    new_project.id = None
    -    new_project.user_id = current_user.id
    -
    -    settings_service = get_settings_service()
    -
    -    # If AUTO_LOGIN is false, automatically enable API key authentication
    -    if not settings_service.auth_settings.AUTO_LOGIN and not new_project.auth_settings:
    -        default_auth = {"auth_type": "apikey"}
    -        new_project.auth_settings = encrypt_auth_settings(default_auth)
    -        await logger.adebug(
    -            f"Auto-enabled API key authentication for uploaded project {new_project.name} "
    -            f"({new_project.id}) due to AUTO_LOGIN=false"
    -        )
    -
    -    session.add(new_project)
    -    await session.flush()
    -    await session.refresh(new_project)
    -    del data["folder_name"]
    -    data.pop("folder_description", None)
    -
    -    if "flows" in data:
    -        flow_list = FlowListCreate(flows=[FlowCreate(**flow) for flow in data["flows"]])
    -    else:
    -        raise HTTPException(status_code=400, detail="No flows found in the data")
    -    # Generate unique names, tracking names already assigned within this batch
    -    # to avoid collisions when multiple flows would get the same generated name
    -    used_names_in_batch: set[str] = set()
    -    for flow in flow_list.flows:
    -        flow_name = await generate_unique_flow_name(flow.name, current_user.id, session)
    -        # Ensure the name is also unique within the current batch;
    -        # generate suffixed candidates and verify each against DB
    -        base_name = flow_name
    -        n = 1
    -        while flow_name in used_names_in_batch:
    -            candidate = f"{base_name} ({n})"
    -            n += 1
    -            flow_name = await generate_unique_flow_name(candidate, current_user.id, session)
    -        used_names_in_batch.add(flow_name)
    -        flow.name = flow_name
    -        flow.user_id = current_user.id
    -        flow.folder_id = new_project.id
    -
    -    return await create_flows(session=session, flow_list=flow_list, current_user=current_user)
    +    return await upload_project_flows(session=session, file=file, current_user=current_user)
    
  • src/backend/base/langflow/initial_setup/setup.py+2 2 modified
    @@ -719,7 +719,7 @@ def create_new_project(
             gradient=project_gradient,
             tags=project_tags,
         )
    -    db_flow = Flow.model_validate(new_project, from_attributes=True)
    +    db_flow = Flow.model_validate(new_project.model_dump(exclude={"id"}))
         session.add(db_flow)
     
     
    @@ -895,7 +895,7 @@ async def create_or_update_agentic_flows(session: AsyncSession, user_id: UUID) -
                             tags=flow_tags,
                             endpoint_name=flow_endpoint_name,  # Set endpoint_name from JSON
                         )
    -                    db_flow = Flow.model_validate(new_project, from_attributes=True)
    +                    db_flow = Flow.model_validate(new_project.model_dump(exclude={"id"}))
     
                         # Set the ID from JSON if provided
                         if flow_id:
    
  • src/backend/base/langflow/services/database/models/flow/model.py+26 28 modified
    @@ -8,7 +8,6 @@
     
     import emoji
     from emoji import purely_emoji
    -from fastapi import HTTPException, status
     from lfx.log.logger import logger
     from pydantic import BaseModel, ValidationInfo, field_serializer, field_validator
     from sqlalchemy import Enum as SQLEnum
    @@ -23,6 +22,24 @@
     
     HEX_COLOR_LENGTH = 7
     
    +_ENDPOINT_NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$")
    +
    +
    +def _validate_endpoint_name_value(v: str | None) -> str | None:
    +    """Validate that an endpoint name contains only safe URL characters.
    +
    +    Raises ``ValueError`` on invalid input — callers at the HTTP layer
    +    should catch and translate to an appropriate HTTP response.
    +    """
    +    if v is not None:
    +        if not isinstance(v, str):
    +            msg = "Endpoint name must be a string"
    +            raise ValueError(msg)
    +        if not _ENDPOINT_NAME_RE.match(v):
    +            msg = "Endpoint name must contain only letters, numbers, hyphens, and underscores"
    +            raise ValueError(msg)
    +    return v
    +
     
     class AccessTypeEnum(str, Enum):
         PRIVATE = "PRIVATE"
    @@ -70,19 +87,7 @@ class FlowBase(SQLModel):
         @field_validator("endpoint_name")
         @classmethod
         def validate_endpoint_name(cls, v):
    -        # Endpoint name must be a string containing only letters, numbers, hyphens, and underscores
    -        if v is not None:
    -            if not isinstance(v, str):
    -                raise HTTPException(
    -                    status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
    -                    detail="Endpoint name must be a string",
    -                )
    -            if not re.match(r"^[a-zA-Z0-9_-]+$", v):
    -                raise HTTPException(
    -                    status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
    -                    detail="Endpoint name must contain only letters, numbers, hyphens, and underscores",
    -                )
    -        return v
    +        return _validate_endpoint_name_value(v)
     
         @field_validator("icon_bg_color")
         @classmethod
    @@ -122,7 +127,7 @@ def validate_icon_atr(cls, v):
     
             emoji_value = emoji.emojize(v, variant="emoji_type")
             if v == emoji_value:
    -            logger.warning(f"Invalid emoji. {v} is not a valid emoji.")
    +            logger.warning("Invalid emoji. %s is not a valid emoji.", v)
             icon = emoji_value
     
             if purely_emoji(icon):
    @@ -214,6 +219,11 @@ def to_data(self):
     
     
     class FlowCreate(FlowBase):
    +    # Optional stable ID.  When present on upload, the flow is upserted
    +    # (created with that ID, or updated if the ID already belongs to the
    +    # current user).  Flows without an id get a generated UUID — backward
    +    # compatible with all existing import paths.
    +    id: UUID | None = None
         user_id: UUID | None = None
         folder_id: UUID | None = None
         fs_path: str | None = None
    @@ -269,16 +279,4 @@ class FlowUpdate(SQLModel):
         @field_validator("endpoint_name")
         @classmethod
         def validate_endpoint_name(cls, v):
    -        # Endpoint name must be a string containing only letters, numbers, hyphens, and underscores
    -        if v is not None:
    -            if not isinstance(v, str):
    -                raise HTTPException(
    -                    status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
    -                    detail="Endpoint name must be a string",
    -                )
    -            if not re.match(r"^[a-zA-Z0-9_-]+$", v):
    -                raise HTTPException(
    -                    status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
    -                    detail="Endpoint name must contain only letters, numbers, hyphens, and underscores",
    -                )
    -        return v
    +        return _validate_endpoint_name_value(v)
    
  • src/backend/tests/conftest.py+1 1 modified
    @@ -577,7 +577,7 @@ async def flow(
         loaded_json = json.loads(json_flow)
         flow_data = FlowCreate(name="test_flow", data=loaded_json.get("data"), user_id=active_user.id)
     
    -    flow = Flow.model_validate(flow_data)
    +    flow = Flow.model_validate(flow_data.model_dump(exclude={"id"}))
         async with session_scope() as session:
             session.add(flow)
             await session.flush()
    
  • src/backend/tests/unit/api/utils/test_export_normalisation.py+263 0 added
    @@ -0,0 +1,263 @@
    +"""Unit tests for normalize_flow_for_export / normalize_code_for_import.
    +
    +These tests run entirely in-process — no database or HTTP server required.
    +"""
    +
    +from __future__ import annotations
    +
    +import copy
    +import json
    +
    +from langflow.api.utils.core import normalize_code_for_import, normalize_flow_for_export
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +# split("\n") keeps trailing newlines as a final empty string, making the
    +# round-trip lossless.  splitlines() would drop the trailing "\n".
    +_CODE_STRING = "def build(self):\n    return self.input\n"
    +_CODE_LINES = ["def build(self):", "    return self.input", ""]
    +
    +
    +def _make_flow(*, extra_top: dict | None = None, code_value: str | list | None = _CODE_STRING) -> dict:
    +    """Return a minimal flow dict that exercises all normalisation paths."""
    +    flow: dict = {
    +        "id": "17050493-96dd-4dc9-ba42-4bd0075fd23d",
    +        "name": "Test Flow",
    +        "description": "A test flow",
    +        "data": {
    +            "nodes": [
    +                {
    +                    "id": "node-1",
    +                    "position": {"x": 100, "y": 200},
    +                    "positionAbsolute": {"x": 100, "y": 200},
    +                    "dragging": False,
    +                    "selected": True,
    +                    "data": {
    +                        "id": "node-1",
    +                        "type": "PythonFunctionComponent",
    +                        "node": {
    +                            "template": {
    +                                "code": {
    +                                    "type": "code",
    +                                    "value": code_value,
    +                                    "name": "code",
    +                                },
    +                                "api_key": {
    +                                    "type": "str",
    +                                    "value": "sk-secret",
    +                                    "name": "api_key",
    +                                    "password": True,
    +                                },
    +                            }
    +                        },
    +                    },
    +                }
    +            ],
    +            "edges": [],
    +        },
    +        "updated_at": "2024-01-01T12:00:00Z",
    +        "created_at": "2023-06-15T08:00:00Z",
    +        "user_id": "user-uuid-123",
    +        "folder_id": "folder-uuid-456",
    +        "access_type": "PRIVATE",
    +        "gradient": "linear-gradient(to right, #f00, #00f)",
    +        "is_component": False,
    +        "endpoint_name": "test-flow",
    +    }
    +    if extra_top:
    +        flow.update(extra_top)
    +    return flow
    +
    +
    +# ---------------------------------------------------------------------------
    +# normalize_flow_for_export — volatile field stripping
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestNormaliseVolatileFields:
    +    def test_strips_updated_at(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert "updated_at" not in result
    +
    +    def test_strips_created_at(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert "created_at" not in result
    +
    +    def test_strips_user_id(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert "user_id" not in result
    +
    +    def test_strips_folder_id(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert "folder_id" not in result
    +
    +    def test_strips_access_type(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert "access_type" not in result
    +
    +    def test_strips_gradient(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert "gradient" not in result
    +
    +    def test_keeps_stable_fields(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        assert result["id"] == "17050493-96dd-4dc9-ba42-4bd0075fd23d"
    +        assert result["name"] == "Test Flow"
    +        assert result["description"] == "A test flow"
    +        assert result["endpoint_name"] == "test-flow"
    +        assert result["is_component"] is False
    +
    +    def test_missing_volatile_fields_do_not_raise(self):
    +        """Calling on a minimal flow without any volatile fields is safe."""
    +        minimal = {"id": "abc", "name": "Minimal", "data": {"nodes": [], "edges": []}}
    +        result = normalize_flow_for_export(minimal)
    +        assert result["name"] == "Minimal"
    +
    +
    +# ---------------------------------------------------------------------------
    +# normalize_flow_for_export — node UI state stripping
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestNormaliseNodeUiState:
    +    def test_strips_position_absolute(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        node = result["data"]["nodes"][0]
    +        assert "positionAbsolute" not in node
    +
    +    def test_strips_dragging(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        node = result["data"]["nodes"][0]
    +        assert "dragging" not in node
    +
    +    def test_strips_selected(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        node = result["data"]["nodes"][0]
    +        assert "selected" not in node
    +
    +    def test_keeps_position(self):
    +        """Position (canvas coords) is kept — only the derived positionAbsolute is stripped."""
    +        result = normalize_flow_for_export(_make_flow())
    +        node = result["data"]["nodes"][0]
    +        assert node["position"] == {"x": 100, "y": 200}
    +
    +
    +# ---------------------------------------------------------------------------
    +# normalize_flow_for_export — code → list-of-lines
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestNormaliseCodeToLines:
    +    def test_code_field_becomes_list(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert isinstance(code_field["value"], list)
    +
    +    def test_code_lines_content(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert code_field["value"] == _CODE_LINES
    +
    +    def test_non_code_field_untouched(self):
    +        result = normalize_flow_for_export(_make_flow())
    +        api_key_field = result["data"]["nodes"][0]["data"]["node"]["template"]["api_key"]
    +        assert api_key_field["value"] == "sk-secret"  # unchanged
    +
    +    def test_already_list_code_unchanged(self):
    +        """If code is already a list (re-export scenario), it should stay as-is."""
    +        result = normalize_flow_for_export(_make_flow(code_value=_CODE_LINES))
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert code_field["value"] == _CODE_LINES
    +
    +    def test_empty_code_string(self):
    +        # "".split("\n") == [""] — a single empty string, not an empty list
    +        result = normalize_flow_for_export(_make_flow(code_value=""))
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert code_field["value"] == [""]  # split("\n") on "" gives [""]
    +
    +    def test_no_data_nodes_safe(self):
    +        flow = {"id": "x", "name": "Empty", "data": {"nodes": [], "edges": []}}
    +        # Must not raise
    +        normalize_flow_for_export(flow)
    +
    +
    +# ---------------------------------------------------------------------------
    +# normalize_flow_for_export — does not mutate the original
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestNormalisationIsNonMutating:
    +    def test_original_unchanged(self):
    +        original = _make_flow()
    +        original_copy = copy.deepcopy(original)
    +        normalize_flow_for_export(original)
    +        assert original == original_copy
    +
    +
    +# ---------------------------------------------------------------------------
    +# normalize_code_for_import — list → string
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestNormaliseCodeForImport:
    +    def test_rejoins_list_to_string(self):
    +        flow = _make_flow(code_value=_CODE_LINES)
    +        result = normalize_code_for_import(flow)
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert isinstance(code_field["value"], str)
    +        assert code_field["value"] == "\n".join(_CODE_LINES)
    +
    +    def test_string_format_passthrough(self):
    +        """Legacy string format must survive import normalisation unchanged."""
    +        flow = _make_flow(code_value=_CODE_STRING)
    +        result = normalize_code_for_import(flow)
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert code_field["value"] == _CODE_STRING
    +
    +    def test_none_code_value_passthrough(self):
    +        flow = _make_flow(code_value=None)
    +        result = normalize_code_for_import(flow)
    +        code_field = result["data"]["nodes"][0]["data"]["node"]["template"]["code"]
    +        assert code_field["value"] is None
    +
    +    def test_original_unchanged(self):
    +        flow = _make_flow(code_value=_CODE_LINES)
    +        original_copy = copy.deepcopy(flow)
    +        normalize_code_for_import(flow)
    +        assert flow == original_copy
    +
    +
    +# ---------------------------------------------------------------------------
    +# Round-trip: export → import restores code strings
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestRoundTrip:
    +    def test_code_round_trips_to_identical_string(self):
    +        original = _make_flow()
    +        exported = normalize_flow_for_export(original)
    +        imported = normalize_code_for_import(exported)
    +        code_orig = original["data"]["nodes"][0]["data"]["node"]["template"]["code"]["value"]
    +        code_trip = imported["data"]["nodes"][0]["data"]["node"]["template"]["code"]["value"]
    +        assert code_trip == code_orig
    +
    +    def test_two_exports_byte_identical(self):
    +        """Consecutive exports of the same flow must produce identical JSON bytes."""
    +        import orjson
    +
    +        flow = _make_flow()
    +        export1 = orjson.dumps(normalize_flow_for_export(flow), option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2)
    +        export2 = orjson.dumps(normalize_flow_for_export(flow), option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2)
    +        assert export1 == export2
    +
    +    def test_sorted_keys_top_level(self):
    +        """All top-level keys must be in lexicographic order after export."""
    +        import orjson
    +
    +        flow = _make_flow()
    +        raw = orjson.dumps(normalize_flow_for_export(flow), option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2).decode()
    +        parsed = json.loads(raw)
    +        top_keys = list(parsed.keys())
    +        assert top_keys == sorted(top_keys)
    
  • src/backend/tests/unit/api/utils/test_preserve_flow_id.py+208 0 added
    @@ -0,0 +1,208 @@
    +"""Unit tests for stable flow-ID preservation on import/upload.
    +
    +Tests cover the FlowCreate schema change and the three upsert branches
    +in the upload endpoint logic. No database or HTTP server is required:
    +the schema tests construct models directly, and the branch tests
    +simulate the endpoint's branch selection using unittest.mock stand-ins.
    +"""
    +
    +from __future__ import annotations
    +
    +from unittest.mock import AsyncMock, MagicMock, patch
    +from uuid import UUID, uuid4
    +
    +import pytest
    +from langflow.services.database.models.flow.model import FlowCreate
    +
    +# ---------------------------------------------------------------------------
    +# FlowCreate schema
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFlowCreateSchema:
    +    def test_id_field_is_optional(self):
    +        """FlowCreate must accept construction without an id."""
    +        flow = FlowCreate(name="No ID Flow")
    +        assert flow.id is None
    +
    +    def test_id_field_accepts_uuid(self):
    +        """FlowCreate must store a provided UUID verbatim."""
    +        stable_id = uuid4()
    +        flow = FlowCreate(name="With ID", id=stable_id)
    +        assert flow.id == stable_id
    +
    +    def test_id_field_accepts_uuid_string(self):
    +        """FlowCreate must coerce a UUID string to a UUID object."""
    +        stable_id = uuid4()
    +        flow = FlowCreate(name="With String ID", id=str(stable_id))
    +        assert flow.id == stable_id
    +
    +    def test_id_from_dict_roundtrip(self):
    +        """Simulates parsing a downloaded flow JSON that contains an id."""
    +        stable_id = uuid4()
    +        raw = {
    +            "id": str(stable_id),
    +            "name": "Exported Flow",
    +            "data": {"nodes": [], "edges": []},
    +        }
    +        flow = FlowCreate(**raw)
    +        assert flow.id == stable_id
    +        assert flow.name == "Exported Flow"
    +
    +    def test_no_id_in_dict_gives_none(self):
    +        """Existing flow dicts without id must still parse cleanly."""
    +        raw = {"name": "Legacy Flow", "data": {"nodes": [], "edges": []}}
    +        flow = FlowCreate(**raw)
    +        assert flow.id is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# Upload endpoint — upsert branch selection
    +# ---------------------------------------------------------------------------
    +# We test the branching logic by directly calling the three paths as the
    +# endpoint itself would, verifying correct helper selection without spinning
    +# up a real server or database.
    +
    +
    +def _make_flow_create(flow_id: UUID | None = None, name: str = "Test Flow") -> FlowCreate:
    +    return FlowCreate(id=flow_id, name=name, data={"nodes": [], "edges": []})
    +
    +
    +def _make_existing_flow(flow_id: UUID, user_id: UUID) -> MagicMock:
    +    """Return a mock Flow ORM object."""
    +    m = MagicMock()
    +    m.id = flow_id
    +    m.user_id = user_id
    +    return m
    +
    +
    +class TestUploadUpsertBranches:
    +    """Test the three upsert branches introduced in the upload endpoint."""
    +
    +    @pytest.mark.asyncio
    +    async def test_no_id_calls_new_flow(self):
    +        """Flow without id → _new_flow called with no flow_id kwarg."""
    +        flow = _make_flow_create(flow_id=None)
    +        current_user = MagicMock()
    +        current_user.id = uuid4()
    +
    +        new_flow_mock = AsyncMock(return_value=MagicMock())
    +
    +        # Simulate the branch directly
    +        if flow.id is not None:
    +            pytest.fail("Should have taken the no-id branch")
    +
    +        with patch("langflow.api.v1.flows._new_flow", new_flow_mock):
    +            await new_flow_mock(session=None, flow=flow, user_id=current_user.id, storage_service=None)
    +
    +        new_flow_mock.assert_awaited_once()
    +        call_kwargs = new_flow_mock.call_args.kwargs
    +        # No flow_id should be passed (the endpoint mints a fresh UUID)
    +        assert "flow_id" not in call_kwargs or call_kwargs["flow_id"] is None
    +
    +    @pytest.mark.asyncio
    +    async def test_new_id_not_in_db_calls_new_flow_with_id(self):
    +        """Flow with id that doesn't exist → _new_flow called with flow_id."""
    +        stable_id = uuid4()
    +        flow = _make_flow_create(flow_id=stable_id)
    +        current_user = MagicMock()
    +        current_user.id = uuid4()
    +
    +        new_flow_mock = AsyncMock(return_value=MagicMock())
    +
    +        # existing = None (not in DB)
    +        existing = None
    +
    +        # Branch: id present, existing is None → CREATE with stable id
    +        assert flow.id is not None
    +        assert existing is None
    +
    +        with patch("langflow.api.v1.flows._new_flow", new_flow_mock):
    +            await new_flow_mock(
    +                session=None,
    +                flow=flow,
    +                user_id=current_user.id,
    +                storage_service=None,
    +                flow_id=flow.id,
    +            )
    +
    +        call_kwargs = new_flow_mock.call_args.kwargs
    +        assert call_kwargs["flow_id"] == stable_id
    +
    +    @pytest.mark.asyncio
    +    async def test_id_owned_by_same_user_calls_update(self):
    +        """Flow id exists and belongs to current user → update path."""
    +        stable_id = uuid4()
    +        user_id = uuid4()
    +        flow = _make_flow_create(flow_id=stable_id)
    +        existing = _make_existing_flow(stable_id, user_id)
    +
    +        current_user = MagicMock()
    +        current_user.id = user_id
    +
    +        update_mock = AsyncMock(return_value=MagicMock())
    +
    +        # Branch: id present, existing owned by current user → UPDATE
    +        assert flow.id is not None
    +        assert existing is not None
    +        assert existing.user_id == current_user.id
    +
    +        with patch("langflow.api.v1.flows._update_existing_flow", update_mock):
    +            await update_mock(
    +                session=None,
    +                existing_flow=existing,
    +                flow=flow,
    +                current_user=current_user,
    +                storage_service=None,
    +            )
    +
    +        update_mock.assert_awaited_once()
    +
    +    @pytest.mark.asyncio
    +    async def test_id_owned_by_other_user_clears_id(self):
    +        """Flow id exists but belongs to another user → id cleared, new UUID minted."""
    +        stable_id = uuid4()
    +        owner_id = uuid4()
    +        requester_id = uuid4()
    +
    +        flow = _make_flow_create(flow_id=stable_id)
    +        existing = _make_existing_flow(stable_id, owner_id)
    +
    +        current_user = MagicMock()
    +        current_user.id = requester_id
    +
    +        # Branch: id present, existing owned by different user → clear id, new_flow
    +        assert flow.id is not None
    +        assert existing is not None
    +        assert existing.user_id != current_user.id
    +
    +        # Simulate the branch behaviour
    +        flow.id = None  # as the endpoint does
    +
    +        new_flow_mock = AsyncMock(return_value=MagicMock())
    +        with patch("langflow.api.v1.flows._new_flow", new_flow_mock):
    +            await new_flow_mock(session=None, flow=flow, user_id=current_user.id, storage_service=None)
    +
    +        # After clearing, flow.id must be None so a fresh UUID is generated
    +        assert flow.id is None
    +        call_kwargs = new_flow_mock.call_args.kwargs
    +        assert "flow_id" not in call_kwargs or call_kwargs.get("flow_id") is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# PUT /flows/{id} endpoint schema visibility
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestUpsertEndpointVisibility:
    +    def test_upsert_route_in_openapi_schema(self):
    +        """PUT /flows/{flow_id} must be visible in the OpenAPI schema."""
    +        from langflow.api.v1.flows import router
    +
    +        put_routes = [r for r in router.routes if "PUT" in getattr(r, "methods", set())]
    +        upsert_routes = [r for r in put_routes if "{flow_id}" in getattr(r, "path", "")]
    +
    +        assert upsert_routes, "No PUT /{flow_id} route found"
    +        upsert_route = upsert_routes[0]
    +        # include_in_schema defaults to True; check it is not False
    +        assert getattr(upsert_route, "include_in_schema", True) is True
    
  • src/backend/tests/unit/api/v1/test_files.py+1 1 modified
    @@ -136,7 +136,7 @@ async def files_flow(
     ):
         loaded_json = json.loads(json_flow)
         flow_data = FlowCreate(name="test_flow", data=loaded_json.get("data"), user_id=files_active_user.id)
    -    flow = Flow.model_validate(flow_data)
    +    flow = Flow.model_validate(flow_data.model_dump(exclude={"id"}))
         async with session_scope() as session:
             session.add(flow)
             await session.flush()
    
  • src/backend/tests/unit/api/v1/test_flows_helpers.py+81 0 added
    @@ -0,0 +1,81 @@
    +"""Focused unit tests for flow helper branches that are hard to force via HTTP."""
    +
    +from unittest.mock import AsyncMock, MagicMock, patch
    +from uuid import uuid4
    +
    +import anyio
    +import pytest
    +from fastapi import HTTPException
    +from langflow.api.v1.flows_helpers import _new_flow, _save_flow_to_fs
    +from langflow.services.database.models.flow.model import Flow, FlowCreate
    +from langflow.services.database.models.user.model import User
    +from langflow.services.storage.service import StorageService
    +
    +
    +@pytest.fixture
    +def storage_service(tmp_path):
    +    """Create a mock storage service with a temporary data directory."""
    +    service = MagicMock(spec=StorageService)
    +    service.data_dir = anyio.Path(tmp_path)
    +    return service
    +
    +
    +@pytest.fixture
    +async def current_user(async_session):
    +    """Create a user that can own flows in helper-level tests."""
    +    password = f"password-{uuid4()}"
    +    user = User(
    +        username=f"flow-helper-{uuid4()}",
    +        password=password,
    +        is_active=True,
    +    )
    +    async_session.add(user)
    +    await async_session.commit()
    +    await async_session.refresh(user)
    +    return user
    +
    +
    +@pytest.mark.asyncio
    +async def test_new_flow_with_validate_folder_rejects_unknown_folder(async_session, current_user, storage_service):
    +    """Test that validate_folder rejects a folder that does not belong to the user."""
    +    flow = FlowCreate(
    +        name="flow-with-bad-folder",
    +        data={},
    +        folder_id=uuid4(),
    +    )
    +
    +    with (
    +        patch("langflow.api.v1.flows_helpers.get_default_folder_id", new=AsyncMock()) as mock_default_folder_id,
    +        pytest.raises(HTTPException) as exc_info,
    +    ):
    +        await _new_flow(
    +            session=async_session,
    +            flow=flow,
    +            user_id=current_user.id,
    +            storage_service=storage_service,
    +            validate_folder=True,
    +        )
    +
    +    assert exc_info.value.status_code == 400
    +    assert exc_info.value.detail == "Folder not found"
    +    mock_default_folder_id.assert_not_awaited()
    +
    +
    +@pytest.mark.asyncio
    +async def test_save_flow_to_fs_returns_500_on_os_error(current_user, storage_service):
    +    """Test that filesystem write errors surface as an HTTP 500."""
    +    flow = Flow(
    +        name="flow-write-error",
    +        data={},
    +        user_id=current_user.id,
    +        fs_path="nested/flow.json",
    +    )
    +
    +    with (
    +        patch("langflow.api.v1.flows_helpers.async_open", side_effect=OSError("disk full")),
    +        pytest.raises(HTTPException) as exc_info,
    +    ):
    +        await _save_flow_to_fs(flow, current_user.id, storage_service)
    +
    +    assert exc_info.value.status_code == 500
    +    assert "disk full" in exc_info.value.detail
    
  • src/backend/tests/unit/api/v1/test_flows.py+111 0 modified
    @@ -151,6 +151,75 @@ async def test_update_flow(client: AsyncClient, logged_in_headers):
         assert result["name"] == updated_name, "The name must be updated"
     
     
    +async def test_patch_flow_keeps_existing_endpoint_when_not_provided(client: AsyncClient, logged_in_headers):
    +    """Test that PATCH preserves endpoint_name when the field is omitted."""
    +    initial_flow = {
    +        "name": "patch_endpoint_flow",
    +        "endpoint_name": "keep_patch_endpoint",
    +        "data": {},
    +    }
    +    create_response = await client.post("api/v1/flows/", json=initial_flow, headers=logged_in_headers)
    +    assert create_response.status_code == status.HTTP_201_CREATED
    +    flow_id = create_response.json()["id"]
    +
    +    response = await client.patch(
    +        f"api/v1/flows/{flow_id}",
    +        json={"name": "patch_endpoint_flow_updated"},
    +        headers=logged_in_headers,
    +    )
    +
    +    assert response.status_code == status.HTTP_200_OK
    +    assert response.json()["endpoint_name"] == "keep_patch_endpoint"
    +
    +
    +async def test_patch_flow_allows_clearing_endpoint_with_null(client: AsyncClient, logged_in_headers):
    +    """Test that PATCH clears endpoint_name when it is explicitly set to null."""
    +    initial_flow = {
    +        "name": "patch_clear_endpoint_flow",
    +        "endpoint_name": "clear_patch_endpoint",
    +        "data": {},
    +    }
    +    create_response = await client.post("api/v1/flows/", json=initial_flow, headers=logged_in_headers)
    +    assert create_response.status_code == status.HTTP_201_CREATED
    +    flow_id = create_response.json()["id"]
    +
    +    response = await client.patch(
    +        f"api/v1/flows/{flow_id}",
    +        json={"endpoint_name": None},
    +        headers=logged_in_headers,
    +    )
    +
    +    assert response.status_code == status.HTTP_200_OK
    +    assert response.json()["endpoint_name"] is None
    +
    +
    +async def test_patch_flow_updates_access_and_action_fields(client: AsyncClient, logged_in_headers):
    +    """PATCH should persist public-sharing and MCP action metadata fields."""
    +    create_response = await client.post(
    +        "api/v1/flows/",
    +        json={"name": "patch_access_type_flow", "data": {}},
    +        headers=logged_in_headers,
    +    )
    +    assert create_response.status_code == status.HTTP_201_CREATED
    +    flow_id = create_response.json()["id"]
    +
    +    response = await client.patch(
    +        f"api/v1/flows/{flow_id}",
    +        json={
    +            "access_type": "PUBLIC",
    +            "action_name": "shared_action",
    +            "action_description": "Shared flow action",
    +        },
    +        headers=logged_in_headers,
    +    )
    +
    +    assert response.status_code == status.HTTP_200_OK
    +    result = response.json()
    +    assert result["access_type"] == "PUBLIC"
    +    assert result["action_name"] == "shared_action"
    +    assert result["action_description"] == "Shared flow action"
    +
    +
     async def test_create_flows(client: AsyncClient, logged_in_headers):
         amount_flows = 10
         basic_case = {
    @@ -718,6 +787,48 @@ async def test_upsert_flow_keeps_existing_folder_on_update_when_not_provided(cli
         assert result["folder_id"] == original_folder_id  # Folder unchanged
     
     
    +async def test_upsert_flow_keeps_existing_endpoint_when_not_provided(client: AsyncClient, logged_in_headers):
    +    """Test that PUT preserves endpoint_name when it is omitted during update."""
    +    initial_flow = {
    +        "name": "upsert_endpoint_flow",
    +        "endpoint_name": "keep_upsert_endpoint",
    +        "data": {},
    +    }
    +    create_response = await client.post("api/v1/flows/", json=initial_flow, headers=logged_in_headers)
    +    assert create_response.status_code == status.HTTP_201_CREATED
    +    flow_id = create_response.json()["id"]
    +
    +    response = await client.put(
    +        f"api/v1/flows/{flow_id}",
    +        json={"name": "upsert_endpoint_flow_updated", "data": {}},
    +        headers=logged_in_headers,
    +    )
    +
    +    assert response.status_code == status.HTTP_200_OK
    +    assert response.json()["endpoint_name"] == "keep_upsert_endpoint"
    +
    +
    +async def test_upsert_flow_allows_clearing_endpoint_with_null(client: AsyncClient, logged_in_headers):
    +    """Test that PUT clears endpoint_name when it is explicitly set to null."""
    +    initial_flow = {
    +        "name": "upsert_clear_endpoint_flow",
    +        "endpoint_name": "clear_upsert_endpoint",
    +        "data": {},
    +    }
    +    create_response = await client.post("api/v1/flows/", json=initial_flow, headers=logged_in_headers)
    +    assert create_response.status_code == status.HTTP_201_CREATED
    +    flow_id = create_response.json()["id"]
    +
    +    response = await client.put(
    +        f"api/v1/flows/{flow_id}",
    +        json={"name": "upsert_clear_endpoint_flow", "endpoint_name": None, "data": {}},
    +        headers=logged_in_headers,
    +    )
    +
    +    assert response.status_code == status.HTTP_200_OK
    +    assert response.json()["endpoint_name"] is None
    +
    +
     async def test_upsert_flow_ignores_user_id_from_body(client: AsyncClient, logged_in_headers, active_user):
         """Test that PUT ignores user_id from body and uses current user."""
         specified_id = str(uuid.uuid4())
    
  • src/backend/tests/unit/api/v1/test_projects.py+177 101 modified
    @@ -37,6 +37,32 @@ async def test_create_project(client: AsyncClient, logged_in_headers, basic_case
         assert "parent_id" in result, "The dictionary must contain a key called 'parent_id'"
     
     
    +async def test_create_project_duplicate_name_escapes_like_wildcards(client: AsyncClient, logged_in_headers):
    +    unrelated = {
    +        "name": "proj_a (7)",
    +        "description": "",
    +        "flows_list": [],
    +        "components_list": [],
    +    }
    +    wildcard = {
    +        "name": "proj_%",
    +        "description": "",
    +        "flows_list": [],
    +        "components_list": [],
    +    }
    +
    +    response = await client.post("api/v1/projects/", json=unrelated, headers=logged_in_headers)
    +    assert response.status_code == status.HTTP_201_CREATED
    +
    +    response = await client.post("api/v1/projects/", json=wildcard, headers=logged_in_headers)
    +    assert response.status_code == status.HTTP_201_CREATED
    +    assert response.json()["name"] == "proj_%"
    +
    +    response = await client.post("api/v1/projects/", json=wildcard, headers=logged_in_headers)
    +    assert response.status_code == status.HTTP_201_CREATED
    +    assert response.json()["name"] == "proj_% (1)"
    +
    +
     async def test_read_projects(client: AsyncClient, logged_in_headers):
         response = await client.get("api/v1/projects/", headers=logged_in_headers)
         result = response.json()
    @@ -464,11 +490,11 @@ async def test_create_project_with_mcp_auto_add_enabled_success(
             """Test successful project creation with MCP server auto-add."""
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.create_api_key") as mock_create_api_key,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key") as mock_create_api_key,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks
                 mock_streamable_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/streamable"
    @@ -514,12 +540,12 @@ async def test_create_project_with_mcp_auto_add_enabled_success_legacy_sse(
             """Legacy SSE test for project creation with MCP server auto-add."""
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.create_api_key") as mock_create_api_key,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key") as mock_create_api_key,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks
                 mock_sse_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/sse"
    @@ -566,9 +592,9 @@ async def test_create_project_with_mcp_server_conflict(
             """Test project creation failure due to MCP server name conflict."""
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks
                 mock_streamable_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/streamable"
    @@ -611,10 +637,10 @@ async def test_create_project_with_mcp_server_conflict_legacy_sse(
             """Legacy SSE test verifying project creation failure due to MCP server name conflict."""
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks
                 mock_sse_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/sse"
    @@ -659,9 +685,9 @@ async def test_create_project_oauth_not_implemented(
             oauth_case["auth_settings"] = {"auth_type": "oauth"}
     
             with (
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks to trigger OAuth path
                 mock_streamable_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/streamable"
    @@ -691,10 +717,10 @@ async def test_create_project_oauth_not_implemented_legacy_sse(
             oauth_case["auth_settings"] = {"auth_type": "oauth"}
     
             with (
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks to trigger OAuth path
                 mock_sse_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/sse"
    @@ -724,11 +750,11 @@ async def test_update_project_name_with_mcp_server_update(
             # First create a project
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url"),
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url"),
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -750,9 +776,9 @@ async def test_update_project_name_with_mcp_server_update(
     
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -797,12 +823,12 @@ async def test_update_project_name_with_mcp_server_update_legacy_sse(
             # First create a project
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -827,9 +853,9 @@ async def test_update_project_name_with_mcp_server_update_legacy_sse(
     
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -874,11 +900,11 @@ async def test_update_project_name_with_mcp_conflict(
             # Create project first
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url"),
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url"),
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -900,8 +926,8 @@ async def test_update_project_name_with_mcp_conflict(
     
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -940,12 +966,12 @@ async def test_update_project_name_with_mcp_conflict_legacy_sse(
             # Create project first
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -970,8 +996,8 @@ async def test_update_project_name_with_mcp_conflict_legacy_sse(
     
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1010,11 +1036,11 @@ async def test_delete_project_with_mcp_server_cleanup(
             # Create project first
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url"),
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url"),
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1034,9 +1060,9 @@ async def test_delete_project_with_mcp_server_cleanup(
             # Delete the project
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1074,12 +1100,12 @@ async def test_delete_project_with_mcp_server_cleanup_legacy_sse(
             # Create project first
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1102,9 +1128,9 @@ async def test_delete_project_with_mcp_server_cleanup_legacy_sse(
             # Delete the project
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1142,11 +1168,11 @@ async def test_delete_project_mcp_server_different_project(
             # Create project first
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url"),
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url"),
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1166,9 +1192,9 @@ async def test_delete_project_mcp_server_different_project(
             # Delete the project
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1203,12 +1229,12 @@ async def test_delete_project_mcp_server_different_project_legacy_sse(
             # Create project first
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate_create,
    -            patch("langflow.api.v1.projects.update_server"),
    -            patch("langflow.api.v1.projects.create_api_key"),
    -            patch("langflow.api.v1.projects.get_storage_service"),
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate_create,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server"),
    +            patch("langflow.api.v1.projects_mcp_helpers.create_api_key"),
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service"),
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1231,9 +1257,9 @@ async def test_delete_project_mcp_server_different_project_legacy_sse(
             # Delete the project
             with (
                 patch("langflow.api.v1.projects.get_settings_service") as mock_get_settings,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.update_server") as mock_update_server,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.update_server") as mock_update_server,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Mock settings to enable MCP auto-add
                 mock_settings = MagicMock()
    @@ -1285,9 +1311,9 @@ async def test_project_mcp_exception_handling(
         ):
             """Test that MCP exceptions during project creation don't prevent project creation."""
             with (
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks
                 mock_streamable_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/streamable"
    @@ -1313,10 +1339,10 @@ async def test_project_mcp_exception_handling_legacy_sse(
         ):
             """Legacy SSE test ensuring MCP exceptions don't block project creation."""
             with (
    -            patch("langflow.api.v1.projects.get_project_sse_url") as mock_sse_url,
    -            patch("langflow.api.v1.projects.get_project_streamable_http_url") as mock_streamable_url,
    -            patch("langflow.api.v1.projects.validate_mcp_server_for_project") as mock_validate,
    -            patch("langflow.api.v1.projects.get_storage_service") as mock_storage,
    +            patch("langflow.api.v1.mcp_projects.get_project_sse_url") as mock_sse_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_project_streamable_http_url") as mock_streamable_url,
    +            patch("langflow.api.v1.projects_mcp_helpers.validate_mcp_server_for_project") as mock_validate,
    +            patch("langflow.api.v1.projects_mcp_helpers.get_storage_service") as mock_storage,
             ):
                 # Setup mocks
                 mock_sse_url.return_value = "http://localhost:7860/api/v1/mcp/project/test-id/sse"
    @@ -1620,7 +1646,7 @@ async def test_download_file_starter_project(client: AsyncClient, logged_in_head
                     folder_id=starter_project_id,
                     user_id=active_user.id,
                 )
    -            flow = Flow.model_validate(flow_create, from_attributes=True)
    +            flow = Flow.model_validate(flow_create.model_dump(exclude={"id"}))
                 session.add(flow)
                 flows_created.append(flow)
     
    @@ -1632,7 +1658,7 @@ async def test_download_file_starter_project(client: AsyncClient, logged_in_head
                 folder_id=starter_project_id,
                 user_id=active_user.id,
             )
    -        flow_note = Flow.model_validate(flow_create_note, from_attributes=True)
    +        flow_note = Flow.model_validate(flow_create_note.model_dump(exclude={"id"}))
             session.add(flow_note)
             flows_created.append(flow_note)
     
    @@ -1692,7 +1718,6 @@ async def test_download_file_starter_project(client: AsyncClient, logged_in_head
             assert "data" in note_flow_json
             assert "nodes" in note_flow_json["data"]
             assert len(note_flow_json["data"]["nodes"]) == 2
    -
             # Find the API node and verify API key was removed
             api_node = None
             note_node = None
    @@ -1719,3 +1744,54 @@ async def test_download_file_starter_project(client: AsyncClient, logged_in_head
         # Clean up: delete the project (which will cascade delete flows)
         delete_response = await client.delete(f"api/v1/projects/{starter_project_id}", headers=logged_in_headers)
         assert delete_response.status_code == status.HTTP_204_NO_CONTENT
    +
    +
    +async def test_download_project_missing_returns_404(client: AsyncClient, logged_in_headers):
    +    response = await client.get(f"api/v1/projects/download/{uuid4()}", headers=logged_in_headers)
    +
    +    assert response.status_code == status.HTTP_404_NOT_FOUND
    +    assert response.json()["detail"] == "Project not found"
    +
    +
    +async def test_download_project_with_no_flows_returns_404(client: AsyncClient, logged_in_headers, basic_case):
    +    response = await client.post("api/v1/projects/", json=basic_case, headers=logged_in_headers)
    +    assert response.status_code == status.HTTP_201_CREATED
    +    project_id = response.json()["id"]
    +
    +    download_response = await client.get(f"api/v1/projects/download/{project_id}", headers=logged_in_headers)
    +
    +    assert download_response.status_code == status.HTTP_404_NOT_FOUND
    +    assert download_response.json()["detail"] == "No flows found in project"
    +
    +
    +async def test_download_project_sanitizes_windows_path_characters(
    +    client: AsyncClient, logged_in_headers, basic_case, active_user
    +):
    +    create_response = await client.post("api/v1/projects/", json=basic_case, headers=logged_in_headers)
    +    assert create_response.status_code == status.HTTP_201_CREATED
    +    project_id = create_response.json()["id"]
    +
    +    async with session_scope() as session:
    +        flow_create = FlowCreate(
    +            name=r"..\evil\flow",
    +            description="Flow with unsafe filename characters",
    +            data={"nodes": [], "edges": []},
    +            folder_id=project_id,
    +            user_id=active_user.id,
    +        )
    +        flow = Flow.model_validate(flow_create.model_dump(exclude={"id"}))
    +        session.add(flow)
    +        await session.flush()
    +        await session.refresh(flow)
    +        await session.commit()
    +
    +    response = await client.get(f"api/v1/projects/download/{project_id}", headers=logged_in_headers)
    +    assert response.status_code == status.HTTP_200_OK
    +
    +    with zipfile.ZipFile(io.BytesIO(response.content), "r") as zip_file:
    +        file_names = zip_file.namelist()
    +        assert len(file_names) == 1
    +        assert "/" not in file_names[0]
    +        assert "\\" not in file_names[0]
    +        assert ".." not in file_names[0]
    +        assert file_names[0].endswith(".json")
    
  • src/backend/tests/unit/test_database.py +136 −1 modified
    @@ -545,7 +545,7 @@ async def test_download_file(
             saved_flows = []
             for flow in flow_list.flows:
                 flow.user_id = active_user.id
    -            db_flow = Flow.model_validate(flow, from_attributes=True)
    +            db_flow = Flow.model_validate(flow.model_dump(exclude={"id"}))
                 _session.add(db_flow)
                 saved_flows.append(db_flow)
             await _session.commit()
    @@ -565,6 +565,56 @@ async def test_download_file(
         assert "attachment; filename=" in response.headers["Content-Disposition"]
     
     
    +@pytest.mark.usefixtures("session")
    +async def test_download_single_flow_returns_normalized_json(client: AsyncClient, logged_in_headers):
    +    """Downloading a single flow returns normalized JSON rather than a ZIP archive."""
    +    code_value = "print('hello')\nprint('world')"
    +    flow_payload = FlowCreate(
    +        name=str(uuid4()),
    +        description="single flow export",
    +        data={
    +            "nodes": [
    +                {
    +                    "id": "node-1",
    +                    "data": {
    +                        "node": {
    +                            "template": {
    +                                "code": {"type": "code", "value": code_value},
    +                                "api_key": {"name": "api_key", "password": True, "value": "super-secret"},
    +                            }
    +                        }
    +                    },
    +                }
    +            ],
    +            "edges": [],
    +        },
    +    )
    +
    +    create_response = await client.post("api/v1/flows/", json=flow_payload.model_dump(), headers=logged_in_headers)
    +    assert create_response.status_code == 201
    +    flow_id = create_response.json()["id"]
    +
    +    download_response = await client.post(
    +        "api/v1/flows/download/",
    +        data=json.dumps([flow_id]),
    +        headers={**logged_in_headers, "Content-Type": "application/json"},
    +    )
    +    assert download_response.status_code == 200
    +    assert download_response.headers["Content-Type"].startswith("application/json")
    +
    +    downloaded = download_response.json()
    +    assert downloaded["name"] == flow_payload.name
    +    assert "updated_at" not in downloaded
    +    assert "user_id" not in downloaded
    +    assert "folder_id" not in downloaded
    +    assert "access_type" not in downloaded
    +    assert downloaded["data"]["nodes"][0]["data"]["node"]["template"]["code"]["value"] == [
    +        "print('hello')",
    +        "print('world')",
    +    ]
    +    assert downloaded["data"]["nodes"][0]["data"]["node"]["template"]["api_key"]["value"] is None
    +
    +
     @pytest.mark.usefixtures("session")
     async def test_upload_zip_file_to_flows(client: AsyncClient, json_flow: str, logged_in_headers):
         """Test uploading a ZIP file containing flow JSONs to the flows upload endpoint."""
    @@ -671,6 +721,7 @@ async def test_download_then_upload_roundtrip(client: AsyncClient, json_flow: st
             saved_flows = []
             for f in flow_list.flows:
                 f.user_id = active_user.id
    +            f.id = uuid4()
                 db_flow = Flow.model_validate(f, from_attributes=True)
                 _session.add(db_flow)
                 saved_flows.append(db_flow)
    @@ -843,6 +894,90 @@ async def test_upload_zip_to_projects_filename_none(client: AsyncClient, json_fl
         assert project_response.json()["name"].startswith("Imported Project")
     
     
    +@pytest.mark.usefixtures("session")
    +async def test_upload_json_file_to_projects_rejoins_code_lines(client: AsyncClient, logged_in_headers):
    +    """Project JSON upload accepts exported code-as-lines format and stores code as a string."""
    +    project_name = f"JSON Project {uuid4()}"
    +    code_lines = ["print('alpha')", "print('beta')"]
    +    payload = {
    +        "folder_name": project_name,
    +        "folder_description": "json upload",
    +        "flows": [
    +            {
    +                "name": f"Flow {uuid4()}",
    +                "description": "imported from project json",
    +                "data": {
    +                    "nodes": [
    +                        {
    +                            "id": "node-1",
    +                            "data": {
    +                                "node": {
    +                                    "template": {
    +                                        "code": {"type": "code", "value": code_lines},
    +                                    }
    +                                }
    +                            },
    +                        }
    +                    ],
    +                    "edges": [],
    +                },
    +            }
    +        ],
    +    }
    +
    +    response = await client.post(
    +        "api/v1/projects/upload/",
    +        files={"file": ("project.json", json.dumps(payload).encode("utf-8"), "application/json")},
    +        headers=logged_in_headers,
    +    )
    +    assert response.status_code == 201, response.text
    +    response_data = response.json()
    +    assert len(response_data) == 1
    +    assert response_data[0]["folder_id"] is not None
    +    assert (
    +        response_data[0]["data"]["nodes"][0]["data"]["node"]["template"]["code"]["value"]
    +        == "print('alpha')\nprint('beta')"
    +    )
    +
    +    project_response = await client.get(f"api/v1/projects/{response_data[0]['folder_id']}", headers=logged_in_headers)
    +    assert project_response.status_code == 200
    +    assert project_response.json()["name"].startswith(project_name)
    +
    +
    +@pytest.mark.usefixtures("session")
    +async def test_download_project_zip_sanitizes_flow_names(client: AsyncClient, json_flow: str, logged_in_headers):
    +    """Project ZIP downloads must sanitize flow names to prevent Zip Slip paths."""
    +    project_response = await client.post(
    +        "api/v1/projects/",
    +        json={"name": f"Download Project {uuid4()}", "description": "", "flows_list": [], "components_list": []},
    +        headers=logged_in_headers,
    +    )
    +    assert project_response.status_code == 201
    +    project_id = project_response.json()["id"]
    +
    +    flow = orjson.loads(json_flow)
    +    create_response = await client.post(
    +        "api/v1/flows/",
    +        json={
    +            "name": "../escaped-flow",
    +            "description": "path traversal test",
    +            "folder_id": project_id,
    +            "data": flow["data"],
    +            "is_component": False,
    +        },
    +        headers=logged_in_headers,
    +    )
    +    assert create_response.status_code == 201
    +
    +    download_response = await client.get(f"api/v1/projects/download/{project_id}", headers=logged_in_headers)
    +    assert download_response.status_code == 200
    +
    +    with zipfile.ZipFile(io.BytesIO(download_response.content), "r") as zip_file:
    +        file_names = zip_file.namelist()
    +        assert "__escaped-flow.json" in file_names
    +        assert all("/" not in name and ".." not in name for name in file_names)
    +
    +
     @pytest.mark.usefixtures("session")
     async def test_upload_bad_zip_file_returns_400(client: AsyncClient, logged_in_headers):
         """Uploading a corrupt/invalid ZIP file returns 400 with a descriptive error."""
    
  • src/lfx/docker/Dockerfile +4 −1 modified
    @@ -24,9 +24,11 @@ ENV UV_LINK_MODE=copy
     # Workspace root metadata + lockfile
     COPY pyproject.toml uv.lock ./
     
    -# Member's pyproject so uv knows about 'lfx' (no source yet, better cache)
    +# Member pyproject files so uv knows about workspace packages (no source yet, better cache)
     COPY src/lfx/pyproject.toml /app/src/lfx/pyproject.toml
     COPY src/lfx/README.md /app/src/lfx/README.md
    +COPY src/sdk/pyproject.toml /app/src/sdk/pyproject.toml
    +COPY src/sdk/README.md /app/src/sdk/README.md
     
     # Create the venv and install *only* what lfx needs (no dev)
     # We expect some packages to be built from source, so we mount the cache
    @@ -35,6 +37,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     
     # --- Now copy the source (doesn't bust the deps layer) ---
     COPY src/lfx/src /app/src/lfx/src
    +COPY src/sdk/src /app/src/sdk/src
     
     # Install the LFX package into the virtual environment (non-editable)
     RUN --mount=type=cache,target=/root/.cache/uv \
    
  • src/lfx/docker/Dockerfile.dev+4 1 modified
    @@ -27,9 +27,11 @@ RUN apt-get update \
     # Workspace root metadata + lockfile
     COPY pyproject.toml uv.lock ./
     
    -# Member's pyproject so uv knows about workspace packages (no source yet, better cache)
    +# Member pyproject files so uv knows about workspace packages (no source yet, better cache)
     COPY src/lfx/pyproject.toml /app/src/lfx/pyproject.toml
     COPY src/lfx/README.md /app/src/lfx/README.md
    +COPY src/sdk/pyproject.toml /app/src/sdk/pyproject.toml
    +COPY src/sdk/README.md /app/src/sdk/README.md
     COPY src/backend/base/pyproject.toml /app/src/backend/base/pyproject.toml
     COPY src/backend/base/README.md /app/src/backend/base/README.md
     
    @@ -40,6 +42,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     # --- Now copy the source and tests (doesn't bust the deps layer) ---
     COPY src/lfx/src /app/src/lfx/src
     COPY src/lfx/tests /app/src/lfx/tests
    +COPY src/sdk/src /app/src/sdk/src
     
     # Install the LFX package into the virtual environment (editable for dev)
     RUN --mount=type=cache,target=/root/.cache/uv \
    
  • src/lfx/pyproject.toml+8 1 modified
    @@ -33,6 +33,7 @@ dependencies = [
         "passlib>=1.7.4,<2.0.0",
         "pydantic-settings>=2.10.1,<3.0.0",
         "tomli>=2.2.1,<3.0.0",
    +    "pyyaml>=6.0.0,<7.0.0",
         "orjson>=3.11.6,<4.0.0",
         "asyncer>=0.0.8,<1.0.0",
         "structlog>=25.4.0,<26.0.0",
    @@ -44,6 +45,7 @@ dependencies = [
         "pypdf>=6.9.0,<7.0.0",
         "cryptography>=46.0.6",
         "ag-ui-protocol>=0.1.10",
    +    "langflow-sdk>=0.1.0",
         "markitdown>=0.1.4,<2.0.0",
         "setuptools>=80.0.0,<81.0.0",
         "wheel>=0.46.2,<1.0.0",
    @@ -54,6 +56,9 @@ dependencies = [
     lfx = "lfx.__main__:main"
     lfx-mcp = "lfx.mcp.__main__:main"
     
    +[project.entry-points.pytest11]
    +lfx = "lfx.testing"
    +
     [build-system]
     requires = ["hatchling"]
     build-backend = "hatchling.build"
    @@ -72,7 +77,9 @@ markers = [
         "unit: Unit tests",
         "integration: Integration tests",
         "slow: Slow-running tests",
    -    "asyncio: Async tests"
    +    "asyncio: Async tests",
    +    "lfx_env_file(path): path to a .env file loaded before this test's flow execution",
    +    "lfx_timeout(seconds): timeout in seconds for this test's flow execution"
     ]
     
     [dependency-groups]
    
  • src/lfx/src/lfx/cli/_authoring_commands.py+207 0 added
    @@ -0,0 +1,207 @@
    +"""Authoring commands: create, requirements, validate."""
    +
    +import typer
    +
    +
    +def register(app: typer.Typer) -> None:
    +    """Register authoring-stage commands on *app*."""
    +
    +    @app.command(name="create", help="Create a new flow JSON from a built-in template", rich_help_panel="Authoring")
    +    def create_command_wrapper(
    +        name: str = typer.Argument(help="Display name for the new flow (also used as the filename)."),
    +        template: str = typer.Option(
    +            "hello-world",
    +            "--template",
    +            "-t",
    +            help="Template to use. Run with --list to see all available templates.",
    +        ),
    +        output_dir: str = typer.Option(
    +            "flows",
    +            "--output-dir",
    +            "-o",
    +            help="Directory to write the new flow JSON into (created if absent; default: flows/).",
    +        ),
    +        *,
    +        list_templates: bool = typer.Option(
    +            False,
    +            "--list",
    +            "-l",
    +            help="Print available templates and exit.",
    +            is_eager=True,
    +        ),
    +        overwrite: bool = typer.Option(
    +            False,
    +            "--overwrite",
    +            help="Overwrite the destination file if it already exists.",
    +        ),
    +    ) -> None:
    +        """Scaffold a new Langflow flow JSON from a built-in template (lazy-loaded)."""
    +        from pathlib import Path
    +
    +        from lfx.cli.create import create_command, print_templates
    +
    +        if list_templates:
    +            print_templates()
    +            raise typer.Exit(0)
    +
    +        create_command(
    +            name=name,
    +            template=template,
    +            output_dir=Path(output_dir),
    +            overwrite=overwrite,
    +        )
    +
    +    @app.command(
    +        name="requirements",
    +        help="Generate requirements.txt for a flow",
    +        no_args_is_help=True,
    +        rich_help_panel="Authoring",
    +    )
    +    def requirements_command_wrapper(
    +        flow_path: str = typer.Argument(help="Path to the Langflow flow JSON file"),
    +        output: str | None = typer.Option(
    +            None,
    +            "--output",
    +            "-o",
    +            help="Output file path (default: stdout)",
    +        ),
    +        lfx_package: str = typer.Option(
    +            "lfx",
    +            "--lfx-package",
    +            help="Name of the LFX package (default: lfx)",
    +        ),
    +        *,
    +        no_lfx: bool = typer.Option(
    +            False,
    +            "--no-lfx",
    +            help="Exclude the LFX package from output",
    +        ),
    +        no_pin: bool = typer.Option(
    +            False,
    +            "--no-pin",
    +            help="Do not pin package versions (default: pin to currently installed versions)",
    +        ),
    +    ) -> None:
    +        """Generate requirements.txt from a Langflow flow JSON (lazy-loaded)."""
    +        import json
    +        from pathlib import Path
    +
    +        from lfx.utils.flow_requirements import generate_requirements_txt
    +
    +        path = Path(flow_path)
    +        if not path.is_file():
    +            typer.echo(f"Error: File not found: {path}", err=True)
    +            raise typer.Exit(1)
    +
    +        try:
    +            flow = json.loads(path.read_text(encoding="utf-8"))
    +        except (json.JSONDecodeError, OSError) as e:
    +            typer.echo(f"Error: Could not read flow JSON: {e}", err=True)
    +            raise typer.Exit(1) from e
    +
    +        content = generate_requirements_txt(
    +            flow,
    +            lfx_package=lfx_package,
    +            include_lfx=not no_lfx,
    +            pin_versions=not no_pin,
    +        )
    +
    +        if output:
    +            try:
    +                Path(output).write_text(content, encoding="utf-8")
    +            except OSError as e:
    +                typer.echo(f"Error: Could not write to {output}: {e}", err=True)
    +                raise typer.Exit(1) from e
    +            typer.echo(f"Requirements written to {output}")
    +        else:
    +            typer.echo(content, nl=False)
    +
    +    @app.command(name="validate", help="Validate one or more flow JSON files", rich_help_panel="Authoring")
    +    def validate_command_wrapper(
    +        flow_paths: list[str] = typer.Argument(
    +            default=None,
    +            help="Path(s) to Langflow flow JSON file(s) or directories to validate. Defaults to flows/.",
    +        ),
    +        dir_path: str | None = typer.Option(
    +            None,
    +            "--dir",
    +            "-d",
    +            help="Directory of flow JSON files to validate (validates all *.json files). Defaults to flows/.",
    +        ),
    +        level: int = typer.Option(
    +            4,
    +            "--level",
    +            "-l",
    +            min=1,
    +            max=4,
    +            help=(
    +                "Validation depth: "
    +                "1=structural JSON, "
    +                "2=+component existence, "
    +                "3=+edge type compatibility, "
    +                "4=+required inputs connected"
    +            ),
    +        ),
    +        skip_components: bool = typer.Option(
    +            False,
    +            "--skip-components",
    +            help="Skip component existence checks (level 2)",
    +        ),
    +        skip_edge_types: bool = typer.Option(
    +            False,
    +            "--skip-edge-types",
    +            help="Skip edge type compatibility checks (level 3)",
    +        ),
    +        skip_required_inputs: bool = typer.Option(
    +            False,
    +            "--skip-required-inputs",
    +            help="Skip required-inputs checks (level 4)",
    +        ),
    +        skip_version_check: bool = typer.Option(
    +            False,
    +            "--skip-version-check",
    +            help="Skip version-mismatch / outdated-component warnings",
    +        ),
    +        skip_credentials: bool = typer.Option(
    +            False,
    +            "--skip-credentials",
    +            help="Skip missing-credentials warnings for password/secret fields",
    +        ),
    +        strict: bool = typer.Option(
    +            False,
    +            "--strict",
    +            help="Treat warnings as errors (exit 1 if any warnings are found)",
    +        ),
    +        verbose: bool = typer.Option(
    +            False,
    +            "--verbose",
    +            "-v",
    +            help="Print all issues including warnings for passing flows",
    +        ),
    +        output_format: str = typer.Option(
    +            "text",
    +            "--format",
    +            "-f",
    +            help="Output format: text (default) or json",
    +        ),
    +    ) -> None:
    +        """Validate Langflow flow JSON files without executing them (lazy-loaded)."""
    +        from lfx.cli.validate import validate_command
    +
    +        # Merge --dir into positional paths for a consistent interface with push
    +        effective_paths = list(flow_paths or [])
    +        if dir_path is not None:
    +            effective_paths.append(dir_path)
    +
    +        validate_command(
    +            flow_paths=effective_paths,
    +            level=level,
    +            skip_components=skip_components,
    +            skip_edge_types=skip_edge_types,
    +            skip_required_inputs=skip_required_inputs,
    +            skip_version_check=skip_version_check,
    +            skip_credentials=skip_credentials,
    +            strict=strict,
    +            verbose=verbose,
    +            output_format=output_format,
    +        )
    
  • src/lfx/src/lfx/cli/common.py+34 3 modified
    @@ -17,7 +17,7 @@
     from io import StringIO
     from pathlib import Path
     from shutil import which
    -from typing import TYPE_CHECKING
    +from typing import TYPE_CHECKING, Any
     from urllib.parse import urlparse
     
     import httpx
    @@ -106,8 +106,14 @@ def get_best_access_host(host: str) -> str:
     
     
     def get_api_key() -> str:
    -    """Get the API key from environment variable."""
    -    api_key = os.getenv("LANGFLOW_API_KEY")
    +    """Get the API key from environment variable.
    +
    +    Used by ``lfx serve`` to set the superuser key on the local server.
    +    For *remote* commands (push, pull, login, …), the per-environment key
    +    is resolved via :func:`lfx.config.resolve_environment` and the
    +    ``api_key_env`` field in ``.lfx/environments.yaml``.
    +    """
    +    api_key = os.getenv("LANGFLOW_API_KEY") or os.getenv("LFX_API_KEY")
         if not api_key:
             msg = "LANGFLOW_API_KEY environment variable is not set"
             raise ValueError(msg)
    @@ -613,6 +619,31 @@ def download_and_extract_repo(url: str, verbose_print, *, timeout: float = 60.0)
             return root_path
     
     
    +def load_sdk(command_name: str) -> Any:
    +    """Lazily import ``langflow_sdk`` to keep CLI startup fast.
    +
    +    Raises :class:`typer.BadParameter` with install guidance when the package
    +    is not available.
    +
    +    Args:
    +        command_name: Name of the CLI command requesting the SDK (used in
    +            the error message).
    +    """
    +    try:
    +        import langflow_sdk  # type: ignore[import-untyped]
    +    except ImportError as exc:
    +        msg = f"langflow-sdk is required for lfx {command_name}. Install it with: pip install langflow-sdk"
    +        raise typer.BadParameter(msg) from exc
    +    else:
    +        return langflow_sdk
    +
    +
    +def safe_filename(name: str) -> str:
    +    """Convert a flow name to a safe filesystem basename (no extension)."""
    +    safe = "".join(c if c.isalnum() or c in "-_ " else "_" for c in name)
    +    return safe.strip().replace(" ", "_")
    +
    +
     def extract_script_docstring(script_path: Path) -> str | None:
         """Extract the module-level docstring from a Python script.
     
    
  • src/lfx/src/lfx/cli/create.py+157 0 added
    @@ -0,0 +1,157 @@
    +"""lfx create -- scaffold a new flow JSON from a built-in template.
    +
    +Writes a ready-to-edit flow JSON file into the target directory so teams
    +can start from a known-good structure rather than an empty file.
    +
    +Examples::
    +
    +    lfx create my-chatbot
    +    lfx create my-rag --template hello-world
    +    lfx create my-flow --output-dir ./flows --overwrite
    +    lfx create --list
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +import uuid
    +from pathlib import Path
    +from typing import Any
    +
    +import typer
    +from rich.console import Console
    +from rich.table import Table
    +
    +console = Console()
    +
    +_FLOWS_TEMPLATE_DIR = Path(__file__).parent.parent / "templates" / "flows"
    +
    +# Descriptions shown in ``lfx create --list``
    +_TEMPLATE_DESCRIPTIONS: dict[str, str] = {
    +    "hello-world": "ChatInput → ChatOutput — minimal echo flow, no LLM required",
    +}
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def list_templates() -> list[str]:
    +    """Return the names of all available flow templates (sorted)."""
    +    if not _FLOWS_TEMPLATE_DIR.exists():
    +        return []
    +    return sorted(p.stem for p in _FLOWS_TEMPLATE_DIR.glob("*.json"))
    +
    +
    +def _load_template(name: str) -> dict[str, Any]:
     +    """Load and parse a template JSON by name. Raises FileNotFoundError if missing."""
    +    path = _FLOWS_TEMPLATE_DIR / f"{name}.json"
    +    if not path.exists():
    +        available = list_templates()
    +        hint = f"  Available templates: {', '.join(available)}" if available else "  No templates found."
    +        msg = f"Template '{name}' not found.\n{hint}"
    +        raise FileNotFoundError(msg)
    +    return json.loads(path.read_text(encoding="utf-8"))
    +
    +
    +def _slugify(name: str) -> str:
    +    """Convert a flow name to a safe filename stem (lowercase, hyphens)."""
    +    return name.lower().replace(" ", "-").replace("_", "-")
    +
    +
    +# ---------------------------------------------------------------------------
    +# Core command (importable for testing and for init seeding)
    +# ---------------------------------------------------------------------------
    +
    +
    +def _is_lfx_project() -> bool:
     +    """Return True if cwd (or a parent up to .git) contains .lfx/environments.yaml or .lfx/environments.yml."""
    +    cwd = Path.cwd()
    +    for directory in (cwd, *cwd.parents):
    +        if (directory / ".lfx" / "environments.yaml").is_file():
    +            return True
    +        if (directory / ".lfx" / "environments.yml").is_file():
    +            return True
    +        if (directory / ".git").is_dir() or directory.parent == directory:
    +            break
    +    return False
    +
    +
    +def create_command(
    +    name: str,
    +    *,
    +    template: str = "hello-world",
    +    output_dir: Path = Path("flows"),
    +    overwrite: bool = False,
    +) -> Path:
    +    """Create a new flow JSON from *template* and write it to *output_dir/<slug>.json*.
    +
    +    Returns the path of the written file.
    +    Raises ``typer.Exit`` on user-facing errors so the CLI reports them cleanly.
    +    """
    +    if not _is_lfx_project():
    +        console.print(
    +            "[yellow]Warning:[/yellow] No .lfx/environments.yaml found in this project. "
    +            "Run [bold]lfx init[/bold] first to scaffold a project."
    +        )
    +
    +    available = list_templates()
    +    if not available:
    +        console.print("[red]Error:[/red] No flow templates found in the lfx package.")
    +        raise typer.Exit(1)
    +
    +    if template not in available:
    +        console.print(
    +            f"[red]Error:[/red] Unknown template [bold]{template!r}[/bold]. Available: {', '.join(available)}"
    +        )
    +        raise typer.Exit(1)
    +
    +    slug = _slugify(name)
    +    output_dir = output_dir.resolve()
    +    dest = output_dir / f"{slug}.json"
    +
    +    if dest.exists() and not overwrite:
    +        console.print(f"[red]Error:[/red] {dest} already exists. Use [bold]--overwrite[/bold] to replace it.")
    +        raise typer.Exit(1)
    +
    +    # Load template and stamp with a fresh UUID + the requested name
    +    flow = _load_template(template)
    +    flow["id"] = str(uuid.uuid4())
    +    flow["name"] = name
    +
    +    output_dir.mkdir(parents=True, exist_ok=True)
    +    dest.write_text(json.dumps(flow, indent=2), encoding="utf-8")
    +
    +    console.print(f"[bold green]✓[/bold green] Created [bold]{dest}[/bold]")
    +    console.print(f"  Template : {template}")
    +    console.print(f"  Flow ID  : {flow['id']}")
    +    console.print()
    +    console.print("Next steps:")
    +    console.print(f"  [bold]lfx validate {dest}[/bold]")
    +    console.print(f"  [bold]lfx serve {dest}[/bold]")
    +
    +    return dest
    +
    +
    +# ---------------------------------------------------------------------------
    +# Listing helper (also used by the CLI --list flag)
    +# ---------------------------------------------------------------------------
    +
    +
    +def print_templates() -> None:
    +    """Print available templates as a Rich table."""
    +    available = list_templates()
    +    if not available:
    +        console.print("[yellow]No flow templates found.[/yellow]")
    +        return
    +
    +    table = Table(title="Available flow templates", show_header=True, header_style="bold")
    +    table.add_column("Name", style="cyan", no_wrap=True)
    +    table.add_column("Description")
    +
    +    for name in available:
    +        desc = _TEMPLATE_DESCRIPTIONS.get(name, "")
    +        table.add_row(name, desc)
    +
    +    console.print(table)
    
  • src/lfx/src/lfx/cli/export.py+188 0 added
    @@ -0,0 +1,188 @@
    +"""lfx export -- serialize flows to git-friendly JSON.
    +
    +Two modes of operation
    +----------------------
    +Local (default)
    +    Read one or more flow JSON files from disk, normalize them, and write the
    +    result back to disk (or stdout).  No network connection required.
    +
    +Remote (--env / --flow-id / --project-id)
    +    Pull flows directly from a running Langflow instance using the
    +    ``langflow-sdk`` HTTP client, normalize them, and write to disk.
    +
    +Examples:
    +--------
    +Normalize a local file in-place::
    +
    +    lfx export my_flow.json --in-place
    +
    +Normalize and write to a new file::
    +
    +    lfx export my_flow.json -o my_flow.normalized.json
    +
    +Pull a single flow from staging and write to the current directory::
    +
    +    lfx export --flow-id <uuid> --env staging
    +
    +Export an entire project from staging into ./flows/::
    +
    +    lfx export --project-id <uuid> --env staging --output-dir ./flows/
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +import sys
    +from pathlib import Path
    +from typing import Any
    +from uuid import UUID
    +
    +import typer
    +from rich.console import Console
    +
    +from lfx.cli.common import load_sdk, safe_filename
    +
    +console = Console(stderr=True)
    +
    +
    +def _write_flow(
    +    flow: dict[str, Any],
    +    *,
    +    sdk: Any,
    +    output: Path | None,
    +    in_place: bool,
    +    source_path: Path | None,
    +    indent: int,
    +) -> Path | None:
     +    """Serialize *flow* and write it to the appropriate destination.
    +
    +    Returns the path written to, or ``None`` if writing to stdout.
    +    """
    +    content = sdk.flow_to_json(flow, indent=indent)
    +
    +    if in_place and source_path:
    +        source_path.write_text(content, encoding="utf-8")
    +        return source_path
    +
    +    if output:
    +        output.write_text(content, encoding="utf-8")
    +        return output
    +
    +    sys.stdout.write(content)
    +    return None
    +
    +
    +def export_command(
    +    flow_paths: list[str],
    +    *,
    +    output: str | None,
    +    output_dir: str | None,
    +    env: str | None,
    +    flow_id: str | None,
    +    project_id: str | None,
    +    environments_file: str | None,
    +    target: str | None = None,
    +    api_key: str | None = None,
    +    in_place: bool,
    +    strip_volatile: bool,
    +    strip_secrets: bool,
    +    code_as_lines: bool,
    +    strip_node_volatile: bool,
    +    indent: int,
    +) -> None:
    +    sdk = load_sdk("export")
    +
    +    normalize_kwargs = {
    +        "strip_volatile": strip_volatile,
    +        "strip_secrets": strip_secrets,
    +        "sort_keys": True,
    +        "code_as_lines": code_as_lines,
    +        "strip_node_volatile": strip_node_volatile,
    +    }
    +
    +    # ------------------------------------------------------------------
    +    # Remote mode: pull from a live Langflow instance
    +    # ------------------------------------------------------------------
    +    if flow_id or project_id:
    +        if not env and not target:
    +            console.print("[red]Error:[/red] --env or --target is required for remote export.")
    +            raise typer.Exit(1)
    +
    +        from lfx.config import ConfigError, resolve_environment
    +
    +        try:
    +            env_cfg = resolve_environment(
    +                env,
    +                target=target,
    +                api_key=api_key,
    +                environments_file=environments_file,
    +            )
    +        except ConfigError as exc:
    +            console.print(f"[red]Error:[/red] {exc}")
    +            raise typer.Exit(1) from exc
    +
    +        client = sdk.Client(base_url=env_cfg.url, api_key=env_cfg.api_key)
    +
    +        if flow_id:
    +            flow_obj = client.get_flow(UUID(flow_id))
    +            normalized = sdk.normalize_flow(flow_obj.model_dump(mode="json"), **normalize_kwargs)
    +            dest_dir = Path(output_dir) if output_dir else Path.cwd()
    +            dest_dir.mkdir(parents=True, exist_ok=True)
    +            safe_name = safe_filename(flow_obj.name)
    +            out_path = dest_dir / f"{safe_name}.json"
    +            out_path.write_text(sdk.flow_to_json(normalized, indent=indent), encoding="utf-8")
    +            console.print(f"[green]Exported[/green] {flow_obj.name!r} → {out_path}")
    +            return
    +
    +        # Project mode: export all flows
    +        project = client.get_project(UUID(project_id))
    +        dest_dir = Path(output_dir) if output_dir else Path.cwd() / safe_filename(project.name)
    +        dest_dir.mkdir(parents=True, exist_ok=True)
    +
    +        exported = 0
    +        for flow_obj in project.flows:
    +            normalized = sdk.normalize_flow(flow_obj.model_dump(mode="json"), **normalize_kwargs)
    +            safe_name = safe_filename(flow_obj.name)
    +            out_path = dest_dir / f"{safe_name}.json"
    +            out_path.write_text(sdk.flow_to_json(normalized, indent=indent), encoding="utf-8")
    +            console.print(f"[green]Exported[/green] {flow_obj.name!r} → {out_path}")
    +            exported += 1
    +
    +        console.print(f"\n[bold]{exported}[/bold] flow(s) exported to {dest_dir}")
    +        return
    +
    +    # ------------------------------------------------------------------
    +    # Local mode: normalize files already on disk
    +    # ------------------------------------------------------------------
    +    if not flow_paths:
    +        console.print("[red]Error:[/red] Provide at least one flow JSON file, or use --flow-id / --project-id.")
    +        raise typer.Exit(1)
    +
    +    if output and len(flow_paths) > 1:
    +        console.print("[red]Error:[/red] --output can only be used with a single input file.")
    +        raise typer.Exit(1)
    +
    +    out_path_obj = Path(output) if output else None
    +
    +    for raw_path in flow_paths:
    +        src = Path(raw_path)
    +        if not src.exists():
    +            console.print(f"[red]Error:[/red] File not found: {src}")
    +            raise typer.Exit(1)
    +
    +        try:
    +            normalized = sdk.normalize_flow_file(src, **normalize_kwargs)
    +        except (json.JSONDecodeError, OSError, ValueError) as exc:
    +            console.print(f"[red]Error:[/red] Could not process {src}: {exc}")
    +            raise typer.Exit(1) from exc
    +
    +        dest = _write_flow(
    +            normalized,
    +            sdk=sdk,
    +            output=out_path_obj,
    +            in_place=in_place,
    +            source_path=src,
    +            indent=indent,
    +        )
    +        if dest:
    +            console.print(f"[green]Exported[/green] {src} → {dest}")
    
  • src/lfx/src/lfx/cli/init.py+257 0 added
    @@ -0,0 +1,257 @@
    +"""lfx init -- scaffold a new Flow DevOps project.
    +
    +Creates the standard directory layout, an environments config stub,
    +example tests, and (optionally) GitHub Actions CI workflows -- everything
    +a team needs to start treating flows as code.
    +
    +Examples::
    +
    +    lfx init my-rag-project
    +    lfx init .                   # scaffold into the current directory
    +    lfx init my-project --no-github-actions
    +"""
    +
    +from __future__ import annotations
    +
    +from pathlib import Path
    +from typing import Any
    +
    +import typer
    +from rich.console import Console
    +from rich.tree import Tree
    +
    +console = Console()
    +
    +# ---------------------------------------------------------------------------
    +# Templates embedded as strings (environments config and test stubs)
    +# ---------------------------------------------------------------------------
    +
    +_ENVIRONMENTS_YAML = """\
    +# .lfx/environments.yaml
    +#
    +# Configure your Langflow instances here.
    +# Safe to commit — API keys are NEVER stored in this file.
    +# The api_key_env value is the NAME of an environment variable that holds
    +# the actual API key; set that variable in your shell or CI secrets.
    +#
    +# Quick start:
    +#   1. Open Langflow → Settings → API Keys → Create a new key
    +#   2. export LANGFLOW_LOCAL_API_KEY=<your key>
    +#   3. lfx export --env local --flow-id <uuid> --output-dir flows/
    +
    +environments:
    +  local:
    +    url: http://localhost:7860
    +    api_key_env: LANGFLOW_LOCAL_API_KEY
    +
    +  staging:
    +    url: https://staging.langflow.example.com
    +    api_key_env: LANGFLOW_STAGING_API_KEY
    +
    +  production:
    +    url: https://langflow.example.com
    +    api_key_env: LANGFLOW_PROD_API_KEY
    +
    +defaults:
    +  environment: local
    +"""
    +
    +_TEST_FLOWS_PY = '''\
    +"""Integration tests for Langflow flows.
    +
    +Run against a local instance (started with ``lfx serve``):
    +
    +    pytest tests/ --langflow-url http://localhost:8000
    +
    +Run against a named environment (staging, production, etc.):
    +
    +    pytest tests/ --langflow-env staging -m integration
    +
    +The flow_runner fixture auto-skips when no connection is configured,
    +so these tests are safe to include in any CI pipeline.
    +"""
    +
    +import pytest
    +
    +
    +@pytest.mark.integration
    +def test_flow_responds(flow_runner):
    +    """Smoke test: every flow should return a non-empty response."""
    +    # TODO: replace "my-flow-endpoint" with your flow\'s endpoint name or UUID
    +    result = flow_runner("my-flow-endpoint", "Hello!")
    +    assert result.first_text_output() is not None, "Flow returned no output"
    +
    +
    +@pytest.mark.integration
    +def test_flow_output_quality(flow_runner):
    +    """Example: assert on the content of the response."""
    +    result = flow_runner("my-flow-endpoint", "What is Langflow?")
    +    text = result.first_text_output()
    +    assert text is not None
    +    assert len(text) > 20, f"Response seems too short: {text!r}"
    +'''
    +
    +_GITIGNORE = """\
    +# Langflow credentials -- never commit API keys
    +# (langflow-environments.toml may contain literal keys; .lfx/environments.yaml is safe to commit)
    +langflow-environments.toml
    +"""
    +
    +# Templates bundled inside the Python package
    +_TEMPLATES_DIR = Path(__file__).parent.parent / "templates"
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _write(
    +    path: Path,
    +    content: str,
    +    label: str,
    +    created: list[tuple[str, str]],
    +    *,
    +    target: Path,
    +    overwrite: bool,
    +) -> None:
    +    path.parent.mkdir(parents=True, exist_ok=True)
    +    if path.exists() and not overwrite:
    +        return
    +    path.write_text(content, encoding="utf-8")
    +    created.append((str(path.relative_to(target)), label))
    +
    +
    +def _copy_template(
    +    src: Path, dest: Path, label: str, created: list[tuple[str, str]], *, target: Path, overwrite: bool
    +) -> None:
    +    dest.parent.mkdir(parents=True, exist_ok=True)
    +    if dest.exists() and not overwrite:
    +        return
    +    dest.write_text(src.read_text(encoding="utf-8"), encoding="utf-8")
    +    created.append((str(dest.relative_to(target)), label))
    +
    +
    +def _render_tree(target: Path, created: list[tuple[str, str]]) -> None:
    +    label = f"[bold]{target.name}/[/bold]" if target != Path.cwd() else "[bold].[/bold] (current directory)"
    +    tree = Tree(label)
    +    branch_nodes: dict[str, Any] = {}
    +
    +    for rel_path, annotation in sorted(created, key=lambda x: x[0]):
    +        parts = Path(rel_path).parts
    +        node = tree
    +        for i, part in enumerate(parts[:-1]):
    +            key = "/".join(parts[: i + 1])
    +            if key not in branch_nodes:
    +                branch_nodes[key] = node.add(f"[bold blue]{part}/[/bold blue]")
    +            node = branch_nodes[key]
    +        suffix = f"  [dim]{annotation}[/dim]" if annotation else ""
    +        node.add(f"[green]{parts[-1]}[/green]{suffix}")
    +
    +    console.print()
    +    console.print(tree)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Command
    +# ---------------------------------------------------------------------------
    +
    +
def init_command(
    project_dir: Path,
    *,
    github_actions: bool,
    overwrite: bool,
    example: bool = True,
) -> None:
    """Scaffold a Flow DevOps project at *project_dir*.

    Creates the standard layout — ``flows/``, ``tests/``,
    ``.lfx/environments.yaml``, a ``.gitignore`` rule, generic ``ci/``
    shell scripts, and (optionally) GitHub Actions workflows — then prints
    a tree of everything created plus a numbered next-steps guide.

    Args:
        project_dir: Target directory; created if missing.
        github_actions: Also copy the bundled GitHub Actions workflows.
        overwrite: Replace existing files and allow a non-empty target.
        example: Seed ``flows/`` with the hello-world starter flow.

    Raises:
        typer.Exit: When the target exists, is non-empty, and *overwrite*
            is not set.
    """
    target = project_dir.resolve()

    # Refuse to scaffold into a non-empty directory unless --overwrite;
    # a lone .git/ does not count as "non-empty".
    if target.exists() and not overwrite:
        existing = [p for p in target.iterdir() if p.name != ".git"]
        if existing:
            msg = f"{target} already exists and is not empty. Use [bold]--overwrite[/bold] to scaffold into it anyway."
            console.print(f"[red]Error:[/red] {msg}")
            raise typer.Exit(1)

    target.mkdir(parents=True, exist_ok=True)
    created: list[tuple[str, str]] = []

    # Shared keyword arguments threaded through every _write call below.
    kw: dict[str, Any] = {"target": target, "overwrite": overwrite, "created": created}

    # flows/
    (target / "flows").mkdir(exist_ok=True)
    if example:
        # NOTE(review): imported lazily — presumably to avoid pulling in the
        # create subcommand's dependencies at module import time; confirm.
        from lfx.cli.create import create_command as _create

        try:
            _create(
                "hello-world",
                template="hello-world",
                output_dir=target / "flows",
                overwrite=overwrite,
            )
            created.append(("flows/hello-world.json", "starter flow — edit or replace"))
        except (OSError, ValueError, TypeError, RuntimeError) as exc:
            # Don't let a template failure block the rest of init
            console.print(f"[yellow]Warning:[/yellow] Could not seed starter flow: {exc}")
    else:
        _write(target / "flows" / ".gitkeep", "", "versioned empty directory", **kw)

    # tests/
    _write(target / "tests" / "__init__.py", "", "", **kw)
    _write(target / "tests" / "test_flows.py", _TEST_FLOWS_PY, "flow_runner example tests", **kw)

    # .lfx/environments.yaml
    _write(
        target / ".lfx" / "environments.yaml",
        _ENVIRONMENTS_YAML,
        "edit with your instance URLs + API key env var names (safe to commit)",
        **kw,
    )

    # .gitignore — keep langflow-environments.toml ignored for backward compat
    gitignore = target / ".gitignore"
    if gitignore.exists():
        # Append (never rewrite) so the user's existing ignore rules survive.
        existing_content = gitignore.read_text(encoding="utf-8")
        if "langflow-environments.toml" not in existing_content:
            gitignore.write_text(existing_content.rstrip() + "\n\n" + _GITIGNORE, encoding="utf-8")
            created.append((".gitignore", "appended credentials ignore rule"))
    else:
        _write(gitignore, _GITIGNORE, "ignores legacy credentials file", **kw)

    # GitHub Actions CI workflows
    if github_actions:
        gha_src = _TEMPLATES_DIR / "github-actions"
        if gha_src.exists():
            for tmpl in sorted(gha_src.glob("*.yml")):
                dest = target / ".github" / "workflows" / tmpl.name
                _copy_template(tmpl, dest, "CI workflow", created, target=target, overwrite=overwrite)
        else:
            console.print("[yellow]Warning:[/yellow] GitHub Actions templates not found; skipping.")

    # Generic shell CI scripts (always scaffolded — work with any CI system)
    shell_src = _TEMPLATES_DIR / "shell"
    if shell_src.exists():
        for tmpl in sorted(shell_src.glob("*.sh")):
            dest = target / "ci" / tmpl.name
            _copy_template(tmpl, dest, "generic CI script", created, target=target, overwrite=overwrite)
            dest.chmod(dest.stat().st_mode | 0o111)  # ensure executable bit

    # Print the created-files tree
    _render_tree(target, created)

    # Next-steps guide
    console.print()
    console.print("[bold green]✓ Project scaffolded.[/bold green]  Next steps:\n")
    console.print("  1. Edit [bold].lfx/environments.yaml[/bold] with your instance URL")
    console.print("  2. [bold]export LANGFLOW_LOCAL_API_KEY=<key>[/bold]   (Settings → API Keys)")
    if example:
        console.print("  3. [bold]lfx validate flows/hello-world.json[/bold]  (check the starter flow)")
        console.print("  4. [bold]lfx serve flows/hello-world.json[/bold]     (run it locally)")
        console.print("  5. [bold]lfx push --dir flows/ --env local[/bold]    (deploy to Langflow)")
    else:
        console.print("  3. [bold]lfx create my-flow --template hello-world[/bold]")
        console.print("  4. [bold]lfx push --dir flows/ --env local[/bold]")
    console.print(f"  {'6' if example else '5'}. [bold]pytest tests/ --langflow-env local[/bold]")
    console.print()
    
  • src/lfx/src/lfx/cli/login.py+215 0 added
    @@ -0,0 +1,215 @@
    +"""lfx login -- validate credentials against a remote Langflow instance.
    +
    +Tests whether the configured URL and API key are reachable and accepted,
    +then prints a summary with guidance on how to fix any problems.
    +
    +Usage examples
    +--------------
    +Validate a named environment::
    +
    +    lfx login --env staging
    +
    +Validate inline credentials::
    +
    +    lfx login --target https://langflow.example.com --api-key sk-abc123
    +
    +Validate whichever environment is the default in your config::
    +
    +    lfx login
    +"""
    +
    +from __future__ import annotations
    +
    +import os
    +from pathlib import Path
    +from typing import Any
    +
    +import typer
    +from rich.console import Console
    +from rich.panel import Panel
    +
    +from lfx.cli.common import load_sdk
    +
    +console = Console()
    +err_console = Console(stderr=True)
    +
    +_KEY_MASK = 8  # characters shown before "..."
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _mask_key(key: str) -> str:
    +    if len(key) <= _KEY_MASK:
    +        return "***"
    +    return key[:_KEY_MASK] + "..."
    +
    +
    +def _api_key_env_name(env_name: str, environments_file: str | None) -> str | None:
    +    """Return the ``api_key_env`` var name for *env_name* from the config file, or ``None``."""
    +    try:
    +        from lfx.config.environments import _find_config_file, _load_config
    +
    +        override = Path(environments_file) if environments_file else None
    +        config_path = _find_config_file(override)
    +        if config_path is None:
    +            return None
    +        _load_config(config_path)
    +
    +        # Re-read raw to get api_key_env
    +        from lfx.config.environments import _load_raw
    +
    +        raw = _load_raw(config_path)
    +        block = (raw.get("environments") or {}).get(env_name, {})
    +        return block.get("api_key_env") if isinstance(block, dict) else None
    +    except Exception:  # noqa: BLE001
    +        return None
    +
    +
    +def _probe_connection(client: Any, sdk: Any) -> tuple[bool, str, int]:
    +    """Attempt a lightweight API call. Return ``(ok, message, flow_count)``."""
    +    try:
    +        flows = client.list_flows(page=1, size=1)
    +        return True, "OK", len(flows)
    +    except sdk.LangflowAuthError:
    +        return False, "auth", 0
    +    except sdk.LangflowConnectionError as exc:
    +        return False, f"connection:{exc}", 0
    +    except sdk.LangflowHTTPError as exc:
    +        return False, f"http:{exc}", 0
    +    except Exception as exc:  # noqa: BLE001
    +        # A Pydantic ValidationError means the HTTP request completed and was
    +        # authenticated (auth is fine) but the SDK's schema doesn't match the
    +        # local Langflow version exactly.  Treat this as a successful probe.
    +        if "ValidationError" in type(exc).__qualname__:
    +            return True, "OK", 0
    +        return False, f"error:{exc}", 0
    +
    +
    +# ---------------------------------------------------------------------------
    +# CLI entry point
    +# ---------------------------------------------------------------------------
    +
    +
def login_command(
    *,
    env: str | None,
    environments_file: str | None,
    target: str | None,
    api_key: str | None,
) -> None:
    """Validate credentials against a Langflow instance and print guidance.

    Resolves an environment (named config entry, inline --target/--api-key,
    or the config default), probes it with one lightweight API call, then
    prints either a success panel or actionable fix-it steps.

    Args:
        env: Named environment from the config file, or ``None`` for the
            default.
        environments_file: Optional override path for the config file.
        target: Inline base URL, bypassing the config file.
        api_key: Inline API key, bypassing the config file.

    Raises:
        typer.Exit: On configuration errors or any failed probe (code 1).
    """
    sdk = load_sdk("login")

    from lfx.config import ConfigError, resolve_environment

    try:
        env_cfg = resolve_environment(
            env,
            target=target,
            api_key=api_key,
            environments_file=environments_file,
        )
    except ConfigError as exc:
        err_console.print(f"[red]Configuration error:[/red] {exc}")
        raise typer.Exit(1) from exc

    # ------------------------------------------------------------------
    # Key guidance (before probing)
    # ------------------------------------------------------------------
    # NOTE(review): "__inline__"/"__env__" appear to be sentinel names that
    # resolve_environment uses for credentials supplied outside the config
    # file (so there is no api_key_env entry to look up) — confirm there.
    key_env_name: str | None = None
    if env_cfg.api_key is None and env_cfg.name not in ("__inline__", "__env__"):
        key_env_name = _api_key_env_name(env_cfg.name, environments_file)

    if env_cfg.api_key is None:
        warning_parts = ["[yellow]Warning:[/yellow] No API key configured."]
        if key_env_name:
            warning_parts.append(f"  Set [bold]export {key_env_name}=<your-key>[/bold] then retry.")
        else:
            warning_parts.append("  Add [bold]api_key_env: LANGFLOW_<ENV>_API_KEY[/bold] to your config,")
            warning_parts.append("  then set that environment variable to your API key.")
        for line in warning_parts:
            err_console.print(line)

    # ------------------------------------------------------------------
    # Probe the connection
    # ------------------------------------------------------------------
    client = sdk.Client(base_url=env_cfg.url, api_key=env_cfg.api_key)

    console.print(f"[dim]Connecting to[/dim] {env_cfg.url} …")
    ok, msg, _flow_count = _probe_connection(client, sdk)

    if not ok:
        if msg == "auth":
            err_console.print()
            err_console.print("[red bold]✗ Authentication failed.[/red bold]")
            err_console.print(f"  URL:  {env_cfg.url}")
            if env_cfg.api_key:
                err_console.print(f"  Key:  {_mask_key(env_cfg.api_key)}")
            err_console.print()
            err_console.print("[bold]How to fix:[/bold]")
            # Tailor step 2 to how the key should be supplied for this setup.
            if key_env_name:
                err_console.print("  1. Open Langflow → Settings → API Keys → Create a new key")
                err_console.print(f"  2. [bold]export {key_env_name}=<your-new-key>[/bold]")
            elif env_cfg.name not in ("__inline__", "__env__"):
                err_console.print("  1. Open Langflow → Settings → API Keys → Create a new key")
                err_console.print("  2. Pass [bold]--api-key <key>[/bold] or configure api_key_env in your YAML")
            else:
                err_console.print("  1. Open Langflow → Settings → API Keys → Create a new key")
                err_console.print("  2. Pass [bold]--api-key <key>[/bold]")
            raise typer.Exit(1)

        if msg.startswith("connection:"):
            err_console.print()
            err_console.print("[red bold]✗ Cannot connect.[/red bold]")
            err_console.print(f"  URL: {env_cfg.url}")
            err_console.print()
            err_console.print("[bold]How to fix:[/bold]")
            err_console.print("  • Make sure your Langflow instance is running")
            err_console.print("  • Check the URL in your .lfx/environments.yaml")
            err_console.print("  • If running locally: [bold]langflow run[/bold] or [bold]lfx serve <flow.json>[/bold]")
            raise typer.Exit(1)

        # Generic HTTP or other error
        err_console.print(f"\n[red bold]✗ Request failed:[/red bold] {msg.split(':', 1)[-1]}")
        raise typer.Exit(1)

    # ------------------------------------------------------------------
    # Success
    # ------------------------------------------------------------------
    masked_key = _mask_key(env_cfg.api_key) if env_cfg.api_key else "[dim](none)[/dim]"

    # Show where the accepted key came from, when that can be determined.
    key_source = ""
    if key_env_name and env_cfg.api_key:
        key_source = f"  [dim]from env var {key_env_name}[/dim]"
    elif os.environ.get("LANGFLOW_API_KEY") == env_cfg.api_key and env_cfg.api_key:
        key_source = "  [dim]from LANGFLOW_API_KEY[/dim]"

    env_label = env_cfg.name if env_cfg.name not in ("__inline__", "__env__") else "(inline)"

    console.print()
    console.print(
        Panel.fit(
            f"[bold green]✓ Connected successfully![/bold green]\n\n"
            f"[bold]Environment:[/bold] {env_label}\n"
            f"[bold]URL:[/bold]         {env_cfg.url}\n"
            f"[bold]API key:[/bold]     {masked_key}{key_source}",
            title="[bold blue]lfx login[/bold blue]",
            border_style="green",
        )
    )

    if not env_cfg.api_key:
        console.print()
        console.print("[dim]Note: connected without an API key (anonymous access).[/dim]")
        console.print("[dim]Some operations may fail. Add api_key_env to your config.[/dim]")

    # For inline credentials, suggest persisting them as a named environment.
    if env_cfg.name in ("__inline__", "__env__") and env_cfg.api_key:
        console.print()
        console.print("[dim]Tip: to avoid passing credentials each time, add to .lfx/environments.yaml:[/dim]")
        env_display = env or "myenv"
        console.print("[dim]  environments:[/dim]")
        console.print(f"[dim]    {env_display}:[/dim]")
        console.print(f"[dim]      url: {env_cfg.url}[/dim]")
        console.print(f"[dim]      api_key_env: LANGFLOW_{env_display.upper()}_API_KEY[/dim]")
    
  • src/lfx/src/lfx/cli/pull.py+252 0 added
    @@ -0,0 +1,252 @@
    +"""lfx pull -- fetch flows from a remote Langflow instance to local files.
    +
    +Downloads flows from a live Langflow instance, normalizes them for version
    +control, and writes them to a local directory.  Repeated pulls are safe:
    +existing files are overwritten with the latest remote state.
    +
    +Usage examples
    +--------------
    +Pull all flows from staging to ``flows/``::
    +
    +    lfx pull --env staging
    +
    +Pull all flows in a named project::
    +
    +    lfx pull --env staging --project "My RAG Pipeline"
    +
    +Pull a single flow by UUID::
    +
    +    lfx pull --env staging --flow-id <uuid>
    +
    +Pull to a custom directory::
    +
    +    lfx pull --env production --output-dir ./local-flows/
    +"""
    +
    +from __future__ import annotations
    +
    +from dataclasses import dataclass
    +from pathlib import Path
    +from typing import Any
    +from uuid import UUID
    +
    +import typer
    +from rich.console import Console
    +from rich.table import Table
    +
    +from lfx.cli.common import load_sdk, safe_filename
    +from lfx.log.logger import logger
    +
    +console = Console(stderr=True)
    +ok_console = Console()
    +
    +
    +# ---------------------------------------------------------------------------
    +# Result types
    +# ---------------------------------------------------------------------------
    +
    +
    +@dataclass
    +class PullResult:
    +    flow_id: UUID
    +    flow_name: str
    +    path: Path
    +    status: str  # "unchanged" | "updated" | "created" | "error"
    +    error: str | None = None
    +
    +    @property
    +    def ok(self) -> bool:
    +        return self.status != "error"
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
def _write_flow(
    flow_obj: Any,
    *,
    sdk: Any,
    dest_dir: Path,
    strip_secrets: bool,
    indent: int,
) -> PullResult:
    """Normalize and write a single flow to *dest_dir*.

    The flow is normalized for version control (volatile fields stripped,
    keys sorted, secrets optionally removed) before being compared against
    any existing file, so re-pulling an unchanged remote flow is a no-op
    on disk.  Returns a :class:`PullResult` whose ``status`` is one of
    ``unchanged`` / ``created`` / ``updated`` / ``error``; this function
    never raises.
    """
    flow_id = flow_obj.id
    flow_name = flow_obj.name

    try:
        normalized = sdk.normalize_flow(
            flow_obj.model_dump(mode="json"),
            strip_volatile=True,
            strip_secrets=strip_secrets,
            sort_keys=True,
        )
        safe_name = safe_filename(flow_name)
        out_path = dest_dir / f"{safe_name}.json"
        new_content = sdk.flow_to_json(normalized, indent=indent)
        # Skip the write when the on-disk content already matches, so mtimes
        # (and any file watchers) are not disturbed by a no-op pull.
        if out_path.exists() and out_path.read_text(encoding="utf-8") == new_content:
            status = "unchanged"
        else:
            status = "created" if not out_path.exists() else "updated"
            out_path.write_text(new_content, encoding="utf-8")
        return PullResult(flow_id=flow_id, flow_name=flow_name, path=out_path, status=status)
    except Exception as exc:  # noqa: BLE001
        logger.debug("Failed to write flow %s", flow_id, exc_info=True)
        # Fall back to an id-based filename purely for error reporting —
        # the file itself was not (fully) written.
        dummy_path = dest_dir / f"{flow_id}.json"
        return PullResult(flow_id=flow_id, flow_name=flow_name, path=dummy_path, status="error", error=str(exc))
    +
    +
    +def _render_results(results: list[PullResult]) -> None:
    +    table = Table(show_header=True, header_style="bold")
    +    table.add_column("Name")
    +    table.add_column("ID")
    +    table.add_column("File")
    +    table.add_column("Status")
    +
    +    status_style = {
    +        "unchanged": ("dim", "UNCHANGED"),
    +        "updated": ("yellow", "UPDATED"),
    +        "created": ("green", "CREATED"),
    +        "error": ("red", "ERROR"),
    +    }
    +
    +    for r in results:
    +        color, label = status_style.get(r.status, ("white", r.status.upper()))
    +        if r.error:
    +            label += f": {r.error}"
    +        table.add_row(r.flow_name, str(r.flow_id), str(r.path), f"[{color}]{label}[/{color}]")
    +
    +    ok_console.print()
    +    ok_console.print(table)
    +
    +    errors = [r for r in results if not r.ok]
    +    n_changed = sum(1 for r in results if r.status in ("created", "updated"))
    +    n_unchanged = sum(1 for r in results if r.status == "unchanged")
    +    if errors:
    +        console.print(f"\n[red]{len(errors)} pull(s) failed.[/red]")
    +    else:
    +        parts = []
    +        if n_changed:
    +            parts.append(f"[green]{n_changed} updated[/green]")
    +        if n_unchanged:
    +            parts.append(f"[dim]{n_unchanged} unchanged[/dim]")
    +        ok_console.print("\n" + ", ".join(parts) + ".")
    +
    +
    +# ---------------------------------------------------------------------------
    +# CLI entry point
    +# ---------------------------------------------------------------------------
    +
    +
def pull_command(
    *,
    env: str | None,
    output_dir: str | None,
    flow_id: str | None,
    project: str | None,
    project_id: str | None,
    environments_file: str | None,
    target: str | None = None,
    api_key: str | None = None,
    strip_secrets: bool,
    indent: int,
) -> None:
    """Fetch flows from a remote Langflow instance into local JSON files.

    Exactly one of three scopes applies, checked in this order: a single
    flow (``flow_id``), all flows in one project (``project`` by name or
    ``project_id`` by UUID), or every non-example flow in the environment.
    Files land in ``output_dir`` (default ``flows/``), normalized via
    :func:`_write_flow` so repeated pulls are idempotent.

    Raises:
        typer.Exit: On configuration or lookup errors, or if any flow
            failed to be written (exit code 1).
    """
    sdk = load_sdk("pull")

    from lfx.config import ConfigError, resolve_environment

    try:
        env_cfg = resolve_environment(
            env,
            target=target,
            api_key=api_key,
            environments_file=environments_file,
        )
    except ConfigError as exc:
        console.print(f"[red]Error:[/red] {exc}")
        raise typer.Exit(1) from exc

    client = sdk.Client(base_url=env_cfg.url, api_key=env_cfg.api_key)

    # Default destination matches the layout scaffolded by `lfx init`.
    dest_dir = Path(output_dir) if output_dir else Path("flows")
    dest_dir.mkdir(parents=True, exist_ok=True)

    results: list[PullResult] = []

    # ---- Single flow by ID -----------------------------------------------
    if flow_id:
        try:
            flow_obj = client.get_flow(UUID(flow_id))
        except Exception as exc:
            console.print(f"[red]Error:[/red] Could not fetch flow {flow_id}: {exc}")
            raise typer.Exit(1) from exc

        result = _write_flow(flow_obj, sdk=sdk, dest_dir=dest_dir, strip_secrets=strip_secrets, indent=indent)
        results.append(result)
        if result.ok:
            console.print(f"[green]Pulled[/green] {result.flow_name!r} → {result.path}")
        else:
            console.print(f"[red]Failed[/red] {result.flow_name!r}: {result.error}")

    # ---- All flows in a named project ------------------------------------
    elif project or project_id:
        # An explicit UUID wins; otherwise resolve the name via list_projects.
        if project_id:
            try:
                proj = client.get_project(UUID(project_id))
            except Exception as exc:
                console.print(f"[red]Error:[/red] Could not fetch project {project_id}: {exc}")
                raise typer.Exit(1) from exc
        else:
            projects = client.list_projects()
            matched = [p for p in projects if p.name == project]
            if not matched:
                names = ", ".join(repr(p.name) for p in projects) or "(none)"
                console.print(f"[red]Error:[/red] Project {project!r} not found. Available: {names}")
                raise typer.Exit(1)
            try:
                proj = client.get_project(matched[0].id)
            except Exception as exc:
                console.print(f"[red]Error:[/red] Could not fetch project {project!r}: {exc}")
                raise typer.Exit(1) from exc

        console.print(f"[dim]Pulling from project[/dim] {proj.name!r} (id={proj.id})")

        for flow_obj in proj.flows:
            result = _write_flow(flow_obj, sdk=sdk, dest_dir=dest_dir, strip_secrets=strip_secrets, indent=indent)
            results.append(result)
            if result.status == "unchanged":
                console.print(f"[dim]Unchanged[/dim] {result.flow_name!r}")
            elif result.ok:
                console.print(f"[green]Pulled[/green] {result.flow_name!r} → {result.path}")
            else:
                console.print(f"[red]Failed[/red] {result.flow_name!r}: {result.error}")

    # ---- All flows in the environment ------------------------------------
    else:
        console.print(f"[dim]Pulling all flows from[/dim] {env_cfg.url}")
        try:
            flows = client.list_flows(get_all=True, remove_example_flows=True)
        except Exception as exc:
            console.print(f"[red]Error:[/red] Could not list flows: {exc}")
            raise typer.Exit(1) from exc

        if not flows:
            console.print("[yellow]Warning:[/yellow] No flows found on the remote instance.")
            return

        for flow_obj in flows:
            result = _write_flow(flow_obj, sdk=sdk, dest_dir=dest_dir, strip_secrets=strip_secrets, indent=indent)
            results.append(result)
            if result.status == "unchanged":
                console.print(f"[dim]Unchanged[/dim] {result.flow_name!r}")
            elif result.ok:
                console.print(f"[green]Pulled[/green] {result.flow_name!r} → {result.path}")
            else:
                console.print(f"[red]Failed[/red] {result.flow_name!r}: {result.error}")

    _render_results(results)

    # Non-zero exit when anything failed, so CI pipelines notice.
    if any(not r.ok for r in results):
        raise typer.Exit(1)
    
  • src/lfx/src/lfx/cli/push.py+419 0 added
    @@ -0,0 +1,419 @@
    +"""lfx push -- push normalized flow JSON to a remote Langflow instance.
    +
    +Uses stable flow IDs for upsert (PUT /api/v1/flows/{id}), so repeated pushes
    +are idempotent: the first push creates the flow, subsequent ones update it in
    +place without changing its ID on the remote instance.
    +
    +Usage examples
    +--------------
    +Push a single flow to staging::
    +
    +    lfx push my_flow.json --env staging
    +
    +Push several flows at once::
    +
    +    lfx push flows/*.json --env staging
    +
    +Push all flows in a directory and place them in a named project::
    +
    +    lfx push --dir ./flows/ --env staging --project "My RAG Pipeline"
    +
    +Dry-run to see what would happen::
    +
    +    lfx push ./flows/ --env production --dry-run
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +from dataclasses import dataclass
    +from pathlib import Path
    +from typing import Any
    +from uuid import UUID
    +
    +import typer
    +from rich.console import Console
    +from rich.table import Table
    +
    +from lfx.cli.common import load_sdk
    +
    +console = Console(stderr=True)
    +ok_console = Console()
    +
    +
    +# ---------------------------------------------------------------------------
    +# Result types
    +# ---------------------------------------------------------------------------
    +
    +
    +@dataclass
    +class PushResult:
    +    path: Path
    +    flow_id: UUID
    +    flow_name: str
    +    status: str  # "created" | "updated" | "unchanged" | "error" | "dry-run"
    +    error: str | None = None
    +    flow_url: str | None = None
    +
    +    @property
    +    def ok(self) -> bool:
    +        return self.status in ("created", "updated", "unchanged", "dry-run")
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _load_flow_file(path: Path) -> dict[str, Any]:
    +    """Read and parse a flow JSON file; raise Exit on error."""
    +    try:
    +        return json.loads(path.read_text(encoding="utf-8"))
    +    except (OSError, json.JSONDecodeError) as exc:
    +        console.print(f"[red]Error:[/red] Cannot read {path}: {exc}")
    +        raise typer.Exit(1) from exc
    +
    +
    +def _extract_flow_id(flow: dict[str, Any], path: Path) -> UUID:
    +    """Extract and validate the flow's stable ID from the JSON."""
    +    raw_id = flow.get("id")
    +    if not raw_id:
    +        console.print(f"[red]Error:[/red] {path} has no 'id' field. Run [bold]lfx export[/bold] first.")
    +        raise typer.Exit(1)
    +    try:
    +        return UUID(str(raw_id))
    +    except ValueError:
    +        console.print(f"[red]Error:[/red] {path} has an invalid 'id': {raw_id!r}")
    +        raise typer.Exit(1)  # noqa: B904
    +
    +
    +def _flow_to_create(sdk: Any, flow: dict[str, Any], folder_id: UUID | None) -> Any:
    +    """Build a FlowCreate from a normalized flow dict."""
    +    return sdk.FlowCreate(
    +        name=flow.get("name", "Untitled"),
    +        description=flow.get("description"),
    +        data=flow.get("data"),
    +        is_component=flow.get("is_component", False),
    +        endpoint_name=flow.get("endpoint_name"),
    +        tags=flow.get("tags"),
    +        folder_id=folder_id or (UUID(flow["folder_id"]) if flow.get("folder_id") else None),
    +        icon=flow.get("icon"),
    +        icon_bg_color=flow.get("icon_bg_color"),
    +        locked=flow.get("locked", False),
    +        mcp_enabled=flow.get("mcp_enabled", False),
    +    )
    +
    +
def _upsert_single(
    client: Any,
    sdk: Any,
    path: Path,
    flow_id: UUID,
    flow_create: Any,
    *,
    dry_run: bool,
    flow_name: str,
    base_url: str,
    local_file_content: str | None = None,
    strip_secrets: bool = True,
) -> PushResult:
    """Push one flow to the remote by stable ID (PUT upsert) and classify it.

    Returns a :class:`PushResult` with status ``"dry-run"``, ``"unchanged"``,
    ``"created"``, ``"updated"`` or ``"error"``; HTTP failures are captured
    in the result rather than raised.  When *local_file_content* is given,
    the remote copy is fetched and normalized first so a byte-identical flow
    is reported as ``"unchanged"`` without issuing a PUT.
    """
    flow_url = f"{base_url.rstrip('/')}/flow/{flow_id}"

    if dry_run:
        return PushResult(path=path, flow_id=flow_id, flow_name=flow_name, status="dry-run", flow_url=flow_url)

    # Compare normalized remote against local file to detect unchanged flows,
    # avoiding a spurious PUT when nothing has actually changed.
    # Import directly from serialization so this internal comparison is not
    # counted as a call to the public sdk.normalize_flow (keeps tests clean).
    if local_file_content is not None:
        try:
            # Use direct module imports (not sdk.*) so mock call-counts in tests
            # stay accurate and so the except clause uses a real exception class.
            from langflow_sdk.exceptions import LangflowNotFoundError
            from langflow_sdk.serialization import flow_to_json, normalize_flow

            remote = client.get_flow(flow_id)
            remote_normalized = normalize_flow(
                remote.model_dump(mode="json"),
                strip_volatile=True,
                strip_secrets=strip_secrets,
                sort_keys=True,
            )
            if flow_to_json(remote_normalized) == local_file_content:
                return PushResult(
                    path=path,
                    flow_id=flow_id,
                    flow_name=flow_name,
                    status="unchanged",
                    flow_url=flow_url,
                )
        except LangflowNotFoundError:
            pass  # Flow doesn't exist yet — fall through to create it
        except Exception:  # noqa: BLE001
            # The comparison is best-effort: any other failure (network blip,
            # schema drift) falls through to a normal push below.
            import logging

            logging.getLogger(__name__).debug("Remote comparison failed; proceeding with push", exc_info=True)

    try:
        _, created = client.upsert_flow(flow_id, flow_create)
        status = "created" if created else "updated"
        return PushResult(path=path, flow_id=flow_id, flow_name=flow_name, status=status, flow_url=flow_url)
    except sdk.LangflowHTTPError as exc:
        return PushResult(
            path=path,
            flow_id=flow_id,
            flow_name=flow_name,
            status="error",
            error=str(exc),
            flow_url=flow_url,
        )
    +
    +
    +def _find_or_create_project(
    +    client: Any,
    +    sdk: Any,
    +    project_name: str,
    +    *,
    +    dry_run: bool,
    +) -> UUID | None:
    +    """Return the UUID of a project with *project_name*, creating if needed.
    +
    +    Returns ``None`` in dry-run mode (project may not exist yet).
    +    """
    +    projects = client.list_projects()
    +    for p in projects:
    +        if p.name == project_name:
    +            console.print(f"[dim]Project[/dim] {project_name!r} found (id={p.id})")
    +            return p.id
    +
    +    if dry_run:
    +        console.print(f"[dim]Project[/dim] {project_name!r} would be created (dry-run)")
    +        return None
    +
    +    project = client.create_project(sdk.ProjectCreate(name=project_name))
    +    console.print(f"[green]Created project[/green] {project_name!r} (id={project.id})")
    +    return project.id
    +
    +
    +def _find_project_root() -> Path | None:
    +    """Return the lfx project root (directory containing .lfx/), or None.
    +
    +    Only the ``.lfx`` marker is used — ``.git`` is intentionally excluded so
    +    that the containment check doesn't reject paths when running inside a
    +    larger monorepo or outside any lfx project.
    +    """
    +    cwd = Path.cwd()
    +    for directory in (cwd, *cwd.parents):
    +        if (directory / ".lfx").is_dir():
    +            return directory
    +        # Stop at a filesystem root
    +        if directory.parent == directory:
    +            break
    +    return None
    +
    +
    +def _check_path_containment(p: Path, root: Path | None) -> None:
    +    """Ensure *p* is inside *root*; skip check when no project root is found."""
    +    if root is None:
    +        return
    +    try:
    +        p.resolve().relative_to(root.resolve())
    +    except ValueError:
    +        console.print(
    +            f"[red]Error:[/red] Path {p} is outside the project root ({root}). "
    +            "Refusing to push files from outside the project."
    +        )
    +        raise typer.Exit(1)  # noqa: B904
    +
    +
    +def _collect_flow_files(sources: list[str], dir_path: str | None) -> list[Path]:
    +    """Resolve the set of flow JSON files to push.
    +
    +    When neither explicit file paths nor ``--dir`` are given, defaults to
    +    ``flows/`` — mirroring the behaviour of ``lfx pull``.
    +    """
    +    paths: list[Path] = []
    +    root = _find_project_root()
    +
    +    # Default to flows/ when nothing is specified, just like lfx pull does.
    +    effective_dir = dir_path or (None if sources else "flows")
    +
    +    if effective_dir:
    +        d = Path(effective_dir)
    +        _check_path_containment(d, root)
    +        if not d.is_dir():
    +            console.print(f"[red]Error:[/red] Directory not found: {d}")
    +            raise typer.Exit(1)
    +        paths.extend(sorted(d.glob("*.json")))
    +        if not paths:
    +            console.print(f"[yellow]Warning:[/yellow] No *.json files found in {d}")
    +
    +    for s in sources:
    +        p = Path(s)
    +        _check_path_containment(p, root)
    +        if not p.exists():
    +            console.print(f"[red]Error:[/red] File not found: {p}")
    +            raise typer.Exit(1)
    +        if p.is_dir():
    +            dir_jsons = sorted(p.glob("*.json"))
    +            if not dir_jsons:
    +                console.print(f"[yellow]Warning:[/yellow] No *.json files found in {p}")
    +            paths.extend(dir_jsons)
    +        else:
    +            paths.append(p)
    +
    +    return paths
    +
    +
    +def _render_results(results: list[PushResult], *, dry_run: bool) -> None:
    +    table = Table(show_header=True, header_style="bold")
    +    table.add_column("File")
    +    table.add_column("Name")
    +    table.add_column("ID")
    +    table.add_column("Status")
    +    table.add_column("URL")
    +
    +    status_colors = {
    +        "created": "green",
    +        "updated": "cyan",
    +        "unchanged": "dim",
    +        "dry-run": "yellow",
    +        "error": "red",
    +    }
    +
    +    for r in results:
    +        color = status_colors.get(r.status, "white")
    +        label = r.status.upper() + (f": {r.error}" if r.error else "")
    +        url_cell = f"[blue]{r.flow_url}[/blue]" if r.flow_url and r.ok else (r.flow_url or "")
    +        table.add_row(
    +            str(r.path),
    +            r.flow_name,
    +            str(r.flow_id),
    +            f"[{color}]{label}[/{color}]",
    +            url_cell,
    +        )
    +
    +    ok_console.print()
    +    ok_console.print(table)
    +
    +    errors = [r for r in results if not r.ok]
    +    if errors:
    +        console.print(f"\n[red]{len(errors)} push(es) failed.[/red]")
    +    elif dry_run:
    +        ok_console.print(f"\n[yellow]{len(results)} flow(s) would be pushed (dry-run).[/yellow]")
    +    else:
    +        created = sum(1 for r in results if r.status == "created")
    +        updated = sum(1 for r in results if r.status == "updated")
    +        unchanged = sum(1 for r in results if r.status == "unchanged")
    +        parts = []
    +        if created:
    +            parts.append(f"[green]{created} created[/green]")
    +        if updated:
    +            parts.append(f"[cyan]{updated} updated[/cyan]")
    +        if unchanged:
    +            parts.append(f"[dim]{unchanged} unchanged[/dim]")
    +        ok_console.print("\n" + ", ".join(parts) + ".")
    +
    +
    +# ---------------------------------------------------------------------------
    +# CLI entry point
    +# ---------------------------------------------------------------------------
    +
    +
def push_command(
    flow_paths: list[str],
    *,
    env: str | None,
    dir_path: str | None,
    project: str | None,
    project_id: str | None,
    environments_file: str | None,
    target: str | None = None,
    api_key: str | None = None,
    dry_run: bool,
    normalize: bool,
    strip_secrets: bool,
) -> None:
    """Push local flow JSON files to a remote Langflow instance (upsert by flow ID).

    Resolves the target environment, collects flow files (explicit paths,
    ``--dir``, or the ``flows/`` default), optionally normalizes each flow,
    upserts it via the SDK client, prints a per-flow progress line, and
    finishes with a summary table.

    Raises:
        typer.Exit: on configuration errors, when no flow files are found,
            or when any individual push fails.
    """
    # Lazily load the SDK so the CLI stays fast for commands that don't need it.
    sdk = load_sdk("push")

    from lfx.config import ConfigError, resolve_environment

    try:
        env_cfg = resolve_environment(
            env,
            target=target,
            api_key=api_key,
            environments_file=environments_file,
        )
    except ConfigError as exc:
        console.print(f"[red]Error:[/red] {exc}")
        raise typer.Exit(1) from exc

    client = sdk.Client(base_url=env_cfg.url, api_key=env_cfg.api_key)

    paths = _collect_flow_files(flow_paths, dir_path)
    if not paths:
        console.print(
            "[red]Error:[/red] No *.json flow files found. "
            "Run [bold]lfx pull[/bold] first, or pass explicit file paths."
        )
        raise typer.Exit(1)

    # Resolve target project folder_id
    target_folder_id: UUID | None = None
    if project_id:
        target_folder_id = UUID(project_id)
    elif project:
        target_folder_id = _find_or_create_project(client, sdk, project, dry_run=dry_run)

    results: list[PushResult] = []

    for path in paths:
        raw_flow = _load_flow_file(path)

        if normalize:
            raw_flow = sdk.normalize_flow(
                raw_flow,
                strip_volatile=True,
                strip_secrets=strip_secrets,
                sort_keys=True,
            )

        flow_id = _extract_flow_id(raw_flow, path)
        flow_name = raw_flow.get("name", path.stem)
        flow_create = _flow_to_create(sdk, raw_flow, target_folder_id)
        # Capture normalized content now so _upsert_single can compare against remote.
        local_file_content = sdk.flow_to_json(raw_flow) if normalize else None

        result = _upsert_single(
            client,
            sdk,
            path,
            flow_id,
            flow_create,
            dry_run=dry_run,
            flow_name=flow_name,
            base_url=env_cfg.url,
            local_file_content=local_file_content,
            strip_secrets=strip_secrets,
        )
        results.append(result)

        # Per-flow progress line, colored by outcome.
        if dry_run:
            console.print(f"[yellow]DRY-RUN[/yellow] Would push {flow_name!r} ({flow_id})")
        elif result.status == "unchanged":
            console.print(f"[dim]Unchanged[/dim] {flow_name!r}")
        elif result.status == "created":
            url_hint = f"  [dim]{result.flow_url}[/dim]" if result.flow_url else ""
            console.print(f"[green]Created[/green]  {flow_name!r} ({flow_id}){url_hint}")
        elif result.status == "updated":
            url_hint = f"  [dim]{result.flow_url}[/dim]" if result.flow_url else ""
            console.print(f"[cyan]Updated[/cyan]  {flow_name!r} ({flow_id}){url_hint}")
        else:
            console.print(f"[red]Failed[/red]   {flow_name!r} ({flow_id}): {result.error}")

    _render_results(results, dry_run=dry_run)

    # Exit non-zero when anything failed so CI pipelines can gate on pushes.
    if any(not r.ok for r in results):
        raise typer.Exit(1)
    
  • src/lfx/src/lfx/cli/_remote_commands.py+322 0 added
    @@ -0,0 +1,322 @@
    +"""Remote commands: status, push, pull, export."""
    +
    +import typer
    +
    +
def register(app: typer.Typer) -> None:
    """Register remote-stage commands (status, push, pull, export) on *app*.

    Each wrapper only declares the typer CLI surface; the implementation
    module is imported inside the wrapper body so it loads lazily when the
    command actually runs.
    """

    @app.command(
        name="status", help="Compare local flow files against a remote Langflow instance", rich_help_panel="Remote"
    )
    def status_command_wrapper(
        flow_paths: list[str] = typer.Argument(
            default=None,
            help="Specific flow JSON file(s) to check. Omit to scan --dir (default: flows/).",
        ),
        env: str | None = typer.Option(
            None,
            "--env",
            "-e",
            help="Environment name from .lfx/environments.yaml. Uses [defaults] if omitted.",
        ),
        dir_path: str | None = typer.Option(
            None,
            "--dir",
            "-d",
            help="Directory of flow JSON files to compare (default: flows/ in cwd).",
        ),
        environments_file: str | None = typer.Option(
            None,
            "--environments-file",
            help="Path to environments config file (.yaml or .toml; overrides default lookup).",
        ),
        target: str | None = typer.Option(
            None,
            "--target",
            help="Langflow instance URL (inline override — skips config file lookup).",
        ),
        api_key: str | None = typer.Option(
            None,
            "--api-key",
            help="API key for the Langflow instance (used with --target or to override config).",
        ),
        show_remote_only: bool = typer.Option(
            False,
            "--remote-only",
            help="Also list flows that exist on the server but have no local file.",
        ),
    ) -> None:
        """Show whether local flow files are in sync, ahead, or missing vs the remote instance."""
        # Lazy import keeps CLI startup fast when this command isn't used.
        from lfx.cli.status import status_command

        status_command(
            dir_path=dir_path,
            flow_paths=flow_paths or [],
            env=env,
            environments_file=environments_file,
            target=target,
            api_key=api_key,
            show_remote_only=show_remote_only,
        )

    @app.command(
        name="push",
        help="Push flow JSON to a remote Langflow instance (upsert by stable ID)",
        rich_help_panel="Remote",
    )
    def push_command_wrapper(
        flow_paths: list[str] = typer.Argument(
            default=None,
            help="Path(s) to flow JSON file(s) to push. Use --dir for a whole directory.",
        ),
        env: str | None = typer.Option(
            None,
            "--env",
            "-e",
            help="Environment name from .lfx/environments.yaml. Use --target for inline configuration.",
        ),
        dir_path: str | None = typer.Option(
            None,
            "--dir",
            "-d",
            help="Directory of flow JSON files to push (pushes all *.json files). Defaults to flows/.",
        ),
        project: str | None = typer.Option(
            None,
            "--project",
            "-p",
            help="Target project name on the remote instance. Created if it does not exist.",
        ),
        project_id: str | None = typer.Option(
            None,
            "--project-id",
            help="Target project UUID (alternative to --project).",
        ),
        environments_file: str | None = typer.Option(
            None,
            "--environments-file",
            help="Path to environments config file (.yaml or .toml; overrides default lookup).",
        ),
        target: str | None = typer.Option(
            None,
            "--target",
            help="Langflow instance URL (inline override — skips config file lookup).",
        ),
        api_key: str | None = typer.Option(
            None,
            "--api-key",
            help="API key for the Langflow instance (used with --target or to override config).",
        ),
        dry_run: bool = typer.Option(
            False,
            "--dry-run",
            help="Show what would be pushed without making any changes.",
        ),
        normalize: bool = typer.Option(
            True,
            "--normalize/--no-normalize",
            help="Normalize (strip volatile fields, sort keys) before pushing.",
        ),
        strip_secrets: bool = typer.Option(
            True,
            "--strip-secrets/--keep-secrets",
            help="Clear password/load_from_db field values before pushing.",
        ),
    ) -> None:
        """Push Langflow flows to a remote instance using stable IDs for upsert (lazy-loaded)."""
        from lfx.cli.push import push_command

        push_command(
            flow_paths=flow_paths or [],
            env=env,
            dir_path=dir_path,
            project=project,
            project_id=project_id,
            environments_file=environments_file,
            target=target,
            api_key=api_key,
            dry_run=dry_run,
            normalize=normalize,
            strip_secrets=strip_secrets,
        )

    @app.command(
        name="pull", help="Pull flows from a remote Langflow instance to local files", rich_help_panel="Remote"
    )
    def pull_command_wrapper(
        env: str | None = typer.Option(
            None,
            "--env",
            "-e",
            help="Environment name from .lfx/environments.yaml. Uses [defaults] if omitted.",
        ),
        output_dir: str | None = typer.Option(
            None,
            "--output-dir",
            "-d",
            help="Directory to write pulled flows into (default: flows/).",
        ),
        flow_id: str | None = typer.Option(
            None,
            "--flow-id",
            help="Pull a single flow by UUID.",
        ),
        project: str | None = typer.Option(
            None,
            "--project",
            "-p",
            help="Pull all flows in a named project.",
        ),
        project_id: str | None = typer.Option(
            None,
            "--project-id",
            help="Pull all flows in a project by UUID.",
        ),
        environments_file: str | None = typer.Option(
            None,
            "--environments-file",
            help="Path to environments config file (.yaml or .toml; overrides default lookup).",
        ),
        target: str | None = typer.Option(
            None,
            "--target",
            help="Langflow instance URL (inline override — skips config file lookup).",
        ),
        api_key: str | None = typer.Option(
            None,
            "--api-key",
            help="API key for the Langflow instance (used with --target or to override config).",
        ),
        strip_secrets: bool = typer.Option(
            True,
            "--strip-secrets/--keep-secrets",
            help="Clear password/load_from_db field values (default: strip).",
        ),
        indent: int = typer.Option(
            2,
            "--indent",
            help="JSON indentation level.",
        ),
    ) -> None:
        """Pull and normalize flows from a remote Langflow instance (lazy-loaded)."""
        from lfx.cli.pull import pull_command

        pull_command(
            env=env,
            output_dir=output_dir,
            flow_id=flow_id,
            project=project,
            project_id=project_id,
            environments_file=environments_file,
            target=target,
            api_key=api_key,
            strip_secrets=strip_secrets,
            indent=indent,
        )

    @app.command(
        name="export",
        help="Normalize flow JSON for git (local) or pull from a remote instance",
        rich_help_panel="Remote",
    )
    def export_command_wrapper(
        flow_paths: list[str] = typer.Argument(
            default=None,
            help="Path(s) to local flow JSON file(s) to normalize. Omit when using --flow-id or --project-id.",
        ),
        output: str | None = typer.Option(
            None,
            "--output",
            "-o",
            help="Output file path (single-file local mode only).",
        ),
        output_dir: str | None = typer.Option(
            None,
            "--output-dir",
            "-d",
            help="Directory to write exported flows into (remote mode or multi-file).",
        ),
        env: str | None = typer.Option(
            None,
            "--env",
            "-e",
            help="Environment name from .lfx/environments.yaml (required for remote mode unless --target is used).",
        ),
        flow_id: str | None = typer.Option(
            None,
            "--flow-id",
            help="Pull and export a single flow by UUID from the remote instance.",
        ),
        project_id: str | None = typer.Option(
            None,
            "--project-id",
            help="Pull and export all flows in a project by UUID from the remote instance.",
        ),
        environments_file: str | None = typer.Option(
            None,
            "--environments-file",
            help="Path to environments config file (.yaml or .toml; overrides default lookup).",
        ),
        target: str | None = typer.Option(
            None,
            "--target",
            help="Langflow instance URL (inline override — skips config file lookup).",
        ),
        api_key: str | None = typer.Option(
            None,
            "--api-key",
            help="API key for the Langflow instance (used with --target or to override config).",
        ),
        in_place: bool = typer.Option(
            False,
            "--in-place",
            "-i",
            help="Overwrite each input file with its normalized version.",
        ),
        strip_volatile: bool = typer.Option(
            True,
            "--strip-volatile/--keep-volatile",
            help="Strip instance-specific fields (updated_at, user_id, folder_id).",
        ),
        strip_secrets: bool = typer.Option(
            True,
            "--strip-secrets/--keep-secrets",
            help="Clear values of password/load_from_db template fields.",
        ),
        code_as_lines: bool = typer.Option(
            False,
            "--code-as-lines",
            help="Convert code-type template field values to a list of lines.",
        ),
        strip_node_volatile: bool = typer.Option(
            True,
            "--strip-node-volatile/--keep-node-volatile",
            help="Strip transient node keys (positionAbsolute, dragging, selected).",
        ),
        indent: int = typer.Option(
            2,
            "--indent",
            help="JSON indentation level.",
        ),
    ) -> None:
        """Export and normalize Langflow flow JSON for version control (lazy-loaded)."""
        from lfx.cli.export import export_command

        export_command(
            flow_paths=flow_paths or [],
            output=output,
            output_dir=output_dir,
            env=env,
            flow_id=flow_id,
            project_id=project_id,
            environments_file=environments_file,
            target=target,
            api_key=api_key,
            in_place=in_place,
            strip_volatile=strip_volatile,
            strip_secrets=strip_secrets,
            code_as_lines=code_as_lines,
            strip_node_volatile=strip_node_volatile,
            indent=indent,
        )
    
  • src/lfx/src/lfx/cli/_running_commands.py+143 0 added
    @@ -0,0 +1,143 @@
    +"""Running commands: run, serve."""
    +
    +import typer
    +
    +
def register(app: typer.Typer) -> None:
    """Register running-stage commands (run, serve) on *app*.

    Wrappers declare the typer CLI surface only; the heavy implementation
    modules are imported inside the wrapper bodies so they load lazily.
    """

    @app.command(name="run", help="Run a flow directly", no_args_is_help=True, rich_help_panel="Running")
    def run_command_wrapper(
        script_path: str | None = typer.Argument(
            None, help="Path to the Python script (.py) or JSON flow (.json) containing a graph"
        ),
        input_value: str | None = typer.Argument(None, help="Input value to pass to the graph"),
        input_value_option: str | None = typer.Option(
            None,
            "--input-value",
            help="Input value to pass to the graph (alternative to positional argument)",
        ),
        output_format: str = typer.Option(
            "json",
            "--format",
            "-f",
            help="Output format: json, text, message, or result",
        ),
        flow_json: str | None = typer.Option(
            None,
            "--flow-json",
            help="Inline JSON flow content as a string (alternative to script_path)",
        ),
        *,
        stdin: bool = typer.Option(
            default=False,
            show_default=True,
            help="Read JSON flow content from stdin (alternative to script_path)",
        ),
        check_variables: bool = typer.Option(
            default=True,
            show_default=True,
            help="Check global variables for environment compatibility",
        ),
        verbose: bool = typer.Option(
            False,
            "-v",
            "--verbose",
            help="Show basic progress information",
        ),
        verbose_detailed: bool = typer.Option(
            False,
            "-vv",
            help="Show detailed progress and debug information",
        ),
        verbose_full: bool = typer.Option(
            False,
            "-vvv",
            help="Show full debugging output including component logs",
        ),
        timing: bool = typer.Option(
            default=False,
            show_default=True,
            help="Include detailed timing information in output",
        ),
    ) -> None:
        """Run a flow directly (lazy-loaded)."""
        from pathlib import Path

        from lfx.cli.run import run

        # Convert script_path string to Path if provided
        script_path_obj = Path(script_path) if script_path else None

        run(
            script_path=script_path_obj,
            input_value=input_value,
            input_value_option=input_value_option,
            output_format=output_format,
            flow_json=flow_json,
            stdin=stdin,
            check_variables=check_variables,
            verbose=verbose,
            verbose_detailed=verbose_detailed,
            verbose_full=verbose_full,
            timing=timing,
        )

    @app.command(name="serve", help="Serve a flow as an API", no_args_is_help=True, rich_help_panel="Running")
    def serve_command_wrapper(
        script_path: str | None = typer.Argument(
            None,
            help=(
                "Path to JSON flow (.json) or Python script (.py) file or stdin input. "
                "Optional when using --flow-json or --stdin."
            ),
        ),
        host: str = typer.Option("127.0.0.1", "--host", "-h", help="Host to bind the server to"),
        port: int = typer.Option(8000, "--port", "-p", help="Port to bind the server to"),
        verbose: bool = typer.Option(False, "--verbose", "-v", help="Show diagnostic output and execution details"),
        env_file: str | None = typer.Option(
            None,
            "--env-file",
            help="Path to the .env file containing environment variables",
        ),
        log_level: str = typer.Option(
            "warning",
            "--log-level",
            help="Logging level. One of: debug, info, warning, error, critical",
        ),
        flow_json: str | None = typer.Option(
            None,
            "--flow-json",
            help="Inline JSON flow content as a string (alternative to script_path)",
        ),
        *,
        stdin: bool = typer.Option(
            False,
            "--stdin",
            help="Read JSON flow content from stdin (alternative to script_path)",
        ),
        check_variables: bool = typer.Option(
            True,
            "--check-variables/--no-check-variables",
            help="Check global variables for environment compatibility",
        ),
    ) -> None:
        """Serve LFX flows as a web API (lazy-loaded)."""
        from pathlib import Path

        from lfx.cli.commands import serve_command

        # Convert env_file string to Path if provided
        env_file_path = Path(env_file) if env_file else None

        serve_command(
            script_path=script_path,
            host=host,
            port=port,
            verbose=verbose,
            env_file=env_file_path,
            log_level=log_level,
            flow_json=flow_json,
            stdin=stdin,
            check_variables=check_variables,
        )
    
  • src/lfx/src/lfx/cli/_setup_commands.py+75 0 added
    @@ -0,0 +1,75 @@
    +"""Setup commands: init, login."""
    +
    +import typer
    +
    +
def register(app: typer.Typer) -> None:
    """Register setup-stage commands (init, login) on *app*.

    Implementation modules are imported inside the wrappers so they are
    loaded only when the corresponding command is actually invoked.
    """

    @app.command(name="init", help="Scaffold a new Flow DevOps project", rich_help_panel="Setup")
    def init_command_wrapper(
        project_dir: str = typer.Argument(
            ".",
            help="Directory to scaffold (created if it does not exist; default: current directory).",
        ),
        github_actions: bool = typer.Option(
            True,
            "--github-actions/--no-github-actions",
            help="Copy GitHub Actions workflow templates into .github/workflows/.",
        ),
        overwrite: bool = typer.Option(
            False,
            "--overwrite",
            help="Write files even if the target directory already contains files.",
        ),
        example: bool = typer.Option(
            True,
            "--example/--no-example",
            help="Seed flows/ with a hello-world.json starter flow (default: true).",
        ),
    ) -> None:
        """Scaffold a Flow DevOps project: flows/, tests/, environments config, and CI templates."""
        from pathlib import Path

        from lfx.cli.init import init_command

        init_command(
            project_dir=Path(project_dir),
            github_actions=github_actions,
            overwrite=overwrite,
            example=example,
        )

    @app.command(name="login", help="Validate credentials against a remote Langflow instance", rich_help_panel="Setup")
    def login_command_wrapper(
        env: str | None = typer.Option(
            None,
            "--env",
            "-e",
            help="Environment name from .lfx/environments.yaml. Uses [defaults] if omitted.",
        ),
        environments_file: str | None = typer.Option(
            None,
            "--environments-file",
            help="Path to environments config file (.yaml or .toml; overrides default lookup).",
        ),
        target: str | None = typer.Option(
            None,
            "--target",
            help="Langflow instance URL (inline override — skips config file lookup).",
        ),
        api_key: str | None = typer.Option(
            None,
            "--api-key",
            help="API key for the Langflow instance (used with --target or to override config).",
        ),
    ) -> None:
        """Test connectivity and authentication for a Langflow environment (lazy-loaded)."""
        from lfx.cli.login import login_command

        login_command(
            env=env,
            environments_file=environments_file,
            target=target,
            api_key=api_key,
        )
    
  • src/lfx/src/lfx/cli/status.py+288 0 added
    @@ -0,0 +1,288 @@
    +"""lfx status -- compare local flow files against a remote Langflow instance.
    +
    +Shows, for each local flow JSON, whether it is in sync with the remote,
    +ahead (locally modified), brand new (not yet pushed), or missing entirely.
    +Optionally surfaces flows that exist on the server but have no local file.
    +
    +Examples::
    +
    +    lfx status                          # scans flows/ in cwd, uses [defaults] env
    +    lfx status --env staging            # compare against staging
    +    lfx status --dir ./my-flows/        # specify a custom flows directory
    +    lfx status --env prod --remote-only # also show server flows not tracked locally
    +"""
    +
    +from __future__ import annotations
    +
    +import contextlib
    +import hashlib
    +import json
    +from dataclasses import dataclass, field
    +from datetime import datetime, timezone
    +from pathlib import Path
    +from uuid import UUID
    +
    +import typer
    +from rich import box
    +from rich.console import Console
    +from rich.table import Table
    +
    +from lfx.cli.common import load_sdk
    +
    +console = Console()
    +
    +# ---------------------------------------------------------------------------
    +# Data model
    +# ---------------------------------------------------------------------------
    +
    +_STATUS_SYNCED = "synced"
    +_STATUS_AHEAD = "ahead"
    +_STATUS_BEHIND = "behind"
    +_STATUS_NEW = "new"
    +_STATUS_REMOTE_ONLY = "remote-only"
    +_STATUS_NO_ID = "no-id"
    +_STATUS_ERROR = "error"
    +
    +_STATUS_STYLE: dict[str, tuple[str, str, str]] = {
    +    _STATUS_SYNCED: ("✓", "green", "synced"),
    +    _STATUS_AHEAD: ("↑", "yellow", "ahead"),
    +    _STATUS_BEHIND: ("↓", "yellow", "behind"),
    +    _STATUS_NEW: ("+", "cyan", "new"),
    +    _STATUS_REMOTE_ONLY: ("○", "blue", "remote only"),
    +    _STATUS_NO_ID: ("?", "dim", "no id"),
    +    _STATUS_ERROR: ("✗", "red", "error"),
    +}
    +
    +
    +@dataclass
    +class FlowStatus:
    +    name: str
    +    status: str
    +    path: Path | None = None
    +    flow_id: UUID | None = None
    +    detail: str = field(default="")
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _load_sdk() -> tuple[object, object, object, type]:
    +    """Return (normalize_flow, flow_to_json, Client, LangflowNotFoundError) from langflow_sdk."""
    +    sdk = load_sdk("status")
    +    from langflow_sdk.exceptions import LangflowNotFoundError
    +    from langflow_sdk.serialization import flow_to_json, normalize_flow
    +
    +    return normalize_flow, flow_to_json, sdk.Client, LangflowNotFoundError
    +
    +
    +def _flow_hash(flow_dict: dict, normalize_flow: object, flow_to_json: object) -> str:
    +    """Return a short deterministic hash of the normalized flow content."""
    +    normalized = normalize_flow(flow_dict)  # type: ignore[operator]
    +    content = flow_to_json(normalized)  # type: ignore[operator]
    +    return hashlib.sha256(content.encode()).hexdigest()[:12]
    +
    +
    +def _collect_files(dir_path: str | None, flow_paths: list[str]) -> list[Path]:
    +    """Resolve the set of local flow files to examine."""
    +    files: list[Path] = []
    +
    +    if dir_path:
    +        d = Path(dir_path)
    +        if not d.is_dir():
    +            msg = f"Directory not found: {d}"
    +            console.print(f"[red]Error:[/red] {msg}")
    +            raise typer.Exit(1)
    +        files.extend(sorted(d.glob("*.json")))
    +    elif flow_paths:
    +        files.extend(Path(p) for p in flow_paths)
    +    else:
    +        # Default: flows/ in cwd
    +        default = Path.cwd() / "flows"
    +        if default.is_dir():
    +            files.extend(sorted(default.glob("*.json")))
    +
    +    return files
    +
    +
    +def _render_table(statuses: list[FlowStatus], env_label: str) -> None:
    +    table = Table(
    +        box=box.SIMPLE,
    +        show_header=True,
    +        header_style="bold",
    +        title=f"Flow status vs [bold]{env_label}[/bold]",
    +        title_justify="left",
    +    )
    +    table.add_column("Flow", min_width=24)
    +    table.add_column("ID", style="dim", min_width=10)
    +    table.add_column("File", style="dim")
    +    table.add_column("Status", min_width=14)
    +
    +    for s in statuses:
    +        icon, color, label = _STATUS_STYLE.get(s.status, ("?", "dim", s.status))
    +        detail_str = f"  [dim]({s.detail})[/dim]" if s.detail else ""
    +        id_str = str(s.flow_id)[:8] + "…" if s.flow_id else "—"
    +        file_str = s.path.name if s.path else "—"
    +        table.add_row(
    +            s.name,
    +            id_str,
    +            file_str,
    +            f"[{color}]{icon} {label}[/{color}]{detail_str}",
    +        )
    +
    +    console.print(table)
    +
    +    # Summary line
    +    counts: dict[str, int] = {}
    +    for s in statuses:
    +        counts[s.status] = counts.get(s.status, 0) + 1
    +
    +    parts = []
    +    for status, (_, color, label) in _STATUS_STYLE.items():
    +        if counts.get(status):
    +            parts.append(f"[{color}]{counts[status]} {label}[/{color}]")
    +
    +    if parts:
    +        console.print("  " + "  ·  ".join(parts))
    +        console.print()
    +
    +
    +# ---------------------------------------------------------------------------
    +# Command
    +# ---------------------------------------------------------------------------
    +
    +
def status_command(
    dir_path: str | None,
    flow_paths: list[str],
    env: str | None,
    environments_file: str | None,
    *,
    target: str | None = None,
    api_key: str | None = None,
    show_remote_only: bool,
) -> None:
    """Compare local flow files against the remote instance and render a status table.

    Args:
        dir_path: Directory of flow JSON files (``--dir``); wins over flow_paths.
        flow_paths: Explicit flow file paths, used when dir_path is not given.
        env: Environment name from the environments config; None uses defaults.
        environments_file: Explicit path to the environments config file.
        target: Inline Langflow URL override (skips config file lookup).
        api_key: API key override, used with ``target`` or over the config value.
        show_remote_only: Also list server flows that have no local file.

    Exits 1 when configuration fails, the client cannot be created, or any
    flow is out of sync (so CI pipelines can detect drift); exits 0 otherwise.
    """
    normalize_flow, flow_to_json, client_cls, not_found_error = _load_sdk()

    from lfx.config import ConfigError, resolve_environment

    # Resolve which Langflow instance to talk to: config file lookup, with
    # --target/--api-key taking precedence when supplied.
    try:
        env_cfg = resolve_environment(
            env,
            target=target,
            api_key=api_key,
            environments_file=environments_file,
        )
    except ConfigError as exc:
        console.print(f"[red]Error:[/red] {exc}")
        raise typer.Exit(1) from exc

    try:
        client = client_cls(base_url=env_cfg.url, api_key=env_cfg.api_key)
    except Exception as exc:
        console.print(f"[red]Error:[/red] Could not create client for {env_cfg.url!r}: {exc}")
        raise typer.Exit(1) from exc

    # From here on the client is open; the finally block below closes it even
    # when we bail out early via typer.Exit.
    try:
        env_label = env_cfg.name
        local_files = _collect_files(dir_path, flow_paths)

        if not local_files and not show_remote_only:
            console.print("[yellow]No flow files found.[/yellow] Use [bold]--dir[/bold] to specify a directory.")
            raise typer.Exit(0)

        statuses: list[FlowStatus] = []
        # Remote ids already matched to a local file, so the remote-only pass
        # below does not report them twice.
        seen_ids: set[UUID] = set()

        # ------------------------------------------------------------------ #
        # Check each local file                                               #
        # ------------------------------------------------------------------ #
        for path in local_files:
            if not path.exists():
                statuses.append(FlowStatus(name=path.name, status=_STATUS_ERROR, path=path, detail="file not found"))
                continue

            try:
                raw: dict = json.loads(path.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError) as exc:
                statuses.append(FlowStatus(name=path.name, status=_STATUS_ERROR, path=path, detail=str(exc)))
                continue

            name: str = raw.get("name", path.stem)
            raw_id = raw.get("id")

            # A file without an id has never been associated with the remote.
            if not raw_id:
                statuses.append(
                    FlowStatus(
                        name=name,
                        status=_STATUS_NO_ID,
                        path=path,
                        detail="run lfx export --env <env> first to assign a stable id",
                    )
                )
                continue

            try:
                flow_id = UUID(str(raw_id))
            except ValueError:
                statuses.append(
                    FlowStatus(name=name, status=_STATUS_ERROR, path=path, detail=f"invalid id: {raw_id!r}")
                )
                continue

            seen_ids.add(flow_id)

            # A 404 from the server means the flow exists locally but was
            # never pushed -> "new". Any other failure is reported as-is.
            try:
                remote_flow = client.get_flow(flow_id)
            except not_found_error:
                statuses.append(FlowStatus(name=name, status=_STATUS_NEW, path=path, flow_id=flow_id))
                continue
            except Exception as exc:  # noqa: BLE001
                statuses.append(
                    FlowStatus(name=name, status=_STATUS_ERROR, path=path, flow_id=flow_id, detail=str(exc))
                )
                continue

            # Compare normalized content fingerprints rather than raw bytes so
            # cosmetic serialization differences do not count as drift.
            local_hash = _flow_hash(raw, normalize_flow, flow_to_json)
            remote_hash = _flow_hash(remote_flow.model_dump(mode="json"), normalize_flow, flow_to_json)

            if local_hash == remote_hash:
                statuses.append(FlowStatus(name=name, status=_STATUS_SYNCED, path=path, flow_id=flow_id))
            else:
                # Heuristic direction check: newer remote timestamp than the
                # local file mtime -> "behind", otherwise "ahead". NOTE(review):
                # assumes naive remote timestamps are UTC — confirm with server.
                remote_updated_at: datetime | None = remote_flow.updated_at
                local_mtime = datetime.fromtimestamp(path.stat().st_mtime, tz=timezone.utc)
                if remote_updated_at and remote_updated_at.tzinfo is None:
                    remote_updated_at = remote_updated_at.replace(tzinfo=timezone.utc)

                status = _STATUS_BEHIND if remote_updated_at and remote_updated_at > local_mtime else _STATUS_AHEAD
                statuses.append(FlowStatus(name=name, status=status, path=path, flow_id=flow_id))

        # ------------------------------------------------------------------ #
        # Remote-only flows                                                   #
        # ------------------------------------------------------------------ #
        if show_remote_only:
            try:
                all_remote = client.list_flows(get_all=True)
                statuses.extend(
                    FlowStatus(
                        name=remote_flow.name,
                        status=_STATUS_REMOTE_ONLY,
                        flow_id=remote_flow.id,
                    )
                    for remote_flow in all_remote
                    if remote_flow.id not in seen_ids
                )
            except Exception as exc:  # noqa: BLE001
                # Best-effort: the local comparison above is still valid.
                console.print(f"[yellow]Warning:[/yellow] Could not list remote flows: {exc}")
    finally:
        with contextlib.suppress(OSError):
            client.close()

    _render_table(statuses, env_label)

    # Exit 1 when anything is out of sync so CI pipelines can detect drift
    not_clean = [s for s in statuses if s.status not in (_STATUS_SYNCED,)]
    if not_clean:
        raise typer.Exit(1)
    
  • src/lfx/src/lfx/cli/validate.py+60 0 added
    @@ -0,0 +1,60 @@
    +"""lfx validate -- structural and semantic validation of Langflow flow JSON.
    +
    +Validation levels (each level implies all levels below it):
    +
    +    Level 1 - structural
    +        The file parses as valid JSON and contains the expected top-level keys
    +        (``id``, ``name``, ``data``, ``data.nodes``, ``data.edges``).
    +        Also checks for orphaned nodes (no edges at all) and unused nodes
    +        (not reachable from any output node), and warns about version mismatches
    +        (nodes built with a different Langflow version than the one installed).
    +
    +    Level 2 - components
    +        Every node's ``data.type`` references a component type that exists in
    +        the lfx component registry.
    +
    +    Level 3 - edge types
    +        Connected ports carry compatible types (source output type must be
    +        assignable to target input type).
    +
    +    Level 4 - required inputs
    +        Every required input field on every component has a value or an
    +        incoming edge connected to it.  Also checks that password/secret fields
    +        have a value or a matching environment variable set.
    +
    +Use ``--level`` to select how deep to go, or ``--skip-*`` flags to opt out of
    +individual checks while still running the others.
    +
    +Pass ``--strict`` to treat warnings as errors (exit code 1).
    +
    +This module is a thin wrapper that re-exports symbols from the
    +``lfx.cli.validation`` subpackage so that all existing imports continue
    +to work unchanged.
    +"""
    +
    +from __future__ import annotations
    +
    +# Re-export everything from the validation subpackage.
    +# This keeps ``from lfx.cli.validate import ...`` working for all consumers
    +# (the CLI entry point in __main__.py, tests, etc.).
    +from lfx.cli.validation import (  # noqa: F401
    +    ValidationIssue,
    +    ValidationResult,
    +    _check_missing_credentials,
    +    _check_orphaned_nodes,
    +    _check_unused_nodes,
    +    _check_version_mismatch,
    +    _expand_paths,
    +    _get_lf_version,
    +    _node_display_name,
    +    validate_command,
    +    validate_flow_file,
    +)
    +
    +# Level constants (re-exported for backwards compatibility)
    +from lfx.cli.validation.core import (  # noqa: F401
    +    _LEVEL_COMPONENTS,
    +    _LEVEL_EDGE_TYPES,
    +    _LEVEL_REQUIRED_INPUTS,
    +    _LEVEL_STRUCTURAL,
    +)
    
  • src/lfx/src/lfx/cli/validation/core.py+300 0 added
    @@ -0,0 +1,300 @@
    +"""Core validation types, constants, and orchestrator for flow validation.
    +
    +Contains the ValidationIssue and ValidationResult dataclasses, level constants,
    +the main validate_flow_file orchestrator, and CLI rendering / path helpers.
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +import sys
    +from dataclasses import dataclass, field
    +from pathlib import Path
    +from typing import Any
    +
    +import typer
    +from rich.console import Console
    +
    +from lfx.cli.validation.semantic import (
    +    _check_component_existence,
    +    _check_edge_type_compatibility,
    +    _check_missing_credentials,
    +    _check_required_inputs,
    +)
    +from lfx.cli.validation.structural import (
    +    _check_orphaned_nodes,
    +    _check_structural,
    +    _check_unused_nodes,
    +    _check_version_mismatch,
    +)
    +
    +console = Console(stderr=True)
    +ok_console = Console()
    +
    +# Validation level constants
    +LEVEL_STRUCTURAL = 1
    +LEVEL_COMPONENTS = 2
    +LEVEL_EDGE_TYPES = 3
    +LEVEL_REQUIRED_INPUTS = 4
    +
    +# Keep underscore-prefixed aliases for backwards compatibility
    +_LEVEL_STRUCTURAL = LEVEL_STRUCTURAL
    +_LEVEL_COMPONENTS = LEVEL_COMPONENTS
    +_LEVEL_EDGE_TYPES = LEVEL_EDGE_TYPES
    +_LEVEL_REQUIRED_INPUTS = LEVEL_REQUIRED_INPUTS
    +
    +
    +# ---------------------------------------------------------------------------
    +# Result types
    +# ---------------------------------------------------------------------------
    +
    +
    +@dataclass
    +class ValidationIssue:
    +    level: int
    +    severity: str  # "error" | "warning"
    +    node_id: str | None
    +    node_name: str | None
    +    message: str
    +
    +
    +@dataclass
    +class ValidationResult:
    +    path: Path
    +    issues: list[ValidationIssue] = field(default_factory=list)
    +
    +    @property
    +    def errors(self) -> list[ValidationIssue]:
    +        return [i for i in self.issues if i.severity == "error"]
    +
    +    @property
    +    def warnings(self) -> list[ValidationIssue]:
    +        return [i for i in self.issues if i.severity == "warning"]
    +
    +    @property
    +    def ok(self) -> bool:
    +        return not self.errors
    +
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _node_display_name(node: dict[str, Any]) -> str | None:
    +    return node.get("data", {}).get("node", {}).get("display_name") or node.get("data", {}).get("id") or node.get("id")
    +
    +
    +def _get_lf_version() -> str | None:
    +    """Return the installed Langflow version string, or *None* if not installed.
    +
    +    Tries the four known package names in order of preference so the check
    +    works with released builds, nightly builds, and editable installs.
    +    """
    +    from importlib.metadata import PackageNotFoundError, version
    +
    +    for pkg in ("langflow-base", "langflow", "langflow-base-nightly", "langflow-nightly"):
    +        try:
    +            return version(pkg)
    +        except PackageNotFoundError:
    +            continue
    +    return None
    +
    +
    +# ---------------------------------------------------------------------------
    +# Orchestrator
    +# ---------------------------------------------------------------------------
    +
    +
    +def validate_flow_file(
    +    path: Path,
    +    *,
    +    level: int = LEVEL_REQUIRED_INPUTS,
    +    skip_components: bool = False,
    +    skip_edge_types: bool = False,
    +    skip_required_inputs: bool = False,
    +    skip_version_check: bool = False,
    +    skip_credentials: bool = False,
    +) -> ValidationResult:
    +    result = ValidationResult(path=path)
    +
    +    try:
    +        raw = path.read_text(encoding="utf-8")
    +        flow: dict[str, Any] = json.loads(raw)
    +    except OSError as exc:
    +        result.issues.append(
    +            ValidationIssue(
    +                level=LEVEL_STRUCTURAL,
    +                severity="error",
    +                node_id=None,
    +                node_name=None,
    +                message=f"Cannot read file: {exc}",
    +            )
    +        )
    +        return result
    +    except json.JSONDecodeError as exc:
    +        result.issues.append(
    +            ValidationIssue(
    +                level=LEVEL_STRUCTURAL,
    +                severity="error",
    +                node_id=None,
    +                node_name=None,
    +                message=f"Invalid JSON: {exc}",
    +            )
    +        )
    +        return result
    +
    +    # Level 1 - structural (JSON shape + orphaned/unused node checks)
    +    can_continue = _check_structural(flow, result)
    +    if can_continue:
    +        _check_orphaned_nodes(flow, result)
    +        _check_unused_nodes(flow, result)
    +        # Extended: version mismatch / outdated components
    +        if not skip_version_check:
    +            _check_version_mismatch(flow, result)
    +    if not can_continue or level < LEVEL_COMPONENTS:
    +        return result
    +
    +    # Level 2 - component existence
    +    if not skip_components:
    +        _check_component_existence(flow, result)
    +    if level < LEVEL_EDGE_TYPES:
    +        return result
    +
    +    # Level 3 - edge type compatibility
    +    if not skip_edge_types:
    +        _check_edge_type_compatibility(flow, result)
    +    if level < LEVEL_REQUIRED_INPUTS:
    +        return result
    +
    +    # Level 4 - required inputs + extended: missing credentials
    +    if not skip_required_inputs:
    +        _check_required_inputs(flow, result)
    +    if not skip_credentials:
    +        _check_missing_credentials(flow, result)
    +
    +    return result
    +
    +
    +# ---------------------------------------------------------------------------
    +# CLI rendering and path helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _render_result(
    +    result: ValidationResult,
    +    *,
    +    index: int,
    +    total: int,
    +    verbose: bool,
    +    strict: bool = False,
    +) -> None:
    +    counter = f"[dim][{index}/{total}][/dim] " if total > 1 else ""
    +    label = f"[bold]{result.path}[/bold]"
    +    passes = result.ok and not (strict and result.warnings)
    +    if passes:
    +        ok_console.print(f"{counter}[green]\u2713[/green] {label}")
    +    else:
    +        console.print(f"{counter}[red]\u2717[/red] {label}")
    +
    +    show_issues = verbose or not passes
    +    if show_issues:
    +        for issue in result.issues:
    +            effective_severity = "error" if (strict and issue.severity == "warning") else issue.severity
    +            color = "red" if effective_severity == "error" else "yellow"
    +            loc = f" [{issue.node_name or issue.node_id}]" if (issue.node_id or issue.node_name) else ""
    +            console.print(f"  [{color}][L{issue.level} {effective_severity.upper()}][/{color}]{loc} {issue.message}")
    +
    +
    +def _expand_paths(raw_paths: list[str]) -> list[Path]:
    +    """Expand each entry to a list of .json files.
    +
    +    * If the path is a directory, collect every ``*.json`` file recursively.
    +    * If the path is a file, return it as-is.
    +    * If the path does not exist, print an error and exit 2.
    +    """
    +    paths: list[Path] = []
    +    for raw in raw_paths:
    +        p = Path(raw)
    +        if not p.exists():
    +            console.print(f"[red]Error:[/red] Path not found: {p}")
    +            raise typer.Exit(2)
    +        if p.is_dir():
    +            found = sorted(p.rglob("*.json"))
    +            if not found:
    +                console.print(f"[yellow]Warning:[/yellow] No .json files found in {p}")
    +            paths.extend(found)
    +        else:
    +            paths.append(p)
    +    return paths
    +
    +
# Directory scanned when the user supplies no paths on the command line.
_DEFAULT_FLOWS_DIR = "flows"


def validate_command(
    flow_paths: list[str],
    level: int,
    *,
    skip_components: bool,
    skip_edge_types: bool,
    skip_required_inputs: bool,
    skip_version_check: bool,
    skip_credentials: bool,
    strict: bool,
    verbose: bool,
    output_format: str,
) -> None:
    """Validate one or more flow files/directories and report the results.

    Args:
        flow_paths: Files or directories to validate; defaults to ``flows/``.
        level: Deepest validation level to run (1-4).
        skip_components: Skip the component-existence check (Level 2).
        skip_edge_types: Skip the edge-type compatibility check (Level 3).
        skip_required_inputs: Skip the required-inputs check (Level 4).
        skip_version_check: Skip the version-mismatch warning.
        skip_credentials: Skip the missing-credentials check.
        strict: Treat warnings as errors for pass/fail and the exit code.
        verbose: Also list issues for files that passed.
        output_format: ``"json"`` for machine-readable output on stdout,
            anything else for the rich per-file rendering.

    Exits 0 when no files need validating, 1 when any file fails
    (errors, or warnings under ``strict``), 2 when a path does not exist.
    """
    if not flow_paths:
        flow_paths = [_DEFAULT_FLOWS_DIR]

    paths = _expand_paths(flow_paths)

    if not paths:
        console.print("[yellow]No flow files to validate.[/yellow]")
        raise typer.Exit(0)

    results: list[ValidationResult] = []
    for i, p in enumerate(paths, start=1):
        result = validate_flow_file(
            p,
            level=level,
            skip_components=skip_components,
            skip_edge_types=skip_edge_types,
            skip_required_inputs=skip_required_inputs,
            skip_version_check=skip_version_check,
            skip_credentials=skip_credentials,
        )
        results.append(result)
        # In JSON mode nothing is rendered per-file; everything is emitted at the end.
        if output_format != "json":
            _render_result(result, index=i, total=len(paths), verbose=verbose, strict=strict)

    if output_format == "json":
        import json as _json

        # NOTE: the inner loop variable `i` here shadows the enumerate index
        # above; both loops are finished before the other is used.
        out = [
            {
                "path": str(r.path),
                "ok": r.ok if not strict else (not r.errors and not r.warnings),
                "issues": [
                    {
                        "level": i.level,
                        "severity": i.severity,
                        "node_id": i.node_id,
                        "node_name": i.node_name,
                        "message": i.message,
                    }
                    for i in r.issues
                ],
            }
            for r in results
        ]
        sys.stdout.write(_json.dumps(out, indent=2) + "\n")
    elif len(paths) > 1:
        # Multi-file summary line; single files already show their own status.
        passed = sum(1 for r in results if r.ok and not (strict and r.warnings))
        failed = len(results) - passed
        color = "green" if failed == 0 else "red"
        ok_console.print(f"\n[{color}]Validated {len(paths)} flows: {passed} passed, {failed} failed.[/{color}]")

    if any((not r.ok) or (strict and r.warnings) for r in results):
        raise typer.Exit(1)
    
  • src/lfx/src/lfx/cli/validation/_env_validation.py+0 0 renamed
  • src/lfx/src/lfx/cli/validation/__init__.py+61 0 added
    @@ -0,0 +1,61 @@
    +"""Validation subpackage for lfx flow validation.
    +
    +Re-exports all public symbols so that existing imports from
    +``lfx.cli.validate`` (which delegates here) continue to work.
    +"""
    +
    +from lfx.cli.validation._env_validation import (
    +    is_valid_env_var_name,
    +    validate_global_variables_for_env,
    +)
    +from lfx.cli.validation.core import (
    +    LEVEL_COMPONENTS,
    +    LEVEL_EDGE_TYPES,
    +    LEVEL_REQUIRED_INPUTS,
    +    LEVEL_STRUCTURAL,
    +    ValidationIssue,
    +    ValidationResult,
    +    _expand_paths,
    +    _get_lf_version,
    +    _node_display_name,
    +    _render_result,
    +    validate_command,
    +    validate_flow_file,
    +)
    +from lfx.cli.validation.semantic import (
    +    _check_component_existence,
    +    _check_edge_type_compatibility,
    +    _check_missing_credentials,
    +    _check_required_inputs,
    +)
    +from lfx.cli.validation.structural import (
    +    _check_orphaned_nodes,
    +    _check_structural,
    +    _check_unused_nodes,
    +    _check_version_mismatch,
    +)
    +
    +__all__ = [
    +    "LEVEL_COMPONENTS",
    +    "LEVEL_EDGE_TYPES",
    +    "LEVEL_REQUIRED_INPUTS",
    +    "LEVEL_STRUCTURAL",
    +    "ValidationIssue",
    +    "ValidationResult",
    +    "_check_component_existence",
    +    "_check_edge_type_compatibility",
    +    "_check_missing_credentials",
    +    "_check_orphaned_nodes",
    +    "_check_required_inputs",
    +    "_check_structural",
    +    "_check_unused_nodes",
    +    "_check_version_mismatch",
    +    "_expand_paths",
    +    "_get_lf_version",
    +    "_node_display_name",
    +    "_render_result",
    +    "is_valid_env_var_name",
    +    "validate_command",
    +    "validate_flow_file",
    +    "validate_global_variables_for_env",
    +]
    
  • src/lfx/src/lfx/cli/validation/semantic.py+271 0 added
    @@ -0,0 +1,271 @@
    +"""Semantic validation checks for Langflow flow JSON.
    +
    +Includes component existence (Level 2), edge type compatibility (Level 3),
    +required inputs (Level 4), and credential checks.
    +"""
    +
    +from __future__ import annotations
    +
    +import os
    +from typing import TYPE_CHECKING, Any
    +
    +if TYPE_CHECKING:
    +    from lfx.cli.validation.core import ValidationResult
    +
    +# Validation level constants (local copies to avoid circular import)
    +_LEVEL_COMPONENTS = 2
    +_LEVEL_EDGE_TYPES = 3
    +_LEVEL_REQUIRED_INPUTS = 4
    +
    +
    +def _make_issue(
    +    level: int,
    +    severity: str,
    +    node_id: str | None,
    +    node_name: str | None,
    +    message: str,
    +) -> Any:
    +    from lfx.cli.validation.core import ValidationIssue
    +
    +    return ValidationIssue(
    +        level=level,
    +        severity=severity,
    +        node_id=node_id,
    +        node_name=node_name,
    +        message=message,
    +    )
    +
    +
    +def _node_display_name(node: dict[str, Any]) -> str | None:
    +    from lfx.cli.validation.core import _node_display_name as _ndn
    +
    +    return _ndn(node)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Level 2 - component existence (loads lfx component registry)
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_component_existence(flow: dict[str, Any], result: ValidationResult) -> None:
    +    try:
    +        from lfx.interface.utils import initialize_components  # type: ignore[import-untyped]
    +
    +        component_registry: set[str] = set(initialize_components().keys())
    +    except Exception as exc:  # noqa: BLE001
    +        result.issues.append(
    +            _make_issue(
    +                level=_LEVEL_COMPONENTS,
    +                severity="warning",
    +                node_id=None,
    +                node_name=None,
    +                message=f"Could not load component registry (skipping component checks): {exc}",
    +            )
    +        )
    +        return
    +
    +    for node in flow.get("data", {}).get("nodes", []):
    +        if not isinstance(node, dict):
    +            continue
    +        node_data = node.get("data", {})
    +        component_type: str | None = node_data.get("type")
    +        if not component_type:
    +            continue
    +        if component_type not in component_registry:
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_COMPONENTS,
    +                    severity="error",
    +                    node_id=node.get("id"),
    +                    node_name=_node_display_name(node),
    +                    message=(f"Unknown component type '{component_type}'. This component may be missing or outdated."),
    +                )
    +            )
    +
    +
    +# ---------------------------------------------------------------------------
    +# Level 3 - edge type compatibility
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_edge_type_compatibility(flow: dict[str, Any], result: ValidationResult) -> None:
    +    """Check that source output types are compatible with target input types.
    +
    +    This is a best-effort check: if type information is missing from the node
    +    template we emit a warning rather than an error.
    +    """
    +    data = flow.get("data", {})
    +    nodes_by_id: dict[str, dict[str, Any]] = {
    +        n["id"]: n for n in data.get("nodes", []) if isinstance(n, dict) and "id" in n
    +    }
    +
    +    for edge in data.get("edges", []):
    +        if not isinstance(edge, dict):
    +            continue
    +        src_id: str | None = edge.get("source")
    +        tgt_id: str | None = edge.get("target")
    +        src_handle: dict[str, Any] = edge.get("data", {}).get("sourceHandle", {}) or {}
    +        tgt_handle: dict[str, Any] = edge.get("data", {}).get("targetHandle", {}) or {}
    +
    +        if not src_id or not tgt_id:
    +            continue
    +
    +        src_node = nodes_by_id.get(src_id)
    +        tgt_node = nodes_by_id.get(tgt_id)
    +        if not src_node or not tgt_node:
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_EDGE_TYPES,
    +                    severity="error",
    +                    node_id=None,
    +                    node_name=None,
    +                    message=(f"Edge references non-existent node(s): source={src_id!r}, target={tgt_id!r}"),
    +                )
    +            )
    +            continue
    +
    +        output_types: list[str] = src_handle.get("output_types", [])
    +        src_type: str | None = output_types[0] if output_types else None
    +        tgt_type: str | None = tgt_handle.get("type")
    +
    +        if src_type and tgt_type and tgt_type not in {src_type, "Any"}:
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_EDGE_TYPES,
    +                    severity="warning",
    +                    node_id=tgt_id,
    +                    node_name=_node_display_name(tgt_node),
    +                    message=(
    +                        f"Possible type mismatch on edge from "
    +                        f"'{_node_display_name(src_node)}' -> '{_node_display_name(tgt_node)}': "
    +                        f"source emits '{src_type}', target expects '{tgt_type}'"
    +                    ),
    +                )
    +            )
    +
    +
    +# ---------------------------------------------------------------------------
    +# Level 4 - required inputs connected
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_required_inputs(flow: dict[str, Any], result: ValidationResult) -> None:
    +    """Verify that all required input fields have a value or an incoming edge."""
    +    data = flow.get("data", {})
    +    nodes = data.get("nodes", [])
    +    edges = data.get("edges", [])
    +
    +    # Build set of (node_id, field_name) pairs that receive an edge
    +    connected_inputs: set[tuple[str, str]] = set()
    +    for edge in edges:
    +        if not isinstance(edge, dict):
    +            continue
    +        tgt_id = edge.get("target")
    +        tgt_handle = edge.get("data", {}).get("targetHandle", {}) or {}
    +        field_name = tgt_handle.get("fieldName")
    +        if tgt_id and field_name:
    +            connected_inputs.add((tgt_id, field_name))
    +
    +    for node in nodes:
    +        if not isinstance(node, dict):
    +            continue
    +        node_id = node.get("id")
    +        node_data = node.get("data", {})
    +        template: dict[str, Any] = node_data.get("node", {}).get("template", {})
    +
    +        for field_name, field_def in template.items():
    +            if field_name.startswith("_") or not isinstance(field_def, dict):
    +                continue
    +            is_required = field_def.get("required", False)
    +            show = field_def.get("show", True)
    +            if not is_required or not show:
    +                continue
    +
    +            has_value = field_def.get("value") not in (None, "", [], {})
    +            has_edge = (node_id, field_name) in connected_inputs
    +
    +            if not has_value and not has_edge:
    +                result.issues.append(
    +                    _make_issue(
    +                        level=_LEVEL_REQUIRED_INPUTS,
    +                        severity="error",
    +                        node_id=node_id,
    +                        node_name=_node_display_name(node),
    +                        message=f"Required input '{field_name}' has no value and no incoming edge",
    +                    )
    +                )
    +
    +
    +# ---------------------------------------------------------------------------
    +# Missing credentials
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_missing_credentials(flow: dict[str, Any], result: ValidationResult) -> None:
    +    """Warn when password/secret fields have no value and no matching env var.
    +
    +    A template field is considered a *credential field* when it has
    +    ``"password": true`` (or ``"display_password": true``).  If no value is
    +    stored in the flow JSON *and* no corresponding environment variable is set
    +    *and* the field has no incoming edge, a warning is emitted so the user
    +    knows to provide the secret before running the flow.
    +
    +    The environment variable name is derived by uppercasing the field name and
    +    replacing hyphens with underscores (e.g. ``openai_api_key`` ->
    +    ``OPENAI_API_KEY``).
    +    """
    +    data = flow.get("data", {})
    +    edges = data.get("edges", [])
    +
    +    # Build the set of (node_id, field_name) pairs that receive an edge
    +    connected_inputs: set[tuple[str, str]] = set()
    +    for edge in edges:
    +        if not isinstance(edge, dict):
    +            continue
    +        tgt_id = edge.get("target")
    +        tgt_handle = edge.get("data", {}).get("targetHandle", {}) or {}
    +        field_name = tgt_handle.get("fieldName")
    +        if tgt_id and field_name:
    +            connected_inputs.add((tgt_id, field_name))
    +
    +    for node in data.get("nodes", []):
    +        if not isinstance(node, dict):
    +            continue
    +        node_id = node.get("id")
    +        node_data = node.get("data", {})
    +        template: dict[str, Any] = node_data.get("node", {}).get("template", {})
    +
    +        for field_name, field_def in template.items():
    +            if field_name.startswith("_") or not isinstance(field_def, dict):
    +                continue
    +
    +            is_credential = field_def.get("password", False) or field_def.get("display_password", False)
    +            if not is_credential:
    +                continue
    +
    +            show = field_def.get("show", True)
    +            if not show:
    +                continue
    +
    +            # Already satisfied? Check value, incoming edge, or env var.
    +            has_value = bool(field_def.get("value"))
    +            has_edge = (node_id, field_name) in connected_inputs
    +            if has_value or has_edge:
    +                continue
    +
    +            env_key = field_name.upper().replace("-", "_")
    +            if os.environ.get(env_key):
    +                continue
    +
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_REQUIRED_INPUTS,
    +                    severity="warning",
    +                    node_id=node_id,
    +                    node_name=_node_display_name(node),
    +                    message=(
    +                        f"Credential field '{field_name}' has no value "
    +                        f"(set ${env_key} or configure via global variables)"
    +                    ),
    +                )
    +            )
    
  • src/lfx/src/lfx/cli/validation/structural.py+292 0 added
    @@ -0,0 +1,292 @@
    +"""Level 1 structural validation checks for Langflow flow JSON.
    +
    +Includes JSON shape validation, node/edge structure checks, orphaned and
    +unused node detection, and version mismatch warnings.
    +"""
    +
    +from __future__ import annotations
    +
    +from typing import TYPE_CHECKING, Any
    +
    +if TYPE_CHECKING:
    +    from lfx.cli.validation.core import ValidationResult
    +
    +# These are imported lazily to avoid circular imports at module level.
    +# The functions receive a ValidationResult and create ValidationIssue instances.
    +
    +
    +def _make_issue(
    +    level: int,
    +    severity: str,
    +    node_id: str | None,
    +    node_name: str | None,
    +    message: str,
    +) -> Any:
    +    from lfx.cli.validation.core import ValidationIssue
    +
    +    return ValidationIssue(
    +        level=level,
    +        severity=severity,
    +        node_id=node_id,
    +        node_name=node_name,
    +        message=message,
    +    )
    +
    +
    +def _node_display_name(node: dict[str, Any]) -> str | None:
    +    from lfx.cli.validation.core import _node_display_name as _ndn
    +
    +    return _ndn(node)
    +
    +
# Validation level constants (local copies to avoid circular import)
_LEVEL_STRUCTURAL = 1

# Top-level keys every flow JSON must define before deeper checks make sense.
_REQUIRED_TOP_LEVEL = {"id", "name", "data"}
# Keys required inside the "data" object (the graph itself).
_REQUIRED_DATA_KEYS = {"nodes", "edges"}
    +
    +
    +# ---------------------------------------------------------------------------
    +# Level 1 - structural checks (pure JSON, no component loading)
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_structural(flow: dict[str, Any], result: ValidationResult) -> bool:
    +    """Return False if the flow is so broken that further checks cannot run."""
    +    ok = True
    +    missing_top = _REQUIRED_TOP_LEVEL - set(flow.keys())
    +    for key in sorted(missing_top):
    +        result.issues.append(
    +            _make_issue(
    +                level=_LEVEL_STRUCTURAL,
    +                severity="error",
    +                node_id=None,
    +                node_name=None,
    +                message=f"Missing required top-level field: '{key}'",
    +            )
    +        )
    +        ok = False
    +
    +    data = flow.get("data")
    +    if not isinstance(data, dict):
    +        result.issues.append(
    +            _make_issue(
    +                level=_LEVEL_STRUCTURAL,
    +                severity="error",
    +                node_id=None,
    +                node_name=None,
    +                message="'data' must be a JSON object",
    +            )
    +        )
    +        return False
    +
    +    missing_data = _REQUIRED_DATA_KEYS - set(data.keys())
    +    for key in sorted(missing_data):
    +        result.issues.append(
    +            _make_issue(
    +                level=_LEVEL_STRUCTURAL,
    +                severity="error",
    +                node_id=None,
    +                node_name=None,
    +                message=f"Missing required field: 'data.{key}'",
    +            )
    +        )
    +        ok = False
    +
    +    nodes = data.get("nodes", [])
    +    if not isinstance(nodes, list):
    +        result.issues.append(
    +            _make_issue(
    +                level=_LEVEL_STRUCTURAL,
    +                severity="error",
    +                node_id=None,
    +                node_name=None,
    +                message="'data.nodes' must be a JSON array",
    +            )
    +        )
    +        return False
    +
    +    for i, node in enumerate(nodes):
    +        if not isinstance(node, dict):
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_STRUCTURAL,
    +                    severity="error",
    +                    node_id=None,
    +                    node_name=None,
    +                    message=f"Node at index {i} is not a JSON object",
    +                )
    +            )
    +            ok = False
    +            continue
    +        for req in ("id", "data"):
    +            if req not in node:
    +                result.issues.append(
    +                    _make_issue(
    +                        level=_LEVEL_STRUCTURAL,
    +                        severity="error",
    +                        node_id=node.get("id"),
    +                        node_name=_node_display_name(node),
    +                        message=f"Node at index {i} is missing required field '{req}'",
    +                    )
    +                )
    +                ok = False
    +
    +        node_data = node.get("data", {})
    +        if isinstance(node_data, dict) and "type" not in node_data:
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_STRUCTURAL,
    +                    severity="warning",
    +                    node_id=node.get("id"),
    +                    node_name=_node_display_name(node),
    +                    message="Node is missing 'data.type' -- component type cannot be determined",
    +                )
    +            )
    +
    +    return ok
    +
    +
    +# ---------------------------------------------------------------------------
    +# Orphaned and unused node checks
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_orphaned_nodes(flow: dict[str, Any], result: ValidationResult) -> None:
    +    """Warn about nodes that have no edges connecting them to the rest of the graph.
    +
    +    A node is *orphaned* when it appears in no edge (neither as source nor as
    +    target).  Single-node flows are exempt.
    +    """
    +    data = flow.get("data", {})
    +    nodes: list[dict[str, Any]] = [n for n in data.get("nodes", []) if isinstance(n, dict) and "id" in n]
    +    edges: list[dict[str, Any]] = [e for e in data.get("edges", []) if isinstance(e, dict)]
    +
    +    if len(nodes) <= 1:
    +        return  # single-node flows are always "connected"
    +
    +    connected_ids: set[str] = set()
    +    for edge in edges:
    +        if edge.get("source"):
    +            connected_ids.add(edge["source"])
    +        if edge.get("target"):
    +            connected_ids.add(edge["target"])
    +
    +    for node in nodes:
    +        node_id = node["id"]
    +        if node_id not in connected_ids:
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_STRUCTURAL,
    +                    severity="warning",
    +                    node_id=node_id,
    +                    node_name=_node_display_name(node),
    +                    message="Orphaned node: not connected to any other node",
    +                )
    +            )
    +
    +
    +def _check_unused_nodes(flow: dict[str, Any], result: ValidationResult) -> None:
    +    """Warn about nodes whose outputs never reach an output node.
    +
    +    Walks the graph backwards from every node whose ``data.type`` ends with
    +    ``"Output"`` (e.g. ``ChatOutput``, ``TextOutput``).  Any node that is not
    +    reachable from an output node is considered unused.
    +
    +    Single-node flows and flows with no output nodes are skipped.
    +    """
    +    data = flow.get("data", {})
    +    nodes: list[dict[str, Any]] = [n for n in data.get("nodes", []) if isinstance(n, dict) and "id" in n]
    +    edges: list[dict[str, Any]] = [e for e in data.get("edges", []) if isinstance(e, dict)]
    +
    +    if len(nodes) <= 1:
    +        return
    +
    +    # Build reverse adjacency: for each node, which nodes feed INTO it
    +    # (i.e. target -> {sources})
    +    predecessors: dict[str, set[str]] = {n["id"]: set() for n in nodes}
    +    for edge in edges:
    +        src = edge.get("source")
    +        tgt = edge.get("target")
    +        if src and tgt and tgt in predecessors:
    +            predecessors[tgt].add(src)
    +
    +    # Identify output nodes by type suffix
    +    output_node_ids: set[str] = set()
    +    for node in nodes:
    +        component_type: str = node.get("data", {}).get("type", "") or ""
    +        if component_type.endswith("Output"):
    +            output_node_ids.add(node["id"])
    +
    +    if not output_node_ids:
    +        return  # can't determine "useful" without knowing output nodes
    +
    +    # BFS backwards from all output nodes to find every contributing node
    +    reachable: set[str] = set()
    +    queue: list[str] = list(output_node_ids)
    +    while queue:
    +        current = queue.pop()
    +        if current in reachable:
    +            continue
    +        reachable.add(current)
    +        queue.extend(predecessors.get(current, set()) - reachable)
    +
    +    nodes_by_id = {n["id"]: n for n in nodes}
    +    for node_id, node in nodes_by_id.items():
    +        if node_id not in reachable:
    +            result.issues.append(
    +                _make_issue(
    +                    level=_LEVEL_STRUCTURAL,
    +                    severity="warning",
    +                    node_id=node_id,
    +                    node_name=_node_display_name(node),
    +                    message="Unused node: does not contribute to any output",
    +                )
    +            )
    +
    +
    +# ---------------------------------------------------------------------------
    +# Version mismatch / outdated components
    +# ---------------------------------------------------------------------------
    +
    +
    +def _check_version_mismatch(flow: dict[str, Any], result: ValidationResult) -> None:
    +    """Warn when nodes were built with a different Langflow version.
    +
    +    Each unique ``lf_version`` embedded in the node metadata that differs from
    +    the currently installed Langflow version triggers a single warning covering
    +    all affected nodes.  If Langflow is not installed the check is skipped
    +    silently (lfx can run standalone).
    +    """
    +    from lfx.cli.validation.core import _get_lf_version
    +
    +    installed = _get_lf_version()
    +    if installed is None:
    +        return  # Langflow not installed; skip silently
    +
    +    nodes: list[dict[str, Any]] = [n for n in flow.get("data", {}).get("nodes", []) if isinstance(n, dict)]
    +
    +    # Collect node IDs grouped by the version they were built with
    +    version_to_nodes: dict[str, list[str]] = {}
    +    for node in nodes:
    +        lf_version: str | None = node.get("data", {}).get("node", {}).get("lf_version")
    +        if lf_version and lf_version != installed:
    +            version_to_nodes.setdefault(lf_version, []).append(_node_display_name(node) or node.get("id") or "?")
    +
    +    _max_sample = 3
    +    for built_version, node_names in sorted(version_to_nodes.items()):
    +        count = len(node_names)
    +        sample = ", ".join(node_names[:_max_sample]) + (" ..." if count > _max_sample else "")
    +        result.issues.append(
    +            _make_issue(
    +                level=_LEVEL_STRUCTURAL,
    +                severity="warning",
    +                node_id=None,
    +                node_name=None,
    +                message=(
    +                    f"{count} component(s) built with Langflow {built_version} "
    +                    f"(installed: {installed}) -- re-export recommended. "
    +                    f"Affected: {sample}"
    +                ),
    +            )
    +        )
    
  • src/lfx/src/lfx/config/environments.py+344 0 added
    @@ -0,0 +1,344 @@
    +"""lfx environment configuration — resolve Langflow instance URL and API key.
    +
    +Config file lookup order
    +------------------------
    +1.  Explicit path given via ``--environments-file`` / ``environments_file`` parameter.
    +2.  ``.lfx/environments.yaml`` in the current working directory, then each
    +    parent directory up to the first ``.git`` boundary (project root discovery).
    +3.  ``~/.lfx/environments.yaml`` (user-level config).
    +4.  ``langflow-environments.toml`` in the current working directory
    +    (backward-compatible with the langflow-sdk TOML format).
    +
    +YAML file format
    +----------------
    +.. code-block:: yaml
    +
    +    environments:
    +      local:
    +        url: http://localhost:7860
    +        api_key_env: LANGFLOW_LOCAL_API_KEY
    +
    +      staging:
    +        url: https://staging.langflow.example.com
    +        api_key_env: LANGFLOW_STAGING_API_KEY
    +
    +      production:
    +        url: https://langflow.example.com
    +        api_key_env: LANGFLOW_PROD_API_KEY
    +
    +    defaults:
    +      environment: local
    +
    +TOML format is also accepted (``langflow-environments.toml`` or any ``.toml``
    +file passed via ``--environments-file``).
    +
    +The ``api_key_env`` field names an *environment variable* that holds the API
    +key.  The actual key is never stored in the file.
    +"""
    +
    +from __future__ import annotations
    +
    +import os
    +from dataclasses import dataclass
    +from pathlib import Path
    +from typing import Any
    +
    +# ---------------------------------------------------------------------------
    +# Public types
    +# ---------------------------------------------------------------------------
    +
    +
    +class ConfigError(Exception):
    +    """Raised when the config file is missing, malformed, or an environment name cannot be resolved.
    +
    +    Note: a missing API key env var is *not* a ``ConfigError`` — ``api_key``
    +    will simply be ``None`` on the returned :class:`LangflowEnvironment`.
    +    Commands that require a key validate it themselves and raise an appropriate
    +    error with actionable guidance.
    +    """
    +
    +
    +@dataclass
    +class LangflowEnvironment:
    +    """A fully-resolved Langflow target instance.
    +
    +    Attributes:
    +        name:    Human-readable label (environment name or ``"__inline__"``).
    +        url:     Base URL of the Langflow instance.
    +        api_key: Resolved API key value, or ``None`` if not configured.
    +    """
    +
    +    name: str
    +    url: str
    +    api_key: str | None
    +
    +
    +# ---------------------------------------------------------------------------
    +# Config file discovery
    +# ---------------------------------------------------------------------------
    +
# Candidate YAML filenames inside a .lfx/ directory (checked in this order).
_YAML_NAMES: tuple[str, ...] = ("environments.yaml", "environments.yml")
# Backward-compatible TOML config name looked up in the cwd as a last resort.
_TOML_FALLBACK = "langflow-environments.toml"
# Directory name holding project- or user-level lfx config.
_LFX_DIR = ".lfx"
    +
    +
    +def _find_config_file(override: Path | None) -> Path | None:
    +    """Return the first existing config file following the lookup order.
    +
    +    Parameters
    +    ----------
    +    override:
    +        Explicit path supplied by the caller (``--environments-file``).
    +        If given, only this path is checked.
    +
    +    Raises:
    +    ------
    +    ConfigError:
    +        If *override* is given but the file does not exist.
    +    """
    +    if override is not None:
    +        if not override.is_file():
    +            msg = f"Config file not found: {override}"
    +            raise ConfigError(msg)
    +        return override
    +
    +    # Walk up from cwd looking for .lfx/environments.yaml
    +    cwd = Path.cwd()
    +    for directory in (cwd, *cwd.parents):
    +        for name in _YAML_NAMES:
    +            candidate = directory / _LFX_DIR / name
    +            if candidate.is_file():
    +                return candidate
    +        # Stop walking at a git root or the filesystem root
    +        if (directory / ".git").is_dir() or directory.parent == directory:
    +            break
    +
    +    # User-level YAML
    +    for name in _YAML_NAMES:
    +        user_yaml = Path.home() / _LFX_DIR / name
    +        if user_yaml.is_file():
    +            return user_yaml
    +
    +    # Backward-compat: langflow-environments.toml in cwd
    +    toml_fallback = cwd / _TOML_FALLBACK
    +    if toml_fallback.is_file():
    +        return toml_fallback
    +
    +    return None
    +
    +
    +# ---------------------------------------------------------------------------
    +# Parsing
    +# ---------------------------------------------------------------------------
    +
    +
    +def _parse_yaml(text: str, path: Path) -> dict[str, Any]:
    +    try:
    +        import yaml  # type: ignore[import-untyped]
    +    except ImportError as exc:
    +        msg = "PyYAML is required to read .yaml config files. Install it with: pip install pyyaml"
    +        raise ConfigError(msg) from exc
    +    try:
    +        result = yaml.safe_load(text)
    +    except Exception as exc:
    +        msg = f"Invalid YAML in {path}: {exc}"
    +        raise ConfigError(msg) from exc
    +    if not isinstance(result, dict):
    +        msg = f"Expected a YAML mapping at the top level of {path}, got {type(result).__name__}"
    +        raise ConfigError(msg)
    +    return result
    +
    +
    +def _parse_toml(path: Path) -> dict[str, Any]:
    +    try:
    +        import tomllib
    +    except ImportError:
    +        try:
    +            import tomli as tomllib  # type: ignore[no-reattr,assignment]
    +        except ImportError as exc:
    +            msg = "tomllib (Python ≥3.11) or tomli is required for .toml config files. Install with: pip install tomli"
    +            raise ConfigError(msg) from exc
    +    try:
    +        with path.open("rb") as fh:
    +            return tomllib.load(fh)
    +    except OSError as exc:
    +        msg = f"Cannot read {path}: {exc}"
    +        raise ConfigError(msg) from exc
    +    except Exception as exc:
    +        msg = f"Invalid TOML in {path}: {exc}"
    +        raise ConfigError(msg) from exc
    +
    +
    +def _load_raw(path: Path) -> dict[str, Any]:
    +    """Return the raw parsed config dict from *path* (YAML or TOML)."""
    +    suffix = path.suffix.lower()
    +    if suffix in (".yaml", ".yml"):
    +        return _parse_yaml(path.read_text(encoding="utf-8"), path)
    +    if suffix == ".toml":
    +        return _parse_toml(path)
    +    # Unknown extension — try YAML first, then TOML
    +    try:
    +        return _parse_yaml(path.read_text(encoding="utf-8"), path)
    +    except ConfigError:
    +        return _parse_toml(path)
    +
    +
    +def _parse_env_block(name: str, block: Any, config_path: Path) -> LangflowEnvironment:
    +    if not isinstance(block, dict):
    +        msg = f"Environment {name!r} in {config_path} must be a mapping, got {type(block).__name__}"
    +        raise ConfigError(msg)
    +    if "url" not in block:
    +        msg = f"Environment {name!r} in {config_path} is missing the required 'url' field."
    +        raise ConfigError(msg)
    +    url: str = str(block["url"])
    +    api_key: str | None = None
    +
    +    if "api_key_env" in block:
    +        var_name: str = str(block["api_key_env"])
    +        api_key = os.environ.get(var_name)
    +        # api_key may be None here; callers that require a key raise their own error.
    +    elif "api_key" in block:
    +        import warnings
    +
    +        warnings.warn(
    +            f"Environment {name!r}: literal api_key in config file is not recommended. "
    +            "Use api_key_env to reference an environment variable instead.",
    +            UserWarning,
    +            stacklevel=2,
    +        )
    +        api_key = str(block["api_key"])
    +
    +    return LangflowEnvironment(name=name, url=url, api_key=api_key)
    +
    +
    +def _load_config(path: Path) -> tuple[dict[str, LangflowEnvironment], str | None]:
    +    """Return ``(environments_dict, default_env_name)`` from the config at *path*."""
    +    raw = _load_raw(path)
    +
    +    raw_envs: Any = raw.get("environments") or {}
    +    if not isinstance(raw_envs, dict):
    +        msg = f"'environments' in {path} must be a mapping, got {type(raw_envs).__name__}"
    +        raise ConfigError(msg)
    +
    +    envs: dict[str, LangflowEnvironment] = {}
    +    for env_name, block in raw_envs.items():
    +        envs[str(env_name)] = _parse_env_block(str(env_name), block, path)
    +
    +    defaults: Any = raw.get("defaults") or {}
    +    default_name: str | None = defaults.get("environment") if isinstance(defaults, dict) else None
    +
    +    return envs, default_name
    +
    +
    +# ---------------------------------------------------------------------------
    +# Public API
    +# ---------------------------------------------------------------------------
    +
    +
    +def resolve_environment(
    +    env: str | None,
    +    *,
    +    target: str | None = None,
    +    api_key: str | None = None,
    +    environments_file: str | None = None,
    +) -> LangflowEnvironment:
    +    """Resolve an environment name (or inline flags) to a :class:`LangflowEnvironment`.
    +
    +    Precedence
    +    ----------
    +    1. **Inline mode** — if *target* is given, return immediately without
    +       reading any config file.  *api_key* is used as-is (its value, not a
    +       variable name).
    +    2. **Named env** — look up *env* (or the configured default) in the config
    +       file discovered by the lookup order described in this module's docstring.
    +    3. **Env-var fallback** — if no config file exists and no *env* was
    +       requested, fall back to ``LANGFLOW_URL`` / ``LANGFLOW_API_KEY`` (or
    +       ``LFX_URL`` / ``LFX_API_KEY``) env vars before raising.
    +
    +    Parameters
    +    ----------
    +    env:
    +        Environment name from the config file (e.g. ``"staging"``).
    +    target:
    +        Inline URL override — bypasses config file lookup entirely.
    +    api_key:
    +        Inline API key value.  When used with *target*, taken as-is.
    +        When used alongside an *env* from the config, overrides the resolved key.
    +    environments_file:
    +        Explicit path to a config file (YAML or TOML).  Overrides the
    +        automatic discovery order.
    +
    +    Returns:
    +    -------
    +    LangflowEnvironment:
    +        Fully-resolved environment with ``url`` and ``api_key``.
    +
    +    Raises:
    +    ------
    +    ConfigError:
    +        When resolution fails: file not found, unknown environment name,
    +        malformed config, etc.
    +    """
    +    # -----------------------------------------------------------------------
    +    # Mode 1: inline (--target provided)
    +    # -----------------------------------------------------------------------
    +    if target is not None:
    +        name = env or "__inline__"
    +        return LangflowEnvironment(name=name, url=target, api_key=api_key)
    +
    +    # -----------------------------------------------------------------------
    +    # Mode 2: config file
    +    # -----------------------------------------------------------------------
    +    override = Path(environments_file) if environments_file else None
    +    config_path = _find_config_file(override)
    +
    +    if config_path is None:
    +        # No config file found — try env-var fallback before giving up
    +        lf_url = os.environ.get("LANGFLOW_URL") or os.environ.get("LFX_URL")
    +        if lf_url and env is None:
    +            lf_key = api_key or os.environ.get("LANGFLOW_API_KEY") or os.environ.get("LFX_API_KEY")
    +            return LangflowEnvironment(name="__env__", url=lf_url, api_key=lf_key)
    +
    +        if env is not None:
    +            msg = (
    +                f"Environment {env!r} requested but no config file was found.\n"
    +                f"  • Create .lfx/environments.yaml in your project root, or\n"
    +                f"  • Pass --target <url> [--api-key <key>] for inline configuration.\n"
    +                f"  • Run 'lfx init' to scaffold a project with a config template."
    +            )
    +            raise ConfigError(msg)
    +
    +        msg = (
    +            "No --env, --target URL, or config file found.\n"
    +            "Options:\n"
    +            "  • lfx <cmd> --env <name>              (requires .lfx/environments.yaml)\n"
    +            "  • lfx <cmd> --target <url>             (inline, no config file needed)\n"
    +            "  • export LANGFLOW_URL=<url>            (env-var fallback)\n"
    +            "  • lfx init                             (scaffold a project with a template)"
    +        )
    +        raise ConfigError(msg)
    +
    +    all_envs, default_name = _load_config(config_path)
    +
    +    resolved_name = env or default_name
    +    if resolved_name is None:
    +        available = ", ".join(sorted(all_envs)) or "(none defined)"
    +        msg = (
    +            f"No --env given and no 'defaults.environment' set in {config_path}.\n"
    +            f"Available environments: {available}\n"
    +            f"Pass --env <name> or add a 'defaults.environment' key to the config."
    +        )
    +        raise ConfigError(msg)
    +
    +    if resolved_name not in all_envs:
    +        available = ", ".join(sorted(all_envs)) or "(none defined)"
    +        msg = f"Environment {resolved_name!r} not found in {config_path}.\nAvailable environments: {available}"
    +        raise ConfigError(msg)
    +
    +    resolved = all_envs[resolved_name]
    +
    +    # --api-key overrides the key resolved from the config file
    +    if api_key is not None:
    +        resolved = LangflowEnvironment(name=resolved.name, url=resolved.url, api_key=api_key)
    +
    +    return resolved
    
  • src/lfx/src/lfx/config/__init__.py+5 0 added
    @@ -0,0 +1,5 @@
    +"""lfx configuration — environment resolution for Langflow CLI commands."""
    +
    +from lfx.config.environments import ConfigError, LangflowEnvironment, resolve_environment
    +
    +__all__ = ["ConfigError", "LangflowEnvironment", "resolve_environment"]
    
  • src/lfx/src/lfx/__main__.py+29 193 modified
    @@ -1,211 +1,47 @@
     """LFX CLI entry point."""
     
    +from importlib.metadata import version as _pkg_version
    +
     import typer
     
    +from lfx.cli._authoring_commands import register as _register_authoring
    +from lfx.cli._remote_commands import register as _register_remote
    +from lfx.cli._running_commands import register as _register_running
    +from lfx.cli._setup_commands import register as _register_setup
    +
    +
    +def _version_callback(value: bool) -> None:
    +    if value:
    +        typer.echo(f"lfx {_pkg_version('lfx')}")
    +        raise typer.Exit(0)
    +
    +
     app = typer.Typer(
         name="lfx",
         help="lfx - Langflow Executor",
         add_completion=False,
     )
     
     
    -@app.command(name="serve", help="Serve a flow as an API", no_args_is_help=True)
    -def serve_command_wrapper(
    -    script_path: str | None = typer.Argument(
    -        None,
    -        help=(
    -            "Path to JSON flow (.json) or Python script (.py) file or stdin input. "
    -            "Optional when using --flow-json or --stdin."
    -        ),
    -    ),
    -    host: str = typer.Option("127.0.0.1", "--host", "-h", help="Host to bind the server to"),
    -    port: int = typer.Option(8000, "--port", "-p", help="Port to bind the server to"),
    -    verbose: bool = typer.Option(False, "--verbose", "-v", help="Show diagnostic output and execution details"),  # noqa: FBT001, FBT003
    -    env_file: str | None = typer.Option(
    -        None,
    -        "--env-file",
    -        help="Path to the .env file containing environment variables",
    -    ),
    -    log_level: str = typer.Option(
    -        "warning",
    -        "--log-level",
    -        help="Logging level. One of: debug, info, warning, error, critical",
    -    ),
    -    flow_json: str | None = typer.Option(
    -        None,
    -        "--flow-json",
    -        help="Inline JSON flow content as a string (alternative to script_path)",
    -    ),
    -    *,
    -    stdin: bool = typer.Option(
    -        False,  # noqa: FBT003
    -        "--stdin",
    -        help="Read JSON flow content from stdin (alternative to script_path)",
    -    ),
    -    check_variables: bool = typer.Option(
    -        True,  # noqa: FBT003
    -        "--check-variables/--no-check-variables",
    -        help="Check global variables for environment compatibility",
    +@app.callback()
    +def _app_callback(
    +    version: bool = typer.Option(
    +        False,
    +        "--version",
    +        "-V",
    +        help="Show the lfx version and exit.",
    +        is_eager=True,
    +        callback=_version_callback,
         ),
     ) -> None:
    -    """Serve LFX flows as a web API (lazy-loaded)."""
    -    from pathlib import Path
    -
    -    from lfx.cli.commands import serve_command
    -
    -    # Convert env_file string to Path if provided
    -    env_file_path = Path(env_file) if env_file else None
    -
    -    return serve_command(
    -        script_path=script_path,
    -        host=host,
    -        port=port,
    -        verbose=verbose,
    -        env_file=env_file_path,
    -        log_level=log_level,
    -        flow_json=flow_json,
    -        stdin=stdin,
    -        check_variables=check_variables,
    -    )
    -
    -
    -@app.command(name="run", help="Run a flow directly", no_args_is_help=True)
    -def run_command_wrapper(
    -    script_path: str | None = typer.Argument(
    -        None, help="Path to the Python script (.py) or JSON flow (.json) containing a graph"
    -    ),
    -    input_value: str | None = typer.Argument(None, help="Input value to pass to the graph"),
    -    input_value_option: str | None = typer.Option(
    -        None,
    -        "--input-value",
    -        help="Input value to pass to the graph (alternative to positional argument)",
    -    ),
    -    output_format: str = typer.Option(
    -        "json",
    -        "--format",
    -        "-f",
    -        help="Output format: json, text, message, or result",
    -    ),
    -    flow_json: str | None = typer.Option(
    -        None,
    -        "--flow-json",
    -        help="Inline JSON flow content as a string (alternative to script_path)",
    -    ),
    -    *,
    -    stdin: bool = typer.Option(
    -        default=False,
    -        show_default=True,
    -        help="Read JSON flow content from stdin (alternative to script_path)",
    -    ),
    -    check_variables: bool = typer.Option(
    -        default=True,
    -        show_default=True,
    -        help="Check global variables for environment compatibility",
    -    ),
    -    verbose: bool = typer.Option(
    -        False,  # noqa: FBT003
    -        "-v",
    -        "--verbose",
    -        help="Show basic progress information",
    -    ),
    -    verbose_detailed: bool = typer.Option(
    -        False,  # noqa: FBT003
    -        "-vv",
    -        help="Show detailed progress and debug information",
    -    ),
    -    verbose_full: bool = typer.Option(
    -        False,  # noqa: FBT003
    -        "-vvv",
    -        help="Show full debugging output including component logs",
    -    ),
    -    timing: bool = typer.Option(
    -        default=False,
    -        show_default=True,
    -        help="Include detailed timing information in output",
    -    ),
    -) -> None:
    -    """Run a flow directly (lazy-loaded)."""
    -    from pathlib import Path
    -
    -    from lfx.cli.run import run
    -
    -    # Convert script_path string to Path if provided
    -    script_path_obj = Path(script_path) if script_path else None
    -
    -    return run(
    -        script_path=script_path_obj,
    -        input_value=input_value,
    -        input_value_option=input_value_option,
    -        output_format=output_format,
    -        flow_json=flow_json,
    -        stdin=stdin,
    -        check_variables=check_variables,
    -        verbose=verbose,
    -        verbose_detailed=verbose_detailed,
    -        verbose_full=verbose_full,
    -        timing=timing,
    -    )
    -
    -
    -@app.command(name="requirements", help="Generate requirements.txt for a flow", no_args_is_help=True)
    -def requirements_command_wrapper(
    -    flow_path: str = typer.Argument(help="Path to the Langflow flow JSON file"),
    -    output: str | None = typer.Option(
    -        None,
    -        "--output",
    -        "-o",
    -        help="Output file path (default: stdout)",
    -    ),
    -    lfx_package: str = typer.Option(
    -        "lfx",
    -        "--lfx-package",
    -        help="Name of the LFX package (default: lfx)",
    -    ),
    -    *,
    -    no_lfx: bool = typer.Option(
    -        False,  # noqa: FBT003
    -        "--no-lfx",
    -        help="Exclude the LFX package from output",
    -    ),
    -    no_pin: bool = typer.Option(
    -        False,  # noqa: FBT003
    -        "--no-pin",
    -        help="Do not pin package versions (default: pin to currently installed versions)",
    -    ),
    -) -> None:
    -    """Generate requirements.txt from a Langflow flow JSON (lazy-loaded)."""
    -    import json
    -    from pathlib import Path
    -
    -    from lfx.utils.flow_requirements import generate_requirements_txt
    -
    -    path = Path(flow_path)
    -    if not path.is_file():
    -        typer.echo(f"Error: File not found: {path}", err=True)
    -        raise typer.Exit(1)
    -
    -    try:
    -        flow = json.loads(path.read_text(encoding="utf-8"))
    -    except (json.JSONDecodeError, OSError) as e:
    -        typer.echo(f"Error: Could not read flow JSON: {e}", err=True)
    -        raise typer.Exit(1) from e
    +    """Lfx - Langflow Executor."""
     
    -    content = generate_requirements_txt(
    -        flow,
    -        lfx_package=lfx_package,
    -        include_lfx=not no_lfx,
    -        pin_versions=not no_pin,
    -    )
     
    -    if output:
    -        try:
    -            Path(output).write_text(content, encoding="utf-8")
    -        except OSError as e:
    -            typer.echo(f"Error: Could not write to {output}: {e}", err=True)
    -            raise typer.Exit(1) from e
    -        typer.echo(f"Requirements written to {output}")
    -    else:
    -        typer.echo(content, nl=False)
    +# Register command groups (order determines help-panel ordering)
    +_register_setup(app)
    +_register_authoring(app)
    +_register_running(app)
    +_register_remote(app)
     
     
     def main():
    
  • src/lfx/src/lfx/templates/flows/hello-world.json+495 0 added
    @@ -0,0 +1,495 @@
    +{
    +  "data": {
    +    "edges": [
    +      {
    +        "animated": false,
    +        "className": "",
    +        "data": {
    +          "sourceHandle": {
    +            "dataType": "TextInput",
    +            "id": "TextInput-J1CQK",
    +            "name": "text",
    +            "output_types": [
    +              "Message"
    +            ]
    +          },
    +          "targetHandle": {
    +            "fieldName": "input_value",
    +            "id": "ChatOutput-boh63",
    +            "inputTypes": [
    +              "Data",
    +              "JSON",
    +              "DataFrame",
    +              "Table",
    +              "Message"
    +            ],
    +            "type": "other"
    +          }
    +        },
    +        "id": "xy-edge__TextInput-J1CQK{œdataTypeœ:œTextInputœ,œidœ:œTextInput-J1CQKœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-boh63{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-boh63œ,œinputTypesœ:[œDataœ,œJSONœ,œDataFrameœ,œTableœ,œMessageœ],œtypeœ:œotherœ}",
    +        "selected": false,
    +        "source": "TextInput-J1CQK",
    +        "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-J1CQKœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
    +        "target": "ChatOutput-boh63",
    +        "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-boh63œ,œinputTypesœ:[œDataœ,œJSONœ,œDataFrameœ,œTableœ,œMessageœ],œtypeœ:œotherœ}"
    +      }
    +    ],
    +    "nodes": [
    +      {
    +        "data": {
    +          "id": "TextInput-J1CQK",
    +          "node": {
    +            "base_classes": [
    +              "Message"
    +            ],
    +            "beta": false,
    +            "conditional_paths": [],
    +            "custom_fields": {},
    +            "description": "Get user text inputs.",
    +            "display_name": "Text Input",
    +            "documentation": "https://docs.langflow.org/text-input-and-output",
    +            "edited": false,
    +            "field_order": [
    +              "input_value",
    +              "use_global_variable"
    +            ],
    +            "frozen": false,
    +            "icon": "type",
    +            "legacy": false,
    +            "lf_version": "1.9.0",
    +            "metadata": {
    +              "code_hash": "518f16485886",
    +              "dependencies": {
    +                "dependencies": [
    +                  {
    +                    "name": "lfx",
    +                    "version": null
    +                  }
    +                ],
    +                "total_dependencies": 1
    +              },
    +              "module": "lfx.components.input_output.text.TextInputComponent"
    +            },
    +            "minimized": false,
    +            "output_types": [],
    +            "outputs": [
    +              {
    +                "allows_loop": false,
    +                "cache": true,
    +                "display_name": "Output Text",
    +                "group_outputs": false,
    +                "method": "text_response",
    +                "name": "text",
    +                "selected": "Message",
    +                "tool_mode": true,
    +                "types": [
    +                  "Message"
    +                ],
    +                "value": "__UNDEFINED__"
    +              }
    +            ],
    +            "pinned": false,
    +            "template": {
    +              "_type": "Component",
    +              "code": {
    +                "advanced": true,
    +                "dynamic": true,
    +                "fileTypes": [],
    +                "file_path": "",
    +                "info": "",
    +                "list": false,
    +                "load_from_db": false,
    +                "multiline": true,
    +                "name": "code",
    +                "password": false,
    +                "placeholder": "",
    +                "required": true,
    +                "show": true,
    +                "title_case": false,
    +                "type": "code",
    +                "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n    display_name = \"Text Input\"\n    description = \"Get user text inputs.\"\n    documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n    icon = \"type\"\n    name = \"TextInput\"\n\n    inputs = [\n        MultilineInput(\n            name=\"input_value\",\n            display_name=\"Text\",\n            info=\"Text to be passed as input.\",\n        ),\n        BoolInput(\n            name=\"use_global_variable\",\n            display_name=\"Use Global Variable\",\n            info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n            value=False,\n            advanced=True,\n            real_time_refresh=True,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n    ]\n\n    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n        if field_name == \"use_global_variable\":\n            if field_value:\n                # Enable global variable mode: single-line with password masking and globe dropdown\n                build_config[\"input_value\"][\"multiline\"] = False\n                build_config[\"input_value\"][\"password\"] = True\n            else:\n                # Default mode: multiline text editing\n                build_config[\"input_value\"][\"multiline\"] = True\n                build_config[\"input_value\"][\"password\"] = False\n        return build_config\n\n    def text_response(self) -> Message:\n        return Message(\n            text=self.input_value,\n        )\n"
    +              },
    +              "input_value": {
    +                "_input_type": "MultilineInput",
    +                "advanced": false,
    +                "ai_enabled": false,
    +                "copy_field": false,
    +                "display_name": "Text",
    +                "dynamic": false,
    +                "info": "Text to be passed as input.",
    +                "input_types": [
    +                  "Message"
    +                ],
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "load_from_db": false,
    +                "multiline": true,
    +                "name": "input_value",
    +                "override_skip": false,
    +                "password": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_input": true,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": false,
    +                "type": "str",
    +                "value": "Hello, World!"
    +              },
    +              "use_global_variable": {
    +                "_input_type": "BoolInput",
    +                "advanced": true,
    +                "display_name": "Use Global Variable",
    +                "dynamic": false,
    +                "info": "Enable to select from global variables (shows globe icon). Disables multiline editing.",
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "name": "use_global_variable",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "real_time_refresh": true,
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": true,
    +                "type": "bool",
    +                "value": false
    +              }
    +            },
    +            "tool_mode": false
    +          },
    +          "showNode": true,
    +          "type": "TextInput"
    +        },
    +        "id": "TextInput-J1CQK",
    +        "measured": {
    +          "height": 207,
    +          "width": 320
    +        },
    +        "position": {
    +          "x": 392,
    +          "y": 219
    +        },
    +        "type": "genericNode"
    +      },
    +      {
    +        "data": {
    +          "id": "ChatOutput-boh63",
    +          "node": {
    +            "base_classes": [
    +              "Message"
    +            ],
    +            "beta": false,
    +            "conditional_paths": [],
    +            "custom_fields": {},
    +            "description": "Display a chat message in the Playground.",
    +            "display_name": "Chat Output",
    +            "documentation": "https://docs.langflow.org/chat-input-and-output",
    +            "edited": false,
    +            "field_order": [
    +              "input_value",
    +              "should_store_message",
    +              "sender",
    +              "sender_name",
    +              "session_id",
    +              "context_id",
    +              "data_template",
    +              "clean_data"
    +            ],
    +            "frozen": false,
    +            "icon": "MessagesSquare",
    +            "legacy": false,
    +            "lf_version": "1.9.0",
    +            "metadata": {
    +              "code_hash": "c312c84b1777",
    +              "dependencies": {
    +                "dependencies": [
    +                  {
    +                    "name": "orjson",
    +                    "version": null
    +                  },
    +                  {
    +                    "name": "fastapi",
    +                    "version": null
    +                  },
    +                  {
    +                    "name": "lfx",
    +                    "version": null
    +                  }
    +                ],
    +                "total_dependencies": 3
    +              },
    +              "module": "lfx.components.input_output.chat_output.ChatOutput"
    +            },
    +            "minimized": true,
    +            "output_types": [],
    +            "outputs": [
    +              {
    +                "allows_loop": false,
    +                "cache": true,
    +                "display_name": "Output Message",
    +                "group_outputs": false,
    +                "method": "message_response",
    +                "name": "message",
    +                "selected": "Message",
    +                "tool_mode": true,
    +                "types": [
    +                  "Message"
    +                ],
    +                "value": "__UNDEFINED__"
    +              }
    +            ],
    +            "pinned": false,
    +            "template": {
    +              "_type": "Component",
    +              "clean_data": {
    +                "_input_type": "BoolInput",
    +                "advanced": true,
    +                "display_name": "Basic Clean Data",
    +                "dynamic": false,
    +                "info": "Whether to clean data before converting to string.",
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "name": "clean_data",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": true,
    +                "type": "bool",
    +                "value": true
    +              },
    +              "code": {
    +                "advanced": true,
    +                "dynamic": true,
    +                "fileTypes": [],
    +                "file_path": "",
    +                "info": "",
    +                "list": false,
    +                "load_from_db": false,
    +                "multiline": true,
    +                "name": "code",
    +                "password": false,
    +                "placeholder": "",
    +                "required": true,
    +                "show": true,
    +                "title_case": false,
    +                "type": "code",
    +                "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n    MESSAGE_SENDER_AI,\n    MESSAGE_SENDER_NAME_AI,\n    MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n    display_name = \"Chat Output\"\n    description = \"Display a chat message in the Playground.\"\n    documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n    icon = \"MessagesSquare\"\n    name = \"ChatOutput\"\n    minimized = True\n\n    inputs = [\n        HandleInput(\n            name=\"input_value\",\n            display_name=\"Inputs\",\n            info=\"Message to be passed as output.\",\n            input_types=[\"Data\", \"JSON\", \"DataFrame\", \"Table\", \"Message\"],\n            required=True,\n        ),\n        BoolInput(\n            name=\"should_store_message\",\n            display_name=\"Store Messages\",\n            info=\"Store the message in the history.\",\n            value=True,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"sender\",\n            display_name=\"Sender Type\",\n            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n            value=MESSAGE_SENDER_AI,\n            advanced=True,\n            info=\"Type of sender.\",\n        ),\n        MessageTextInput(\n            name=\"sender_name\",\n            display_name=\"Sender Name\",\n            info=\"Name of the sender.\",\n            value=MESSAGE_SENDER_NAME_AI,\n            advanced=True,\n        ),\n        
MessageTextInput(\n            name=\"session_id\",\n            display_name=\"Session ID\",\n            info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"context_id\",\n            display_name=\"Context ID\",\n            info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n            value=\"\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"data_template\",\n            display_name=\"Data Template\",\n            value=\"{text}\",\n            advanced=True,\n            info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n        ),\n        BoolInput(\n            name=\"clean_data\",\n            display_name=\"Basic Clean Data\",\n            value=True,\n            advanced=True,\n            info=\"Whether to clean data before converting to string.\",\n        ),\n    ]\n    outputs = [\n        Output(\n            display_name=\"Output Message\",\n            name=\"message\",\n            method=\"message_response\",\n        ),\n    ]\n\n    def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n        source_dict = {}\n        if id_:\n            source_dict[\"id\"] = id_\n        if display_name:\n            source_dict[\"display_name\"] = display_name\n        if source:\n            # Handle case where source is a ChatOpenAI object\n            if hasattr(source, \"model_name\"):\n                source_dict[\"source\"] = source.model_name\n            elif hasattr(source, \"model\"):\n                source_dict[\"source\"] = str(source.model)\n            else:\n                source_dict[\"source\"] = str(source)\n        return Source(**source_dict)\n\n    async def message_response(self) -> Message:\n        # First convert the input to 
string if needed\n        text = self.convert_to_string()\n\n        # Get source properties\n        source, _, display_name, source_id = self.get_properties_from_source_component()\n\n        # Create or use existing Message object\n        if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n            message = self.input_value\n            # Update message properties\n            message.text = text\n            # Preserve existing session_id from the incoming message if it exists\n            existing_session_id = message.session_id\n        else:\n            message = Message(text=text)\n            existing_session_id = None\n\n        # Set message properties\n        message.sender = self.sender\n        message.sender_name = self.sender_name\n        # Preserve session_id from incoming message, or use component/graph session_id\n        message.session_id = (\n            self.session_id or existing_session_id or (self.graph.session_id if hasattr(self, \"graph\") else None) or \"\"\n        )\n        message.context_id = self.context_id\n        message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n        message.properties.source = self._build_source(source_id, display_name, source)\n\n        # Store message if needed\n        if message.session_id and self.should_store_message:\n            stored_message = await self.send_message(message)\n            self.message.value = stored_message\n            message = stored_message\n\n        self.status = message\n        return message\n\n    def _serialize_data(self, data: Data) -> str:\n        \"\"\"Serialize Data object to JSON string.\"\"\"\n        # Convert data.data to JSON-serializable format\n        serializable_data = jsonable_encoder(data.data)\n        # Serialize with orjson, enabling pretty printing with indentation\n        json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n        # Convert bytes to string 
and wrap in Markdown code blocks\n        return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n    def _validate_input(self) -> None:\n        \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n        if self.input_value is None:\n            msg = \"Input data cannot be None\"\n            raise ValueError(msg)\n        if isinstance(self.input_value, list) and not all(\n            isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n        ):\n            invalid_types = [\n                type(item).__name__\n                for item in self.input_value\n                if not isinstance(item, Message | Data | DataFrame | str)\n            ]\n            msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n            raise TypeError(msg)\n        if not isinstance(\n            self.input_value,\n            Message | Data | DataFrame | str | list | Generator | type(None),\n        ):\n            type_name = type(self.input_value).__name__\n            msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n            raise TypeError(msg)\n\n    def convert_to_string(self) -> str | Generator[Any, None, None]:\n        \"\"\"Convert input data to string with proper error handling.\"\"\"\n        self._validate_input()\n        if isinstance(self.input_value, list):\n            clean_data: bool = getattr(self, \"clean_data\", False)\n            return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n        if isinstance(self.input_value, Generator):\n            return self.input_value\n        return safe_convert(self.input_value)\n"
    +              },
    +              "context_id": {
    +                "_input_type": "MessageTextInput",
    +                "advanced": true,
    +                "display_name": "Context ID",
    +                "dynamic": false,
    +                "info": "The context ID of the chat. Adds an extra layer to the local memory.",
    +                "input_types": [
    +                  "Message"
    +                ],
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "load_from_db": false,
    +                "name": "context_id",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_input": true,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": false,
    +                "type": "str",
    +                "value": ""
    +              },
    +              "data_template": {
    +                "_input_type": "MessageTextInput",
    +                "advanced": true,
    +                "display_name": "Data Template",
    +                "dynamic": false,
    +                "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
    +                "input_types": [
    +                  "Message"
    +                ],
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "load_from_db": false,
    +                "name": "data_template",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_input": true,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": false,
    +                "type": "str",
    +                "value": "{text}"
    +              },
    +              "input_value": {
    +                "_input_type": "HandleInput",
    +                "advanced": false,
    +                "display_name": "Inputs",
    +                "dynamic": false,
    +                "info": "Message to be passed as output.",
    +                "input_types": [
    +                  "Data",
    +                  "JSON",
    +                  "DataFrame",
    +                  "Table",
    +                  "Message"
    +                ],
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "name": "input_value",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": true,
    +                "show": true,
    +                "title_case": false,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": false,
    +                "type": "other",
    +                "value": ""
    +              },
    +              "sender": {
    +                "_input_type": "DropdownInput",
    +                "advanced": true,
    +                "combobox": false,
    +                "dialog_inputs": {},
    +                "display_name": "Sender Type",
    +                "dynamic": false,
    +                "external_options": {},
    +                "info": "Type of sender.",
    +                "name": "sender",
    +                "options": [
    +                  "Machine",
    +                  "User"
    +                ],
    +                "options_metadata": [],
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "toggle": false,
    +                "tool_mode": false,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": true,
    +                "type": "str",
    +                "value": "Machine"
    +              },
    +              "sender_name": {
    +                "_input_type": "MessageTextInput",
    +                "advanced": true,
    +                "display_name": "Sender Name",
    +                "dynamic": false,
    +                "info": "Name of the sender.",
    +                "input_types": [
    +                  "Message"
    +                ],
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "load_from_db": false,
    +                "name": "sender_name",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_input": true,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": false,
    +                "type": "str",
    +                "value": "AI"
    +              },
    +              "session_id": {
    +                "_input_type": "MessageTextInput",
    +                "advanced": true,
    +                "display_name": "Session ID",
    +                "dynamic": false,
    +                "info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
    +                "input_types": [
    +                  "Message"
    +                ],
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "load_from_db": false,
    +                "name": "session_id",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_input": true,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": false,
    +                "type": "str",
    +                "value": ""
    +              },
    +              "should_store_message": {
    +                "_input_type": "BoolInput",
    +                "advanced": true,
    +                "display_name": "Store Messages",
    +                "dynamic": false,
    +                "info": "Store the message in the history.",
    +                "list": false,
    +                "list_add_label": "Add More",
    +                "name": "should_store_message",
    +                "override_skip": false,
    +                "placeholder": "",
    +                "required": false,
    +                "show": true,
    +                "title_case": false,
    +                "tool_mode": false,
    +                "trace_as_metadata": true,
    +                "track_in_telemetry": true,
    +                "type": "bool",
    +                "value": true
    +              }
    +            },
    +            "tool_mode": false
    +          },
    +          "showNode": false,
    +          "type": "ChatOutput"
    +        },
    +        "id": "ChatOutput-boh63",
    +        "measured": {
    +          "height": 52,
    +          "width": 192
    +        },
    +        "position": {
    +          "x": 831,
    +          "y": 292
    +        },
    +        "type": "genericNode"
    +      }
    +    ],
    +    "viewport": {
    +      "x": -619.5,
    +      "y": -230,
    +      "zoom": 2
    +    }
    +  },
    +  "description": "A basic \"Hello World\" template.",
    +  "endpoint_name": null,
    +  "icon": null,
    +  "icon_bg_color": null,
    +  "id": "44d4caa3-488a-4d5d-9a1d-20af07107439",
    +  "is_component": false,
    +  "locked": false,
    +  "mcp_enabled": false,
    +  "name": "hello-world",
    +  "tags": [],
    +  "webhook": false
    +}
    
  • src/lfx/src/lfx/templates/github-actions/langflow-push.yml+102 0 added
    @@ -0,0 +1,102 @@
    +# langflow-push.yml
    +#
    +# PURPOSE
    +#   Deploy (upsert) all flows to a remote Langflow instance whenever
    +#   flow files are merged to main.  Uses stable flow IDs so re-running
    +#   this workflow always converges to the same state.
    +#
    +# PREREQUISITES
    +#   pip install lfx langflow-sdk
    +#
    +# GITHUB SETUP  (Settings → Environments → New environment → "production")
    +#   Variables  (plain text):
    +#     LANGFLOW_PROD_URL        e.g. https://langflow.example.com
    +#     LANGFLOW_PROJECT_NAME    e.g. "Production Flows"   (optional)
    +#   Secrets    (masked):
    +#     LANGFLOW_PROD_API_KEY    your production API key
    +#
    +#   Using a GitHub Environment lets you require manual approval before
    +#   each production deploy (Settings → Environments → Required reviewers).
    +#
    +# USAGE
    +#   Copy to .github/workflows/langflow-push.yml in your repo.
    +#   The workflow triggers automatically on merges to main that touch
    +#   flows/ and can also be triggered manually from the Actions tab.
    +
    +name: Deploy Langflow Flows
    +
    +on:
    +  push:
    +    branches:
    +      - main
    +    paths:
    +      - "flows/**/*.json"
    +  # Manual trigger with optional dry-run flag
    +  workflow_dispatch:
    +    inputs:
    +      dry_run:
    +        description: "Dry run (show what would be pushed without making changes)"
    +        type: boolean
    +        default: false
    +
    +jobs:
    +  push:
    +    name: lfx push
    +    runs-on: ubuntu-latest
    +    timeout-minutes: 10
    +
    +    environment: production
    +
    +    steps:
    +      - name: Checkout
    +        uses: actions/checkout@v4
    +
    +      - name: Set up Python
    +        uses: actions/setup-python@v5
    +        with:
    +          python-version: "3.12"
    +          cache: pip
    +
    +      # ------------------------------------------------------------------ #
    +      # Install lfx + the SDK it needs to push flows.                      #
    +      # ------------------------------------------------------------------ #
    +      - name: Install dependencies
    +        run: pip install lfx langflow-sdk
    +
    +      # ------------------------------------------------------------------ #
    +      # Write environments config at CI time.                              #
    +      # The URL is a plain variable; the API key stays in secrets and is   #
    +      # resolved by langflow-sdk via the api_key_env pointer at runtime.   #
    +      # ------------------------------------------------------------------ #
    +      - name: Write environments config
    +        run: >-
    +          printf '[environments.production]\nurl = "%s"\napi_key_env = "LANGFLOW_PROD_API_KEY"\n'
    +          "$LANGFLOW_PROD_URL" > langflow-environments.toml
    +        env:
    +          LANGFLOW_PROD_URL: ${{ vars.LANGFLOW_PROD_URL }}
    +
    +      # ------------------------------------------------------------------ #
    +      # Push flows.  Add --project to group flows into a folder.           #
    +      # Remove --project / --project-id lines if you don't use projects.   #
    +      # ------------------------------------------------------------------ #
    +      - name: Push flows
    +        env:
    +          # Resolved from the GitHub variable; falls back to "Main" if unset
    +          LANGFLOW_PROJECT_NAME: ${{ vars.LANGFLOW_PROJECT_NAME || 'Main' }}
    +          LANGFLOW_ENVIRONMENTS_FILE: langflow-environments.toml
    +          LANGFLOW_PROD_API_KEY: ${{ secrets.LANGFLOW_PROD_API_KEY }}
    +        run: |
    +          lfx push \
    +            --dir flows/ \
    +            --env production \
    +            --project "$LANGFLOW_PROJECT_NAME" \
    +            ${{ github.event.inputs.dry_run == 'true' && '--dry-run' || '' }}
    +
    +      - name: Summary
    +        if: always()
    +        run: |
    +          echo "### Deploy summary" >> "$GITHUB_STEP_SUMMARY"
    +          echo "- Environment: **production**" >> "$GITHUB_STEP_SUMMARY"
    +          echo "- Triggered by: ${{ github.event_name }}" >> "$GITHUB_STEP_SUMMARY"
    +          echo "- Dry run: ${{ github.event.inputs.dry_run || 'false' }}" >> "$GITHUB_STEP_SUMMARY"
    +          echo "- Commit: \`${{ github.sha }}\`" >> "$GITHUB_STEP_SUMMARY"
    
  • src/lfx/src/lfx/templates/github-actions/langflow-test.yml+88 0 added
    @@ -0,0 +1,88 @@
    +# langflow-test.yml
    +#
    +# PURPOSE
    +#   Run your pytest flow-integration tests against a live staging instance
    +#   on every PR that touches flow files or tests.
    +#
    +# PREREQUISITES
    +#   pip install "langflow-sdk[testing]" pytest
    +#
    +# GITHUB SETUP  (Settings → Environments → New environment → "staging")
    +#   Variables  (plain text, visible in logs):
    +#     LANGFLOW_STAGING_URL   e.g. https://staging.langflow.example.com
    +#   Secrets    (masked, never logged):
    +#     LANGFLOW_STAGING_API_KEY   your staging API key
    +#
    +# USAGE
    +#   Copy to .github/workflows/langflow-test.yml in your repo.
    +#   Write tests in tests/ using the flow_runner fixture:
    +#
    +#       def test_my_flow(flow_runner):
    +#           response = flow_runner("my-endpoint", "Hello!")
    +#           assert response.first_text_output() is not None
    +#
    +# SKIPPING
    +#   Tests are automatically skipped when no connection is configured,
    +#   so PRs from external contributors (who lack secrets) still pass.
    +
    +name: Test Langflow Flows
    +
    +on:
    +  pull_request:
    +    paths:
    +      - "flows/**/*.json"
    +      - "tests/**/*.py"
    +  workflow_dispatch:
    +
    +jobs:
    +  test:
    +    name: pytest (flow_runner)
    +    runs-on: ubuntu-latest
    +    timeout-minutes: 15
    +
    +    # Remove this `environment:` block if you are not using GitHub Environments.
    +    # The staging secrets/variables must still be available via some mechanism.
    +    environment: staging
    +
    +    steps:
    +      - name: Checkout
    +        uses: actions/checkout@v4
    +
    +      - name: Set up Python
    +        uses: actions/setup-python@v5
    +        with:
    +          python-version: "3.12"
    +          cache: pip
    +
    +      # ------------------------------------------------------------------ #
    +      # Install testing dependencies.                                       #
    +      # Adjust versions / add other project dependencies as needed.        #
    +      # ------------------------------------------------------------------ #
    +      - name: Install dependencies
    +        run: pip install "langflow-sdk[testing]" pytest
    +
    +      # ------------------------------------------------------------------ #
    +      # Write a minimal environments config so --langflow-env works.       #
    +      # The URL comes from a plain variable; the key is injected as an     #
    +      # env var and resolved by langflow-sdk at runtime (never written to  #
    +      # disk).                                                              #
    +      # ------------------------------------------------------------------ #
    +      - name: Write environments config
    +        run: >-
    +          printf '[environments.staging]\nurl = "%s"\napi_key_env = "LANGFLOW_STAGING_API_KEY"\n'
    +          "$LANGFLOW_STAGING_URL" > langflow-environments.toml
    +        env:
    +          LANGFLOW_STAGING_URL: ${{ vars.LANGFLOW_STAGING_URL }}
    +
    +      - name: Run flow tests
    +        env:
    +          # URL for the flow_runner --langflow-env option
    +          LANGFLOW_ENVIRONMENTS_FILE: langflow-environments.toml
    +          # API key resolved by api_key_env in the environments file
    +          LANGFLOW_STAGING_API_KEY: ${{ secrets.LANGFLOW_STAGING_API_KEY }}
    +        run: |
    +          pytest tests/ \
    +            --langflow-env staging \
    +            -m integration \
    +            -v \
    +            --tb=short
    
  • src/lfx/src/lfx/templates/github-actions/langflow-validate.yml+54 0 added
    @@ -0,0 +1,54 @@
    +# langflow-validate.yml
    +#
    +# PURPOSE
    +#   Block a PR if any changed flow JSON fails lfx validate.
    +#   No secrets required -- this is a pure static analysis step.
    +#
    +# USAGE
    +#   Copy to .github/workflows/langflow-validate.yml in your repo.
    +#   Adjust `paths` to match where you store your flow files.
    +#
    +# CUSTOMISATION
    +#   --level 4   Full validation (structure + components + edge types + required inputs).
    +#               Lower to 1-3 to skip progressively deeper checks.
    +#   --skip-*    Use --skip-components, --skip-edge-types, or --skip-required-inputs
    +#               to suppress individual check categories.
    +
    +name: Validate Langflow Flows
    +
    +on:
    +  pull_request:
    +    paths:
    +      - "flows/**/*.json"
    +  # Allow manual runs from the Actions tab
    +  workflow_dispatch:
    +
    +jobs:
    +  validate:
    +    name: lfx validate
    +    runs-on: ubuntu-latest
    +    timeout-minutes: 5
    +
    +    steps:
    +      - name: Checkout
    +        uses: actions/checkout@v4
    +
    +      - name: Set up Python
    +        uses: actions/setup-python@v5
    +        with:
    +          python-version: "3.12"
    +          cache: pip
    +
    +      # ------------------------------------------------------------------ #
    +      # Install lfx.  Replace with your published package name / version.  #
    +      # ------------------------------------------------------------------ #
    +      - name: Install lfx
    +        run: pip install lfx
    +
    +      - name: Validate flows
    +        run: |
    +          lfx validate flows/ \
    +            --level 4 \
    +            --format text
    +        # Exit code 1  => validation errors found  (blocks merge)
    +        # Exit code 2  => a flow file was not found
    
  • src/lfx/src/lfx/templates/gitlab-ci/langflow.yml+152 0 added
    @@ -0,0 +1,152 @@
    +# langflow.yml  --  GitLab CI template for the Flow DevOps Toolkit
    +#
    +# PURPOSE
    +#   Drop-in GitLab CI fragment providing three stages:
    +#     validate  -- static analysis on every MR (no secrets needed)
    +#     test      -- integration tests against staging on every MR
    +#     deploy    -- push flows to production on merge to main
    +#
    +# USAGE
    +#   Option A: include the whole file in your .gitlab-ci.yml:
    +#
    +#       include:
    +#         - local: .gitlab/ci/langflow.yml
    +#
    +#   Option B: copy the individual job definitions you need directly
    +#             into your existing .gitlab-ci.yml.
    +#
    +# GITLAB SETUP
    +#   Settings → CI/CD → Variables:
    +#
    +#   Variable name               Type     Protected  Masked  Example value
    +#   ──────────────────────────  ───────  ─────────  ──────  ─────────────────────────────
    +#   LANGFLOW_STAGING_URL        Variable     ✓         ✗    https://staging.langflow.example.com
    +#   LANGFLOW_STAGING_API_KEY    Variable     ✓         ✓    <staging api key>
    +#   LANGFLOW_PROD_URL           Variable     ✓         ✗    https://langflow.example.com
    +#   LANGFLOW_PROD_API_KEY       Variable     ✓         ✓    <production api key>
    +#   LANGFLOW_PROJECT_NAME       Variable     ✗         ✗    Production Flows
    +#
    +#   Marking LANGFLOW_*_API_KEY as "Masked" prevents the value from appearing
    +#   in job logs.  Marking variables as "Protected" limits them to protected
    +#   branches (main / tags), which restricts production access automatically.
    +
    +# ── Stages ────────────────────────────────────────────────────────────────── #
    +
    +stages:
    +  - validate
    +  - test
    +  - deploy
    +
    +# ── Shared config ─────────────────────────────────────────────────────────── #
    +
    +.langflow-base:
    +  image: python:3.12-slim
    +  before_script:
    +    - pip install --quiet lfx langflow-sdk "langflow-sdk[testing]" pytest
    +
    +.write-staging-env: &write-staging-env
    +  - >-
    +    printf '[environments.staging]\nurl = "%s"\napi_key_env = "LANGFLOW_STAGING_API_KEY"\n'
    +    "$LANGFLOW_STAGING_URL" > langflow-environments.toml
    +
    +.write-prod-env: &write-prod-env
    +  - >-
    +    printf '[environments.production]\nurl = "%s"\napi_key_env = "LANGFLOW_PROD_API_KEY"\n'
    +    "$LANGFLOW_PROD_URL" > langflow-environments.toml
    +
    +# ── validate ──────────────────────────────────────────────────────────────── #
    +
    +langflow:validate:
    +  extends: .langflow-base
    +  stage: validate
    +  rules:
    +    # Run on every MR that touches a flow file
    +    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    +      changes:
    +        - flows/**/*.json
    +    # Allow manual trigger from the pipeline UI
    +    - when: manual
    +      allow_failure: true
    +  script:
    +    - lfx validate flows/ --level 4 --format text
    +  # Exit code 1 = validation errors  →  job fails, MR blocked
    +  # Exit code 2 = file not found     →  job fails
    +  allow_failure: false
    +
    +# ── test ──────────────────────────────────────────────────────────────────── #
    +
    +langflow:test:
    +  extends: .langflow-base
    +  stage: test
    +  rules:
    +    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    +      changes:
    +        - flows/**/*.json
    +        - tests/**/*.py
    +    - when: manual
    +      allow_failure: true
    +  script:
    +    - *write-staging-env
    +    - >-
    +      pytest tests/
    +      --langflow-env staging
    +      -m integration
    +      -v
    +      --tb=short
    +  variables:
    +    LANGFLOW_ENVIRONMENTS_FILE: langflow-environments.toml
    +  # Tests auto-skip when LANGFLOW_STAGING_URL is absent, so this job never
    +  # hard-fails on feature branches that lack staging access.
    +  allow_failure: false
    +
    +# ── deploy ────────────────────────────────────────────────────────────────── #
    +
    +langflow:deploy:
    +  extends: .langflow-base
    +  stage: deploy
    +  rules:
    +    # Auto-deploy when flow files change on main
    +    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
    +      changes:
    +        - flows/**/*.json
    +    # Manual deploy from any branch / pipeline UI
    +    - when: manual
    +      allow_failure: true
    +  script:
    +    - *write-prod-env
    +    # Use CI_ENVIRONMENT_NAME in the job summary
    +    - echo "Deploying to production (${LANGFLOW_PROD_URL})"
    +    - >-
    +      lfx push
    +      --dir flows/
    +      --env production
    +      --project "${LANGFLOW_PROJECT_NAME:-Main}"
    +  variables:
    +    LANGFLOW_ENVIRONMENTS_FILE: langflow-environments.toml
    +  environment:
    +    name: production
    +    url: $LANGFLOW_PROD_URL
    +  # Protect this job by restricting LANGFLOW_PROD_API_KEY to protected
    +  # branches only (see setup instructions above).
    +  allow_failure: false
    +
    +# ── optional: dry-run ─────────────────────────────────────────────────────── #
    +
    +langflow:deploy:dry-run:
    +  extends: langflow:deploy
    +  stage: deploy
    +  rules:
    +    - when: manual
    +      allow_failure: true
    +  script:
    +    - *write-prod-env
    +    - echo "Dry run -- no changes will be made"
    +    - >-
    +      lfx push
    +      --dir flows/
    +      --env production
    +      --project "${LANGFLOW_PROJECT_NAME:-Main}"
    +      --dry-run
    +  environment:
    +    name: production (dry run)
    +    url: $LANGFLOW_PROD_URL
    
  • src/lfx/src/lfx/templates/README.md+166 0 added
    @@ -0,0 +1,166 @@
    +# CI/CD Pipeline Templates
    +
    +Ready-to-use workflow files for the Flow DevOps Toolkit.
    +Copy the files you need into your project's CI configuration.
    +
    +## GitHub Actions
    +
    +| File | Trigger | Secrets needed |
    +|------|---------|----------------|
    +| [`github-actions/langflow-validate.yml`](github-actions/langflow-validate.yml) | PR touching `flows/**/*.json` | None |
    +| [`github-actions/langflow-test.yml`](github-actions/langflow-test.yml) | PR touching flows or tests | `LANGFLOW_STAGING_API_KEY` |
    +| [`github-actions/langflow-push.yml`](github-actions/langflow-push.yml) | Push to `main` touching flows | `LANGFLOW_PROD_API_KEY` |
    +
    +### Quick start
    +
    +```bash
    +mkdir -p .github/workflows
    +cp github-actions/langflow-validate.yml \
    +   github-actions/langflow-test.yml \
    +   github-actions/langflow-push.yml \
    +   .github/workflows/
    +```
    +
    +Configure these in **Settings → Environments**:
    +
    +**`staging`** environment (used by `langflow-test.yml`):
    +| Name | Type | Value |
    +|------|------|-------|
    +| `LANGFLOW_STAGING_URL` | Variable | `https://staging.langflow.example.com` |
    +| `LANGFLOW_STAGING_API_KEY` | Secret | your staging API key |
    +
    +**`production`** environment (used by `langflow-push.yml`):
    +| Name | Type | Value |
    +|------|------|-------|
    +| `LANGFLOW_PROD_URL` | Variable | `https://langflow.example.com` |
    +| `LANGFLOW_PROD_API_KEY` | Secret | your production API key |
    +| `LANGFLOW_PROJECT_NAME` | Variable | `Production Flows` *(optional)* |
    +
    +Add **Required reviewers** to the `production` environment to gate every deploy
    +behind a manual approval step.
    +
    +---
    +
    +## GitLab CI
    +
    +| File | Description |
    +|------|-------------|
    +| [`gitlab-ci/langflow.yml`](gitlab-ci/langflow.yml) | Three-stage template: validate → test → deploy |
    +
    +### Quick start
    +
    +```bash
    +mkdir -p .gitlab/ci
    +cp gitlab-ci/langflow.yml .gitlab/ci/
    +```
    +
    +Add to your `.gitlab-ci.yml`:
    +
    +```yaml
    +include:
    +  - local: .gitlab/ci/langflow.yml
    +```
    +
    +Configure these in **Settings → CI/CD → Variables**:
    +
    +| Variable | Protected | Masked | Description |
    +|----------|-----------|--------|-------------|
    +| `LANGFLOW_STAGING_URL` | ✓ | ✗ | Staging instance URL |
    +| `LANGFLOW_STAGING_API_KEY` | ✓ | ✓ | Staging API key |
    +| `LANGFLOW_PROD_URL` | ✓ | ✗ | Production instance URL |
    +| `LANGFLOW_PROD_API_KEY` | ✓ | ✓ | Production API key |
    +| `LANGFLOW_PROJECT_NAME` | ✗ | ✗ | Project folder name *(optional)* |
    +
    +---
    +
    +## Shell scripts (`ci/`)
    +
    +The `shell/` templates (`ci-validate.sh`, `ci-test.sh`, `ci-push.sh`) work with
    +any CI system (Jenkins, CircleCI, Bitbucket Pipelines, Azure Pipelines, etc.).
    +They are copied to `ci/` by `lfx init`.
    +
    +### Environment variables
    +
    +#### `ci-validate.sh`
    +
    +| Variable | Default | Description |
    +|----------|---------|-------------|
    +| `FLOWS_DIR` | `flows/` | Directory containing flow JSON files |
    +| `VALIDATE_LEVEL` | `4` | Validation depth (1–4) |
    +| `VALIDATE_FORMAT` | `text` | Output format: `text` or `json` |
    +| `LFX_VERSION` | *(latest)* | PEP 508 version specifier for `lfx`, e.g. `>=0.4,<1` or `==1.2.3` |
    +
    +#### `ci-test.sh`
    +
    +| Variable | Default | Description |
    +|----------|---------|-------------|
    +| `LANGFLOW_URL` | — | URL of target Langflow instance (Approach A) |
    +| `LANGFLOW_API_KEY` | — | API key for target instance (Approach A) |
    +| `LANGFLOW_ENV` | — | Environment name from config (Approach B) |
    +| `LANGFLOW_ENVIRONMENTS_FILE` | `langflow-environments.toml` | Path to environments config (Approach B) |
    +| `TESTS_DIR` | `tests/` | Directory containing test files |
    +| `PYTEST_MARKERS` | `integration` | Markers passed to `pytest -m` |
    +| `PYTEST_ARGS` | — | Extra arguments forwarded verbatim to pytest |
    +| `SDK_VERSION` | *(latest)* | PEP 508 version specifier for `langflow-sdk` |
    +
    +#### `ci-push.sh`
    +
    +| Variable | Default | Description |
    +|----------|---------|-------------|
    +| `LANGFLOW_URL` | — | URL of target Langflow instance (Approach A) |
    +| `LANGFLOW_API_KEY` | — | API key for target instance (Approach A) |
    +| `LANGFLOW_ENV` | — | Environment name from config (Approach B) |
    +| `LANGFLOW_ENVIRONMENTS_FILE` | `langflow-environments.toml` | Path to environments config (Approach B) |
    +| `FLOWS_DIR` | `flows/` | Directory containing flow JSON files |
    +| `LANGFLOW_PROJECT` | — | Project (folder) name on the remote instance |
    +| `LANGFLOW_PROJECT_ID` | — | Project UUID (takes precedence over `LANGFLOW_PROJECT`) |
    +| `DRY_RUN` | `false` | Set to `true` to preview without making changes |
    +| `LFX_VERSION` | *(latest)* | PEP 508 version specifier for `lfx` |
    +
    +---
    +
    +## How it all fits together
    +
    +```
    +PR opened
    +  │
    +  ├── langflow-validate  ──── lfx validate flows/ --level 4
    +  │                           ↳ blocks merge if any flow is malformed
    +  │
    +  └── langflow-test  ──────── pytest tests/ --langflow-env staging
    +                              ↳ skips gracefully if staging is unavailable
    +
    +Merge to main
    +  │
    +  └── langflow-push  ──────── lfx push --dir flows/ --env production
    +                              ↳ upserts every flow by stable ID
    +                              ↳ idempotent: safe to re-run
    +```
    +
    +## Writing integration tests
    +
    +Install the testing extra:
    +
    +```bash
    +pip install "langflow-sdk[testing]"
    +```
    +
    +Create `tests/test_flows.py`:
    +
    +```python
    +def test_rag_flow(flow_runner):
    +    response = flow_runner("rag-endpoint", "What is Langflow?")
    +    assert "Langflow" in response.first_text_output()
    +
    +async def test_async_flow(async_flow_runner):
    +    response = await async_flow_runner("my-endpoint", "Hello!")
    +    assert response.first_text_output() is not None
    +```
    +
    +Run locally against staging:
    +
    +```bash
    +LANGFLOW_URL=https://staging.langflow.example.com \
    +LANGFLOW_API_KEY=<key> \
    +pytest tests/ -m integration
    +```
    
  • src/lfx/src/lfx/templates/shell/ci-push.sh+131 0 added
    @@ -0,0 +1,131 @@
    +#!/usr/bin/env bash
    +# ci-push.sh
    +#
    +# PURPOSE
    +#   Push (upsert) Langflow flow JSON files to a remote Langflow instance
    +#   using `lfx push`.  Stable flow IDs mean re-running always converges.
    +#
    +# USAGE
    +#   chmod +x ci-push.sh
    +#   export LANGFLOW_URL=https://staging.langflow.example.com
    +#   export LANGFLOW_API_KEY=<your-api-key>
    +#   ./ci-push.sh
    +#
    +# ENVIRONMENT VARIABLES — connection (pick one approach)
    +#
    +#   Approach A: direct URL + key (simplest)
    +#     LANGFLOW_URL        URL of the target Langflow instance.
    +#     LANGFLOW_API_KEY    API key for that instance.
    +#
    +#   Approach B: named environment from a TOML config
    +#     LANGFLOW_ENV                 Name of the environment block.
    +#                                  e.g. staging  or  production
    +#     LANGFLOW_ENVIRONMENTS_FILE   Path to environments TOML.
    +#                                  Default: langflow-environments.toml
    +#     <api_key_env var>            The env var named in api_key_env inside the
    +#                                  TOML block.  Must be exported separately.
    +#
    +#   The TOML format:
    +#
    +#     [environments.staging]
    +#     url         = "https://staging.langflow.example.com"
    +#     api_key_env = "LANGFLOW_STAGING_API_KEY"
    +#
    +#     [environments.production]
    +#     url         = "https://langflow.example.com"
    +#     api_key_env = "LANGFLOW_PROD_API_KEY"
    +#
    +# ENVIRONMENT VARIABLES — behaviour
    +#   FLOWS_DIR            Directory containing flow JSON files.
    +#                        Default: flows/
    +#   LANGFLOW_PROJECT     Project (folder) name on the remote instance.
    +#                        Default: (no project — flows go to the default folder)
    +#   LANGFLOW_PROJECT_ID  Project UUID.  Takes precedence over LANGFLOW_PROJECT.
    +#   DRY_RUN              Set to "true" to show what would be pushed without
    +#                        making any changes.  Default: false
    +#   LFX_VERSION          lfx PEP 508 version specifier suffix appended directly
    +#                        to the package name, e.g. ">=0.4,<1" or "==1.2.3".
    +#                        Default: installs latest.
    +#
    +# EXIT CODES
    +#   0  All flows pushed (or dry-run completed) successfully
    +#   1  One or more flows failed to push
    +#
    +# INTEGRATIONS
    +#   Jenkins:          sh 'ci-push.sh'
    +#   CircleCI:         - run: bash ci-push.sh
    +#   Bitbucket:        - bash ci-push.sh
    +#   Azure Pipelines:  - script: bash ci-push.sh
    +
    +set -euo pipefail
    +
    +# ── Configuration ─────────────────────────────────────────────────────────── #
    +
    +FLOWS_DIR="${FLOWS_DIR:-flows/}"
    +LANGFLOW_ENV="${LANGFLOW_ENV:-}"
    +LANGFLOW_ENVIRONMENTS_FILE="${LANGFLOW_ENVIRONMENTS_FILE:-langflow-environments.toml}"
    +LANGFLOW_URL="${LANGFLOW_URL:-}"
    +LANGFLOW_API_KEY="${LANGFLOW_API_KEY:-}"
    +LANGFLOW_PROJECT="${LANGFLOW_PROJECT:-}"
    +LANGFLOW_PROJECT_ID="${LANGFLOW_PROJECT_ID:-}"
    +DRY_RUN="${DRY_RUN:-false}"
    +LFX_VERSION="${LFX_VERSION:-}"
    +
    +# Normalise LFX_VERSION: if it looks like a bare version (starts with a digit),
    +# prepend "==" so the pip specifier is valid.
    +if [[ -n "${LFX_VERSION}" && "${LFX_VERSION}" =~ ^[0-9] ]]; then
    +  LFX_VERSION="==${LFX_VERSION}"
    +fi
    +
    +# ── Install lfx ───────────────────────────────────────────────────────────── #
    +
    +echo "==> Installing lfx${LFX_VERSION:+ ${LFX_VERSION}} ..."
    +pip install --quiet "lfx${LFX_VERSION}" langflow-sdk
    +
    +# ── Build environments file if using Approach B ───────────────────────────── #
    +
    +if [[ -n "${LANGFLOW_ENV}" && ! -f "${LANGFLOW_ENVIRONMENTS_FILE}" ]]; then
    +  ENV_UPPER="${LANGFLOW_ENV^^}"
    +  ENV_UPPER="${ENV_UPPER//-/_}"
    +  URL_VAR="LANGFLOW_${ENV_UPPER}_URL"
    +  KEY_VAR="LANGFLOW_${ENV_UPPER}_API_KEY"
    +
    +  echo "==> Writing ${LANGFLOW_ENVIRONMENTS_FILE} for environment '${LANGFLOW_ENV}' ..."
    +  printf '[environments.%s]\nurl = "%s"\napi_key_env = "%s"\n' \
    +    "${LANGFLOW_ENV}" \
    +    "${!URL_VAR:-}" \
    +    "${KEY_VAR}" \
    +    > "${LANGFLOW_ENVIRONMENTS_FILE}"
    +  export LANGFLOW_ENVIRONMENTS_FILE
    +fi
    +
    +# ── Build lfx push command ────────────────────────────────────────────────── #
    +
    +PUSH_CMD=(lfx push --dir "${FLOWS_DIR}")
    +
    +if [[ -n "${LANGFLOW_ENV}" ]]; then
    +  PUSH_CMD+=(--env "${LANGFLOW_ENV}")
    +elif [[ -n "${LANGFLOW_URL}" ]]; then
    +  PUSH_CMD+=(--target "${LANGFLOW_URL}")
    +  [[ -n "${LANGFLOW_API_KEY}" ]] && PUSH_CMD+=(--api-key "${LANGFLOW_API_KEY}")
    +else
    +  echo "ERROR: set LANGFLOW_ENV (Approach B) or LANGFLOW_URL (Approach A)" >&2
    +  exit 1
    +fi
    +
    +if [[ -n "${LANGFLOW_PROJECT_ID}" ]]; then
    +  PUSH_CMD+=(--project-id "${LANGFLOW_PROJECT_ID}")
    +elif [[ -n "${LANGFLOW_PROJECT}" ]]; then
    +  PUSH_CMD+=(--project "${LANGFLOW_PROJECT}")
    +fi
    +
    +[[ "${DRY_RUN}" == "true" ]] && PUSH_CMD+=(--dry-run)
    +
    +# ── Push ──────────────────────────────────────────────────────────────────── #
    +
    +echo "==> Pushing flows from ${FLOWS_DIR} ..."
    +[[ "${DRY_RUN}" == "true" ]] && echo "    (dry run — no changes will be made)"
    +echo "==> Running: ${PUSH_CMD[*]}"
    +"${PUSH_CMD[@]}"
    +
    +echo "==> Done."
    
  • src/lfx/src/lfx/templates/shell/ci-test.sh+119 0 added
    @@ -0,0 +1,119 @@
#!/usr/bin/env bash
# ci-test.sh
#
# PURPOSE
#   Run pytest flow-integration tests against a live Langflow instance
#   using the langflow-sdk `flow_runner` fixture.
#
# USAGE
#   chmod +x ci-test.sh
#   ./ci-test.sh
#
# ENVIRONMENT VARIABLES — connection (pick one approach)
#
#   Approach A: direct URL + key (simplest)
#     LANGFLOW_URL        URL of the target Langflow instance.
#                         e.g. https://staging.langflow.example.com
#     LANGFLOW_API_KEY    API key for that instance.
#
#   Approach B: named environment from a TOML config
#     LANGFLOW_ENV                 Name of the environment block in the TOML.
#                                  e.g. staging
#     LANGFLOW_ENVIRONMENTS_FILE   Path to the environments TOML.
#                                  Default: langflow-environments.toml
#     <api_key_env var>            The env var named in api_key_env inside the
#                                  TOML block, e.g. LANGFLOW_STAGING_API_KEY.
#
#   The TOML format (see also ci-push.sh):
#
#     [environments.staging]
#     url        = "https://staging.langflow.example.com"
#     api_key_env = "LANGFLOW_STAGING_API_KEY"
#
# ENVIRONMENT VARIABLES — behaviour
#   TESTS_DIR        Directory containing test files.  Default: tests/
#   PYTEST_MARKERS   Markers to pass to -m.  Default: integration
#   PYTEST_ARGS      Extra arguments forwarded verbatim to pytest.
#   SDK_VERSION      langflow-sdk PEP 508 version specifier suffix appended
#                    directly to the package name, e.g. ">=0.4,<1" or "==1.2.3".
#                    Default: installs latest.
#
# SKIPPING
#   When neither LANGFLOW_URL nor LANGFLOW_ENV is set the tests auto-skip
#   (the flow_runner fixture detects no connection).  This means the script
#   exits 0 even when run on a branch that lacks the necessary secrets.
#
# EXIT CODES
#   0  All tests passed (or skipped due to missing connection)
#   1  One or more tests failed
#
# INTEGRATIONS
#   Jenkins:          sh 'ci-test.sh'
#   CircleCI:         - run: bash ci-test.sh
#   Bitbucket:        - bash ci-test.sh
#   Azure Pipelines:  - script: bash ci-test.sh

set -euo pipefail

# ── Configuration ─────────────────────────────────────────────────────────── #

TESTS_DIR="${TESTS_DIR:-tests/}"
PYTEST_MARKERS="${PYTEST_MARKERS:-integration}"
PYTEST_ARGS="${PYTEST_ARGS:-}"
SDK_VERSION="${SDK_VERSION:-}"
LANGFLOW_ENV="${LANGFLOW_ENV:-}"
LANGFLOW_ENVIRONMENTS_FILE="${LANGFLOW_ENVIRONMENTS_FILE:-langflow-environments.toml}"

# ── Install dependencies ───────────────────────────────────────────────────── #

# Normalise SDK_VERSION: if it looks like a bare version (starts with a digit),
# prepend "==" so the pip specifier is valid.
if [[ -n "${SDK_VERSION}" && "${SDK_VERSION}" =~ ^[0-9] ]]; then
  SDK_VERSION="==${SDK_VERSION}"
fi

echo "==> Installing langflow-sdk[testing] and pytest ..."
# The version specifier goes AFTER the extras bracket, which is valid PEP 508:
# e.g. "langflow-sdk[testing]==1.2.3".
pip install --quiet \
  "langflow-sdk[testing]${SDK_VERSION}" \
  pytest

# ── Build environments file if using Approach B ───────────────────────────── #

# Only generate the file when it does not already exist, so a repo-committed
# environments TOML always wins over the derived one.
if [[ -n "${LANGFLOW_ENV}" && ! -f "${LANGFLOW_ENVIRONMENTS_FILE}" ]]; then
  # Derive variable names from the env name (uppercased, hyphens → underscores)
  ENV_UPPER="${LANGFLOW_ENV^^}"
  ENV_UPPER="${ENV_UPPER//-/_}"
  URL_VAR="LANGFLOW_${ENV_UPPER}_URL"
  KEY_VAR="LANGFLOW_${ENV_UPPER}_API_KEY"

  echo "==> Writing ${LANGFLOW_ENVIRONMENTS_FILE} for environment '${LANGFLOW_ENV}' ..."
  # "${!URL_VAR}" is bash indirect expansion: it reads the value of the
  # variable whose NAME is stored in URL_VAR (e.g. LANGFLOW_STAGING_URL).
  # Only the NAME of the API-key variable is written to disk; the secret
  # value itself never touches the file.
  printf '[environments.%s]\nurl = "%s"\napi_key_env = "%s"\n' \
    "${LANGFLOW_ENV}" \
    "${!URL_VAR:-}" \
    "${KEY_VAR}" \
    > "${LANGFLOW_ENVIRONMENTS_FILE}"
fi

# ── Run tests ─────────────────────────────────────────────────────────────── #

# Build pytest command
PYTEST_CMD=(pytest "${TESTS_DIR}" -v --tb=short)

if [[ -n "${PYTEST_MARKERS}" ]]; then
  PYTEST_CMD+=(-m "${PYTEST_MARKERS}")
fi

if [[ -n "${LANGFLOW_ENV}" ]]; then
  PYTEST_CMD+=(--langflow-env "${LANGFLOW_ENV}")
  # Exported so the pytest plugin can locate the (possibly generated) file.
  export LANGFLOW_ENVIRONMENTS_FILE
elif [[ -n "${LANGFLOW_URL:-}" ]]; then
  PYTEST_CMD+=(--langflow-url "${LANGFLOW_URL}")
  [[ -n "${LANGFLOW_API_KEY:-}" ]] && PYTEST_CMD+=(--langflow-api-key "${LANGFLOW_API_KEY}")
fi

# Append any extra user-supplied args
# Left unquoted on purpose: PYTEST_ARGS is a whitespace-separated list of flags.
# shellcheck disable=SC2206
[[ -n "${PYTEST_ARGS}" ]] && PYTEST_CMD+=(${PYTEST_ARGS})

echo "==> Running: ${PYTEST_CMD[*]}"
"${PYTEST_CMD[@]}"
    
  • src/lfx/src/lfx/templates/shell/ci-validate.sh+63 0 added
    @@ -0,0 +1,63 @@
    +#!/usr/bin/env bash
    +# ci-validate.sh
    +#
    +# PURPOSE
    +#   Validate all Langflow flow JSON files using `lfx validate`.
    +#   No secrets or network access required — pure static analysis.
    +#
    +# USAGE
    +#   chmod +x ci-validate.sh
    +#   ./ci-validate.sh
    +#
    +# ENVIRONMENT VARIABLES
    +#   FLOWS_DIR        Directory containing flow JSON files.
    +#                    Default: flows/
    +#   VALIDATE_LEVEL   Depth of validation (1–4).  Level 4 checks structure,
    +#                    components, edge types, AND required inputs.
    +#                    Default: 4
    +#   VALIDATE_FORMAT  Output format: text | json.
    +#                    Default: text
    +#   LFX_VERSION      lfx PEP 508 version specifier suffix appended directly
    +#                    to the package name, e.g. ">=0.4,<1" or "==1.2.3".
    +#                    Default: installs latest.
    +#
    +# EXIT CODES
    +#   0  All flows valid
    +#   1  One or more flows failed validation
    +#   2  Flow file / directory not found
    +#
    +# INTEGRATIONS
    +#   Jenkins:          sh 'ci-validate.sh'
    +#   CircleCI:         - run: bash ci-validate.sh
    +#   Bitbucket:        - bash ci-validate.sh
    +#   Azure Pipelines:  - script: bash ci-validate.sh
    +#   Generic:          bash ci-validate.sh
    +
    +set -euo pipefail
    +
    +# ── Configuration ─────────────────────────────────────────────────────────── #
    +
    +FLOWS_DIR="${FLOWS_DIR:-flows/}"
    +VALIDATE_LEVEL="${VALIDATE_LEVEL:-4}"
    +VALIDATE_FORMAT="${VALIDATE_FORMAT:-text}"
    +LFX_VERSION="${LFX_VERSION:-}"
    +
    +# Normalise LFX_VERSION: if it looks like a bare version (starts with a digit),
    +# prepend "==" so the pip specifier is valid.
    +if [[ -n "${LFX_VERSION}" && "${LFX_VERSION}" =~ ^[0-9] ]]; then
    +  LFX_VERSION="==${LFX_VERSION}"
    +fi
    +
    +# ── Install lfx ───────────────────────────────────────────────────────────── #
    +
    +echo "==> Installing lfx${LFX_VERSION:+ ${LFX_VERSION}} ..."
    +pip install --quiet "lfx${LFX_VERSION}"
    +
    +# ── Validate ──────────────────────────────────────────────────────────────── #
    +
    +echo "==> Validating flows in ${FLOWS_DIR} (level ${VALIDATE_LEVEL}) ..."
    +lfx validate "${FLOWS_DIR}" \
    +  --level "${VALIDATE_LEVEL}" \
    +  --format "${VALIDATE_FORMAT}"
    +
    +echo "==> All flows valid."
    
  • src/lfx/src/lfx/testing/__init__.py+88 0 added
    @@ -0,0 +1,88 @@
    +"""pytest plugin providing flow_runner fixtures for local Langflow flow execution.
    +
    +The plugin is auto-discovered via the ``pytest11`` entry-point, so no
    +``conftest.py`` changes are needed.  Configure defaults via CLI options or
    +environment variables::
    +
    +    pytest --lfx-env-file .env --lfx-timeout 60 tests/
    +
    +Per-test overrides via markers::
    +
    +    @pytest.mark.lfx_env_file(".env.test")
    +    @pytest.mark.lfx_timeout(30)
    +    def test_my_flow(flow_runner):
    +        result = flow_runner("flows/greeting.json", input_value="Hello")
    +        assert result.status == "success"
    +        assert "hello" in result.text.lower()
    +
    +Tweaks (component-level field overrides, keyed by node id/type/display_name)::
    +
    +    def test_with_tweaks(flow_runner):
    +        result = flow_runner(
    +            "flows/rag.json",
    +            input_value="What is Langflow?",
    +            tweaks={"OpenAI": {"model_name": "gpt-4o-mini", "temperature": 0.0}},
    +        )
    +        assert result.status == "success"
    +
    +Async tests::
    +
    +    async def test_async(async_flow_runner):
    +        result = await async_flow_runner("flows/greeting.json", input_value="Hi")
    +        assert result.status == "success"
    +"""
    +
    +from __future__ import annotations
    +
    +# -- plugin.py (pytest hooks & fixtures) -------------------------------------
    +from lfx.testing.plugin import (
    +    _SKIP_NO_REMOTE,
    +    _get_marker_arg,
    +    _resolve_async_remote_client,
    +    _resolve_remote_client,
    +    _resolve_runner_config,
    +    async_flow_runner,
    +    flow_runner,
    +    pytest_addoption,
    +    pytest_configure,
    +)
    +
    +# -- result.py ---------------------------------------------------------------
    +from lfx.testing.result import FlowResult, _build_result, _build_result_from_sdk_response
    +
    +# -- runners.py --------------------------------------------------------------
    +from lfx.testing.runners import (
    +    AsyncLocalFlowRunner,
    +    AsyncRemoteFlowRunner,
    +    LocalFlowRunner,
    +    RemoteFlowRunner,
    +    _apply_tweaks,
    +    _load_dotenv,
    +    _resolve_flow_args,
    +    _run_async,
    +    _run_sync,
    +)
    +
    +__all__ = [
    +    "_SKIP_NO_REMOTE",
    +    "AsyncLocalFlowRunner",
    +    "AsyncRemoteFlowRunner",
    +    "FlowResult",
    +    "LocalFlowRunner",
    +    "RemoteFlowRunner",
    +    "_apply_tweaks",
    +    "_build_result",
    +    "_build_result_from_sdk_response",
    +    "_get_marker_arg",
    +    "_load_dotenv",
    +    "_resolve_async_remote_client",
    +    "_resolve_flow_args",
    +    "_resolve_remote_client",
    +    "_resolve_runner_config",
    +    "_run_async",
    +    "_run_sync",
    +    "async_flow_runner",
    +    "flow_runner",
    +    "pytest_addoption",
    +    "pytest_configure",
    +]
    
  • src/lfx/src/lfx/testing/plugin.py+292 0 added
    @@ -0,0 +1,292 @@
    +"""pytest plugin hooks, fixtures, and marker registration for lfx.testing."""
    +
    +from __future__ import annotations
    +
    +import contextlib
    +import os
    +from pathlib import Path
    +from typing import Any
    +
    +try:
    +    import pytest
    +except ImportError as exc:
    +    msg = "pytest is required for lfx.testing. Install it with: pip install pytest  (or pip install 'lfx[dev]')"
    +    raise ImportError(msg) from exc
    +
    +from lfx.testing.runners import (
    +    AsyncLocalFlowRunner,
    +    AsyncRemoteFlowRunner,
    +    LocalFlowRunner,
    +    RemoteFlowRunner,
    +)
    +
    +# ---------------------------------------------------------------------------
    +# pytest plugin hooks
    +# ---------------------------------------------------------------------------
    +
    +
    +def pytest_addoption(parser: pytest.Parser) -> None:
    +    """Register lfx-specific CLI options."""
    +    group = parser.getgroup("lfx", "lfx local flow execution options")
    +    group.addoption(
    +        "--lfx-env-file",
    +        dest="lfx_env_file",
    +        default=None,
    +        metavar="PATH",
    +        help="Path to a .env file loaded before each flow execution.",
    +    )
    +    group.addoption(
    +        "--lfx-timeout",
    +        dest="lfx_timeout",
    +        default=None,
    +        type=float,
    +        metavar="SECONDS",
    +        help="Default timeout in seconds for flow execution (0 = no limit).",
    +    )
    +    group.addoption(
    +        "--lfx-flow-dir",
    +        dest="lfx_flow_dir",
    +        default=None,
    +        metavar="DIR",
    +        help="Base directory for resolving relative flow paths (default: cwd).",
    +    )
    +
    +    # Guard against duplicate registration when langflow-sdk[testing] is also installed.
    +    # Both plugins expose the same --langflow-* options; only register them once.
    +    remote = parser.getgroup("langflow", "Langflow remote integration testing options")
    +    _remote_opts = {
    +        "--langflow-env": {
    +            "dest": "langflow_env",
    +            "default": None,
    +            "metavar": "NAME",
    +            "help": (
    +                "Named environment from .lfx/environments.yaml or langflow-environments.toml. "
    +                "When set, flow_runner targets the remote instance instead of running locally."
    +            ),
    +        },
    +        "--langflow-url": {
    +            "dest": "langflow_url",
    +            "default": None,
    +            "metavar": "URL",
    +            "help": "Base URL of the remote Langflow instance (overrides --langflow-env).",
    +        },
    +        "--langflow-api-key": {
    +            "dest": "langflow_api_key",
    +            "default": None,
    +            "metavar": "KEY",
    +            "help": "API key for the remote Langflow instance.",
    +        },
    +        "--langflow-environments-file": {
    +            "dest": "langflow_environments_file",
    +            "default": None,
    +            "metavar": "PATH",
    +            "help": "Path to environments config file (.yaml or .toml; overrides default lookup).",
    +        },
    +    }
    +    for flag, kwargs in _remote_opts.items():
    +        with contextlib.suppress(ValueError):
    +            remote.addoption(flag, **kwargs)
    +
    +
    +def pytest_configure(config: pytest.Config) -> None:
    +    """Register custom markers so pytest --strict-markers does not reject them."""
    +    config.addinivalue_line(
    +        "markers",
    +        "lfx_env_file(path): path to a .env file loaded before this test's flow execution",
    +    )
    +    config.addinivalue_line(
    +        "markers",
    +        "lfx_timeout(seconds): timeout in seconds for this test's flow execution",
    +    )
    +    config.addinivalue_line(
    +        "markers",
    +        "integration: integration test that requires a live Langflow instance",
    +    )
    +
    +
# Message for skipping tests that need a remote Langflow connection when
# neither --langflow-url nor --langflow-env (nor the matching environment
# variables) was supplied.
# NOTE(review): not referenced by the fixtures visible in this module -- it is
# re-exported via ``lfx.testing``; confirm its consumers before changing it.
_SKIP_NO_REMOTE = (
    "No remote Langflow connection configured. "
    "Pass --langflow-url <URL> or --langflow-env <NAME> to run against a live instance."
)
    +
    +
def _resolve_remote_client(request: pytest.FixtureRequest) -> Any | None:
    """Return a sync SDK client if remote options are configured, else ``None``.

    Priority:
    1. ``--langflow-url`` / ``LANGFLOW_URL`` -- direct URL (with optional ``--langflow-api-key``)
    2. ``--langflow-env`` / ``LANGFLOW_ENV`` -- named environment from TOML/YAML file

    Returns:
        A ``langflow_sdk.Client`` when a remote target is configured, or
        ``None`` when the caller should fall back to local execution.

    Note:
        ``pytest.skip`` raises, so the skip calls below never return.
    """
    # In each pair the CLI flag wins over the environment variable.
    url: str | None = request.config.getoption("langflow_url", default=None) or os.environ.get("LANGFLOW_URL")
    env_name: str | None = request.config.getoption("langflow_env", default=None) or os.environ.get("LANGFLOW_ENV")

    # No remote configuration at all -> caller runs the flow locally.
    if not url and not env_name:
        return None

    try:
        import langflow_sdk  # type: ignore[import-untyped]
    except ImportError:
        pytest.skip("langflow-sdk is required for remote testing. Install: pip install langflow-sdk")

    if url:
        api_key: str | None = request.config.getoption("langflow_api_key", default=None) or os.environ.get(
            "LANGFLOW_API_KEY"
        )
        return langflow_sdk.Client(base_url=url, api_key=api_key)

    # Named environment
    env_file: str | None = request.config.getoption("langflow_environments_file", default=None) or os.environ.get(
        "LANGFLOW_ENVIRONMENTS_FILE"
    )
    try:
        from pathlib import Path as _Path

        from langflow_sdk.environments import get_client  # type: ignore[import-untyped]

        # Any misconfiguration (missing file, unknown env name, bad key var)
        # becomes a skip rather than a hard test failure.
        return get_client(env_name, config_file=_Path(env_file) if env_file else None)
    except Exception as exc:  # noqa: BLE001
        pytest.skip(f"Could not configure Langflow environment {env_name!r}: {exc}")
    +
    +
    +def _resolve_async_remote_client(request: pytest.FixtureRequest) -> Any | None:
    +    """Return an async SDK client if remote options are configured, else ``None``."""
    +    url: str | None = request.config.getoption("langflow_url", default=None) or os.environ.get("LANGFLOW_URL")
    +    env_name: str | None = request.config.getoption("langflow_env", default=None) or os.environ.get("LANGFLOW_ENV")
    +
    +    if not url and not env_name:
    +        return None
    +
    +    try:
    +        import langflow_sdk  # type: ignore[import-untyped]
    +    except ImportError:
    +        pytest.skip("langflow-sdk is required for remote testing. Install: pip install langflow-sdk")
    +
    +    if url:
    +        api_key: str | None = request.config.getoption("langflow_api_key", default=None) or os.environ.get(
    +            "LANGFLOW_API_KEY"
    +        )
    +        return langflow_sdk.AsyncClient(base_url=url, api_key=api_key)
    +
    +    env_file: str | None = request.config.getoption("langflow_environments_file", default=None) or os.environ.get(
    +        "LANGFLOW_ENVIRONMENTS_FILE"
    +    )
    +    try:
    +        from pathlib import Path as _Path
    +
    +        from langflow_sdk.environments import get_async_client  # type: ignore[import-untyped]
    +
    +        return get_async_client(env_name, config_file=_Path(env_file) if env_file else None)
    +    except Exception as exc:  # noqa: BLE001
    +        pytest.skip(f"Could not configure Langflow environment {env_name!r}: {exc}")
    +
    +
    +def _get_marker_arg(request: pytest.FixtureRequest, name: str) -> Any:
    +    """Return the first positional argument of marker *name*, or ``None``."""
    +    marker = request.node.get_closest_marker(name)
    +    return marker.args[0] if marker and marker.args else None
    +
    +
    +def _resolve_runner_config(
    +    request: pytest.FixtureRequest,
    +) -> tuple[str | None, float | None, Path | None]:
    +    """Return ``(env_file, timeout, base_dir)`` with marker > CLI > env-var precedence."""
    +    # env_file: marker > --lfx-env-file > LFX_ENV_FILE
    +    env_file: str | None = (
    +        _get_marker_arg(request, "lfx_env_file")
    +        or request.config.getoption("lfx_env_file", default=None)
    +        or os.environ.get("LFX_ENV_FILE")
    +    )
    +
    +    # timeout: marker > --lfx-timeout > LFX_TIMEOUT
    +    timeout: float | None = _get_marker_arg(request, "lfx_timeout")
    +    if timeout is None:
    +        raw_t = request.config.getoption("lfx_timeout", default=None) or os.environ.get("LFX_TIMEOUT")
    +        if raw_t is not None:
    +            with contextlib.suppress(TypeError, ValueError):
    +                timeout = float(raw_t)
    +
    +    # base_dir: --lfx-flow-dir > LFX_FLOW_DIR > None (defaults to cwd in runner)
    +    dir_str: str | None = request.config.getoption("lfx_flow_dir", default=None) or os.environ.get("LFX_FLOW_DIR")
    +    base_dir: Path | None = Path(dir_str) if dir_str else None
    +
    +    return env_file, timeout, base_dir
    +
    +
    +@pytest.fixture
    +def flow_runner(
    +    request: pytest.FixtureRequest,
    +) -> LocalFlowRunner | RemoteFlowRunner:
    +    """Fixture providing a sync flow runner -- local or remote depending on CLI options.
    +
    +    **Local mode** (default)
    +        Runs the flow in-process.  Configure with:
    +
    +        * ``@pytest.mark.lfx_env_file(path)`` / ``@pytest.mark.lfx_timeout(seconds)``
    +        * ``--lfx-env-file`` / ``--lfx-timeout`` / ``--lfx-flow-dir``
    +        * ``LFX_ENV_FILE`` / ``LFX_TIMEOUT`` / ``LFX_FLOW_DIR``
    +
    +    **Remote mode** (when ``--langflow-env`` or ``--langflow-url`` is supplied)
    +        Calls the live Langflow API.  Requires ``langflow-sdk``.
    +
    +        * ``--langflow-env <NAME>`` -- named environment from ``.lfx/environments.yaml``
    +        * ``--langflow-url <URL>`` -- direct URL
    +        * ``--langflow-api-key <KEY>`` / ``LANGFLOW_API_KEY``
    +        * ``--langflow-environments-file <PATH>`` / ``LANGFLOW_ENVIRONMENTS_FILE``
    +        * ``LANGFLOW_ENV`` / ``LANGFLOW_URL``
    +
    +    Example (local)::
    +
    +        def test_greeting(flow_runner):
    +            result = flow_runner("flows/greeting.json", input_value="Hello")
    +            assert result.ok
    +
    +    Example (remote -- run with ``pytest --langflow-env staging``)::
    +
    +        @pytest.mark.integration
    +        def test_greeting(flow_runner):
    +            result = flow_runner("greeting-endpoint", "Hello!")
    +            assert result.first_text_output() is not None
    +    """
    +    client = _resolve_remote_client(request)
    +    if client is not None:
    +        return RemoteFlowRunner(client)
    +
    +    env_file, timeout, base_dir = _resolve_runner_config(request)
    +    return LocalFlowRunner(
    +        default_env_file=env_file,
    +        default_timeout=timeout,
    +        base_dir=base_dir,
    +    )
    +
    +
    +@pytest.fixture
    +def async_flow_runner(
    +    request: pytest.FixtureRequest,
    +) -> AsyncLocalFlowRunner | AsyncRemoteFlowRunner:
    +    """Fixture providing an async flow runner -- local or remote depending on CLI options.
    +
    +    Same mode-selection logic as :func:`flow_runner`.
    +
    +    Example (local)::
    +
    +        async def test_greeting(async_flow_runner):
    +            result = await async_flow_runner("flows/greeting.json", input_value="Hi")
    +            assert result.ok
    +
    +    Example (remote)::
    +
    +        @pytest.mark.integration
    +        async def test_greeting(async_flow_runner):
    +            result = await async_flow_runner("greeting-endpoint", "Hi!")
    +            assert result.first_text_output() is not None
    +    """
    +    client = _resolve_async_remote_client(request)
    +    if client is not None:
    +        return AsyncRemoteFlowRunner(client)
    +
    +    env_file, timeout, base_dir = _resolve_runner_config(request)
    +    return AsyncLocalFlowRunner(
    +        default_env_file=env_file,
    +        default_timeout=timeout,
    +        base_dir=base_dir,
    +    )
    
  • src/lfx/src/lfx/testing/result.py+124 0 added
    @@ -0,0 +1,124 @@
    +"""FlowResult dataclass and helpers for building results from raw dicts."""
    +
    +from __future__ import annotations
    +
    +import json
    +from dataclasses import dataclass
    +from typing import Any
    +
    +_TEXT_REPR_MAX = 60
    +
    +
    +@dataclass
    +class FlowResult:
    +    """Result of a local flow execution via the ``flow_runner`` fixture.
    +
    +    Attributes:
    +        status:   ``"success"`` or ``"error"``.
    +        text:     Primary text output of the flow (first non-empty text/result key).
    +        messages: List of message dicts produced by the flow.
    +        outputs:  Raw outputs dict from graph execution.
    +        logs:     Captured stdout/stderr from execution.
    +        error:    Error message when status is ``"error"``, else ``None``.
    +        timing:   Per-component timing dict when ``timing=True`` was passed, else ``None``.
    +        raw:      The unprocessed result dict returned by ``run_flow()``.
    +    """
    +
    +    status: str
    +    text: str | None
    +    messages: list[dict[str, Any]]
    +    outputs: dict[str, Any]
    +    logs: str
    +    error: str | None
    +    timing: dict[str, Any] | None
    +    raw: dict[str, Any]
    +
    +    @property
    +    def ok(self) -> bool:
    +        """``True`` when *status* is ``"success"``."""
    +        return self.status == "success"
    +
    +    def first_text_output(self) -> str | None:
    +        """Return the primary text output, or ``None`` if there is none.
    +
    +        Convenience alias for :attr:`text`, compatible with the
    +        ``langflow_sdk.RunResponse`` interface so test code works against
    +        both local and remote runners without changes.
    +        """
    +        return self.text
    +
    +    def __repr__(self) -> str:
    +        snippet = (
    +            repr(self.text[:_TEXT_REPR_MAX] + "\u2026")
    +            if self.text and len(self.text) > _TEXT_REPR_MAX
    +            else repr(self.text)
    +        )
    +        return f"FlowResult(status={self.status!r}, text={snippet})"
    +
    +
    +def _build_result(raw: dict[str, Any]) -> FlowResult:
    +    """Construct a :class:`FlowResult` from the dict returned by ``run_flow()``."""
    +    # ``success`` may be absent (treat as True for forward compat)
    +    is_error = (raw.get("success") is False) or raw.get("type") == "error"
    +    status = "error" if is_error else "success"
    +
    +    # Extract primary text from several candidate keys, in priority order
    +    text: str | None = None
    +    for key in ("result", "text", "output"):
    +        val = raw.get(key)
    +        if val is not None:
    +            text = val if isinstance(val, str) else json.dumps(val)
    +            break
    +
    +    messages: list[dict[str, Any]] = raw.get("messages") or []
    +    if not isinstance(messages, list):
    +        messages = []
    +
    +    outputs: dict[str, Any] = raw.get("outputs") or raw.get("result_dict") or {}
    +    if not isinstance(outputs, dict):
    +        outputs = {}
    +
    +    error_msg: str | None = None
    +    if is_error:
    +        error_msg = raw.get("exception_message") or raw.get("error") or "Unknown error"
    +
    +    return FlowResult(
    +        status=status,
    +        text=text,
    +        messages=messages,
    +        outputs=outputs,
    +        logs=raw.get("logs", ""),
    +        error=error_msg,
    +        timing=raw.get("timing"),
    +        raw=raw,
    +    )
    +
    +
    +def _build_result_from_sdk_response(response: Any) -> FlowResult:
    +    """Convert a ``langflow_sdk.RunResponse`` to a :class:`FlowResult`.
    +
    +    Extracts text, messages, and raw outputs from the SDK response so that
    +    test assertions written against :class:`FlowResult` work identically
    +    whether the runner is local or remote.
    +    """
    +    text = response.first_text_output()
    +
    +    messages: list[dict[str, Any]] = []
    +    outputs: dict[str, Any] = {}
    +    for i, out in enumerate(response.outputs):
    +        outputs[str(i)] = out.results
    +        for component_out in out.outputs:
    +            msg = component_out.get("results", {}).get("message")
    +            if isinstance(msg, dict):
    +                messages.append(msg)
    +
    +    return FlowResult(
    +        status="success",
    +        text=text,
    +        messages=messages,
    +        outputs=outputs,
    +        logs="",
    +        error=None,
    +        timing=None,
    +        raw=response.model_dump(),
    +    )
    
  • src/lfx/src/lfx/testing/runners.py+473 0 added
    @@ -0,0 +1,473 @@
    +"""Flow runner classes and internal helpers for local and remote execution."""
    +
    +from __future__ import annotations
    +
    +import asyncio
    +import copy
    +import json
    +from pathlib import Path
    +from typing import Any
    +
    +from lfx.testing.result import FlowResult, _build_result, _build_result_from_sdk_response
    +
    +# ---------------------------------------------------------------------------
    +# Internal helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _apply_tweaks(flow_dict: dict[str, Any], tweaks: dict[str, dict[str, Any]]) -> dict[str, Any]:
    +    """Return a *deep copy* of *flow_dict* with template field values patched.
    +
    +    *tweaks* maps a node identifier -- one of the node's ``id``, ``data.type``,
    +    or ``display_name`` -- to a ``{field_name: new_value}`` dict.  All nodes
    +    whose identifier matches a tweak key are updated.
    +    """
    +    flow = copy.deepcopy(flow_dict)
    +    nodes = flow.get("data", {}).get("nodes", [])
    +    for node in nodes:
    +        node_data: dict = node.get("data") or {}
    +        node_id: str = node.get("id", "")
    +        node_type: str = node_data.get("type", "")
    +        node_obj: dict = node_data.get("node") or {}
    +        display_name: str = node_obj.get("display_name", "")
    +        template: dict = node_obj.get("template") or {}
    +
    +        for tweak_key, field_overrides in tweaks.items():
    +            if tweak_key not in (node_id, node_type, display_name):
    +                continue
    +            for fname, fvalue in field_overrides.items():
    +                if fname not in template:
    +                    continue
    +                if isinstance(template[fname], dict):
    +                    template[fname]["value"] = fvalue
    +                else:
    +                    template[fname] = fvalue
    +    return flow
    +
    +
    +def _load_dotenv(env_file: str | Path) -> None:
    +    """Load environment variables from *env_file* using python-dotenv."""
    +    from dotenv import load_dotenv
    +
    +    load_dotenv(str(env_file), override=True)
    +
    +
    +def _resolve_flow_args(
    +    flow: str | Path | dict[str, Any],
    +    tweaks: dict[str, dict[str, Any]] | None,
    +    base_dir: Path,
    +) -> tuple[Path | None, str | None]:
    +    """Return ``(script_path, flow_json)`` suitable for passing to ``run_flow()``.
    +
    +    When *tweaks* are requested for a JSON flow, the file is loaded, patched,
    +    and returned as an inline JSON string so that ``run_flow()`` picks up the
    +    overrides without modifying any file on disk.
    +    """
    +    if isinstance(flow, dict):
    +        patched = _apply_tweaks(flow, tweaks) if tweaks else flow
    +        return None, json.dumps(patched)
    +
    +    flow_path = Path(flow)
    +    if not flow_path.is_absolute():
    +        flow_path = base_dir / flow_path
    +
    +    if tweaks and flow_path.suffix.lower() == ".json":
    +        try:
    +            raw_dict = json.loads(flow_path.read_text(encoding="utf-8"))
    +            return None, json.dumps(_apply_tweaks(raw_dict, tweaks))
    +        except Exception:  # noqa: BLE001
    +            import logging
    +
    +            logging.getLogger(__name__).debug(
    +                "Failed to apply tweaks to %s; using unmodified flow", flow_path, exc_info=True
    +            )
    +
    +    return flow_path, None
    +
    +
    +# ---------------------------------------------------------------------------
    +# Async core execution
    +# ---------------------------------------------------------------------------
    +
    +
    +async def _run_async(
    +    *,
    +    script_path: Path | None,
    +    flow_json: str | None,
    +    input_value: str | None,
    +    check_variables: bool,
    +    global_variables: dict[str, str] | None,
    +    session_id: str | None,
    +    user_id: str | None,
    +    timing: bool,
    +    timeout: float | None,
    +) -> dict[str, Any]:
    +    """Invoke ``run_flow()`` with an optional timeout; always returns a dict."""
    +    from lfx.run.base import RunError, run_flow
    +
    +    async def _inner() -> dict:
    +        return await run_flow(
    +            script_path=script_path,
    +            flow_json=flow_json,
    +            input_value=input_value,
    +            check_variables=check_variables,
    +            global_variables=global_variables,
    +            session_id=session_id,
    +            user_id=user_id,
    +            timing=timing,
    +        )
    +
    +    try:
    +        if timeout is not None:
    +            return await asyncio.wait_for(_inner(), timeout=timeout)
    +        return await _inner()
    +    except asyncio.TimeoutError:
    +        return {
    +            "success": False,
    +            "type": "error",
    +            "exception_type": "TimeoutError",
    +            "exception_message": f"Flow execution timed out after {timeout:.1f}s",
    +        }
    +    except RunError as exc:
    +        orig = exc.original_exception
    +        return {
    +            "success": False,
    +            "type": "error",
    +            "exception_type": type(orig).__name__ if orig else "RunError",
    +            "exception_message": str(exc),
    +        }
    +    except Exception as exc:  # noqa: BLE001
    +        return {
    +            "success": False,
    +            "type": "error",
    +            "exception_type": type(exc).__name__,
    +            "exception_message": str(exc),
    +        }
    +
    +
    +def _run_sync(**kwargs: Any) -> dict[str, Any]:
    +    """Run ``_run_async`` synchronously, handling already-running event loops.
    +
    +    When called from inside a running event loop (e.g. a ``pytest-asyncio``
    +    test that requests the sync ``flow_runner`` fixture), the coroutine is
    +    dispatched to a fresh thread with its own event loop so we don't deadlock.
    +    """
    +    coro = _run_async(**kwargs)
    +
    +    try:
    +        asyncio.get_running_loop()
    +    except RuntimeError:
    +        # No running loop -- safe to use asyncio.run() directly
    +        return asyncio.run(coro)
    +
    +    # There is a running loop; run in an isolated thread to avoid deadlock
    +    import concurrent.futures
    +
    +    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
    +        future = pool.submit(asyncio.run, coro)
    +        try:
    +            t = kwargs.get("timeout")
    +            return future.result(timeout=t)
    +        except Exception as exc:  # noqa: BLE001
    +            return {
    +                "success": False,
    +                "type": "error",
    +                "exception_type": type(exc).__name__,
    +                "exception_message": str(exc),
    +            }
    +
    +
    +# ---------------------------------------------------------------------------
    +# Runner base helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _import_remote_run_request():
    +    """Import and return the SDK ``RunRequest`` model with a helpful error."""
    +    try:
    +        from langflow_sdk.models import RunRequest  # type: ignore[import-untyped]
    +    except ImportError as exc:
    +        msg = "langflow-sdk is required for remote flow testing. Install: pip install langflow-sdk"
    +        raise ImportError(msg) from exc
    +    return RunRequest
    +
    +
    +def _build_remote_error_result(exc: Exception) -> FlowResult:
    +    """Return a standardized failed ``FlowResult`` for remote runner errors."""
    +    return FlowResult(
    +        status="error",
    +        text=None,
    +        messages=[],
    +        outputs={},
    +        logs="",
    +        error=str(exc),
    +        timing=None,
    +        raw={},
    +    )
    +
    +
    +class _BaseLocalFlowRunner:
    +    """Shared initialization and argument resolution for local flow runners."""
    +
    +    def __init__(
    +        self,
    +        *,
    +        default_env_file: str | Path | None = None,
    +        default_timeout: float | None = None,
    +        base_dir: Path | None = None,
    +    ) -> None:
    +        self._default_env_file = default_env_file
    +        self._default_timeout = default_timeout
    +        self._base_dir = base_dir or Path.cwd()
    +
    +    def _build_run_kwargs(
    +        self,
    +        flow: str | Path | dict[str, Any],
    +        input_value: str | None,
    +        *,
    +        tweaks: dict[str, dict[str, Any]] | None,
    +        global_variables: dict[str, str] | None,
    +        env_file: str | Path | None,
    +        timeout: float | None,
    +        check_variables: bool,
    +        session_id: str | None,
    +        user_id: str | None,
    +        timing: bool,
    +    ) -> dict[str, Any]:
    +        """Build keyword arguments shared by sync and async local execution."""
    +        if env_file or self._default_env_file:
    +            _load_dotenv(env_file or self._default_env_file)
    +
    +        script_path, flow_json = _resolve_flow_args(flow, tweaks, self._base_dir)
    +        resolved_timeout = timeout if timeout is not None else self._default_timeout
    +
    +        return {
    +            "script_path": script_path,
    +            "flow_json": flow_json,
    +            "input_value": input_value,
    +            "check_variables": check_variables,
    +            "global_variables": global_variables,
    +            "session_id": session_id,
    +            "user_id": user_id,
    +            "timing": timing,
    +            "timeout": resolved_timeout,
    +        }
    +
    +
    +class _BaseRemoteFlowRunner:
    +    """Shared request/error handling for sync and async remote runners."""
    +
    +    def __init__(self, client: Any) -> None:
    +        self._client = client
    +
    +    def _build_run_request(
    +        self,
    +        *,
    +        input_value: str,
    +        input_type: str,
    +        output_type: str,
    +        tweaks: dict[str, Any] | None,
    +    ):
    +        """Create an SDK ``RunRequest`` for remote execution."""
    +        run_request_model = _import_remote_run_request()
    +        return run_request_model(
    +            input_value=input_value,
    +            input_type=input_type,
    +            output_type=output_type,
    +            tweaks=tweaks,
    +        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# Callable classes
    +# ---------------------------------------------------------------------------
    +
    +
    +class LocalFlowRunner(_BaseLocalFlowRunner):
    +    """Sync callable returned by the :func:`flow_runner` fixture.
    +
    +    Instantiate via the ``flow_runner`` pytest fixture -- do not construct
    +    directly in test code.  Call it like a function::
    +
    +        def test_greeting(flow_runner):
    +            result = flow_runner("flows/greeting.json", input_value="Hello")
    +            assert result.status == "success"
    +            assert "hello" in result.text.lower()
    +
    +    The first positional argument can be:
    +
    +    * A path string or :class:`~pathlib.Path` to a ``.json`` or ``.py`` flow file.
    +    * A ``dict`` (already-parsed flow JSON).
    +
    +    Relative paths are resolved against ``--lfx-flow-dir`` (default: ``cwd``).
    +    """
    +
    +    def __call__(
    +        self,
    +        flow: str | Path | dict[str, Any],
    +        input_value: str | None = None,
    +        *,
    +        tweaks: dict[str, dict[str, Any]] | None = None,
    +        global_variables: dict[str, str] | None = None,
    +        env_file: str | Path | None = None,
    +        timeout: float | None = None,
    +        check_variables: bool = False,
    +        session_id: str | None = None,
    +        user_id: str | None = None,
    +        timing: bool = False,
    +    ) -> FlowResult:
    +        """Execute a flow synchronously and return a :class:`FlowResult`.
    +
    +        Args:
    +            flow: Path (``.json``/``.py``) or parsed flow dict.
    +            input_value: Chat/text input string to pass into the flow.
    +            tweaks: Component-level overrides -- ``{node_id|type|name: {field: value}}``.
    +            global_variables: Key->value pairs injected into the graph context.
    +            env_file: ``.env`` file loaded before execution (overrides fixture default).
    +            timeout: Seconds before aborting; ``None`` means no limit.
    +            check_variables: Validate that global variables exist in the environment.
    +            session_id: Session ID for memory isolation between calls.
    +            user_id: User ID attached to the graph.
    +            timing: Include per-component timing in :attr:`FlowResult.timing`.
    +        """
    +        raw = _run_sync(
    +            **self._build_run_kwargs(
    +                flow,
    +                input_value,
    +                tweaks=tweaks,
    +                global_variables=global_variables,
    +                env_file=env_file,
    +                timeout=timeout,
    +                check_variables=check_variables,
    +                session_id=session_id,
    +                user_id=user_id,
    +                timing=timing,
    +            )
    +        )
    +        return _build_result(raw)
    +
    +
    +class AsyncLocalFlowRunner(_BaseLocalFlowRunner):
    +    """Async callable returned by the :func:`async_flow_runner` fixture.
    +
    +    Use with ``await`` inside an ``async def`` test::
    +
    +        async def test_greeting(async_flow_runner):
    +            result = await async_flow_runner("flows/greeting.json", input_value="Hello")
    +            assert result.status == "success"
    +    """
    +
    +    async def __call__(
    +        self,
    +        flow: str | Path | dict[str, Any],
    +        input_value: str | None = None,
    +        *,
    +        tweaks: dict[str, dict[str, Any]] | None = None,
    +        global_variables: dict[str, str] | None = None,
    +        env_file: str | Path | None = None,
    +        timeout: float | None = None,
    +        check_variables: bool = False,
    +        session_id: str | None = None,
    +        user_id: str | None = None,
    +        timing: bool = False,
    +    ) -> FlowResult:
    +        """Execute a flow asynchronously and return a :class:`FlowResult`."""
    +        raw = await _run_async(
    +            **self._build_run_kwargs(
    +                flow,
    +                input_value,
    +                tweaks=tweaks,
    +                global_variables=global_variables,
    +                env_file=env_file,
    +                timeout=timeout,
    +                check_variables=check_variables,
    +                session_id=session_id,
    +                user_id=user_id,
    +                timing=timing,
    +            )
    +        )
    +        return _build_result(raw)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Remote runners (requires langflow-sdk)
    +# ---------------------------------------------------------------------------
    +
    +
    +class RemoteFlowRunner(_BaseRemoteFlowRunner):
    +    """Sync callable that runs flows against a live Langflow instance.
    +
    +    Returned by :func:`flow_runner` when ``--langflow-env`` or
    +    ``--langflow-url`` is passed to pytest.  Call it like a function::
    +
    +        def test_greeting(flow_runner):
    +            result = flow_runner("greeting-endpoint", "Hello!")
    +            assert result.first_text_output() is not None
    +
    +    The first argument is a flow endpoint name or UUID (not a local file
    +    path).  Keyword arguments that only apply to local execution (e.g.
    +    ``env_file``, ``global_variables``) are accepted but silently ignored
    +    so that test code is portable between local and remote modes.
    +    """
    +
    +    def __call__(
    +        self,
    +        flow_id_or_endpoint: str,
    +        input_value: str = "",
    +        *,
    +        input_type: str = "chat",
    +        output_type: str = "chat",
    +        tweaks: dict[str, Any] | None = None,
    +        **_kwargs: Any,
    +    ) -> FlowResult:
    +        """Run *flow_id_or_endpoint* against the remote instance."""
    +        request = self._build_run_request(
    +            input_value=input_value,
    +            input_type=input_type,
    +            output_type=output_type,
    +            tweaks=tweaks,
    +        )
    +
    +        try:
    +            response = self._client.run_flow(flow_id_or_endpoint, request)
    +        except Exception as exc:  # noqa: BLE001
    +            return _build_remote_error_result(exc)
    +
    +        return _build_result_from_sdk_response(response)
    +
    +
    +class AsyncRemoteFlowRunner(_BaseRemoteFlowRunner):
    +    """Async callable that runs flows against a live Langflow instance.
    +
    +    Returned by :func:`async_flow_runner` when ``--langflow-env`` or
    +    ``--langflow-url`` is passed to pytest.  Use with ``await``::
    +
    +        async def test_greeting(async_flow_runner):
    +            result = await async_flow_runner("greeting-endpoint", "Hello!")
    +            assert result.first_text_output() is not None
    +    """
    +
    +    async def __call__(
    +        self,
    +        flow_id_or_endpoint: str,
    +        input_value: str = "",
    +        *,
    +        input_type: str = "chat",
    +        output_type: str = "chat",
    +        tweaks: dict[str, Any] | None = None,
    +        **_kwargs: Any,
    +    ) -> FlowResult:
    +        """Run *flow_id_or_endpoint* asynchronously against the remote instance."""
    +        request = self._build_run_request(
    +            input_value=input_value,
    +            input_type=input_type,
    +            output_type=output_type,
    +            tweaks=tweaks,
    +        )
    +
    +        try:
    +            response = await self._client.run_flow(flow_id_or_endpoint, request)
    +        except Exception as exc:  # noqa: BLE001
    +            return _build_remote_error_result(exc)
    +
    +        return _build_result_from_sdk_response(response)
    
  • src/lfx/tests/unit/cli/test_cli_help_smoke.py+35 0 added
    @@ -0,0 +1,35 @@
    +"""Smoke tests: every lfx subcommand's --help must exit 0."""
    +
    +from __future__ import annotations
    +
    +import pytest
    +from lfx.__main__ import app
    +from typer.testing import CliRunner
    +
# Shared Typer/Click test runner; invokes the CLI in-process (no subprocess).
runner = CliRunner()

# Every subcommand registered on the lfx Typer app.
# NOTE(review): keep this list in sync with lfx.__main__ -- a command added
# there but missing here silently escapes the --help smoke check.
ALL_SUBCOMMANDS = [
    "init",
    "login",
    "create",
    "requirements",
    "validate",
    "run",
    "serve",
    "status",
    "push",
    "pull",
    "export",
]
    +
    +
    +def test_root_help():
    +    result = runner.invoke(app, ["--help"])
    +    assert result.exit_code == 0
    +    assert "lfx" in result.output.lower()
    +
    +
    +@pytest.mark.parametrize("cmd", ALL_SUBCOMMANDS)
    +def test_subcommand_help(cmd: str):
    +    result = runner.invoke(app, [cmd, "--help"])
    +    assert result.exit_code == 0, f"`lfx {cmd} --help` failed: {result.output}"
    
  • src/lfx/tests/unit/cli/test_create_command.py+362 0 added
    @@ -0,0 +1,362 @@
    +"""Unit tests for ``lfx create`` and the ``--example`` seeding in ``lfx init``.
    +
    +All tests run entirely in-process; no running Langflow instance required.
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +from typing import TYPE_CHECKING
    +from unittest.mock import patch
    +
    +if TYPE_CHECKING:
    +    from pathlib import Path
    +
    +import pytest
    +import typer
    +from lfx.__main__ import app
    +from lfx.cli.create import (
    +    _load_template,
    +    _slugify,
    +    create_command,
    +    list_templates,
    +    print_templates,
    +)
    +from lfx.cli.init import init_command
    +from typer.testing import CliRunner
    +
# Shared Typer/Click test runner; invokes the CLI in-process (no subprocess).
runner = CliRunner()

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

# Snapshot of the bundled template names, taken at import time; used to
# parametrize the structural-validation tests below.
_REAL_TEMPLATES = list_templates()
    +
    +
    +def _read_flow(path: Path) -> dict:
    +    return json.loads(path.read_text(encoding="utf-8"))
    +
    +
    +# ---------------------------------------------------------------------------
    +# list_templates()
    +# ---------------------------------------------------------------------------
    +
    +
class TestListTemplates:
    """Contract of ``list_templates()``: sorted string names, empty on missing dir."""

    def test_returns_list_of_strings(self):
        result = list_templates()
        assert isinstance(result, list)
        assert all(isinstance(t, str) for t in result)

    def test_includes_known_templates(self):
        result = list_templates()
        assert "hello-world" in result

    def test_sorted(self):
        # Stable alphabetical order is part of the user-facing contract.
        result = list_templates()
        assert result == sorted(result)

    def test_returns_empty_when_dir_missing(self, tmp_path):
        # A missing template directory must degrade to "no templates", not raise.
        fake_dir = tmp_path / "nonexistent"
        with patch("lfx.cli.create._FLOWS_TEMPLATE_DIR", fake_dir):
            assert list_templates() == []
    +
    +
    +# ---------------------------------------------------------------------------
    +# _slugify()
    +# ---------------------------------------------------------------------------
    +
    +
class TestSlugify:
    """``_slugify`` normalizes display names into filesystem-safe slugs."""

    def test_lowercases(self):
        assert _slugify("MyFlow") == "myflow"

    def test_spaces_to_hyphens(self):
        assert _slugify("My Flow Name") == "my-flow-name"

    def test_underscores_to_hyphens(self):
        assert _slugify("my_flow") == "my-flow"

    def test_already_slug(self):
        # Idempotence: an already-slugged name passes through unchanged.
        assert _slugify("my-flow") == "my-flow"
    +
    +
    +# ---------------------------------------------------------------------------
    +# _load_template()
    +# ---------------------------------------------------------------------------
    +
    +
class TestLoadTemplate:
    """``_load_template`` returns parsed flow JSON or fails with a helpful error."""

    def test_loads_hello_world(self):
        flow = _load_template("hello-world")
        assert "id" in flow
        assert "name" in flow
        assert "data" in flow

    def test_raises_on_unknown_template(self):
        with pytest.raises(FileNotFoundError, match="not found"):
            _load_template("does-not-exist")

    def test_error_lists_available_templates(self):
        # The error message should steer the user toward valid template names.
        with pytest.raises(FileNotFoundError, match="hello-world"):
            _load_template("does-not-exist")
    +
    +
    +# ---------------------------------------------------------------------------
    +# Template JSON structure (level-1 validity)
    +# ---------------------------------------------------------------------------
    +
    +
class TestTemplateStructure:
    """Every bundled template must satisfy level-1 structural validation."""

    @pytest.mark.parametrize("template_name", _REAL_TEMPLATES)
    def test_has_required_top_level_keys(self, template_name):
        flow = _load_template(template_name)
        for key in ("id", "name", "data"):
            assert key in flow, f"Template {template_name!r} missing top-level key '{key}'"

    @pytest.mark.parametrize("template_name", _REAL_TEMPLATES)
    def test_has_nodes_and_edges(self, template_name):
        flow = _load_template(template_name)
        assert "nodes" in flow["data"]
        assert "edges" in flow["data"]

    @pytest.mark.parametrize("template_name", _REAL_TEMPLATES)
    def test_every_node_has_id_and_data(self, template_name):
        flow = _load_template(template_name)
        for node in flow["data"]["nodes"]:
            assert "id" in node, f"Node missing 'id' in template {template_name!r}"
            assert "data" in node, f"Node missing 'data' in template {template_name!r}"

    @pytest.mark.parametrize("template_name", _REAL_TEMPLATES)
    def test_every_node_has_type(self, template_name):
        """Nodes should declare data.type to avoid validate warnings."""
        flow = _load_template(template_name)
        for node in flow["data"]["nodes"]:
            assert "type" in node["data"], f"Node {node['id']!r} in template {template_name!r} missing 'data.type'"

    @pytest.mark.parametrize("template_name", _REAL_TEMPLATES)
    def test_edges_reference_existing_nodes(self, template_name):
        # Dangling edges would reference nodes that don't exist in the flow.
        flow = _load_template(template_name)
        node_ids = {n["id"] for n in flow["data"]["nodes"]}
        for edge in flow["data"]["edges"]:
            assert edge["source"] in node_ids, f"Edge source {edge['source']!r} not in nodes for {template_name!r}"
            assert edge["target"] in node_ids, f"Edge target {edge['target']!r} not in nodes for {template_name!r}"

    def test_hello_world_has_input_and_output(self):
        flow = _load_template("hello-world")
        types = {n["data"]["type"] for n in flow["data"]["nodes"]}
        assert "ChatOutput" in types
        assert len(types) >= 2

    def test_hello_world_edge_connects_input_to_output(self):
        flow = _load_template("hello-world")
        node_ids = {n["id"] for n in flow["data"]["nodes"]}
        for edge in flow["data"]["edges"]:
            assert edge["source"] in node_ids
            assert edge["target"] in node_ids
    +
    +
    +# ---------------------------------------------------------------------------
    +# create_command() — core logic
    +# ---------------------------------------------------------------------------
    +
    +
class TestCreateCommand:
    """Core ``create_command`` behavior: naming, UUID stamping, overwrite rules."""

    def test_creates_file_with_correct_name(self, tmp_path):
        dest = create_command("my-flow", output_dir=tmp_path)
        assert dest == tmp_path / "my-flow.json"
        assert dest.exists()

    def test_slugifies_name_for_filename(self, tmp_path):
        dest = create_command("My Cool Flow", output_dir=tmp_path)
        assert dest.name == "my-cool-flow.json"

    def test_flow_name_in_json_is_original(self, tmp_path):
        # Only the filename is slugified; the flow's display name is preserved.
        create_command("My Cool Flow", output_dir=tmp_path)
        flow = _read_flow(tmp_path / "my-cool-flow.json")
        assert flow["name"] == "My Cool Flow"

    def test_generates_unique_uuid(self, tmp_path):
        dest1 = create_command("flow-a", output_dir=tmp_path)
        dest2 = create_command("flow-b", output_dir=tmp_path)
        id1 = _read_flow(dest1)["id"]
        id2 = _read_flow(dest2)["id"]
        assert id1 != id2
        # Must not be the placeholder
        assert id1 != "00000000-0000-0000-0000-000000000000"
        assert id2 != "00000000-0000-0000-0000-000000000000"

    def test_uses_default_template_hello_world(self, tmp_path):
        dest = create_command("test", output_dir=tmp_path)
        flow = _read_flow(dest)
        types = {n["data"]["type"] for n in flow["data"]["nodes"]}
        assert "ChatOutput" in types

    def test_creates_output_dir_if_missing(self, tmp_path):
        nested = tmp_path / "a" / "b" / "flows"
        dest = create_command("my-flow", output_dir=nested)
        assert dest.exists()

    def test_raises_exit1_on_unknown_template(self, tmp_path):
        with pytest.raises(typer.Exit):
            create_command("test", template="nonexistent", output_dir=tmp_path)

    def test_raises_exit1_if_file_exists_no_overwrite(self, tmp_path):
        create_command("my-flow", output_dir=tmp_path)
        with pytest.raises(typer.Exit):
            create_command("my-flow", output_dir=tmp_path, overwrite=False)

    def test_overwrites_existing_file_when_flag_set(self, tmp_path):
        dest = create_command("my-flow", output_dir=tmp_path)
        original_id = _read_flow(dest)["id"]
        dest2 = create_command("my-flow", output_dir=tmp_path, overwrite=True)
        new_id = _read_flow(dest2)["id"]
        assert dest == dest2
        assert new_id != original_id  # new UUID stamped each time

    def test_output_is_valid_json(self, tmp_path):
        dest = create_command("my-flow", output_dir=tmp_path)
        # Should not raise
        flow = json.loads(dest.read_text(encoding="utf-8"))
        assert isinstance(flow, dict)

    def test_raises_exit1_when_no_templates_dir(self, tmp_path):
        fake_dir = tmp_path / "empty"
        with patch("lfx.cli.create._FLOWS_TEMPLATE_DIR", fake_dir), pytest.raises(typer.Exit):
            create_command("test", output_dir=tmp_path)
    +
    +
    +# ---------------------------------------------------------------------------
    +# print_templates() — smoke test
    +# ---------------------------------------------------------------------------
    +
    +
class TestPrintTemplates:
    """Smoke coverage for ``print_templates()`` -- must not raise in any state."""

    def test_does_not_raise(self):
        print_templates()  # just checking it doesn't blow up

    def test_prints_nothing_when_no_templates(self, tmp_path):
        # Empty template dir is a valid state, not an error.
        with patch("lfx.cli.create._FLOWS_TEMPLATE_DIR", tmp_path / "empty"):
            print_templates()
    +
    +
    +# ---------------------------------------------------------------------------
    +# CLI integration — lfx create
    +# ---------------------------------------------------------------------------
    +
    +
class TestCreateCLI:
    """End-to-end ``lfx create`` invocations through the Typer CLI runner."""

    def test_creates_flow_via_cli(self, tmp_path):
        result = runner.invoke(
            app,
            ["create", "hello", "--output-dir", str(tmp_path)],
        )
        assert result.exit_code == 0, result.output
        assert (tmp_path / "hello.json").exists()

    def test_list_flag_prints_templates_and_exits(self):
        # --list short-circuits before the name argument is used.
        result = runner.invoke(app, ["create", "--list", "ignored-name"])
        assert result.exit_code == 0
        assert "hello-world" in result.output

    def test_unknown_template_exits_nonzero(self, tmp_path):
        result = runner.invoke(
            app,
            ["create", "test", "--template", "no-such-template", "--output-dir", str(tmp_path)],
        )
        assert result.exit_code != 0

    def test_overwrite_flag_replaces_file(self, tmp_path):
        runner.invoke(app, ["create", "my-flow", "--output-dir", str(tmp_path)])
        result = runner.invoke(
            app,
            ["create", "my-flow", "--output-dir", str(tmp_path), "--overwrite"],
        )
        assert result.exit_code == 0, result.output

    def test_no_overwrite_exits_nonzero_on_existing(self, tmp_path):
        runner.invoke(app, ["create", "my-flow", "--output-dir", str(tmp_path)])
        result = runner.invoke(app, ["create", "my-flow", "--output-dir", str(tmp_path)])
        assert result.exit_code != 0

    def test_explicit_template_hello_world(self, tmp_path):
        result = runner.invoke(
            app,
            ["create", "my-flow", "--template", "hello-world", "--output-dir", str(tmp_path)],
        )
        assert result.exit_code == 0, result.output
        flow = _read_flow(tmp_path / "my-flow.json")
        types = {n["data"]["type"] for n in flow["data"]["nodes"]}
        assert "ChatOutput" in types
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — --example seeding
    +# ---------------------------------------------------------------------------
    +
    +
class TestInitExampleSeeding:
    """``lfx init --example`` seeds a hello-world flow; ``--no-example`` does not."""

    def test_seeds_hello_world_by_default(self, tmp_path):
        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
        assert (tmp_path / "flows" / "hello-world.json").exists()

    def test_seeded_flow_is_valid_json(self, tmp_path):
        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
        flow = _read_flow(tmp_path / "flows" / "hello-world.json")
        assert "id" in flow
        assert "name" in flow
        assert flow["name"] == "hello-world"

    def test_no_example_does_not_create_hello_world(self, tmp_path):
        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=False)
        assert not (tmp_path / "flows" / "hello-world.json").exists()

    def test_no_example_creates_gitkeep(self, tmp_path):
        # The flows dir must still exist (and survive git) even without the example.
        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=False)
        assert (tmp_path / "flows" / ".gitkeep").exists()

    def test_example_still_creates_other_files(self, tmp_path):
        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
        assert (tmp_path / ".lfx" / "environments.yaml").exists()
        assert (tmp_path / "tests" / "test_flows.py").exists()

    def test_seeded_flow_has_unique_uuid(self, tmp_path):
        # Two projects seeded from the same template must not share a flow id.
        p1 = tmp_path / "proj1"
        p2 = tmp_path / "proj2"
        init_command(project_dir=p1, github_actions=False, overwrite=False, example=True)
        init_command(project_dir=p2, github_actions=False, overwrite=False, example=True)
        id1 = _read_flow(p1 / "flows" / "hello-world.json")["id"]
        id2 = _read_flow(p2 / "flows" / "hello-world.json")["id"]
        assert id1 != id2

    def test_init_cli_example_flag(self, tmp_path):
        result = runner.invoke(
            app,
            ["init", str(tmp_path), "--no-github-actions", "--example"],
        )
        assert result.exit_code == 0, result.output
        assert (tmp_path / "flows" / "hello-world.json").exists()

    def test_init_cli_no_example_flag(self, tmp_path):
        result = runner.invoke(
            app,
            ["init", str(tmp_path), "--no-github-actions", "--no-example"],
        )
        assert result.exit_code == 0, result.output
        assert not (tmp_path / "flows" / "hello-world.json").exists()

    def test_graceful_fallback_when_template_fails(self, tmp_path):
        """If the template dir is missing, init should warn but not crash."""
        fake_dir = tmp_path / "no-templates"
        with patch("lfx.cli.create._FLOWS_TEMPLATE_DIR", fake_dir):
            # Should not raise — the BLE001-guarded except swallows it
            init_command(
                project_dir=tmp_path / "proj",
                github_actions=False,
                overwrite=False,
                example=True,
            )
        assert (tmp_path / "proj" / ".lfx" / "environments.yaml").exists()
    
  • src/lfx/tests/unit/cli/test_export_command.py+723 0 added
    @@ -0,0 +1,723 @@
    +"""Unit tests for lfx export -- export_command and helpers.
    +
    +All tests run entirely in-process; no real Langflow instance or SDK required.
    +The SDK module is replaced wholesale with MagicMock so only the export logic
    +(file normalization, output routing, remote pull, project export) is under test.
    +"""
    +# pragma: allowlist secret -- all credentials in this file are fake test data
    +
    +from __future__ import annotations
    +
    +import json
    +import sys
    +from io import StringIO
    +from unittest.mock import MagicMock, patch
    +
    +import pytest
    +import typer
    +
    +# ---------------------------------------------------------------------------
    +# Shared constants
    +# ---------------------------------------------------------------------------
    +
# Fake server coordinates: never contacted, since the SDK is fully mocked.
_BASE_URL = "http://langflow.test"
_API_KEY = "test-api-key-export"  # pragma: allowlist secret
# Deterministic UUID-shaped identifiers used across the remote-mode tests.
_FLOW_ID = "aaaaaaaa-0000-0000-0000-000000000001"
_PROJECT_ID = "bbbbbbbb-0000-0000-0000-000000000001"

# Canonical flow payload returned by the mocked SDK normalizers.
_FLOW_DICT: dict = {
    "id": _FLOW_ID,
    "name": "My Test Flow",
    "data": {"nodes": [], "edges": []},
}

# Serialized form of _FLOW_DICT; the mocked flow_to_json returns exactly this,
# so tests compare file/stdout contents against it.
_NORMALIZED_JSON = json.dumps(_FLOW_DICT, indent=2)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Fake SDK helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _make_flow_obj(name: str = "My Test Flow", flow_id: str = _FLOW_ID) -> MagicMock:
    +    """Return a mock flow object with .name, .model_dump()."""
    +    obj = MagicMock()
    +    obj.name = name
    +    obj.id = flow_id
    +    obj.model_dump.return_value = {"id": flow_id, "name": name, "data": {}}
    +    return obj
    +
    +
    +def _make_project_obj(
    +    name: str = "My Project",
    +    project_id: str = _PROJECT_ID,
    +    flows: list | None = None,
    +) -> MagicMock:
    +    obj = MagicMock()
    +    obj.name = name
    +    obj.id = project_id
    +    obj.flows = flows if flows is not None else [_make_flow_obj()]
    +    return obj
    +
    +
    +def _make_client_mock(
    +    flow_obj: MagicMock | None = None,
    +    project_obj: MagicMock | None = None,
    +) -> MagicMock:
    +    client = MagicMock()
    +    client.get_flow.return_value = flow_obj if flow_obj is not None else _make_flow_obj()
    +    client.get_project.return_value = project_obj if project_obj is not None else _make_project_obj()
    +    return client
    +
    +
    +def _make_sdk_mock(client_mock: MagicMock | None = None) -> MagicMock:
    +    """Return a mock langflow_sdk module wired up for export tests."""
    +    if client_mock is None:
    +        client_mock = _make_client_mock()
    +    sdk = MagicMock()
    +    sdk.Client.return_value = client_mock
    +    sdk.normalize_flow_file.return_value = _FLOW_DICT
    +    sdk.normalize_flow.return_value = _FLOW_DICT
    +    sdk.flow_to_json.return_value = _NORMALIZED_JSON
    +    return sdk
    +
    +
    +def _make_env_cfg(url: str = _BASE_URL, api_key: str | None = _API_KEY) -> MagicMock:
    +    env_cfg = MagicMock()
    +    env_cfg.url = url
    +    env_cfg.api_key = api_key
    +    return env_cfg
    +
    +
    +def _run_export(
    +    flow_paths: list[str],
    +    *,
    +    output: str | None = None,
    +    output_dir: str | None = None,
    +    env: str | None = None,
    +    flow_id: str | None = None,
    +    project_id: str | None = None,
    +    environments_file: str | None = None,
    +    target: str | None = _BASE_URL,
    +    api_key: str | None = _API_KEY,
    +    in_place: bool = False,
    +    strip_volatile: bool = False,
    +    strip_secrets: bool = False,
    +    code_as_lines: bool = False,
    +    strip_node_volatile: bool = False,
    +    indent: int = 2,
    +    sdk_mock: MagicMock | None = None,
    +    env_cfg: MagicMock | None = None,
    +) -> None:
    +    """Invoke export_command with a mocked SDK and optional mocked env."""
    +    from lfx.cli.export import export_command
    +
    +    mock = sdk_mock if sdk_mock is not None else _make_sdk_mock()
    +    resolved_env = env_cfg if env_cfg is not None else _make_env_cfg()
    +
    +    with (
    +        patch("lfx.cli.export.load_sdk", return_value=mock),
    +        patch("lfx.config.resolve_environment", return_value=resolved_env),
    +    ):
    +        export_command(
    +            flow_paths=flow_paths,
    +            output=output,
    +            output_dir=output_dir,
    +            env=env,
    +            flow_id=flow_id,
    +            project_id=project_id,
    +            environments_file=environments_file,
    +            target=target,
    +            api_key=api_key,
    +            in_place=in_place,
    +            strip_volatile=strip_volatile,
    +            strip_secrets=strip_secrets,
    +            code_as_lines=code_as_lines,
    +            strip_node_volatile=strip_node_volatile,
    +            indent=indent,
    +        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# _safe_filename
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestSafeFilename:
    +    def test_alphanumeric_unchanged(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("MyFlow123") == "MyFlow123"
    +
    +    def test_spaces_replaced_with_underscores(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("My Test Flow") == "My_Test_Flow"
    +
    +    def test_special_chars_replaced_with_underscores(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("flow/name:with*special?chars")
    +        assert "/" not in result
    +        assert ":" not in result
    +        assert "*" not in result
    +        assert "?" not in result
    +
    +    def test_leading_trailing_spaces_stripped(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("  flow name  ")
    +        assert not result.startswith("_")
    +        assert not result.endswith("_")
    +        assert "flow_name" in result
    +
    +    def test_hyphens_and_underscores_preserved(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("my-flow_name")
    +        assert result == "my-flow_name"
    +
    +    def test_empty_string(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("")
    +        assert result == ""
    +
    +    def test_unicode_chars_replaced(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("flow\u2019s name")
    +        assert "\u2019" not in result
    +
    +    def test_all_spaces_produces_underscores(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("a b c")
    +        assert result == "a_b_c"
    +
    +
    +# ---------------------------------------------------------------------------
    +# _write_flow
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestWriteFlow:
    +    def test_in_place_writes_to_source_path(self, tmp_path):
    +        from lfx.cli.export import _write_flow
    +
    +        src = tmp_path / "flow.json"
    +        src.write_text("{}", encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        result = _write_flow(_FLOW_DICT, sdk=sdk, output=None, in_place=True, source_path=src, indent=2)
    +        assert result == src
    +        assert src.read_text(encoding="utf-8") == _NORMALIZED_JSON
    +
    +    def test_in_place_returns_source_path(self, tmp_path):
    +        from lfx.cli.export import _write_flow
    +
    +        src = tmp_path / "flow.json"
    +        src.write_text("{}", encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        result = _write_flow(_FLOW_DICT, sdk=sdk, output=None, in_place=True, source_path=src, indent=2)
    +        assert result == src
    +
    +    def test_output_given_writes_to_output(self, tmp_path):
    +        from lfx.cli.export import _write_flow
    +
    +        out = tmp_path / "out.json"
    +        sdk = _make_sdk_mock()
    +        result = _write_flow(_FLOW_DICT, sdk=sdk, output=out, in_place=False, source_path=None, indent=2)
    +        assert result == out
    +        assert out.read_text(encoding="utf-8") == _NORMALIZED_JSON
    +
    +    def test_output_given_returns_output_path(self, tmp_path):
    +        from lfx.cli.export import _write_flow
    +
    +        out = tmp_path / "result.json"
    +        sdk = _make_sdk_mock()
    +        result = _write_flow(_FLOW_DICT, sdk=sdk, output=out, in_place=False, source_path=None, indent=2)
    +        assert result == out
    +
    +    def test_no_output_no_in_place_writes_to_stdout(self):
    +        from lfx.cli.export import _write_flow
    +
    +        sdk = _make_sdk_mock()
    +        captured = StringIO()
    +        with patch.object(sys, "stdout", captured):
    +            _write_flow(_FLOW_DICT, sdk=sdk, output=None, in_place=False, source_path=None, indent=2)
    +        assert _NORMALIZED_JSON in captured.getvalue()
    +
    +    def test_no_output_no_in_place_returns_none(self):
    +        from lfx.cli.export import _write_flow
    +
    +        sdk = _make_sdk_mock()
    +        captured = StringIO()
    +        with patch.object(sys, "stdout", captured):
    +            result = _write_flow(_FLOW_DICT, sdk=sdk, output=None, in_place=False, source_path=None, indent=2)
    +        assert result is None
    +
    +    def test_in_place_false_with_source_path_uses_output(self, tmp_path):
    +        from lfx.cli.export import _write_flow
    +
    +        src = tmp_path / "src.json"
    +        src.write_text("{}", encoding="utf-8")
    +        out = tmp_path / "dest.json"
    +        sdk = _make_sdk_mock()
    +        result = _write_flow(_FLOW_DICT, sdk=sdk, output=out, in_place=False, source_path=src, indent=2)
    +        assert result == out
    +        assert out.exists()
    +
    +    def test_sdk_flow_to_json_called_with_indent(self, tmp_path):
    +        from lfx.cli.export import _write_flow
    +
    +        out = tmp_path / "out.json"
    +        sdk = _make_sdk_mock()
    +        _write_flow(_FLOW_DICT, sdk=sdk, output=out, in_place=False, source_path=None, indent=4)
    +        sdk.flow_to_json.assert_called_once_with(_FLOW_DICT, indent=4)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Local mode
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestExportCommandLocalMode:
    +    def test_single_file_to_stdout(self, tmp_path):
    +        src = tmp_path / "flow.json"
    +        src.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        captured = StringIO()
    +        with patch.object(sys, "stdout", captured):
    +            _run_export([str(src)], sdk_mock=sdk)
    +        sdk.normalize_flow_file.assert_called_once()
    +
    +    def test_single_file_with_output_writes_file(self, tmp_path):
    +        src = tmp_path / "flow.json"
    +        src.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        out = tmp_path / "exported.json"
    +        sdk = _make_sdk_mock()
    +        _run_export([str(src)], output=str(out), sdk_mock=sdk)
    +        assert out.exists()
    +        assert out.read_text(encoding="utf-8") == _NORMALIZED_JSON
    +
    +    def test_single_file_in_place_overwrites_source(self, tmp_path):
    +        src = tmp_path / "flow.json"
    +        src.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        _run_export([str(src)], in_place=True, sdk_mock=sdk)
    +        assert src.read_text(encoding="utf-8") == _NORMALIZED_JSON
    +
    +    def test_multiple_files_processed(self, tmp_path):
    +        f1 = tmp_path / "a.json"
    +        f2 = tmp_path / "b.json"
    +        f1.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        f2.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        captured = StringIO()
    +        with patch.object(sys, "stdout", captured):
    +            _run_export([str(f1), str(f2)], sdk_mock=sdk)
    +        assert sdk.normalize_flow_file.call_count == 2
    +
    +    def test_multiple_files_with_output_raises_exit_1(self, tmp_path):
    +        f1 = tmp_path / "a.json"
    +        f2 = tmp_path / "b.json"
    +        f1.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        f2.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        out = tmp_path / "out.json"
    +        sdk = _make_sdk_mock()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_export([str(f1), str(f2)], output=str(out), sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_file_not_found_raises_exit_1(self, tmp_path):
    +        sdk = _make_sdk_mock()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_export([str(tmp_path / "missing.json")], sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_normalize_raises_exits_1(self, tmp_path):
    +        src = tmp_path / "flow.json"
    +        src.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        sdk.normalize_flow_file.side_effect = ValueError("bad flow")
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_export([str(src)], sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_no_flow_paths_raises_exit_1(self):
    +        sdk = _make_sdk_mock()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_export([], sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_normalize_kwargs_passed_correctly(self, tmp_path):
    +        src = tmp_path / "flow.json"
    +        src.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        sdk = _make_sdk_mock()
    +        captured = StringIO()
    +        with patch.object(sys, "stdout", captured):
    +            _run_export(
    +                [str(src)],
    +                strip_volatile=True,
    +                strip_secrets=True,
    +                code_as_lines=True,
    +                strip_node_volatile=True,
    +                sdk_mock=sdk,
    +            )
    +        call_kwargs = sdk.normalize_flow_file.call_args.kwargs
    +        assert call_kwargs["strip_volatile"] is True
    +        assert call_kwargs["strip_secrets"] is True
    +        assert call_kwargs["code_as_lines"] is True
    +        assert call_kwargs["strip_node_volatile"] is True
    +
    +    def test_indent_passed_to_flow_to_json(self, tmp_path):
    +        src = tmp_path / "flow.json"
    +        src.write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        out = tmp_path / "out.json"
    +        sdk = _make_sdk_mock()
    +        _run_export([str(src)], output=str(out), indent=4, sdk_mock=sdk)
    +        sdk.flow_to_json.assert_called_with(sdk.normalize_flow_file.return_value, indent=4)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Remote mode — single flow by ID
    +# ---------------------------------------------------------------------------
    +
    +
class TestExportCommandRemoteFlowId:
    """Remote export of a single flow fetched by --flow-id.

    Each test patches ``load_sdk`` (and usually ``resolve_environment``)
    so no network access or config file read ever occurs.
    """

    def test_missing_env_and_target_raises_exit_1(self):
        # Neither --env nor --target supplied: remote mode has no server to
        # talk to, so the command must abort with exit code 1.
        sdk = _make_sdk_mock()
        from lfx.cli.export import export_command

        with patch("lfx.cli.export.load_sdk", return_value=sdk), pytest.raises(typer.Exit) as exc_info:
            export_command(
                flow_paths=[],
                output=None,
                output_dir=None,
                env=None,
                flow_id=_FLOW_ID,
                project_id=None,
                environments_file=None,
                target=None,
                api_key=None,
                in_place=False,
                strip_volatile=False,
                strip_secrets=False,
                code_as_lines=False,
                strip_node_volatile=False,
                indent=2,
            )
        assert exc_info.value.exit_code == 1

    def test_flow_id_fetches_and_writes_to_output_dir(self, tmp_path):
        # The flow is pulled through client.get_flow and lands as one JSON
        # file inside --output-dir.
        flow_obj = _make_flow_obj(name="Fetched Flow")
        client = _make_client_mock(flow_obj=flow_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            flow_id=_FLOW_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        client.get_flow.assert_called_once()
        written = list(tmp_path.glob("*.json"))
        assert len(written) == 1

    def test_flow_id_filename_uses_safe_name(self, tmp_path):
        # Filesystem-hostile characters in the flow name must be sanitised
        # before being used as the output filename.
        flow_obj = _make_flow_obj(name="My Flow: Special!")
        client = _make_client_mock(flow_obj=flow_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            flow_id=_FLOW_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        written = list(tmp_path.glob("*.json"))
        assert len(written) == 1
        assert ":" not in written[0].name
        assert "!" not in written[0].name

    def test_flow_id_creates_output_dir_if_missing(self, tmp_path):
        # A nested, nonexistent --output-dir is created on demand.
        dest = tmp_path / "new_dir" / "subdir"
        flow_obj = _make_flow_obj()
        client = _make_client_mock(flow_obj=flow_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            flow_id=_FLOW_ID,
            env="staging",
            output_dir=str(dest),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        assert dest.exists()

    def test_flow_id_uses_cwd_when_output_dir_not_specified(self, tmp_path, monkeypatch):
        # Without --output-dir the flow file is written to the current
        # working directory.
        monkeypatch.chdir(tmp_path)
        flow_obj = _make_flow_obj(name="CwdFlow")
        client = _make_client_mock(flow_obj=flow_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            flow_id=_FLOW_ID,
            env="staging",
            output_dir=None,
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        written = list(tmp_path.glob("*.json"))
        assert len(written) == 1

    def test_config_error_raises_exit_1(self, tmp_path):
        # A ConfigError during environment resolution maps to exit code 1.
        from lfx.cli.export import export_command
        from lfx.config import ConfigError

        sdk = _make_sdk_mock()
        with (
            patch("lfx.cli.export.load_sdk", return_value=sdk),
            patch("lfx.config.resolve_environment", side_effect=ConfigError("bad config")),
            pytest.raises(typer.Exit) as exc_info,
        ):
            export_command(
                flow_paths=[],
                output=None,
                output_dir=str(tmp_path),
                env="staging",
                flow_id=_FLOW_ID,
                project_id=None,
                environments_file=None,
                target=None,
                api_key=None,
                in_place=False,
                strip_volatile=False,
                strip_secrets=False,
                code_as_lines=False,
                strip_node_volatile=False,
                indent=2,
            )
        assert exc_info.value.exit_code == 1

    def test_client_constructed_with_env_credentials(self, tmp_path):
        # The SDK Client must be built from the resolved environment's
        # url/api_key, not from any other source.
        flow_obj = _make_flow_obj()
        client = _make_client_mock(flow_obj=flow_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg(url=_BASE_URL, api_key=_API_KEY)
        _run_export(
            [],
            flow_id=_FLOW_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        sdk.Client.assert_called_once_with(base_url=_BASE_URL, api_key=_API_KEY)

    def test_normalize_flow_called_with_kwargs(self, tmp_path):
        # Strip flags given on the CLI are forwarded to normalize_flow.
        flow_obj = _make_flow_obj()
        client = _make_client_mock(flow_obj=flow_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            flow_id=_FLOW_ID,
            env="staging",
            output_dir=str(tmp_path),
            strip_volatile=True,
            strip_secrets=True,
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        call_kwargs = sdk.normalize_flow.call_args.kwargs
        assert call_kwargs["strip_volatile"] is True
        assert call_kwargs["strip_secrets"] is True
    +
    +
    +# ---------------------------------------------------------------------------
    +# Remote mode — project
    +# ---------------------------------------------------------------------------
    +
    +
class TestExportCommandRemoteProject:
    """Remote export of a whole project (--project-id): one JSON per flow.

    The SDK is mocked, so tests only verify wiring: which client calls are
    made, how many files are written, and how filenames are sanitised.
    """

    def test_project_exports_all_flows(self, tmp_path):
        # Three flows in the project → three JSON files in --output-dir.
        flows = [_make_flow_obj(name=f"Flow {i}", flow_id=f"aaaaaaaa-0000-0000-0000-{i:012d}") for i in range(3)]
        project_obj = _make_project_obj(name="Test Project", flows=flows)
        client = _make_client_mock(project_obj=project_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            project_id=_PROJECT_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        client.get_project.assert_called_once()
        written = list(tmp_path.glob("*.json"))
        assert len(written) == 3

    def test_project_creates_dir_named_after_project_when_output_dir_not_given(self, tmp_path, monkeypatch):
        # Without --output-dir, a directory named after the (sanitised)
        # project name is created under the CWD.
        monkeypatch.chdir(tmp_path)
        project_obj = _make_project_obj(name="My Project")
        client = _make_client_mock(project_obj=project_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            project_id=_PROJECT_ID,
            env="staging",
            output_dir=None,
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        from lfx.cli.common import safe_filename as _safe_filename

        expected_dir = tmp_path / _safe_filename("My Project")
        assert expected_dir.exists()

    def test_project_writes_json_for_each_flow(self, tmp_path):
        # Every flow goes through normalize_flow and flow_to_json once.
        flows = [_make_flow_obj(name="Alpha"), _make_flow_obj(name="Beta")]
        project_obj = _make_project_obj(flows=flows)
        client = _make_client_mock(project_obj=project_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            project_id=_PROJECT_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        assert sdk.normalize_flow.call_count == 2
        assert sdk.flow_to_json.call_count == 2

    def test_project_config_error_raises_exit_1(self, tmp_path):
        # A ConfigError during environment resolution maps to exit code 1.
        from lfx.cli.export import export_command
        from lfx.config import ConfigError

        sdk = _make_sdk_mock()
        with (
            patch("lfx.cli.export.load_sdk", return_value=sdk),
            patch("lfx.config.resolve_environment", side_effect=ConfigError("cfg error")),
            pytest.raises(typer.Exit) as exc_info,
        ):
            export_command(
                flow_paths=[],
                output=None,
                output_dir=str(tmp_path),
                env="staging",
                flow_id=None,
                project_id=_PROJECT_ID,
                environments_file=None,
                target=None,
                api_key=None,
                in_place=False,
                strip_volatile=False,
                strip_secrets=False,
                code_as_lines=False,
                strip_node_volatile=False,
                indent=2,
            )
        assert exc_info.value.exit_code == 1

    def test_project_missing_env_and_target_raises_exit_1(self):
        # Neither --env nor --target supplied: nothing to connect to.
        from lfx.cli.export import export_command

        sdk = _make_sdk_mock()
        with patch("lfx.cli.export.load_sdk", return_value=sdk), pytest.raises(typer.Exit) as exc_info:
            export_command(
                flow_paths=[],
                output=None,
                output_dir=None,
                env=None,
                flow_id=None,
                project_id=_PROJECT_ID,
                environments_file=None,
                target=None,
                api_key=None,
                in_place=False,
                strip_volatile=False,
                strip_secrets=False,
                code_as_lines=False,
                strip_node_volatile=False,
                indent=2,
            )
        assert exc_info.value.exit_code == 1

    def test_project_empty_flows_exports_zero_files(self, tmp_path):
        # An empty project is not an error; it simply writes nothing.
        project_obj = _make_project_obj(flows=[])
        client = _make_client_mock(project_obj=project_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            project_id=_PROJECT_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        written = list(tmp_path.glob("*.json"))
        assert len(written) == 0

    def test_project_flow_filenames_are_safe(self, tmp_path):
        # Hostile characters in flow names must not reach the filesystem.
        flows = [_make_flow_obj(name="Flow: with/special*chars")]
        project_obj = _make_project_obj(flows=flows)
        client = _make_client_mock(project_obj=project_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        _run_export(
            [],
            project_id=_PROJECT_ID,
            env="staging",
            output_dir=str(tmp_path),
            sdk_mock=sdk,
            env_cfg=env_cfg,
        )
        written = list(tmp_path.glob("*.json"))
        assert len(written) == 1
        assert ":" not in written[0].name
        assert "/" not in written[0].name
        assert "*" not in written[0].name

    def test_project_target_used_without_env_name(self, tmp_path):
        """--target alone (no --env) is sufficient for project remote mode."""
        flows = [_make_flow_obj()]
        project_obj = _make_project_obj(flows=flows)
        client = _make_client_mock(project_obj=project_obj)
        sdk = _make_sdk_mock(client_mock=client)
        env_cfg = _make_env_cfg()
        from lfx.cli.export import export_command

        with (
            patch("lfx.cli.export.load_sdk", return_value=sdk),
            patch("lfx.config.resolve_environment", return_value=env_cfg),
        ):
            export_command(
                flow_paths=[],
                output=None,
                output_dir=str(tmp_path),
                env=None,
                flow_id=None,
                project_id=_PROJECT_ID,
                environments_file=None,
                target=_BASE_URL,
                api_key=_API_KEY,
                in_place=False,
                strip_volatile=False,
                strip_secrets=False,
                code_as_lines=False,
                strip_node_volatile=False,
                indent=2,
            )
        client.get_project.assert_called_once()
    
  • src/lfx/tests/unit/cli/test_init_command.py+658 0 added
    @@ -0,0 +1,658 @@
    +"""Unit tests for ``lfx init`` — init_command and helpers.
    +
    +All tests run entirely in-process; no running Langflow instance required.
    +Filesystem operations use ``tmp_path`` so every test gets a fresh sandbox.
    +"""
    +
    +from __future__ import annotations
    +
    +from typing import TYPE_CHECKING
    +from unittest.mock import patch
    +
    +import pytest
    +import typer
    +from lfx.__main__ import app
    +from lfx.cli.init import (
    +    _ENVIRONMENTS_YAML,
    +    _GITIGNORE,
    +    _TEMPLATES_DIR,
    +    _TEST_FLOWS_PY,
    +    _copy_template,
    +    _write,
    +    init_command,
    +)
    +from typer.testing import CliRunner
    +
    +if TYPE_CHECKING:
    +    from pathlib import Path
    +
    +runner = CliRunner()
    +
    +# ---------------------------------------------------------------------------
    +# Constants / helpers
    +# ---------------------------------------------------------------------------
    +
    +_GHA_SRC = _TEMPLATES_DIR / "github-actions"
    +_SHELL_SRC = _TEMPLATES_DIR / "shell"
    +
    +
    +def _run_init(
    +    project_dir: Path,
    +    *,
    +    github_actions: bool = False,
    +    overwrite: bool = False,
    +    example: bool = False,
    +) -> None:
    +    """Thin wrapper around init_command with safe defaults for most tests."""
    +    init_command(
    +        project_dir=project_dir,
    +        github_actions=github_actions,
    +        overwrite=overwrite,
    +        example=example,
    +    )
    +
    +
    +# ---------------------------------------------------------------------------
    +# _write() helper
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestWriteHelper:
    +    def test_creates_file_with_content(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "output.txt"
    +        _write(dest, "hello", "label", created, target=target, overwrite=False)
    +        assert dest.exists()
    +        assert dest.read_text(encoding="utf-8") == "hello"
    +
    +    def test_appends_to_created_list(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "output.txt"
    +        _write(dest, "content", "my-label", created, target=target, overwrite=False)
    +        assert len(created) == 1
    +        assert created[0] == ("output.txt", "my-label")
    +
    +    def test_skips_if_exists_and_no_overwrite(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "output.txt"
    +        dest.write_text("original", encoding="utf-8")
    +        _write(dest, "new-content", "label", created, target=target, overwrite=False)
    +        assert dest.read_text(encoding="utf-8") == "original"
    +        assert created == []
    +
    +    def test_overwrites_if_flag_set(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "output.txt"
    +        dest.write_text("original", encoding="utf-8")
    +        _write(dest, "new-content", "label", created, target=target, overwrite=True)
    +        assert dest.read_text(encoding="utf-8") == "new-content"
    +        assert len(created) == 1
    +
    +    def test_creates_parent_directories(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "a" / "b" / "c" / "file.txt"
    +        _write(dest, "content", "label", created, target=target, overwrite=False)
    +        assert dest.exists()
    +
    +    def test_relative_path_in_created_list(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "sub" / "file.txt"
    +        _write(dest, "content", "label", created, target=target, overwrite=False)
    +        assert created[0][0] == "sub/file.txt"
    +
    +    def test_empty_content_creates_file(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "empty.txt"
    +        _write(dest, "", "label", created, target=target, overwrite=False)
    +        assert dest.exists()
    +        assert dest.read_text(encoding="utf-8") == ""
    +
    +    def test_overwrite_on_nonexistent_file_creates_it(self, tmp_path: Path) -> None:
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / "new.txt"
    +        _write(dest, "content", "label", created, target=target, overwrite=True)
    +        assert dest.exists()
    +        assert len(created) == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# _copy_template() helper
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestCopyTemplateHelper:
    +    def test_copies_content_from_src(self, tmp_path: Path) -> None:
    +        src = tmp_path / "src.txt"
    +        src.write_text("template body", encoding="utf-8")
    +        dest = tmp_path / "output" / "dest.txt"
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        _copy_template(src, dest, "label", created, target=target, overwrite=False)
    +        assert dest.read_text(encoding="utf-8") == "template body"
    +
    +    def test_appends_to_created_list(self, tmp_path: Path) -> None:
    +        src = tmp_path / "tpl.txt"
    +        src.write_text("content", encoding="utf-8")
    +        dest = tmp_path / "out" / "tpl.txt"
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        _copy_template(src, dest, "my-label", created, target=target, overwrite=False)
    +        assert len(created) == 1
    +        assert created[0][1] == "my-label"
    +
    +    def test_skips_if_dest_exists_and_no_overwrite(self, tmp_path: Path) -> None:
    +        src = tmp_path / "src.txt"
    +        src.write_text("new content", encoding="utf-8")
    +        dest = tmp_path / "dest.txt"
    +        dest.write_text("original", encoding="utf-8")
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        _copy_template(src, dest, "label", created, target=target, overwrite=False)
    +        assert dest.read_text(encoding="utf-8") == "original"
    +        assert created == []
    +
    +    def test_overwrites_dest_when_flag_set(self, tmp_path: Path) -> None:
    +        src = tmp_path / "src.txt"
    +        src.write_text("new content", encoding="utf-8")
    +        dest = tmp_path / "dest.txt"
    +        dest.write_text("original", encoding="utf-8")
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        _copy_template(src, dest, "label", created, target=target, overwrite=True)
    +        assert dest.read_text(encoding="utf-8") == "new content"
    +        assert len(created) == 1
    +
    +    def test_creates_parent_directories(self, tmp_path: Path) -> None:
    +        src = tmp_path / "src.txt"
    +        src.write_text("content", encoding="utf-8")
    +        dest = tmp_path / "deep" / "nested" / "dest.txt"
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        _copy_template(src, dest, "label", created, target=target, overwrite=False)
    +        assert dest.exists()
    +
    +    def test_relative_path_stored_in_created(self, tmp_path: Path) -> None:
    +        src = tmp_path / "src.txt"
    +        src.write_text("content", encoding="utf-8")
    +        dest = tmp_path / "subdir" / "out.txt"
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        _copy_template(src, dest, "label", created, target=target, overwrite=False)
    +        assert created[0][0] == "subdir/out.txt"
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — directory creation
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandDirectoryCreation:
    +    def test_creates_flows_directory(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path / "proj")
    +        assert (tmp_path / "proj" / "flows").is_dir()
    +
    +    def test_creates_tests_directory(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path / "proj")
    +        assert (tmp_path / "proj" / "tests").is_dir()
    +
    +    def test_creates_lfx_directory(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path / "proj")
    +        assert (tmp_path / "proj" / ".lfx").is_dir()
    +
    +    def test_creates_project_dir_if_missing(self, tmp_path: Path) -> None:
    +        target = tmp_path / "brand-new-project"
    +        assert not target.exists()
    +        _run_init(target)
    +        assert target.is_dir()
    +
    +    def test_creates_nested_project_dir(self, tmp_path: Path) -> None:
    +        target = tmp_path / "a" / "b" / "c"
    +        _run_init(target)
    +        assert target.is_dir()
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — required files created
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandRequiredFiles:
    +    def test_creates_tests_init_py(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        assert (tmp_path / "tests" / "__init__.py").exists()
    +
    +    def test_creates_tests_test_flows_py(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        assert (tmp_path / "tests" / "test_flows.py").exists()
    +
    +    def test_test_flows_py_has_expected_content(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        content = (tmp_path / "tests" / "test_flows.py").read_text(encoding="utf-8")
    +        assert "flow_runner" in content
    +        assert "pytest.mark.integration" in content
    +
    +    def test_test_flows_py_content_matches_template(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        content = (tmp_path / "tests" / "test_flows.py").read_text(encoding="utf-8")
    +        assert content == _TEST_FLOWS_PY
    +
    +    def test_creates_environments_yaml(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        assert (tmp_path / ".lfx" / "environments.yaml").exists()
    +
    +    def test_environments_yaml_has_expected_content(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        content = (tmp_path / ".lfx" / "environments.yaml").read_text(encoding="utf-8")
    +        assert "environments:" in content
    +        assert "local:" in content
    +        assert "staging:" in content
    +        assert "production:" in content
    +
    +    def test_environments_yaml_content_matches_template(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        content = (tmp_path / ".lfx" / "environments.yaml").read_text(encoding="utf-8")
    +        assert content == _ENVIRONMENTS_YAML
    +
    +    def test_creates_gitignore(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        assert (tmp_path / ".gitignore").exists()
    +
    +    def test_gitignore_has_langflow_entry(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        content = (tmp_path / ".gitignore").read_text(encoding="utf-8")
    +        assert "langflow-environments.toml" in content
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — .gitignore behaviour
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandGitignore:
    +    def test_creates_new_gitignore_if_missing(self, tmp_path: Path) -> None:
    +        assert not (tmp_path / ".gitignore").exists()
    +        _run_init(tmp_path)
    +        assert (tmp_path / ".gitignore").exists()
    +
    +    def test_new_gitignore_content_matches_template(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path)
    +        content = (tmp_path / ".gitignore").read_text(encoding="utf-8")
    +        assert content == _GITIGNORE
    +
    +    def test_appends_rule_to_existing_gitignore_without_entry(self, tmp_path: Path) -> None:
    +        gitignore = tmp_path / ".gitignore"
    +        gitignore.write_text("*.pyc\n__pycache__/\n", encoding="utf-8")
    +        # Directory already has .gitignore so use overwrite=True to bypass the non-empty guard
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        content = gitignore.read_text(encoding="utf-8")
    +        assert "*.pyc" in content
    +        assert "__pycache__/" in content
    +        assert "langflow-environments.toml" in content
    +
    +    def test_does_not_append_duplicate_rule(self, tmp_path: Path) -> None:
    +        gitignore = tmp_path / ".gitignore"
    +        gitignore.write_text("langflow-environments.toml\n", encoding="utf-8")
    +        # Directory already has .gitignore so use overwrite=True to bypass the non-empty guard
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        content = gitignore.read_text(encoding="utf-8")
    +        # Should appear exactly once
    +        assert content.count("langflow-environments.toml") == 1
    +
    +    def test_existing_gitignore_preserved_and_appended(self, tmp_path: Path) -> None:
    +        gitignore = tmp_path / ".gitignore"
    +        original = "node_modules/\ndist/\n"
    +        gitignore.write_text(original, encoding="utf-8")
    +        # Directory already has .gitignore so use overwrite=True to bypass the non-empty guard
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        content = gitignore.read_text(encoding="utf-8")
    +        # Original content is kept
    +        assert "node_modules/" in content
    +        assert "dist/" in content
    +        # Rule is appended
    +        assert "langflow-environments.toml" in content
    +
    +    def test_append_uses_double_newline_separator(self, tmp_path: Path) -> None:
    +        gitignore = tmp_path / ".gitignore"
    +        gitignore.write_text("*.pyc\n", encoding="utf-8")
    +        # Directory already has .gitignore so use overwrite=True to bypass the non-empty guard
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        content = gitignore.read_text(encoding="utf-8")
    +        # The separator between existing content and appended block should be \n\n
    +        assert "\n\n" in content
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — example=True (with seeded hello-world flow)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandExampleTrue:
    +    def test_creates_hello_world_json(self, tmp_path: Path) -> None:
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
    +        assert (tmp_path / "flows" / "hello-world.json").exists()
    +
    +    def test_hello_world_is_valid_json(self, tmp_path: Path) -> None:
    +        import json
    +
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
    +        content = (tmp_path / "flows" / "hello-world.json").read_text(encoding="utf-8")
    +        flow = json.loads(content)
    +        assert isinstance(flow, dict)
    +
    +    def test_hello_world_has_id_and_name(self, tmp_path: Path) -> None:
    +        import json
    +
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
    +        flow = json.loads((tmp_path / "flows" / "hello-world.json").read_text(encoding="utf-8"))
    +        assert "id" in flow
    +        assert "name" in flow
    +        assert flow["name"] == "hello-world"
    +
    +    def test_no_gitkeep_when_example_is_true(self, tmp_path: Path) -> None:
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
    +        assert not (tmp_path / "flows" / ".gitkeep").exists()
    +
    +    def test_other_files_still_created_with_example(self, tmp_path: Path) -> None:
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=True)
    +        assert (tmp_path / ".lfx" / "environments.yaml").exists()
    +        assert (tmp_path / "tests" / "test_flows.py").exists()
    +        assert (tmp_path / "tests" / "__init__.py").exists()
    +
    +    def test_graceful_fallback_when_template_missing(self, tmp_path: Path) -> None:
    +        """If create_command raises, init should warn but continue scaffolding."""
    +        fake_dir = tmp_path / "no-templates"
    +        with patch("lfx.cli.create._FLOWS_TEMPLATE_DIR", fake_dir):
    +            # Should not raise — BLE001-guarded except swallows the failure
    +            init_command(
    +                project_dir=tmp_path / "proj",
    +                github_actions=False,
    +                overwrite=False,
    +                example=True,
    +            )
    +        assert (tmp_path / "proj" / ".lfx" / "environments.yaml").exists()
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — example=False (with .gitkeep)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandExampleFalse:
    +    def test_creates_gitkeep_in_flows(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path, example=False)
    +        assert (tmp_path / "flows" / ".gitkeep").exists()
    +
    +    def test_no_hello_world_when_example_false(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path, example=False)
    +        assert not (tmp_path / "flows" / "hello-world.json").exists()
    +
    +    def test_gitkeep_is_empty_file(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path, example=False)
    +        assert (tmp_path / "flows" / ".gitkeep").read_text(encoding="utf-8") == ""
    +
    +    def test_other_files_still_created_without_example(self, tmp_path: Path) -> None:
    +        _run_init(tmp_path, example=False)
    +        assert (tmp_path / ".lfx" / "environments.yaml").exists()
    +        assert (tmp_path / "tests" / "test_flows.py").exists()
    +        assert (tmp_path / "tests" / "__init__.py").exists()
    +        assert (tmp_path / ".gitignore").exists()
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — GitHub Actions templates
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandGitHubActions:
    +    def test_creates_github_workflows_dir_when_gha_true(self, tmp_path: Path) -> None:
    +        if not _GHA_SRC.exists():
    +            pytest.skip("GitHub Actions templates not bundled")
    +        init_command(project_dir=tmp_path, github_actions=True, overwrite=False, example=False)
    +        assert (tmp_path / ".github" / "workflows").is_dir()
    +
    +    def test_copies_all_yml_templates(self, tmp_path: Path) -> None:
    +        if not _GHA_SRC.exists():
    +            pytest.skip("GitHub Actions templates not bundled")
    +        expected = sorted(t.name for t in _GHA_SRC.glob("*.yml"))
    +        init_command(project_dir=tmp_path, github_actions=True, overwrite=False, example=False)
    +        created = sorted(p.name for p in (tmp_path / ".github" / "workflows").glob("*.yml"))
    +        assert created == expected
    +
    +    def test_workflow_files_are_non_empty(self, tmp_path: Path) -> None:
    +        if not _GHA_SRC.exists():
    +            pytest.skip("GitHub Actions templates not bundled")
    +        init_command(project_dir=tmp_path, github_actions=True, overwrite=False, example=False)
    +        for yml in (tmp_path / ".github" / "workflows").glob("*.yml"):
    +            assert yml.stat().st_size > 0, f"{yml.name} should not be empty"
    +
    +    def test_workflow_file_content_matches_template(self, tmp_path: Path) -> None:
    +        if not _GHA_SRC.exists():
    +            pytest.skip("GitHub Actions templates not bundled")
    +        init_command(project_dir=tmp_path, github_actions=True, overwrite=False, example=False)
    +        for src_tmpl in _GHA_SRC.glob("*.yml"):
    +            dest = tmp_path / ".github" / "workflows" / src_tmpl.name
    +            assert dest.read_text(encoding="utf-8") == src_tmpl.read_text(encoding="utf-8")
    +
    +    def test_skips_github_workflows_when_gha_false(self, tmp_path: Path) -> None:
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=False)
    +        assert not (tmp_path / ".github").exists()
    +
    +    def test_warns_when_gha_templates_dir_missing(self, tmp_path: Path) -> None:
    +        """When the template directory is absent a warning is printed but no error raised."""
    +        missing_templates = tmp_path / "no-such-templates"
    +        with patch("lfx.cli.init._TEMPLATES_DIR", missing_templates):
    +            # Should not raise
    +            init_command(project_dir=tmp_path / "proj", github_actions=True, overwrite=False, example=False)
    +        # The project's other files should still be scaffolded
    +        assert (tmp_path / "proj" / ".lfx" / "environments.yaml").exists()
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — shell CI scripts (always scaffolded)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandShellScripts:
    +    def test_creates_ci_directory(self, tmp_path: Path) -> None:
    +        if not _SHELL_SRC.exists():
    +            pytest.skip("Shell templates not bundled")
    +        _run_init(tmp_path)
    +        assert (tmp_path / "ci").is_dir()
    +
    +    def test_copies_all_sh_templates(self, tmp_path: Path) -> None:
    +        if not _SHELL_SRC.exists():
    +            pytest.skip("Shell templates not bundled")
    +        expected = sorted(t.name for t in _SHELL_SRC.glob("*.sh"))
    +        _run_init(tmp_path)
    +        created = sorted(p.name for p in (tmp_path / "ci").glob("*.sh"))
    +        assert created == expected
    +
    +    def test_shell_scripts_are_executable(self, tmp_path: Path) -> None:
    +        if not _SHELL_SRC.exists():
    +            pytest.skip("Shell templates not bundled")
    +        _run_init(tmp_path)
    +        import stat
    +
    +        for script in (tmp_path / "ci").glob("*.sh"):
    +            mode = script.stat().st_mode
    +            assert mode & stat.S_IXUSR, f"{script.name} should have executable bit set"
    +
    +    def test_shell_script_content_matches_template(self, tmp_path: Path) -> None:
    +        if not _SHELL_SRC.exists():
    +            pytest.skip("Shell templates not bundled")
    +        _run_init(tmp_path)
    +        for src_tmpl in _SHELL_SRC.glob("*.sh"):
    +            dest = tmp_path / "ci" / src_tmpl.name
    +            assert dest.read_text(encoding="utf-8") == src_tmpl.read_text(encoding="utf-8")
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — non-empty directory guard
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandNonEmptyGuard:
    +    def test_exits_with_code_1_on_non_empty_dir_without_overwrite(self, tmp_path: Path) -> None:
    +        (tmp_path / "existing.txt").write_text("data", encoding="utf-8")
    +        with pytest.raises(typer.Exit) as exc_info:
    +            init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=False)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_empty_dir_does_not_raise(self, tmp_path: Path) -> None:
    +        target = tmp_path / "empty"
    +        target.mkdir()
    +        # Should not raise
    +        init_command(project_dir=target, github_actions=False, overwrite=False, example=False)
    +
    +    def test_nonexistent_dir_does_not_raise(self, tmp_path: Path) -> None:
    +        target = tmp_path / "brand-new"
    +        # Should not raise
    +        init_command(project_dir=target, github_actions=False, overwrite=False, example=False)
    +
    +    def test_git_dir_only_is_not_considered_non_empty(self, tmp_path: Path) -> None:
    +        """A directory containing only .git is treated as empty by the guard."""
    +        (tmp_path / ".git").mkdir()
    +        # Should not raise — .git is excluded from the check
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=False, example=False)
    +
    +    def test_overwrite_succeeds_on_non_empty_dir(self, tmp_path: Path) -> None:
    +        (tmp_path / "existing.txt").write_text("data", encoding="utf-8")
    +        # Should not raise
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        assert (tmp_path / ".lfx" / "environments.yaml").exists()
    +
    +
    +# ---------------------------------------------------------------------------
    +# init_command — --overwrite re-creates files
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCommandOverwrite:
    +    def test_overwrite_replaces_environments_yaml(self, tmp_path: Path) -> None:
    +        env_yaml = tmp_path / ".lfx" / "environments.yaml"
    +        env_yaml.parent.mkdir(parents=True)
    +        env_yaml.write_text("# old content\n", encoding="utf-8")
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        content = env_yaml.read_text(encoding="utf-8")
    +        assert content == _ENVIRONMENTS_YAML
    +
    +    def test_overwrite_replaces_test_flows_py(self, tmp_path: Path) -> None:
    +        tests_dir = tmp_path / "tests"
    +        tests_dir.mkdir(parents=True)
    +        test_file = tests_dir / "test_flows.py"
    +        test_file.write_text("# old tests\n", encoding="utf-8")
    +        init_command(project_dir=tmp_path, github_actions=False, overwrite=True, example=False)
    +        content = test_file.read_text(encoding="utf-8")
    +        assert content == _TEST_FLOWS_PY
    +
    +    def test_no_overwrite_preserves_environments_yaml(self, tmp_path: Path) -> None:
    +        """_write skips existing files when overwrite=False (tested via the helper directly)."""
    +        target = tmp_path
    +        created: list[tuple[str, str]] = []
    +        dest = tmp_path / ".lfx" / "environments.yaml"
    +        dest.parent.mkdir(parents=True)
    +        dest.write_text("# custom content\n", encoding="utf-8")
    +        _write(dest, _ENVIRONMENTS_YAML, "label", created, target=target, overwrite=False)
    +        # Original content must be preserved
    +        assert dest.read_text(encoding="utf-8") == "# custom content\n"
    +        assert created == []
    +
    +    def test_overwrite_with_github_actions_replaces_workflows(self, tmp_path: Path) -> None:
    +        if not _GHA_SRC.exists():
    +            pytest.skip("GitHub Actions templates not bundled")
    +        # First init
    +        init_command(project_dir=tmp_path, github_actions=True, overwrite=False, example=False)
    +        # Corrupt one workflow file
    +        wf_dir = tmp_path / ".github" / "workflows"
    +        first_wf = next(wf_dir.glob("*.yml"))
    +        first_wf.write_text("# corrupted\n", encoding="utf-8")
    +        # Overwrite init
    +        init_command(project_dir=tmp_path, github_actions=True, overwrite=True, example=False)
    +        # Content should be restored from template
    +        src_content = (_GHA_SRC / first_wf.name).read_text(encoding="utf-8")
    +        assert first_wf.read_text(encoding="utf-8") == src_content
    +
    +
    +# ---------------------------------------------------------------------------
    +# CLI wrapper via typer CliRunner
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestInitCLI:
    +    def test_basic_init_exits_zero(self, tmp_path: Path) -> None:
    +        result = runner.invoke(app, ["init", str(tmp_path / "proj")])
    +        assert result.exit_code == 0, result.output
    +
    +    def test_basic_init_creates_environments_yaml(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        result = runner.invoke(app, ["init", str(proj)])
    +        assert result.exit_code == 0, result.output
    +        assert (proj / ".lfx" / "environments.yaml").exists()
    +
    +    def test_basic_init_creates_test_flows_py(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        result = runner.invoke(app, ["init", str(proj)])
    +        assert result.exit_code == 0, result.output
    +        assert (proj / "tests" / "test_flows.py").exists()
    +
    +    def test_no_github_actions_flag(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        result = runner.invoke(app, ["init", str(proj), "--no-github-actions"])
    +        assert result.exit_code == 0, result.output
    +        assert not (proj / ".github").exists()
    +
    +    def test_github_actions_flag_creates_workflows(self, tmp_path: Path) -> None:
    +        if not _GHA_SRC.exists():
    +            pytest.skip("GitHub Actions templates not bundled")
    +        proj = tmp_path / "proj"
    +        result = runner.invoke(app, ["init", str(proj), "--github-actions"])
    +        assert result.exit_code == 0, result.output
    +        assert (proj / ".github" / "workflows").is_dir()
    +
    +    def test_no_example_flag_creates_gitkeep(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        result = runner.invoke(app, ["init", str(proj), "--no-github-actions", "--no-example"])
    +        assert result.exit_code == 0, result.output
    +        assert (proj / "flows" / ".gitkeep").exists()
    +        assert not (proj / "flows" / "hello-world.json").exists()
    +
    +    def test_example_flag_creates_hello_world(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        result = runner.invoke(app, ["init", str(proj), "--no-github-actions", "--example"])
    +        assert result.exit_code == 0, result.output
    +        assert (proj / "flows" / "hello-world.json").exists()
    +
    +    def test_overwrite_flag_reinitialises_non_empty_dir(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        proj.mkdir()
    +        (proj / "some-file.txt").write_text("existing", encoding="utf-8")
    +        result = runner.invoke(app, ["init", str(proj), "--no-github-actions", "--no-example", "--overwrite"])
    +        assert result.exit_code == 0, result.output
    +        assert (proj / ".lfx" / "environments.yaml").exists()
    +
    +    def test_non_empty_dir_without_overwrite_exits_nonzero(self, tmp_path: Path) -> None:
    +        proj = tmp_path / "proj"
    +        proj.mkdir()
    +        (proj / "some-file.txt").write_text("existing", encoding="utf-8")
    +        result = runner.invoke(app, ["init", str(proj), "--no-github-actions"])
    +        assert result.exit_code != 0
    +
    +    def test_default_project_dir_is_current_directory(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    +        """When no project_dir argument is given the CLI defaults to '.'."""
    +        monkeypatch.chdir(tmp_path)
    +        result = runner.invoke(app, ["init", "--no-github-actions", "--no-example"])
    +        assert result.exit_code == 0, result.output
    +        assert (tmp_path / ".lfx" / "environments.yaml").exists()
    +
    +    def test_init_output_mentions_next_steps(self, tmp_path: Path) -> None:
    +        result = runner.invoke(app, ["init", str(tmp_path / "proj"), "--no-github-actions"])
    +        assert result.exit_code == 0, result.output
    +        assert "Next steps" in result.output
    
  • src/lfx/tests/unit/cli/test_lazy_imports.py+8 6 modified
    @@ -41,16 +41,18 @@ def test_main_module_importable(self):
             assert callable(lfx.__main__.main)
     
         def test_serve_command_wrapper_exists(self):
    -        """Test that serve_command_wrapper is defined."""
    -        from lfx.__main__ import serve_command_wrapper
    +        """Test that serve command is registered on the app."""
    +        from lfx.__main__ import app
     
    -        assert callable(serve_command_wrapper)
    +        command_names = [c.name for c in app.registered_commands]
    +        assert "serve" in command_names
     
         def test_run_command_wrapper_exists(self):
    -        """Test that run_command_wrapper is defined."""
    -        from lfx.__main__ import run_command_wrapper
    +        """Test that run command is registered on the app."""
    +        from lfx.__main__ import app
     
    -        assert callable(run_command_wrapper)
    +        command_names = [c.name for c in app.registered_commands]
    +        assert "run" in command_names
     
     
     class TestRunCommandLazyImports:
    
  • src/lfx/tests/unit/cli/test_login_command.py+517 0 added
    @@ -0,0 +1,517 @@
    +"""Unit tests for lfx login -- login_command and helpers.
    +
    +All tests run entirely in-process; no real Langflow instance or SDK required.
    +The SDK module is replaced wholesale with MagicMock so only the login logic
    +(key masking, connection probing, success/failure output) is under test.
    +"""
    +# pragma: allowlist secret -- all credentials in this file are fake test data
    +
    +from __future__ import annotations
    +
    +from unittest.mock import MagicMock, patch
    +
    +import pytest
    +import typer
    +
    +# ---------------------------------------------------------------------------
    +# Shared constants
    +# ---------------------------------------------------------------------------
    +
    +_BASE_URL = "http://langflow.test"
    +_API_KEY = "abcdefghijklmnop"  # pragma: allowlist secret  (16 chars — longer than 8)
    +_SHORT_KEY = "short"  # pragma: allowlist secret  (5 chars — at or under the 8-char mask threshold)
    +_EXACT_KEY = "exactly8"  # pragma: allowlist secret  (exactly 8 chars)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Fake SDK exception classes
    +# ---------------------------------------------------------------------------
    +
    +
    +class _FakeLangflowAuthError(Exception):
    +    """Stand-in for langflow_sdk.LangflowAuthError in unit tests."""
    +
    +
    +class _FakeLangflowConnectionError(Exception):
    +    """Stand-in for langflow_sdk.LangflowConnectionError in unit tests."""
    +
    +
    +class _FakeLangflowHTTPError(Exception):
    +    """Stand-in for langflow_sdk.LangflowHTTPError in unit tests."""
    +
    +    def __init__(self, status_code: int, detail: str) -> None:
    +        self.status_code = status_code
    +        super().__init__(detail)
    +
    +
    +# A fake exception whose qualname contains "ValidationError" so _probe_connection
    +# treats it as a successful probe.
    +class _FakeValidationError(Exception):
    +    """Simulates a Pydantic ValidationError (qualname contains 'ValidationError')."""
    +
    +
    +_FakeValidationError.__qualname__ = "ModelMetaclass.ValidationError"
    +
    +
    +# ---------------------------------------------------------------------------
    +# SDK / client / env helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _make_client_mock(flows: list | None = None) -> MagicMock:
    +    client = MagicMock()
    +    client.list_flows.return_value = flows if flows is not None else [MagicMock(), MagicMock()]
    +    return client
    +
    +
    +def _make_sdk_mock(client_mock: MagicMock | None = None) -> MagicMock:
    +    """Return a mock langflow_sdk module wired up for login tests."""
    +    if client_mock is None:
    +        client_mock = _make_client_mock()
    +    sdk = MagicMock()
    +    sdk.Client.return_value = client_mock
    +    sdk.LangflowAuthError = _FakeLangflowAuthError
    +    sdk.LangflowConnectionError = _FakeLangflowConnectionError
    +    sdk.LangflowHTTPError = _FakeLangflowHTTPError
    +    return sdk
    +
    +
    +def _make_env_cfg(
    +    url: str = _BASE_URL,
    +    api_key: str | None = _API_KEY,
    +    name: str = "staging",
    +) -> MagicMock:
    +    env_cfg = MagicMock()
    +    env_cfg.url = url
    +    env_cfg.api_key = api_key
    +    env_cfg.name = name
    +    return env_cfg
    +
    +
    +def _run_login(
    +    *,
    +    env: str | None = "staging",
    +    environments_file: str | None = None,
    +    target: str | None = None,
    +    api_key: str | None = _API_KEY,
    +    sdk_mock: MagicMock | None = None,
    +    env_cfg: MagicMock | None = None,
    +) -> None:
    +    """Invoke login_command with a mocked SDK and mocked env resolution."""
    +    from lfx.cli.login import login_command
    +
    +    mock = sdk_mock if sdk_mock is not None else _make_sdk_mock()
    +    resolved_env = env_cfg if env_cfg is not None else _make_env_cfg()
    +
    +    with (
    +        patch("lfx.cli.login.load_sdk", return_value=mock),
    +        patch("lfx.config.resolve_environment", return_value=resolved_env),
    +    ):
    +        login_command(
    +            env=env,
    +            environments_file=environments_file,
    +            target=target,
    +            api_key=api_key,
    +        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# _mask_key
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestMaskKey:
    +    def test_long_key_shows_first_8_chars_plus_ellipsis(self):
    +        from lfx.cli.login import _mask_key
    +
    +        result = _mask_key(_API_KEY)
    +        assert result == _API_KEY[:8] + "..."
    +
    +    def test_key_exactly_8_chars_returns_stars(self):
    +        from lfx.cli.login import _mask_key
    +
    +        result = _mask_key(_EXACT_KEY)
    +        assert result == "***"
    +
    +    def test_short_key_returns_stars(self):
    +        from lfx.cli.login import _mask_key
    +
    +        result = _mask_key(_SHORT_KEY)
    +        assert result == "***"
    +
    +    def test_empty_key_returns_stars(self):
    +        from lfx.cli.login import _mask_key
    +
    +        result = _mask_key("")
    +        assert result == "***"
    +
    +    def test_exactly_9_chars_key_shows_first_8_plus_ellipsis(self):
    +        from lfx.cli.login import _mask_key
    +
    +        key = "123456789"  # pragma: allowlist secret
    +        result = _mask_key(key)
    +        assert result == "12345678..."
    +
    +    def test_mask_does_not_reveal_full_key(self):
    +        from lfx.cli.login import _mask_key
    +
    +        key = "supersecretkeyvalue"  # pragma: allowlist secret
    +        result = _mask_key(key)
    +        assert key not in result
    +        assert result.endswith("...")
    +
    +
    +# ---------------------------------------------------------------------------
    +# _probe_connection
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestProbeConnection:
    +    def test_successful_list_flows_returns_ok(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        flows = [MagicMock(), MagicMock(), MagicMock()]
    +        client = _make_client_mock(flows=flows)
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, msg, count = _probe_connection(client, sdk)
    +        assert ok is True
    +        assert msg == "OK"
    +        assert count == 3
    +
    +    def test_empty_flow_list_returns_ok_with_zero_count(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        client = _make_client_mock(flows=[])
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, _msg, count = _probe_connection(client, sdk)
    +        assert ok is True
    +        assert count == 0
    +
    +    def test_auth_error_returns_auth_message(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowAuthError("unauthorized")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, msg, count = _probe_connection(client, sdk)
    +        assert ok is False
    +        assert msg == "auth"
    +        assert count == 0
    +
    +    def test_connection_error_returns_connection_message(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowConnectionError("refused")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, msg, count = _probe_connection(client, sdk)
    +        assert ok is False
    +        assert msg.startswith("connection:")
    +        assert count == 0
    +
    +    def test_http_error_returns_http_message(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowHTTPError(503, "service unavailable")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, msg, count = _probe_connection(client, sdk)
    +        assert ok is False
    +        assert msg.startswith("http:")
    +        assert count == 0
    +
    +    def test_validation_error_qualname_returns_ok(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeValidationError("schema mismatch")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, msg, count = _probe_connection(client, sdk)
    +        assert ok is True
    +        assert msg == "OK"
    +        assert count == 0
    +
    +    def test_generic_exception_returns_error_message(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = RuntimeError("something went wrong")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        ok, msg, count = _probe_connection(client, sdk)
    +        assert ok is False
    +        assert msg.startswith("error:")
    +        assert count == 0
    +
    +    def test_connection_error_message_contains_original_message(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        original_msg = "Connection refused at port 7860"
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowConnectionError(original_msg)
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _, msg, _ = _probe_connection(client, sdk)
    +        assert original_msg in msg
    +
    +    def test_flow_count_matches_returned_list_length(self):
    +        from lfx.cli.login import _probe_connection
    +
    +        flows = [MagicMock() for _ in range(5)]
    +        client = _make_client_mock(flows=flows)
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _, _, count = _probe_connection(client, sdk)
    +        assert count == 5
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — success
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandSuccess:
    +    def test_successful_probe_does_not_raise(self):
    +        client = _make_client_mock(flows=[MagicMock()])
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)  # Should not raise
    +
    +    def test_successful_probe_calls_client_list_flows(self):
    +        client = _make_client_mock(flows=[MagicMock()])
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        client.list_flows.assert_called_once_with(page=1, size=1)
    +
    +    def test_client_constructed_with_resolved_credentials(self):
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(url=_BASE_URL, api_key=_API_KEY)
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        sdk.Client.assert_called_once_with(base_url=_BASE_URL, api_key=_API_KEY)
    +
    +    def test_validation_error_probe_succeeds_without_exit(self):
    +        """A ValidationError during list_flows is treated as a successful probe."""
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeValidationError("schema drift")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)  # Should not raise
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — auth failure
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandAuthFailure:
    +    def test_auth_error_raises_exit_1(self):
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowAuthError("unauthorized")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_auth_failure_with_api_key_includes_masked_key(self):
    +        """When auth fails and a key is configured, masked key is shown on stderr."""
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowAuthError("forbidden")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(api_key=_API_KEY)
    +        with pytest.raises(typer.Exit):
    +            _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — connection failure
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandConnectionFailure:
    +    def test_connection_error_raises_exit_1(self):
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowConnectionError("timeout")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — HTTP error
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandHTTPError:
    +    def test_http_error_raises_exit_1(self):
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowHTTPError(500, "internal server error")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_http_404_raises_exit_1(self):
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = _FakeLangflowHTTPError(404, "not found")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_generic_error_raises_exit_1(self):
    +        client = _make_client_mock()
    +        client.list_flows.side_effect = RuntimeError("unexpected")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg()
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — config error
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandConfigError:
    +    def test_config_error_raises_exit_1(self):
    +        from lfx.cli.login import login_command
    +        from lfx.config import ConfigError
    +
    +        sdk = _make_sdk_mock()
    +        with (
    +            patch("lfx.cli.login.load_sdk", return_value=sdk),
    +            patch("lfx.config.resolve_environment", side_effect=ConfigError("no such env")),
    +            pytest.raises(typer.Exit) as exc_info,
    +        ):
    +            login_command(
    +                env="nonexistent",
    +                environments_file=None,
    +                target=None,
    +                api_key=None,
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_missing_environments_file_raises_exit_1(self, tmp_path):
    +        from lfx.cli.login import login_command
    +
    +        sdk = _make_sdk_mock()
    +        missing_file = str(tmp_path / "no_such.yaml")
    +        with (
    +            patch("lfx.cli.login.load_sdk", return_value=sdk),
    +            pytest.raises(typer.Exit),
    +        ):
    +            login_command(
    +                env="myenv",
    +                environments_file=missing_file,
    +                target=None,
    +                api_key=None,
    +            )
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — no API key
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandNoApiKey:
    +    def test_no_api_key_does_not_exit_early_on_success(self):
    +        """When api_key is None, login still proceeds and succeeds if probe passes."""
    +        client = _make_client_mock(flows=[MagicMock()])
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(api_key=None, name="staging")
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)  # Should not raise
    +        client.list_flows.assert_called_once()
    +
    +    def test_no_api_key_inline_env_does_not_exit_early(self):
    +        """__inline__ env with no key still probes without raising."""
    +        client = _make_client_mock(flows=[])
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(api_key=None, name="__inline__")
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)  # Should not raise
    +
    +    def test_no_api_key_env_env_does_not_exit_early(self):
    +        """__env__ special name with no key still proceeds."""
    +        client = _make_client_mock(flows=[])
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(api_key=None, name="__env__")
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)  # Should not raise
    +
    +    def test_no_api_key_client_still_constructed(self):
    +        """Client is constructed even without an API key."""
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(api_key=None)
    +        _run_login(sdk_mock=sdk, env_cfg=env_cfg)
    +        sdk.Client.assert_called_once_with(base_url=_BASE_URL, api_key=None)
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — SDK not installed
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandSdkNotInstalled:
    +    def test_sdk_load_raises_bad_parameter(self):
    +        from lfx.cli.login import login_command
    +
    +        with (
    +            patch(
    +                "lfx.cli.login.load_sdk",
    +                side_effect=typer.BadParameter("langflow-sdk is required for lfx login"),
    +            ),
    +            pytest.raises(typer.BadParameter),
    +        ):
    +            login_command(
    +                env="staging",
    +                environments_file=None,
    +                target=None,
    +                api_key=None,
    +            )
    +
    +    def test_sdk_load_bad_parameter_message(self):
    +        from lfx.cli.login import login_command
    +
    +        with (
    +            patch(
    +                "lfx.cli.login.load_sdk",
    +                side_effect=typer.BadParameter("langflow-sdk is required"),
    +            ),
    +            pytest.raises(typer.BadParameter) as exc_info,
    +        ):
    +            login_command(
    +                env="staging",
    +                environments_file=None,
    +                target=None,
    +                api_key=None,
    +            )
    +        assert "langflow-sdk" in str(exc_info.value)
    +
    +
    +# ---------------------------------------------------------------------------
    +# login_command — inline / env-var environment behaviour
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoginCommandInlineEnv:
    +    def test_inline_env_success_does_not_raise(self):
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(name="__inline__")
    +        _run_login(env=None, target=_BASE_URL, api_key=_API_KEY, sdk_mock=sdk, env_cfg=env_cfg)
    +
    +    def test_env_var_env_success_does_not_raise(self):
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(name="__env__")
    +        _run_login(env=None, target=_BASE_URL, api_key=_API_KEY, sdk_mock=sdk, env_cfg=env_cfg)
    +
    +    def test_named_env_success_does_not_raise(self):
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client_mock=client)
    +        env_cfg = _make_env_cfg(name="production")
    +        _run_login(env="production", sdk_mock=sdk, env_cfg=env_cfg)
    
  • src/lfx/tests/unit/cli/test_pull_command.py+883 0 added
    @@ -0,0 +1,883 @@
    +"""Unit tests for lfx pull -- pull_command and helpers.
    +
    +All tests run entirely in-process; no real Langflow instance or SDK required.
    +The SDK module is replaced wholesale with MagicMock so only the pull logic
    +(flow fetching, file writing, project resolution, result rendering)
    +is under test.
    +"""
    +# pragma: allowlist secret -- all credentials in this file are fake test data
    +
    +from __future__ import annotations
    +
    +import json
    +from types import SimpleNamespace
    +from typing import TYPE_CHECKING
    +from unittest.mock import MagicMock, patch
    +from uuid import UUID
    +
    +import pytest
    +import typer
    +
    +if TYPE_CHECKING:
    +    from pathlib import Path
    +
    +# ---------------------------------------------------------------------------
    +# Shared constants
    +# ---------------------------------------------------------------------------
    +
    +_BASE_URL = "http://langflow.test"
    +_API_KEY = "test-key"  # pragma: allowlist secret
    +_FLOW_ID = UUID("aaaaaaaa-0000-0000-0000-000000000001")
    +_FLOW_ID_2 = UUID("aaaaaaaa-0000-0000-0000-000000000002")
    +_PROJECT_ID = UUID("bbbbbbbb-0000-0000-0000-000000000001")
    +
    +_FLOW_DICT: dict = {
    +    "id": str(_FLOW_ID),
    +    "name": "My Flow",
    +    "data": {"nodes": [], "edges": []},
    +}
    +
    +_FLOW_DICT_2: dict = {
    +    "id": str(_FLOW_ID_2),
    +    "name": "Second Flow",
    +    "data": {"nodes": [], "edges": []},
    +}
    +
    +_FLOW_JSON = json.dumps(_FLOW_DICT, indent=2)
    +_FLOW_JSON_2 = json.dumps(_FLOW_DICT_2, indent=2)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Test helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _fake_env_config(
    +    url: str = _BASE_URL,
    +    api_key: str = _API_KEY,
    +    name: str = "local",
    +) -> MagicMock:
    +    """Return a MagicMock that looks like a resolved EnvironmentConfig."""
    +    cfg = MagicMock()
    +    cfg.url = url
    +    cfg.api_key = api_key
    +    cfg.name = name
    +    return cfg
    +
    +
    +def _fake_flow_obj(
    +    flow_id: UUID = _FLOW_ID,
    +    name: str = "My Flow",
    +    flow_dict: dict | None = None,
    +) -> MagicMock:
    +    """Return a MagicMock that looks like a langflow_sdk Flow object."""
    +    flow = MagicMock()
    +    flow.id = flow_id
    +    flow.name = name
    +    flow.model_dump.return_value = flow_dict if flow_dict is not None else _FLOW_DICT
    +    return flow
    +
    +
    +def _fake_project(
    +    name: str = "My Project",
    +    project_id: UUID = _PROJECT_ID,
    +    flows: list | None = None,
    +) -> MagicMock:
    +    """Return a MagicMock that looks like a langflow_sdk Project object."""
    +    proj = MagicMock()
    +    proj.name = name
    +    proj.id = project_id
    +    proj.flows = flows if flows is not None else []
    +    return proj
    +
    +
    +def _fake_project_summary(
    +    name: str = "My Project",
    +    project_id: UUID = _PROJECT_ID,
    +) -> SimpleNamespace:
    +    """Return a lightweight project summary like sdk.list_projects() provides."""
    +    return SimpleNamespace(name=name, id=project_id)
    +
    +
    +def _make_client_mock(flows: list | None = None) -> MagicMock:
    +    """Return a mock SDK client pre-configured for common pull scenarios."""
    +    client = MagicMock()
    +    client.list_flows.return_value = flows if flows is not None else []
    +    client.list_projects.return_value = []
    +    return client
    +
    +
    +def _make_sdk_mock(
    +    client_mock: MagicMock | None = None,
    +    flow_json: str | None = None,
    +) -> MagicMock:
    +    """Return a mock langflow_sdk module wired to client_mock."""
    +    if client_mock is None:
    +        client_mock = _make_client_mock()
    +
    +    sdk = MagicMock()
    +    sdk.Client.return_value = client_mock
    +    sdk.normalize_flow.side_effect = lambda d, **_kw: d
    +    sdk.flow_to_json.return_value = flow_json if flow_json is not None else _FLOW_JSON
    +    return sdk
    +
    +
    +def _run_pull(
    +    *,
    +    tmp_path: Path | None = None,  # noqa: ARG001 — kept for call-site readability
    +    env: str | None = None,
    +    output_dir: str | None = None,
    +    flow_id: str | None = None,
    +    project: str | None = None,
    +    project_id: str | None = None,
    +    environments_file: str | None = None,
    +    strip_secrets: bool = False,
    +    indent: int = 2,
    +    sdk_mock: MagicMock | None = None,
    +    env_cfg: MagicMock | None = None,
    +) -> None:
    +    """Invoke pull_command with mocked SDK and env resolution."""
    +    from lfx.cli.pull import pull_command
    +
    +    mock_sdk = sdk_mock if sdk_mock is not None else _make_sdk_mock()
    +    mock_cfg = env_cfg if env_cfg is not None else _fake_env_config()
    +
    +    with (
    +        patch("lfx.cli.pull.load_sdk", return_value=mock_sdk),
    +        patch("lfx.config.resolve_environment", return_value=mock_cfg),
    +    ):
    +        pull_command(
    +            env=env,
    +            output_dir=output_dir,
    +            flow_id=flow_id,
    +            project=project,
    +            project_id=project_id,
    +            environments_file=environments_file,
    +            target=_BASE_URL,
    +            api_key=_API_KEY,
    +            strip_secrets=strip_secrets,
    +            indent=indent,
    +        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# _safe_filename
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestSafeFilename:
    +    def test_alphanumeric_passthrough(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("MyFlow123") == "MyFlow123"
    +
    +    def test_spaces_become_underscores(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("My Flow") == "My_Flow"
    +
    +    def test_multiple_spaces_become_underscores(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("a b c") == "a_b_c"
    +
    +    def test_special_chars_become_underscores(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("flow@version#1!")
    +        assert "@" not in result
    +        assert "#" not in result
    +        assert "!" not in result
    +
    +    def test_leading_whitespace_stripped(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("  My Flow") == "My_Flow"
    +
    +    def test_trailing_whitespace_stripped(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("My Flow  ") == "My_Flow"
    +
    +    def test_both_ends_stripped(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("  My Flow  ") == "My_Flow"
    +
    +    def test_hyphens_preserved(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("my-flow") == "my-flow"
    +
    +    def test_underscores_preserved(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("my_flow") == "my_flow"
    +
    +    def test_empty_string_returns_empty(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        assert _safe_filename("") == ""
    +
    +    def test_slash_becomes_underscore(self):
    +        from lfx.cli.common import safe_filename as _safe_filename
    +
    +        result = _safe_filename("flows/version")
    +        assert "/" not in result
    +
    +
    +# ---------------------------------------------------------------------------
    +# _write_flow helper
    +# ---------------------------------------------------------------------------
    +
    +
class TestWriteFlow:
    """Behavioural tests for lfx.cli.pull._write_flow.

    Covers the created/updated/unchanged/error status values, the on-disk
    side effects (content, skipped rewrites), the metadata carried on the
    result, and how the SDK mock's normalize_flow/flow_to_json are invoked.
    """

    def test_status_created_when_file_does_not_exist(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock()
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.status == "created"

    def test_status_updated_when_file_exists_with_different_content(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock(flow_json=_FLOW_JSON)
        # Write different content first
        (tmp_path / "My_Flow.json").write_text("old content", encoding="utf-8")
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.status == "updated"

    def test_status_unchanged_when_file_exists_with_same_content(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock(flow_json=_FLOW_JSON)
        # Write identical content first
        (tmp_path / "My_Flow.json").write_text(_FLOW_JSON, encoding="utf-8")
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.status == "unchanged"

    def test_status_error_when_normalize_raises(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock()
        sdk.normalize_flow.side_effect = RuntimeError("normalize failed")
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.status == "error"
        assert result.error is not None
        assert "normalize failed" in result.error

    def test_error_result_is_not_ok(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock()
        sdk.normalize_flow.side_effect = RuntimeError("boom")
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.ok is False

    def test_written_file_has_correct_content(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        expected_content = json.dumps(_FLOW_DICT, indent=4)
        sdk = _make_sdk_mock(flow_json=expected_content)
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=4)
        assert result.path.exists()
        assert result.path.read_text(encoding="utf-8") == expected_content

    def test_file_not_written_when_unchanged(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock(flow_json=_FLOW_JSON)
        out_path = tmp_path / "My_Flow.json"
        out_path.write_text(_FLOW_JSON, encoding="utf-8")
        mtime_before = out_path.stat().st_mtime
        _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        # An unchanged mtime proves the file was not rewritten with identical bytes.
        assert out_path.stat().st_mtime == mtime_before

    def test_result_contains_correct_flow_id(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj(flow_id=_FLOW_ID)
        sdk = _make_sdk_mock()
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.flow_id == _FLOW_ID

    def test_result_contains_correct_flow_name(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj(name="Special Flow")
        sdk = _make_sdk_mock()
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        assert result.flow_name == "Special Flow"

    def test_normalize_called_with_strip_secrets_true(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock()
        _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=True, indent=2)
        sdk.normalize_flow.assert_called_once()
        call_kwargs = sdk.normalize_flow.call_args.kwargs
        assert call_kwargs.get("strip_secrets") is True

    def test_normalize_called_with_strip_secrets_false(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock()
        _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        call_kwargs = sdk.normalize_flow.call_args.kwargs
        assert call_kwargs.get("strip_secrets") is False

    def test_flow_to_json_called_with_indent(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj()
        sdk = _make_sdk_mock()
        _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=4)
        sdk.flow_to_json.assert_called_once()
        call_kwargs = sdk.flow_to_json.call_args.kwargs
        assert call_kwargs.get("indent") == 4

    def test_error_result_has_dummy_path(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj(flow_id=_FLOW_ID)
        sdk = _make_sdk_mock()
        sdk.normalize_flow.side_effect = RuntimeError("fail")
        result = _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)
        # Even on error the result path should identify the failing flow by id.
        assert str(_FLOW_ID) in str(result.path)

    def test_logs_debug_when_write_fails(self, tmp_path: Path):
        from lfx.cli.pull import _write_flow

        flow = _fake_flow_obj(flow_id=_FLOW_ID)
        sdk = _make_sdk_mock()
        sdk.normalize_flow.side_effect = RuntimeError("fail")

        with patch("lfx.cli.pull.logger") as mock_logger:
            _write_flow(flow, sdk=sdk, dest_dir=tmp_path, strip_secrets=False, indent=2)

        mock_logger.debug.assert_called_once_with("Failed to write flow %s", _FLOW_ID, exc_info=True)
    +
    +
    +# ---------------------------------------------------------------------------
    +# PullResult
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestPullResult:
    +    def test_created_is_ok(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult
    +
    +        r = PullResult(flow_id=_FLOW_ID, flow_name="F", path=tmp_path / "f.json", status="created")
    +        assert r.ok is True
    +
    +    def test_updated_is_ok(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult
    +
    +        r = PullResult(flow_id=_FLOW_ID, flow_name="F", path=tmp_path / "f.json", status="updated")
    +        assert r.ok is True
    +
    +    def test_unchanged_is_ok(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult
    +
    +        r = PullResult(flow_id=_FLOW_ID, flow_name="F", path=tmp_path / "f.json", status="unchanged")
    +        assert r.ok is True
    +
    +    def test_error_is_not_ok(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult
    +
    +        r = PullResult(flow_id=_FLOW_ID, flow_name="F", path=tmp_path / "f.json", status="error", error="timeout")
    +        assert r.ok is False
    +
    +    def test_error_message_stored(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult
    +
    +        r = PullResult(flow_id=_FLOW_ID, flow_name="F", path=tmp_path / "f.json", status="error", error="some error")
    +        assert r.error == "some error"
    +
    +
    +# ---------------------------------------------------------------------------
    +# pull_command — single flow by ID
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestPullCommandSingleFlow:
    +    def test_file_is_written_to_output_dir(self, tmp_path: Path):
    +        flow = _fake_flow_obj()
    +        client = _make_client_mock()
    +        client.get_flow.return_value = flow
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _run_pull(tmp_path=tmp_path, flow_id=str(_FLOW_ID), output_dir=str(tmp_path), sdk_mock=sdk)
    +        json_files = list(tmp_path.glob("*.json"))
    +        assert len(json_files) == 1
    +
    +    def test_uses_correct_flow_id_uuid(self, tmp_path: Path):
    +        flow = _fake_flow_obj()
    +        client = _make_client_mock()
    +        client.get_flow.return_value = flow
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _run_pull(tmp_path=tmp_path, flow_id=str(_FLOW_ID), output_dir=str(tmp_path), sdk_mock=sdk)
    +        client.get_flow.assert_called_once_with(_FLOW_ID)
    +
    +    def test_exits_0_on_success(self, tmp_path: Path):
    +        """pull_command should not raise typer.Exit on success."""
    +        flow = _fake_flow_obj()
    +        client = _make_client_mock()
    +        client.get_flow.return_value = flow
    +        sdk = _make_sdk_mock(client_mock=client)
    +        # Should not raise
    +        _run_pull(tmp_path=tmp_path, flow_id=str(_FLOW_ID), output_dir=str(tmp_path), sdk_mock=sdk)
    +
    +    def test_exits_1_if_get_flow_raises(self, tmp_path: Path):
    +        client = _make_client_mock()
    +        client.get_flow.side_effect = RuntimeError("not found")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_pull(tmp_path=tmp_path, flow_id=str(_FLOW_ID), output_dir=str(tmp_path), sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_single_flow_write_error_exits_1(self, tmp_path: Path):
    +        flow = _fake_flow_obj()
    +        client = _make_client_mock()
    +        client.get_flow.return_value = flow
    +        sdk = _make_sdk_mock(client_mock=client)
    +        sdk.normalize_flow.side_effect = RuntimeError("normalize failed")
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_pull(tmp_path=tmp_path, flow_id=str(_FLOW_ID), output_dir=str(tmp_path), sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# pull_command — project by name
    +# ---------------------------------------------------------------------------
    +
    +
class TestPullCommandProjectByName:
    """pull_command behaviour when a project is selected via --project <name>.

    Name resolution goes through client.list_projects; the matching summary's
    id is then passed to client.get_project to fetch the flows.
    """

    def test_resolves_project_from_list_projects(self, tmp_path: Path):
        flow = _fake_flow_obj()
        proj = _fake_project(name="My Project", flows=[flow])
        client = _make_client_mock()
        client.list_projects.return_value = [_fake_project_summary(name="My Project", project_id=proj.id)]
        client.get_project.return_value = proj
        sdk = _make_sdk_mock(client_mock=client)
        _run_pull(tmp_path=tmp_path, project="My Project", output_dir=str(tmp_path), sdk_mock=sdk)
        client.list_projects.assert_called_once()
        client.get_project.assert_called_once_with(proj.id)

    def test_exits_1_if_project_name_not_found(self, tmp_path: Path):
        client = _make_client_mock()
        client.list_projects.return_value = [_fake_project_summary(name="Other Project")]
        sdk = _make_sdk_mock(client_mock=client)
        with pytest.raises(typer.Exit) as exc_info:
            _run_pull(tmp_path=tmp_path, project="Nonexistent", output_dir=str(tmp_path), sdk_mock=sdk)
        assert exc_info.value.exit_code == 1

    def test_exits_1_when_no_projects_exist(self, tmp_path: Path):
        client = _make_client_mock()
        client.list_projects.return_value = []
        sdk = _make_sdk_mock(client_mock=client)
        with pytest.raises(typer.Exit) as exc_info:
            _run_pull(tmp_path=tmp_path, project="My Project", output_dir=str(tmp_path), sdk_mock=sdk)
        assert exc_info.value.exit_code == 1

    def test_writes_all_flows_in_project(self, tmp_path: Path):
        flow1 = _fake_flow_obj(flow_id=_FLOW_ID, name="Flow One")
        flow2 = _fake_flow_obj(flow_id=_FLOW_ID_2, name="Flow Two")
        proj = _fake_project(name="My Project", flows=[flow1, flow2])
        client = _make_client_mock()
        client.list_projects.return_value = [_fake_project_summary(name="My Project", project_id=proj.id)]
        client.get_project.return_value = proj
        sdk = _make_sdk_mock(client_mock=client)
        _run_pull(tmp_path=tmp_path, project="My Project", output_dir=str(tmp_path), sdk_mock=sdk)
        # One normalize_flow call per flow in the project.
        assert sdk.normalize_flow.call_count == 2

    def test_uses_first_matching_project(self, tmp_path: Path):
        # Two projects share the same name; only the first summary should win.
        proj1 = _fake_project(name="My Project", project_id=_PROJECT_ID, flows=[_fake_flow_obj()])
        proj2 = _fake_project(
            name="My Project",
            project_id=UUID("bbbbbbbb-0000-0000-0000-000000000002"),
            flows=[],
        )
        client = _make_client_mock()
        client.list_projects.return_value = [
            _fake_project_summary(name="My Project", project_id=proj1.id),
            _fake_project_summary(name="My Project", project_id=proj2.id),
        ]
        client.get_project.return_value = proj1
        sdk = _make_sdk_mock(client_mock=client)
        _run_pull(tmp_path=tmp_path, project="My Project", output_dir=str(tmp_path), sdk_mock=sdk)
        # Only flows from proj1 are written (1 flow)
        assert sdk.normalize_flow.call_count == 1

    def test_exits_1_if_get_project_for_name_raises(self, tmp_path: Path):
        client = _make_client_mock()
        client.list_projects.return_value = [_fake_project_summary(name="My Project", project_id=_PROJECT_ID)]
        client.get_project.side_effect = RuntimeError("boom")
        sdk = _make_sdk_mock(client_mock=client)

        with pytest.raises(typer.Exit) as exc_info:
            _run_pull(tmp_path=tmp_path, project="My Project", output_dir=str(tmp_path), sdk_mock=sdk)
        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# pull_command — project by ID
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestPullCommandProjectById:
    +    def test_uses_get_project_with_uuid(self, tmp_path: Path):
    +        proj = _fake_project(flows=[_fake_flow_obj()])
    +        client = _make_client_mock()
    +        client.get_project.return_value = proj
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _run_pull(
    +            tmp_path=tmp_path,
    +            project_id=str(_PROJECT_ID),
    +            output_dir=str(tmp_path),
    +            sdk_mock=sdk,
    +        )
    +        client.get_project.assert_called_once_with(_PROJECT_ID)
    +
    +    def test_exits_1_if_get_project_raises(self, tmp_path: Path):
    +        client = _make_client_mock()
    +        client.get_project.side_effect = RuntimeError("not found")
    +        sdk = _make_sdk_mock(client_mock=client)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_pull(
    +                tmp_path=tmp_path,
    +                project_id=str(_PROJECT_ID),
    +                output_dir=str(tmp_path),
    +                sdk_mock=sdk,
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_skips_list_projects_when_project_id_given(self, tmp_path: Path):
    +        proj = _fake_project(flows=[_fake_flow_obj()])
    +        client = _make_client_mock()
    +        client.get_project.return_value = proj
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _run_pull(
    +            tmp_path=tmp_path,
    +            project_id=str(_PROJECT_ID),
    +            output_dir=str(tmp_path),
    +            sdk_mock=sdk,
    +        )
    +        client.list_projects.assert_not_called()
    +
    +    def test_writes_flows_from_fetched_project(self, tmp_path: Path):
    +        flow1 = _fake_flow_obj(flow_id=_FLOW_ID, name="Alpha")
    +        flow2 = _fake_flow_obj(flow_id=_FLOW_ID_2, name="Beta")
    +        proj = _fake_project(flows=[flow1, flow2])
    +        client = _make_client_mock()
    +        client.get_project.return_value = proj
    +        sdk = _make_sdk_mock(client_mock=client)
    +        _run_pull(
    +            tmp_path=tmp_path,
    +            project_id=str(_PROJECT_ID),
    +            output_dir=str(tmp_path),
    +            sdk_mock=sdk,
    +        )
    +        assert sdk.normalize_flow.call_count == 2
    +
    +
    +# ---------------------------------------------------------------------------
    +# pull_command — all flows
    +# ---------------------------------------------------------------------------
    +
    +
class TestPullCommandAllFlows:
    """pull_command behaviour when no flow/project selector is given (pull all)."""

    def test_calls_list_flows_with_correct_args(self, tmp_path: Path):
        client = _make_client_mock(flows=[_fake_flow_obj()])
        sdk = _make_sdk_mock(client_mock=client)
        _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        client.list_flows.assert_called_once_with(get_all=True, remove_example_flows=True)

    def test_prints_warning_and_returns_when_no_flows(self, tmp_path: Path):
        client = _make_client_mock(flows=[])
        sdk = _make_sdk_mock(client_mock=client)
        # Should not raise, should return early
        _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        sdk.normalize_flow.assert_not_called()

    def test_writes_multiple_flows(self, tmp_path: Path):
        flow1 = _fake_flow_obj(flow_id=_FLOW_ID, name="Alpha")
        flow2 = _fake_flow_obj(flow_id=_FLOW_ID_2, name="Beta")
        client = _make_client_mock(flows=[flow1, flow2])
        sdk = _make_sdk_mock(client_mock=client)
        _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        assert sdk.normalize_flow.call_count == 2

    def test_exits_1_if_list_flows_raises(self, tmp_path: Path):
        client = _make_client_mock()
        client.list_flows.side_effect = RuntimeError("connection refused")
        sdk = _make_sdk_mock(client_mock=client)
        with pytest.raises(typer.Exit) as exc_info:
            _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        assert exc_info.value.exit_code == 1

    def test_exits_1_if_any_flow_write_fails(self, tmp_path: Path):
        flow1 = _fake_flow_obj(flow_id=_FLOW_ID, name="Good")
        flow2 = _fake_flow_obj(flow_id=_FLOW_ID_2, name="Bad")
        client = _make_client_mock(flows=[flow1, flow2])
        sdk = _make_sdk_mock(client_mock=client)
        call_count = 0

        # Fail only on the second normalize call so one flow succeeds first.
        def maybe_fail(d, **_kw):
            nonlocal call_count
            call_count += 1
            if call_count == 2:
                msg = "write failed"
                raise RuntimeError(msg)
            return d

        sdk.normalize_flow.side_effect = maybe_fail
        with pytest.raises(typer.Exit) as exc_info:
            _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        assert exc_info.value.exit_code == 1

    def test_all_flows_attempted_even_on_partial_error(self, tmp_path: Path):
        flow1 = _fake_flow_obj(flow_id=_FLOW_ID, name="Good")
        flow2 = _fake_flow_obj(flow_id=_FLOW_ID_2, name="Bad")
        client = _make_client_mock(flows=[flow1, flow2])
        sdk = _make_sdk_mock(client_mock=client)
        call_count = 0

        # First call fails; a second call proves the loop did not abort early.
        def maybe_fail(d, **_kw):
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                msg = "first fails"
                raise RuntimeError(msg)
            return d

        sdk.normalize_flow.side_effect = maybe_fail
        with pytest.raises(typer.Exit):
            _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        assert call_count == 2
    +
    +
    +# ---------------------------------------------------------------------------
    +# pull_command — error handling
    +# ---------------------------------------------------------------------------
    +
    +
class TestPullCommandErrorHandling:
    """Failure modes: bad config, missing SDK, write errors, and client wiring."""

    def test_config_error_exits_1(self, tmp_path: Path):
        from lfx.cli.pull import pull_command
        from lfx.config import ConfigError

        sdk = _make_sdk_mock()
        with (
            patch("lfx.cli.pull.load_sdk", return_value=sdk),
            patch("lfx.config.resolve_environment", side_effect=ConfigError("bad config")),
            pytest.raises(typer.Exit) as exc_info,
        ):
            pull_command(
                env="nonexistent",
                output_dir=str(tmp_path),
                flow_id=None,
                project=None,
                project_id=None,
                environments_file=None,
                target=None,
                api_key=None,
                strip_secrets=False,
                indent=2,
            )
        assert exc_info.value.exit_code == 1

    def test_sdk_not_installed_raises_bad_parameter(self, tmp_path: Path):
        from lfx.cli.pull import pull_command

        # load_sdk raising BadParameter should propagate to the caller unchanged.
        with (
            patch("lfx.cli.pull.load_sdk", side_effect=typer.BadParameter("langflow-sdk is required")),
            pytest.raises(typer.BadParameter),
        ):
            pull_command(
                env=None,
                output_dir=str(tmp_path),
                flow_id=None,
                project=None,
                project_id=None,
                environments_file=None,
                target=_BASE_URL,
                api_key=_API_KEY,
                strip_secrets=False,
                indent=2,
            )

    def test_pull_errors_exit_1(self, tmp_path: Path):
        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        sdk.normalize_flow.side_effect = RuntimeError("corrupted")
        with pytest.raises(typer.Exit) as exc_info:
            _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk)
        assert exc_info.value.exit_code == 1

    def test_client_constructed_with_resolved_url_and_key(self, tmp_path: Path):
        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        cfg = _fake_env_config(url="http://custom.server", api_key="custom-key")  # pragma: allowlist secret
        _run_pull(tmp_path=tmp_path, output_dir=str(tmp_path), sdk_mock=sdk, env_cfg=cfg)
        sdk.Client.assert_called_once_with(
            base_url="http://custom.server",
            api_key="custom-key",  # pragma: allowlist secret
        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# pull_command — output_dir behaviour
    +# ---------------------------------------------------------------------------
    +
    +
class TestPullCommandOutputDir:
    """Destination-directory handling: default location, creation, nesting, reuse."""

    def test_defaults_to_flows_dir_when_not_specified(self, tmp_path: Path):
        """When output_dir is None the command uses 'flows' as the destination."""
        from lfx.cli.pull import pull_command

        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        cfg = _fake_env_config()

        import os
        from pathlib import Path as _Path

        # chdir into tmp_path so the relative default 'flows' dir lands there.
        original_cwd = _Path.cwd()
        os.chdir(tmp_path)
        try:
            with (
                patch("lfx.cli.pull.load_sdk", return_value=sdk),
                patch("lfx.config.resolve_environment", return_value=cfg),
            ):
                pull_command(
                    env=None,
                    output_dir=None,
                    flow_id=None,
                    project=None,
                    project_id=None,
                    environments_file=None,
                    target=_BASE_URL,
                    api_key=_API_KEY,
                    strip_secrets=False,
                    indent=2,
                )
            flows_dir = tmp_path / "flows"
            assert flows_dir.exists()
        finally:
            # Always restore the working directory for subsequent tests.
            os.chdir(original_cwd)

    def test_creates_output_dir_if_not_exists(self, tmp_path: Path):
        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        new_dir = tmp_path / "brand_new_dir"
        assert not new_dir.exists()
        _run_pull(tmp_path=tmp_path, output_dir=str(new_dir), sdk_mock=sdk)
        assert new_dir.exists()

    def test_uses_custom_dir_when_specified(self, tmp_path: Path):
        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        custom_dir = tmp_path / "my_custom_flows"
        _run_pull(tmp_path=tmp_path, output_dir=str(custom_dir), sdk_mock=sdk)
        assert custom_dir.exists()
        json_files = list(custom_dir.glob("*.json"))
        assert len(json_files) == 1

    def test_creates_nested_output_dir(self, tmp_path: Path):
        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        nested_dir = tmp_path / "a" / "b" / "c"
        _run_pull(tmp_path=tmp_path, output_dir=str(nested_dir), sdk_mock=sdk)
        assert nested_dir.exists()

    def test_uses_existing_output_dir(self, tmp_path: Path):
        flow = _fake_flow_obj()
        client = _make_client_mock(flows=[flow])
        sdk = _make_sdk_mock(client_mock=client)
        existing_dir = tmp_path / "existing"
        existing_dir.mkdir()
        _run_pull(tmp_path=tmp_path, output_dir=str(existing_dir), sdk_mock=sdk)
        json_files = list(existing_dir.glob("*.json"))
        assert len(json_files) == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# _render_results
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestRenderResults:
    +    def test_does_not_crash_with_mixed_statuses(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult, _render_results
    +
    +        results = [
    +            PullResult(flow_id=_FLOW_ID, flow_name="A", path=tmp_path / "a.json", status="created"),
    +            PullResult(flow_id=_FLOW_ID_2, flow_name="B", path=tmp_path / "b.json", status="updated"),
    +            PullResult(
    +                flow_id=UUID("cccccccc-0000-0000-0000-000000000001"),
    +                flow_name="C",
    +                path=tmp_path / "c.json",
    +                status="unchanged",
    +            ),
    +            PullResult(
    +                flow_id=UUID("dddddddd-0000-0000-0000-000000000001"),
    +                flow_name="D",
    +                path=tmp_path / "d.json",
    +                status="error",
    +                error="something went wrong",
    +            ),
    +        ]
    +        # Should not raise
    +        _render_results(results)
    +
    +    def test_does_not_crash_with_empty_results(self):
    +        from lfx.cli.pull import _render_results
    +
    +        _render_results([])
    +
    +    def test_does_not_crash_with_all_unchanged(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult, _render_results
    +
    +        results = [
    +            PullResult(flow_id=_FLOW_ID, flow_name="A", path=tmp_path / "a.json", status="unchanged"),
    +            PullResult(flow_id=_FLOW_ID_2, flow_name="B", path=tmp_path / "b.json", status="unchanged"),
    +        ]
    +        _render_results(results)
    +
    +    def test_does_not_crash_with_all_errors(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult, _render_results
    +
    +        results = [
    +            PullResult(
    +                flow_id=_FLOW_ID,
    +                flow_name="A",
    +                path=tmp_path / "a.json",
    +                status="error",
    +                error="network error",
    +            ),
    +        ]
    +        _render_results(results)
    +
    +    def test_does_not_crash_with_single_created(self, tmp_path: Path):
    +        from lfx.cli.pull import PullResult, _render_results
    +
    +        results = [
    +            PullResult(flow_id=_FLOW_ID, flow_name="My Flow", path=tmp_path / "my_flow.json", status="created"),
    +        ]
    +        _render_results(results)
    
  • src/lfx/tests/unit/cli/test_push_command.py+662 0 added
    @@ -0,0 +1,662 @@
    +"""Unit tests for lfx push -- push_command and helpers.
    +
    +All tests run entirely in-process; no real Langflow instance or SDK required.
    +The SDK module is replaced wholesale with MagicMock so only the push logic
    +(file loading, upsert routing, dry-run, project resolution, result rendering)
    +is under test.
    +"""
    +# pragma: allowlist secret -- all credentials in this file are fake test data
    +
    +from __future__ import annotations
    +
    +import json
    +from typing import TYPE_CHECKING
    +from unittest.mock import MagicMock, patch
    +from uuid import UUID
    +
    +import pytest
    +import typer
    +
    +if TYPE_CHECKING:
    +    from pathlib import Path
    +
    +# ---------------------------------------------------------------------------
    +# Shared constants
    +# ---------------------------------------------------------------------------
    +
# Fake endpoint and credential — never a real server or secret.
_BASE_URL = "http://langflow.test"
_API_KEY = "test-key"  # pragma: allowlist secret
# Fixed UUIDs so tests can assert on exact identifiers deterministically.
_FLOW_ID = UUID("aaaaaaaa-0000-0000-0000-000000000001")
_FLOW_ID_2 = UUID("aaaaaaaa-0000-0000-0000-000000000002")
_PROJECT_ID = UUID("bbbbbbbb-0000-0000-0000-000000000001")

# Minimal flow dictionaries used as fixture payloads throughout this module.
_FLOW_DICT: dict = {
    "id": str(_FLOW_ID),
    "name": "My Test Flow",
    "data": {"nodes": [], "edges": []},
}

_FLOW_DICT_2: dict = {
    "id": str(_FLOW_ID_2),
    "name": "Second Flow",
    "data": {"nodes": [], "edges": []},
}
    +
    +
    +# ---------------------------------------------------------------------------
    +# Fake exception class — avoids importing langflow_sdk in isolation mode
    +# ---------------------------------------------------------------------------
    +
    +
    +class _FakeLangflowHTTPError(Exception):
    +    """Stand-in for langflow_sdk.LangflowHTTPError in unit tests."""
    +
    +    def __init__(self, status_code: int, detail: str) -> None:
    +        self.status_code = status_code
    +        super().__init__(detail)
    +
    +
    +# ---------------------------------------------------------------------------
    +# Test helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _write_flow(tmp_path: Path, name: str, flow: dict | None = None) -> Path:
    +    p = tmp_path / name
    +    p.parent.mkdir(parents=True, exist_ok=True)
    +    p.write_text(json.dumps(flow if flow is not None else _FLOW_DICT), encoding="utf-8")
    +    return p
    +
    +
    +def _fake_project(name: str = "My Project", project_id: UUID = _PROJECT_ID) -> MagicMock:
    +    """Return a MagicMock that looks like a langflow_sdk.models.Project."""
    +    proj = MagicMock()
    +    proj.name = name
    +    proj.id = project_id
    +    return proj
    +
    +
    +def _fake_flow(flow_id: UUID = _FLOW_ID, name: str = "My Test Flow") -> MagicMock:
    +    """Return a MagicMock that looks like a langflow_sdk.models.Flow."""
    +    flow = MagicMock()
    +    flow.id = flow_id
    +    flow.name = name
    +    return flow
    +
    +
    +def _make_client_mock(*, create: bool = True) -> MagicMock:
    +    """Return a mock SDK client whose upsert_flow returns (flow, created)."""
    +    client = MagicMock()
    +    client.upsert_flow.return_value = (_fake_flow(), create)
    +    client.list_projects.return_value = []
    +    client.create_project.return_value = _fake_project()
    +    return client
    +
    +
    +def _make_sdk_mock(client_mock: MagicMock | None = None) -> MagicMock:
    +    """Return a mock langflow_sdk module wired to client_mock.
    +
    +    SDK exception and model types are replaced with lightweight fakes so
    +    the test file has zero imports from langflow_sdk.
    +    """
    +    if client_mock is None:
    +        client_mock = _make_client_mock()
    +
    +    sdk = MagicMock()
    +    sdk.Client.return_value = client_mock
    +    sdk.LangflowHTTPError = _FakeLangflowHTTPError
    +    # FlowCreate / ProjectCreate stay as MagicMock callables — push.py calls
    +    # them as constructors and passes the result to client.upsert_flow /
    +    # client.create_project.  We check the constructor call kwargs in tests.
    +    return sdk
    +
    +
    +def _run_push(
    +    flow_paths: list[str],
    +    *,
    +    dir_path: str | None = None,
    +    project: str | None = None,
    +    project_id: str | None = None,
    +    environments_file: str | None = None,
    +    env: str | None = None,
    +    dry_run: bool = False,
    +    normalize: bool = False,
    +    strip_secrets: bool = False,
    +    sdk_mock: MagicMock | None = None,
    +) -> None:
    +    """Invoke push_command with mocked SDK, using --target for inline env resolution."""
    +    from lfx.cli.push import push_command
    +
    +    mock = sdk_mock if sdk_mock is not None else _make_sdk_mock()
    +    with patch("lfx.cli.push.load_sdk", return_value=mock):
    +        push_command(
    +            flow_paths=flow_paths,
    +            env=env,
    +            dir_path=dir_path,
    +            project=project,
    +            project_id=project_id,
    +            environments_file=environments_file,
    +            target=_BASE_URL,
    +            api_key=_API_KEY,
    +            dry_run=dry_run,
    +            normalize=normalize,
    +            strip_secrets=strip_secrets,
    +        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# PushResult
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestPushResult:
    +    def test_created_is_ok(self, tmp_path):
    +        from lfx.cli.push import PushResult
    +
    +        r = PushResult(path=tmp_path / "f.json", flow_id=_FLOW_ID, flow_name="F", status="created")
    +        assert r.ok is True
    +
    +    def test_updated_is_ok(self, tmp_path):
    +        from lfx.cli.push import PushResult
    +
    +        r = PushResult(path=tmp_path / "f.json", flow_id=_FLOW_ID, flow_name="F", status="updated")
    +        assert r.ok is True
    +
    +    def test_dry_run_is_ok(self, tmp_path):
    +        from lfx.cli.push import PushResult
    +
    +        r = PushResult(path=tmp_path / "f.json", flow_id=_FLOW_ID, flow_name="F", status="dry-run")
    +        assert r.ok is True
    +
    +    def test_error_is_not_ok(self, tmp_path):
    +        from lfx.cli.push import PushResult
    +
    +        r = PushResult(path=tmp_path / "f.json", flow_id=_FLOW_ID, flow_name="F", status="error", error="timeout")
    +        assert r.ok is False
    +
    +    def test_error_message_stored(self, tmp_path):
    +        from lfx.cli.push import PushResult
    +
    +        r = PushResult(path=tmp_path / "f.json", flow_id=_FLOW_ID, flow_name="F", status="error", error="some error")
    +        assert r.error == "some error"
    +
    +
    +# ---------------------------------------------------------------------------
    +# _load_flow_file
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoadFlowFile:
    +    def test_valid_json_returns_dict(self, tmp_path):
    +        from lfx.cli.push import _load_flow_file
    +
    +        p = _write_flow(tmp_path, "flow.json")
    +        result = _load_flow_file(p)
    +        assert result["id"] == str(_FLOW_ID)
    +        assert result["name"] == "My Test Flow"
    +
    +    def test_invalid_json_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _load_flow_file
    +
    +        p = tmp_path / "bad.json"
    +        p.write_text("not valid json", encoding="utf-8")
    +        with pytest.raises(typer.Exit):
    +            _load_flow_file(p)
    +
    +    def test_missing_file_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _load_flow_file
    +
    +        with pytest.raises(typer.Exit):
    +            _load_flow_file(tmp_path / "nonexistent.json")
    +
    +
    +# ---------------------------------------------------------------------------
    +# _extract_flow_id
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestExtractFlowId:
    +    def test_valid_uuid_returned(self, tmp_path):
    +        from lfx.cli.push import _extract_flow_id
    +
    +        path = tmp_path / "f.json"
    +        result = _extract_flow_id({"id": str(_FLOW_ID)}, path)
    +        assert result == _FLOW_ID
    +
    +    def test_missing_id_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _extract_flow_id
    +
    +        with pytest.raises(typer.Exit):
    +            _extract_flow_id({}, tmp_path / "f.json")
    +
    +    def test_null_id_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _extract_flow_id
    +
    +        with pytest.raises(typer.Exit):
    +            _extract_flow_id({"id": None}, tmp_path / "f.json")
    +
    +    def test_invalid_uuid_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _extract_flow_id
    +
    +        with pytest.raises(typer.Exit):
    +            _extract_flow_id({"id": "not-a-uuid"}, tmp_path / "f.json")
    +
    +    def test_uuid_type_returned(self, tmp_path):
    +        from lfx.cli.push import _extract_flow_id
    +
    +        path = tmp_path / "f.json"
    +        result = _extract_flow_id({"id": "aaaaaaaa-0000-0000-0000-000000000001"}, path)
    +        assert isinstance(result, UUID)
    +
    +
    +# ---------------------------------------------------------------------------
    +# _collect_flow_files
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestCollectFlowFiles:
    +    def test_single_path_returned(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        p = _write_flow(tmp_path, "a.json")
    +        result = _collect_flow_files([str(p)], None)
    +        assert result == [p]
    +
    +    def test_multiple_paths_returned(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        p1 = _write_flow(tmp_path, "a.json")
    +        p2 = _write_flow(tmp_path, "b.json", _FLOW_DICT_2)
    +        result = _collect_flow_files([str(p1), str(p2)], None)
    +        assert set(result) == {p1, p2}
    +
    +    def test_dir_finds_all_json(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        (d / "a.json").write_text("{}", encoding="utf-8")
    +        (d / "b.json").write_text("{}", encoding="utf-8")
    +        (d / "notes.txt").write_text("ignore", encoding="utf-8")
    +        result = _collect_flow_files([], str(d))
    +        assert len(result) == 2
    +        assert all(p.suffix == ".json" for p in result)
    +
    +    def test_dir_and_paths_combined(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        dir_flow = d / "dir.json"
    +        dir_flow.write_text("{}", encoding="utf-8")
    +        extra = _write_flow(tmp_path, "extra.json")
    +        result = _collect_flow_files([str(extra)], str(d))
    +        assert dir_flow in result
    +        assert extra in result
    +
    +    def test_missing_path_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        with pytest.raises(typer.Exit):
    +            _collect_flow_files([str(tmp_path / "missing.json")], None)
    +
    +    def test_dir_is_file_raises_exit(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        p = _write_flow(tmp_path, "a.json")
    +        with pytest.raises(typer.Exit):
    +            _collect_flow_files([], str(p))  # file passed as dir
    +
    +    def test_empty_dir_returns_empty_list(self, tmp_path):
    +        from lfx.cli.push import _collect_flow_files
    +
    +        d = tmp_path / "empty"
    +        d.mkdir()
    +        result = _collect_flow_files([], str(d))
    +        assert result == []
    +
    +
    +# ---------------------------------------------------------------------------
    +# _upsert_single
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestUpsertSingle:
    +    def _flow_create_mock(self) -> MagicMock:
    +        return MagicMock()
    +
    +    def test_dry_run_returns_dry_run_status(self, tmp_path):
    +        from lfx.cli.push import _upsert_single
    +
    +        client = MagicMock()
    +        sdk = _make_sdk_mock(client)
    +        result = _upsert_single(
    +            client,
    +            sdk,
    +            tmp_path / "f.json",
    +            _FLOW_ID,
    +            self._flow_create_mock(),
    +            dry_run=True,
    +            flow_name="T",
    +            base_url="http://test",
    +        )
    +        assert result.status == "dry-run"
    +        assert result.ok is True
    +        client.upsert_flow.assert_not_called()
    +
    +    def test_create_returns_created_status(self, tmp_path):
    +        from lfx.cli.push import _upsert_single
    +
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        result = _upsert_single(
    +            client,
    +            sdk,
    +            tmp_path / "f.json",
    +            _FLOW_ID,
    +            self._flow_create_mock(),
    +            dry_run=False,
    +            flow_name="T",
    +            base_url="http://test",
    +        )
    +        assert result.status == "created"
    +        assert result.ok is True
    +
    +    def test_update_returns_updated_status(self, tmp_path):
    +        from lfx.cli.push import _upsert_single
    +
    +        client = _make_client_mock(create=False)
    +        sdk = _make_sdk_mock(client)
    +        result = _upsert_single(
    +            client,
    +            sdk,
    +            tmp_path / "f.json",
    +            _FLOW_ID,
    +            self._flow_create_mock(),
    +            dry_run=False,
    +            flow_name="T",
    +            base_url="http://test",
    +        )
    +        assert result.status == "updated"
    +        assert result.ok is True
    +
    +    def test_http_error_returns_error_status(self, tmp_path):
    +        from lfx.cli.push import _upsert_single
    +
    +        client = _make_client_mock()
    +        client.upsert_flow.side_effect = _FakeLangflowHTTPError(500, "server error")
    +        sdk = _make_sdk_mock(client)
    +        result = _upsert_single(
    +            client,
    +            sdk,
    +            tmp_path / "f.json",
    +            _FLOW_ID,
    +            self._flow_create_mock(),
    +            dry_run=False,
    +            flow_name="T",
    +            base_url="http://test",
    +        )
    +        assert result.status == "error"
    +        assert result.error is not None
    +        assert result.ok is False
    +
    +    def test_upsert_called_with_correct_flow_id(self, tmp_path):
    +        from lfx.cli.push import _upsert_single
    +
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        fc = self._flow_create_mock()
    +        _upsert_single(
    +            client, sdk, tmp_path / "f.json", _FLOW_ID, fc, dry_run=False, flow_name="T", base_url="http://test"
    +        )
    +        client.upsert_flow.assert_called_once_with(_FLOW_ID, fc)
    +
    +
    +# ---------------------------------------------------------------------------
    +# _find_or_create_project
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFindOrCreateProject:
    +    def test_found_project_returns_id(self):
    +        from lfx.cli.push import _find_or_create_project
    +
    +        client = MagicMock()
    +        client.list_projects.return_value = [_fake_project(name="MyProj")]
    +        sdk = _make_sdk_mock(client)
    +        result = _find_or_create_project(client, sdk, "MyProj", dry_run=False)
    +        assert result == _PROJECT_ID
    +        client.create_project.assert_not_called()
    +
    +    def test_not_found_creates_and_returns_id(self):
    +        from lfx.cli.push import _find_or_create_project
    +
    +        client = MagicMock()
    +        client.list_projects.return_value = []
    +        client.create_project.return_value = _fake_project(name="NewProj")
    +        sdk = _make_sdk_mock(client)
    +        result = _find_or_create_project(client, sdk, "NewProj", dry_run=False)
    +        assert result == _PROJECT_ID
    +        client.create_project.assert_called_once()
    +
    +    def test_dry_run_returns_none_without_creating(self):
    +        from lfx.cli.push import _find_or_create_project
    +
    +        client = MagicMock()
    +        client.list_projects.return_value = []
    +        sdk = _make_sdk_mock(client)
    +        result = _find_or_create_project(client, sdk, "Ghost", dry_run=True)
    +        assert result is None
    +        client.create_project.assert_not_called()
    +
    +    def test_dry_run_existing_project_returns_id(self):
    +        from lfx.cli.push import _find_or_create_project
    +
    +        client = MagicMock()
    +        client.list_projects.return_value = [_fake_project(name="Found")]
    +        sdk = _make_sdk_mock(client)
    +        result = _find_or_create_project(client, sdk, "Found", dry_run=True)
    +        assert result == _PROJECT_ID
    +
    +
    +# ---------------------------------------------------------------------------
    +# push_command — integration (mocked SDK)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestPushCommand:
    +    def test_single_file_create(self, tmp_path):
    +        """Single flow JSON → upsert called once → created."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], sdk_mock=sdk)
    +        client.upsert_flow.assert_called_once()
    +        flow_id_arg = client.upsert_flow.call_args[0][0]
    +        assert flow_id_arg == _FLOW_ID
    +
    +    def test_single_file_update(self, tmp_path):
    +        """Second push → update (200)."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=False)
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], sdk_mock=sdk)
    +        client.upsert_flow.assert_called_once()
    +
    +    def test_directory_pushes_all_json_files(self, tmp_path):
    +        """--dir pushes every *.json in the directory."""
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        (d / "a.json").write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +        (d / "b.json").write_text(json.dumps(_FLOW_DICT_2), encoding="utf-8")
    +        (d / "readme.txt").write_text("ignore", encoding="utf-8")
    +
    +        client = _make_client_mock(create=True)
    +        client.upsert_flow.side_effect = [
    +            (_fake_flow(_FLOW_ID), True),
    +            (_fake_flow(_FLOW_ID_2, "Second Flow"), True),
    +        ]
    +        sdk = _make_sdk_mock(client)
    +        _run_push([], dir_path=str(d), sdk_mock=sdk)
    +        assert client.upsert_flow.call_count == 2
    +
    +    def test_dry_run_makes_no_upsert_calls(self, tmp_path):
    +        """Dry-run: upsert_flow is never called."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], dry_run=True, sdk_mock=sdk)
    +        client.upsert_flow.assert_not_called()
    +
    +    def test_dry_run_creates_client_but_not_upsert(self, tmp_path):
    +        """In dry-run mode the SDK Client is constructed (for project lookups).
    +
    +        upsert_flow is never called.
    +        """
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], dry_run=True, sdk_mock=sdk)
    +        sdk.Client.assert_called_once()
    +        client.upsert_flow.assert_not_called()
    +
    +    def test_no_flow_files_exits(self):
    +        """No paths and no --dir → Exit(1)."""
    +        with pytest.raises(typer.Exit):
    +            _run_push([])
    +
    +    def test_flow_missing_id_exits(self, tmp_path):
    +        """JSON without 'id' field → Exit(1)."""
    +        p = tmp_path / "no_id.json"
    +        p.write_text(json.dumps({"name": "Orphan", "data": {}}), encoding="utf-8")
    +        with pytest.raises(typer.Exit):
    +            _run_push([str(p)])
    +
    +    def test_http_error_exits_with_code_1(self, tmp_path):
    +        """HTTP failure on any flow → Exit(1) after all flows attempted."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock()
    +        client.upsert_flow.side_effect = _FakeLangflowHTTPError(500, "oops")
    +        sdk = _make_sdk_mock(client)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_push([str(p)], sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_with_project_name_resolves_folder_id(self, tmp_path):
    +        """--project triggers lookup; resolved folder_id passed to FlowCreate."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=True)
    +        client.list_projects.return_value = [_fake_project(name="My Proj")]
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], project="My Proj", sdk_mock=sdk)
    +        client.list_projects.assert_called_once()
    +        fc_kwargs = sdk.FlowCreate.call_args.kwargs
    +        assert fc_kwargs["folder_id"] == _PROJECT_ID
    +
    +    def test_with_project_name_creates_if_not_found(self, tmp_path):
    +        """--project creates the remote project when it doesn't exist."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=True)
    +        client.list_projects.return_value = []
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], project="New Proj", sdk_mock=sdk)
    +        client.create_project.assert_called_once()
    +
    +    def test_with_project_id_skips_lookup(self, tmp_path):
    +        """--project-id bypasses list_projects entirely."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], project_id=str(_PROJECT_ID), sdk_mock=sdk)
    +        client.list_projects.assert_not_called()
    +        fc_kwargs = sdk.FlowCreate.call_args.kwargs
    +        assert fc_kwargs["folder_id"] == _PROJECT_ID
    +
    +    def test_normalize_calls_sdk_normalize_flow(self, tmp_path):
    +        """--normalize invokes sdk.normalize_flow before upsert."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        sdk.normalize_flow.return_value = _FLOW_DICT
    +        _run_push([str(p)], normalize=True, sdk_mock=sdk)
    +        sdk.normalize_flow.assert_called_once()
    +
    +    def test_no_normalize_skips_sdk_normalize(self, tmp_path):
    +        """With normalize=False the SDK normalize_flow is never called."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], normalize=False, sdk_mock=sdk)
    +        sdk.normalize_flow.assert_not_called()
    +
    +    def test_partial_error_still_attempts_all_files(self, tmp_path):
    +        """An error on one flow does not abort the remaining pushes."""
    +        p1 = _write_flow(tmp_path, "ok.json")
    +        p2 = _write_flow(tmp_path, "bad.json", _FLOW_DICT_2)
    +        client = _make_client_mock()
    +        client.upsert_flow.side_effect = [
    +            (_fake_flow(), True),
    +            _FakeLangflowHTTPError(500, "server down"),
    +        ]
    +        sdk = _make_sdk_mock(client)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_push([str(p1), str(p2)], sdk_mock=sdk)
    +        assert exc_info.value.exit_code == 1
    +        assert client.upsert_flow.call_count == 2  # both files were attempted
    +
    +    def test_client_constructed_with_resolved_url_and_key(self, tmp_path):
    +        """SDK Client is constructed with the inline URL and API key."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock()
    +        sdk = _make_sdk_mock(client)
    +        _run_push([str(p)], sdk_mock=sdk)
    +        sdk.Client.assert_called_once_with(base_url=_BASE_URL, api_key=_API_KEY)
    +
    +    def test_environments_file_resolves_url(self, tmp_path):
    +        """Environment is resolved from a TOML config file (no --target)."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        env_file = tmp_path / "langflow-environments.toml"
    +        env_file.write_text(
    +            f'[environments.ci]\nurl = "{_BASE_URL}"\n',
    +            encoding="utf-8",
    +        )
    +        from lfx.cli.push import push_command
    +
    +        client = _make_client_mock(create=True)
    +        sdk = _make_sdk_mock(client)
    +        with patch("lfx.cli.push.load_sdk", return_value=sdk):
    +            push_command(
    +                flow_paths=[str(p)],
    +                env="ci",
    +                dir_path=None,
    +                project=None,
    +                project_id=None,
    +                environments_file=str(env_file),
    +                target=None,
    +                api_key=None,
    +                dry_run=False,
    +                normalize=False,
    +                strip_secrets=False,
    +            )
    +        sdk.Client.assert_called_once_with(base_url=_BASE_URL, api_key=None)
    +
    +    def test_missing_environments_file_exits(self, tmp_path):
    +        """A non-existent --environments-file → ConfigError → Exit(1)."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        from lfx.cli.push import push_command
    +
    +        sdk = _make_sdk_mock()
    +        with patch("lfx.cli.push.load_sdk", return_value=sdk), pytest.raises(typer.Exit):
    +            push_command(
    +                flow_paths=[str(p)],
    +                env="missing",
    +                dir_path=None,
    +                project=None,
    +                project_id=None,
    +                environments_file=str(tmp_path / "missing.yaml"),
    +                target=None,
    +                api_key=None,
    +                dry_run=False,
    +                normalize=False,
    +                strip_secrets=False,
    +            )
    
  • src/lfx/tests/unit/cli/test_serve_simple.py+56 55 modified
    @@ -6,20 +6,30 @@
     from pathlib import Path
     from unittest.mock import patch
     
    +import pytest
     from typer.testing import CliRunner
     
    +# Tests that invoke `serve` with valid-looking input may hang while langflow
    +# initialises async server infrastructure.  Skip those in CI only; all
    +# import/help/utility/fast-error tests run everywhere.
    +_ci_env = os.environ.get("CI", "")
    +_is_ci = _ci_env.lower() in {"1", "true", "yes"}
    +_skip_in_ci = pytest.mark.skipif(
    +    _is_ci,
    +    reason="serve startup hangs in CI — pending root-cause fix",
    +)
    +
     
     def test_cli_imports():
         """Test that we can import the CLI components."""
    -    # These imports should work without errors
         from lfx.__main__ import app, main
     
         assert main is not None
         assert app is not None
     
     
     def test_serve_command_help():
    -    """Test that serve command shows help."""
    +    """Test that serve command shows help without starting a server."""
         from lfx.__main__ import app
     
         runner = CliRunner()
    @@ -29,96 +39,87 @@ def test_serve_command_help():
         assert "Serve a flow as an API" in result.output
     
     
    -def test_serve_command_missing_api_key():
    -    """Test that serve command fails without API key."""
    -    from lfx.__main__ import app
    -
    -    # Create a temporary JSON flow file
    -    flow_data = {
    -        "data": {
    -            "nodes": [],
    -            "edges": [],
    -        }
    -    }
    -
    -    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
    -        json.dump(flow_data, f)
    -        temp_path = f.name
    -
    -    try:
    -        # Clear API key from environment
    -        with patch.dict(os.environ, {}, clear=True):
    -            runner = CliRunner()
    -            result = runner.invoke(app, ["serve", temp_path])
    -
    -            assert result.exit_code == 1
    -            # Check both output and exception since typer may output to different streams
    -            assert "LANGFLOW_API_KEY" in str(result.output or result.exception or "")
    -    finally:
    -        Path(temp_path).unlink()
    -
    -
    -def test_serve_command_with_flow_json():
    -    """Test serve command with inline JSON."""
    -    from lfx.__main__ import app
    -
    -    flow_json = '{"data": {"nodes": [], "edges": []}}'
    -
    -    with patch.dict(os.environ, {"LANGFLOW_API_KEY": "test-key"}), patch("uvicorn.run") as mock_uvicorn:
    -        runner = CliRunner()
    -        result = runner.invoke(app, ["serve", "--flow-json", flow_json])
    -
    -        # Should try to start the server
    -        assert mock_uvicorn.called or result.exit_code != 0
    -
    -
     def test_serve_command_invalid_json():
    -    """Test serve command with invalid JSON."""
    +    """Test serve command fails fast on unparseable JSON (before any server init)."""
         from lfx.__main__ import app
     
         invalid_json = '{"invalid": json}'
     
    -    with patch.dict(os.environ, {"LANGFLOW_API_KEY": "test-key"}):
    +    with patch.dict(os.environ, {"LANGFLOW_API_KEY": "test-key"}):  # pragma: allowlist secret
             runner = CliRunner()
             result = runner.invoke(app, ["serve", "--flow-json", invalid_json], catch_exceptions=False)
     
             assert result.exit_code == 1
     
     
     def test_serve_command_nonexistent_file():
    -    """Test serve command with non-existent file."""
    +    """Test serve command fails fast when the flow file does not exist."""
         from lfx.__main__ import app
     
    -    with patch.dict(os.environ, {"LANGFLOW_API_KEY": "test-key"}):
    +    with patch.dict(os.environ, {"LANGFLOW_API_KEY": "test-key"}):  # pragma: allowlist secret
             runner = CliRunner()
             result = runner.invoke(app, ["serve", "/path/to/nonexistent/file.json"], catch_exceptions=False)
     
             assert result.exit_code == 1
     
     
     def test_cli_utility_functions():
    -    """Test basic utility functions that don't have complex dependencies."""
    +    """Test port/host/flow-ID utilities — no server involved."""
         from lfx.cli.common import (
             flow_id_from_path,
             get_best_access_host,
             get_free_port,
             is_port_in_use,
         )
     
    -    # Test port functions
    -    assert not is_port_in_use(0)  # Port 0 is always available
    +    assert not is_port_in_use(0)
     
         port = get_free_port(8000)
         assert 8000 <= port < 65535
     
    -    # Test host resolution
         assert get_best_access_host("0.0.0.0") == "localhost"
         assert get_best_access_host("") == "localhost"
         assert get_best_access_host("127.0.0.1") == "127.0.0.1"
     
    -    # Test flow ID generation
         root = Path("/tmp/flows")
         path = root / "test.json"
         flow_id = flow_id_from_path(path, root)
         assert isinstance(flow_id, str)
    -    assert len(flow_id) == 36  # UUID length
    +    assert len(flow_id) == 36  # UUID
    +
    +
    +@_skip_in_ci
    +def test_serve_command_missing_api_key():
    +    """Serve command must exit 1 and mention LANGFLOW_API_KEY when no key is set."""
    +    from lfx.__main__ import app
    +
    +    flow_data = {"data": {"nodes": [], "edges": []}}
    +
    +    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
    +        json.dump(flow_data, f)
    +        temp_path = f.name
    +
    +    try:
    +        with patch.dict(os.environ, {}, clear=True):
    +            runner = CliRunner()
    +            result = runner.invoke(app, ["serve", temp_path])
    +
    +            assert result.exit_code == 1
    +            assert "LANGFLOW_API_KEY" in str(result.output or result.exception or "")
    +    finally:
    +        Path(temp_path).unlink()
    +
    +
    +@_skip_in_ci
    +def test_serve_command_with_flow_json():
    +    """Serve command with a valid payload should attempt to call uvicorn.run."""
    +    from lfx.__main__ import app
    +
    +    flow_json = '{"data": {"nodes": [], "edges": []}}'
    +
    +    env = {"LANGFLOW_API_KEY": "test-key"}  # pragma: allowlist secret
    +    with patch.dict(os.environ, env), patch("uvicorn.run") as mock_uvicorn:
    +        runner = CliRunner()
    +        result = runner.invoke(app, ["serve", "--flow-json", flow_json])
    +
    +        assert mock_uvicorn.called or result.exit_code != 0
    
  • src/lfx/tests/unit/cli/test_status_command.py+954 0 added
    @@ -0,0 +1,954 @@
    +"""Unit tests for lfx status -- status_command and helpers.
    +
    +All tests run entirely in-process; no real Langflow instance or SDK required.
    +The SDK is replaced via patch so only the status logic (file collection, hash
    +comparison, table rendering, exit-code rules) is exercised.
    +"""
    +# pragma: allowlist secret -- all credentials in this file are fake test data
    +
    +from __future__ import annotations
    +
    +import io
    +import json
    +from typing import TYPE_CHECKING
    +from unittest.mock import MagicMock, patch
    +from uuid import UUID
    +
    +import pytest
    +import typer
    +
    +if TYPE_CHECKING:
    +    from pathlib import Path
    +
    +# ---------------------------------------------------------------------------
    +# Shared constants
    +# ---------------------------------------------------------------------------
    +
    +_BASE_URL = "http://langflow.test"
    +_API_KEY = "test-key"  # pragma: allowlist secret
    +_FLOW_ID = UUID("aaaaaaaa-0000-0000-0000-000000000001")
    +_FLOW_ID_2 = UUID("aaaaaaaa-0000-0000-0000-000000000002")
    +
    +_FLOW_DICT: dict = {
    +    "id": str(_FLOW_ID),
    +    "name": "My Test Flow",
    +    "data": {"nodes": [], "edges": []},
    +}
    +
    +_FLOW_DICT_2: dict = {
    +    "id": str(_FLOW_ID_2),
    +    "name": "Second Flow",
    +    "data": {"nodes": [{"id": "n1"}], "edges": []},
    +}
    +
    +
    +# ---------------------------------------------------------------------------
    +# Fake exception class — avoids importing langflow_sdk in isolation mode
    +# ---------------------------------------------------------------------------
    +
    +
    +class _FakeLangflowNotFoundError(Exception):
    +    """Stand-in for langflow_sdk.exceptions.LangflowNotFoundError in unit tests."""
    +
    +
    +# ---------------------------------------------------------------------------
    +# Test helpers
    +# ---------------------------------------------------------------------------
    +
    +
    +def _write_flow(tmp_path: Path, name: str, flow: dict | None = None) -> Path:
    +    p = tmp_path / name
    +    p.parent.mkdir(parents=True, exist_ok=True)
    +    p.write_text(json.dumps(flow if flow is not None else _FLOW_DICT), encoding="utf-8")
    +    return p
    +
    +
    +def _fake_env_config(url: str = _BASE_URL, api_key: str = _API_KEY, name: str = "test-env") -> MagicMock:
    +    """Return a MagicMock that looks like an lfx EnvConfig."""
    +    cfg = MagicMock()
    +    cfg.url = url
    +    cfg.api_key = api_key
    +    cfg.name = name
    +    return cfg
    +
    +
    +def _fake_remote_flow(flow_id: UUID = _FLOW_ID, flow_dict: dict | None = None) -> MagicMock:
    +    """Return a MagicMock that looks like a langflow_sdk Flow model."""
    +    remote = MagicMock()
    +    remote.id = flow_id
    +    remote.name = (flow_dict or _FLOW_DICT).get("name", "Remote Flow")
    +    remote.updated_at = None
    +    # model_dump returns a copy of the dict so hash comparison works correctly
    +    remote.model_dump.return_value = dict(flow_dict or _FLOW_DICT)
    +    return remote
    +
    +
    +def _identity_normalize(flow: dict) -> dict:
    +    """Identity normalize_flow function for tests."""
    +    return flow
    +
    +
    +def _json_flow_to_json(flow: dict) -> str:
    +    """Deterministic flow_to_json function for tests."""
    +    return json.dumps(flow, sort_keys=True)
    +
    +
    +def _make_client_mock(
    +    *,
    +    remote_flow: MagicMock | None = None,
    +    get_flow_side_effect: BaseException | None = None,
    +    list_flows_result: list | None = None,
    +) -> MagicMock:
    +    """Return a mock SDK client."""
    +    client = MagicMock()
    +    if get_flow_side_effect is not None:
    +        client.get_flow.side_effect = get_flow_side_effect
    +    else:
    +        client.get_flow.return_value = remote_flow if remote_flow is not None else _fake_remote_flow()
    +    client.list_flows.return_value = list_flows_result if list_flows_result is not None else []
    +    return client
    +
    +
    +def _make_sdk_triple(
    +    client_mock: MagicMock | None = None,
    +) -> tuple[object, object, MagicMock, type[Exception]]:
    +    """Return the mocked _load_sdk() payload for status_command tests."""
    +    if client_mock is None:
    +        client_mock = _make_client_mock()
    +    client_cls = MagicMock(return_value=client_mock)
    +    return _identity_normalize, _json_flow_to_json, client_cls, _FakeLangflowNotFoundError
    +
    +
    +class _CloseAwareClient:
    +    """Minimal client that raises if used after close()."""
    +
    +    def __init__(self, remote_flow: MagicMock, list_flows_result: list | None = None) -> None:
    +        self._remote_flow = remote_flow
    +        self._list_flows_result = list_flows_result if list_flows_result is not None else []
    +        self.closed = False
    +        self.get_flow_calls: list[UUID] = []
    +        self.list_flows_calls: list[dict] = []
    +
    +    def get_flow(self, flow_id: UUID) -> MagicMock:
    +        if self.closed:
    +            msg = "client already closed"
    +            raise RuntimeError(msg)
    +        self.get_flow_calls.append(flow_id)
    +        return self._remote_flow
    +
    +    def list_flows(self, **kwargs) -> list:
    +        if self.closed:
    +            msg = "client already closed"
    +            raise RuntimeError(msg)
    +        self.list_flows_calls.append(kwargs)
    +        return self._list_flows_result
    +
    +    def close(self) -> None:
    +        self.closed = True
    +
    +
    +def _run_status(
    +    flow_paths: list[str],
    +    *,
    +    dir_path: str | None = None,
    +    env: str | None = None,
    +    environments_file: str | None = None,
    +    show_remote_only: bool = False,
    +    sdk_triple: tuple | None = None,
    +    env_cfg: MagicMock | None = None,
    +) -> None:
    +    """Invoke status_command with fully mocked SDK and config resolution.
    +
    +    The status command now expects _load_sdk() to provide the not-found exception
    +    alongside the serialization helpers and client class.
    +    """
    +    from lfx.cli.status import status_command
    +
    +    triple = sdk_triple if sdk_triple is not None else _make_sdk_triple()
    +    cfg = env_cfg if env_cfg is not None else _fake_env_config()
    +
    +    with (
    +        patch("lfx.cli.status._load_sdk", return_value=triple),
    +        patch("lfx.config.resolve_environment", return_value=cfg),
    +    ):
    +        status_command(
    +            dir_path=dir_path,
    +            flow_paths=flow_paths,
    +            env=env,
    +            environments_file=environments_file,
    +            target=_BASE_URL,
    +            api_key=_API_KEY,
    +            show_remote_only=show_remote_only,
    +        )
    +
    +
    +# ---------------------------------------------------------------------------
    +# FlowStatus dataclass
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFlowStatus:
    +    def test_required_fields(self):
    +        from lfx.cli.status import FlowStatus
    +
    +        s = FlowStatus(name="MyFlow", status="synced")
    +        assert s.name == "MyFlow"
    +        assert s.status == "synced"
    +
    +    def test_path_defaults_to_none(self):
    +        from lfx.cli.status import FlowStatus
    +
    +        s = FlowStatus(name="F", status="new")
    +        assert s.path is None
    +
    +    def test_flow_id_defaults_to_none(self):
    +        from lfx.cli.status import FlowStatus
    +
    +        s = FlowStatus(name="F", status="new")
    +        assert s.flow_id is None
    +
    +    def test_detail_defaults_to_empty_string(self):
    +        from lfx.cli.status import FlowStatus
    +
    +        s = FlowStatus(name="F", status="error")
    +        assert s.detail == ""
    +
    +    def test_all_fields_assignable(self, tmp_path):
    +        from pathlib import Path
    +
    +        from lfx.cli.status import FlowStatus
    +
    +        p = Path(tmp_path / "flow.json")
    +        s = FlowStatus(name="X", status="ahead", path=p, flow_id=_FLOW_ID, detail="local change")
    +        assert s.path == p
    +        assert s.flow_id == _FLOW_ID
    +        assert s.detail == "local change"
    +
    +    def test_status_constants_exist(self):
    +        import lfx.cli.status as m
    +
    +        assert m._STATUS_SYNCED == "synced"
    +        assert m._STATUS_AHEAD == "ahead"
    +        assert m._STATUS_NEW == "new"
    +        assert m._STATUS_REMOTE_ONLY == "remote-only"
    +        assert m._STATUS_NO_ID == "no-id"
    +        assert m._STATUS_ERROR == "error"
    +
    +
    +# ---------------------------------------------------------------------------
    +# _flow_hash
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFlowHash:
    +    def test_returns_12_char_string(self):
    +        from lfx.cli.status import _flow_hash
    +
    +        result = _flow_hash(_FLOW_DICT, _identity_normalize, _json_flow_to_json)
    +        assert isinstance(result, str)
    +        assert len(result) == 12
    +
    +    def test_deterministic_for_same_input(self):
    +        from lfx.cli.status import _flow_hash
    +
    +        h1 = _flow_hash(_FLOW_DICT, _identity_normalize, _json_flow_to_json)
    +        h2 = _flow_hash(_FLOW_DICT, _identity_normalize, _json_flow_to_json)
    +        assert h1 == h2
    +
    +    def test_different_for_different_input(self):
    +        from lfx.cli.status import _flow_hash
    +
    +        h1 = _flow_hash(_FLOW_DICT, _identity_normalize, _json_flow_to_json)
    +        h2 = _flow_hash(_FLOW_DICT_2, _identity_normalize, _json_flow_to_json)
    +        assert h1 != h2
    +
    +    def test_normalize_fn_is_called(self):
    +        from lfx.cli.status import _flow_hash
    +
    +        normalize_mock = MagicMock(return_value=_FLOW_DICT)
    +        _flow_hash(_FLOW_DICT, normalize_mock, _json_flow_to_json)
    +        normalize_mock.assert_called_once_with(_FLOW_DICT)
    +
    +    def test_flow_to_json_fn_is_called(self):
    +        from lfx.cli.status import _flow_hash
    +
    +        to_json_mock = MagicMock(return_value=json.dumps(_FLOW_DICT))
    +        _flow_hash(_FLOW_DICT, _identity_normalize, to_json_mock)
    +        to_json_mock.assert_called_once_with(_FLOW_DICT)
    +
    +    def test_hash_is_hexadecimal(self):
    +        from lfx.cli.status import _flow_hash
    +
    +        result = _flow_hash(_FLOW_DICT, _identity_normalize, _json_flow_to_json)
    +        int(result, 16)  # raises ValueError if not hex
    +
    +
    +# ---------------------------------------------------------------------------
    +# _collect_files
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestCollectFiles:
    +    def test_dir_path_returns_json_files(self, tmp_path):
    +        from lfx.cli.status import _collect_files
    +
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        (d / "a.json").write_text("{}", encoding="utf-8")
    +        (d / "b.json").write_text("{}", encoding="utf-8")
    +        (d / "notes.txt").write_text("ignore", encoding="utf-8")
    +        result = _collect_files(str(d), [])
    +        assert len(result) == 2
    +        assert all(p.suffix == ".json" for p in result)
    +
    +    def test_dir_path_returns_sorted_files(self, tmp_path):
    +        from lfx.cli.status import _collect_files
    +
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        (d / "z.json").write_text("{}", encoding="utf-8")
    +        (d / "a.json").write_text("{}", encoding="utf-8")
    +        result = _collect_files(str(d), [])
    +        assert result[0].name == "a.json"
    +        assert result[1].name == "z.json"
    +
    +    def test_flow_paths_returned_as_paths(self, tmp_path):
    +        from pathlib import Path
    +
    +        from lfx.cli.status import _collect_files
    +
    +        p1 = _write_flow(tmp_path, "x.json")
    +        p2 = _write_flow(tmp_path, "y.json", _FLOW_DICT_2)
    +        result = _collect_files(None, [str(p1), str(p2)])
    +        assert Path(str(p1)) in result
    +        assert Path(str(p2)) in result
    +
    +    def test_empty_args_uses_flows_cwd_when_exists(self, tmp_path, monkeypatch):
    +        from lfx.cli.status import _collect_files
    +
    +        flows_dir = tmp_path / "flows"
    +        flows_dir.mkdir()
    +        (flows_dir / "test.json").write_text("{}", encoding="utf-8")
    +        monkeypatch.chdir(tmp_path)
    +        result = _collect_files(None, [])
    +        assert len(result) == 1
    +        assert result[0].name == "test.json"
    +
    +    def test_empty_args_no_flows_dir_returns_empty(self, tmp_path, monkeypatch):
    +        from lfx.cli.status import _collect_files
    +
    +        monkeypatch.chdir(tmp_path)
    +        result = _collect_files(None, [])
    +        assert result == []
    +
    +    def test_nonexistent_dir_exits_1(self, tmp_path):
    +        from lfx.cli.status import _collect_files
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _collect_files(str(tmp_path / "does_not_exist"), [])
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_dir_path_takes_priority_over_flow_paths(self, tmp_path):
    +        from lfx.cli.status import _collect_files
    +
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        (d / "dir_flow.json").write_text("{}", encoding="utf-8")
    +        extra = _write_flow(tmp_path, "extra.json")
    +        # When dir_path is provided, flow_paths is ignored per code path (elif)
    +        result = _collect_files(str(d), [str(extra)])
    +        names = [p.name for p in result]
    +        assert "dir_flow.json" in names
    +
    +    def test_empty_dir_returns_empty_list(self, tmp_path):
    +        from lfx.cli.status import _collect_files
    +
    +        d = tmp_path / "empty"
    +        d.mkdir()
    +        result = _collect_files(str(d), [])
    +        assert result == []
    +
    +
    +# ---------------------------------------------------------------------------
    +# _render_table
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestRenderTable:
    +    @staticmethod
    +    def _capture_render(statuses, env_label):
    +        """Render the table to a string buffer and return the plain-text output."""
    +        from lfx.cli.status import _render_table
    +        from rich.console import Console
    +
    +        buf = io.StringIO()
    +        fake_console = Console(file=buf, width=200, no_color=True)
    +        with patch("lfx.cli.status.console", fake_console):
    +            _render_table(statuses, env_label)
    +        return buf.getvalue()
    +
    +    def test_empty_list_does_not_crash(self):
    +        from lfx.cli.status import _render_table
    +
    +        _render_table([], "test-env")
    +
    +    def test_synced_status_contains_flow_name_and_label(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="MyFlow", status="synced", path=tmp_path / "f.json", flow_id=_FLOW_ID)]
    +        output = self._capture_render(statuses, "test-env")
    +        assert "MyFlow" in output
    +        assert "synced" in output.lower()
    +
    +    def test_ahead_status_contains_label(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="AheadFlow", status="ahead", path=tmp_path / "f.json", flow_id=_FLOW_ID)]
    +        output = self._capture_render(statuses, "test-env")
    +        assert "AheadFlow" in output
    +        assert "ahead" in output.lower()
    +
    +    def test_new_status_contains_label(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="NewFlow", status="new", path=tmp_path / "f.json", flow_id=_FLOW_ID)]
    +        output = self._capture_render(statuses, "test-env")
    +        assert "NewFlow" in output
    +        assert "new" in output.lower()
    +
    +    def test_remote_only_status_contains_label(self):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="RemoteFlow", status="remote-only", flow_id=_FLOW_ID)]
    +        output = self._capture_render(statuses, "test-env")
    +        assert "RemoteFlow" in output
    +        assert "remote only" in output.lower()
    +
    +    def test_no_id_status_contains_detail(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [
    +            FlowStatus(
    +                name="OrphanFlow",
    +                status="no-id",
    +                path=tmp_path / "f.json",
    +                detail="run lfx export first",
    +            )
    +        ]
    +        output = self._capture_render(statuses, "test-env")
    +        assert "OrphanFlow" in output
    +        assert "no id" in output.lower()
    +        assert "run lfx export first" in output
    +
    +    def test_error_status_contains_detail(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="BadFlow", status="error", path=tmp_path / "f.json", detail="parse error")]
    +        output = self._capture_render(statuses, "test-env")
    +        assert "BadFlow" in output
    +        assert "error" in output.lower()
    +        assert "parse error" in output
    +
    +    def test_all_statuses_together_contains_all_names(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [
    +            FlowStatus(name="Synced", status="synced", path=tmp_path / "s.json", flow_id=_FLOW_ID),
    +            FlowStatus(name="Ahead", status="ahead", path=tmp_path / "a.json", flow_id=_FLOW_ID_2),
    +            FlowStatus(name="New", status="new", path=tmp_path / "n.json"),
    +            FlowStatus(name="Remote", status="remote-only", flow_id=_FLOW_ID),
    +            FlowStatus(name="NoId", status="no-id", path=tmp_path / "noid.json"),
    +            FlowStatus(name="Err", status="error", path=tmp_path / "e.json", detail="oops"),
    +        ]
    +        output = self._capture_render(statuses, "production")
    +        for s in statuses:
    +            assert s.name in output
    +
    +    def test_env_label_appears_in_title(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="F", status="synced", path=tmp_path / "f.json", flow_id=_FLOW_ID)]
    +        output = self._capture_render(statuses, "my-staging")
    +        assert "my-staging" in output
    +
    +    def test_status_with_no_path_shows_dash(self):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="Ghost", status="remote-only", flow_id=_FLOW_ID)]
    +        output = self._capture_render(statuses, "env")
    +        assert "Ghost" in output
    +
    +    def test_status_with_no_flow_id_shows_dash(self, tmp_path):
    +        from lfx.cli.status import FlowStatus
    +
    +        statuses = [FlowStatus(name="NoIdFlow", status="no-id", path=tmp_path / "f.json")]
    +        output = self._capture_render(statuses, "env")
    +        assert "NoIdFlow" in output
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — synced (exits 0)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandSynced:
    +    def test_synced_flow_exits_0(self, tmp_path):
    +        """When local and remote hashes match, exits 0 (no Exit raised by typer)."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +        # Should complete without raising Exit(1)
    +        _run_status([str(p)], sdk_triple=triple)
    +
    +    def test_synced_multiple_flows_exits_0(self, tmp_path):
    +        """All synced flows → no Exit raised (exit 0)."""
    +        p1 = _write_flow(tmp_path, "a.json", _FLOW_DICT)
    +        p2 = _write_flow(tmp_path, "b.json", _FLOW_DICT_2)
    +
    +        remote1 = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        remote2 = _fake_remote_flow(_FLOW_ID_2, _FLOW_DICT_2)
    +        client = _make_client_mock()
    +        client.get_flow.side_effect = [remote1, remote2]
    +        triple = _make_sdk_triple(client)
    +        _run_status([str(p1), str(p2)], sdk_triple=triple)
    +
    +    def test_synced_calls_get_flow_with_correct_id(self, tmp_path):
    +        """get_flow is called with the UUID from the flow file."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +        _run_status([str(p)], sdk_triple=triple)
    +        client.get_flow.assert_called_once_with(_FLOW_ID)
    +
    +    def test_client_constructed_with_env_url_and_key(self, tmp_path):
    +        """SDK Client is created with the resolved env URL and API key."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        normalize_fn, to_json_fn, client_cls, not_found_error = _make_sdk_triple(client)
    +        cfg = _fake_env_config(url="http://custom.test", api_key="my-key")  # pragma: allowlist secret
    +
    +        with (
    +            patch("lfx.cli.status._load_sdk", return_value=(normalize_fn, to_json_fn, client_cls, not_found_error)),
    +            patch("lfx.config.resolve_environment", return_value=cfg),
    +        ):
    +            from lfx.cli.status import status_command
    +
    +            status_command(
    +                dir_path=None,
    +                flow_paths=[str(p)],
    +                env=None,
    +                environments_file=None,
    +                target="http://custom.test",
    +                api_key="my-key",  # pragma: allowlist secret
    +                show_remote_only=False,
    +            )
    +        client_cls.assert_called_once_with(base_url="http://custom.test", api_key="my-key")  # pragma: allowlist secret
    +
    +    def test_client_is_closed_after_requests_finish(self, tmp_path):
    +        """The client stays usable during status checks and is closed at the end."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _CloseAwareClient(remote)
    +        triple = _identity_normalize, _json_flow_to_json, MagicMock(return_value=client), _FakeLangflowNotFoundError
    +
    +        _run_status([str(p)], sdk_triple=triple)
    +
    +        assert client.get_flow_calls == [_FLOW_ID]
    +        assert client.closed is True
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — ahead (exits 1)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandAhead:
    +    def test_ahead_flow_exits_1(self, tmp_path):
    +        """Local hash differs from remote → exits 1."""
    +        p = _write_flow(tmp_path, "flow.json", _FLOW_DICT)
    +        # Remote has different data so hashes won't match
    +        remote_dict = dict(_FLOW_DICT)
    +        remote_dict["data"] = {"nodes": [{"id": "different"}], "edges": []}
    +        remote = _fake_remote_flow(_FLOW_ID, remote_dict)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_ahead_status_in_mixed_results_exits_1(self, tmp_path):
    +        """Even one 'ahead' flow among synced ones → exits 1."""
    +        p_synced = _write_flow(tmp_path, "synced.json", _FLOW_DICT)
    +        p_ahead = _write_flow(tmp_path, "ahead.json", _FLOW_DICT_2)
    +
    +        remote_synced = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        remote_ahead_dict = dict(_FLOW_DICT_2)
    +        remote_ahead_dict["data"] = {"nodes": [{"id": "old"}], "edges": []}
    +        remote_ahead = _fake_remote_flow(_FLOW_ID_2, remote_ahead_dict)
    +
    +        client = _make_client_mock()
    +        client.get_flow.side_effect = [remote_synced, remote_ahead]
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p_synced), str(p_ahead)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — new (exits 1)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandNew:
    +    def test_not_found_on_remote_gives_new_status(self, tmp_path):
    +        """Remote raises LangflowNotFoundError → status 'new' → exits 1."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(get_flow_side_effect=_FakeLangflowNotFoundError("not found"))
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_not_found_does_not_call_model_dump(self, tmp_path):
    +        """When flow is new (not found), no model_dump call is made."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(get_flow_side_effect=_FakeLangflowNotFoundError("not found"))
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit):
    +            _run_status([str(p)], sdk_triple=triple)
    +        remote.model_dump.assert_not_called()
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — no-id (exits 1)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandNoId:
    +    def test_flow_without_id_gives_no_id_status(self, tmp_path):
    +        """Flow file with no 'id' field → status 'no-id' → exits 1."""
    +        p = tmp_path / "no_id.json"
    +        p.write_text(json.dumps({"name": "Orphan", "data": {}}), encoding="utf-8")
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_flow_with_null_id_gives_no_id_status(self, tmp_path):
    +        """Flow file with id=null → status 'no-id' → exits 1."""
    +        p = tmp_path / "null_id.json"
    +        p.write_text(json.dumps({"id": None, "name": "NullId"}), encoding="utf-8")
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_no_id_flow_does_not_call_get_flow(self, tmp_path):
    +        """get_flow is never called when a flow has no id."""
    +        p = tmp_path / "no_id.json"
    +        p.write_text(json.dumps({"name": "Orphan"}), encoding="utf-8")
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit):
    +            _run_status([str(p)], sdk_triple=triple)
    +        client.get_flow.assert_not_called()
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — error cases (exits 1)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandErrors:
    +    def test_invalid_json_gives_error_status(self, tmp_path):
    +        """Malformed JSON → status 'error' → exits 1."""
    +        p = tmp_path / "bad.json"
    +        p.write_text("this is not json{{", encoding="utf-8")
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_invalid_uuid_in_id_gives_error_status(self, tmp_path):
    +        """Flow with a non-UUID 'id' value → status 'error' → exits 1."""
    +        p = tmp_path / "bad_uuid.json"
    +        p.write_text(json.dumps({"id": "not-a-valid-uuid", "name": "BadId"}), encoding="utf-8")
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_generic_client_get_flow_exception_gives_error_status(self, tmp_path):
    +        """Unexpected exception from client.get_flow → status 'error' → exits 1."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        client = _make_client_mock(get_flow_side_effect=RuntimeError("network timeout"))
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_error_on_one_flow_still_processes_others(self, tmp_path):
    +        """An error on flow 1 doesn't abort processing of flow 2."""
    +        p_bad = tmp_path / "bad.json"
    +        p_bad.write_text("{{invalid", encoding="utf-8")
    +        p_good = _write_flow(tmp_path, "good.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p_bad), str(p_good)], sdk_triple=triple)
    +        # Exits 1 due to the error flow; the good flow was also processed
    +        assert exc_info.value.exit_code == 1
    +        client.get_flow.assert_called_once()
    +
    +    def test_missing_file_in_flow_paths_gives_error_status(self, tmp_path):
    +        """A path that doesn't exist → error status → exits 1."""
    +        nonexistent = tmp_path / "ghost.json"
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(nonexistent)], sdk_triple=triple)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_invalid_json_does_not_call_get_flow(self, tmp_path):
    +        """get_flow is never called when JSON parsing fails."""
    +        p = tmp_path / "bad.json"
    +        p.write_text("not json", encoding="utf-8")
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit):
    +            _run_status([str(p)], sdk_triple=triple)
    +        client.get_flow.assert_not_called()
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — no local files
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandNoLocalFiles:
    +    def test_empty_dir_no_paths_exits_0_with_warning(self, tmp_path):
    +        """No flow files found and show_remote_only=False → exits 0 (via typer.Exit(0))."""
    +        d = tmp_path / "empty"
    +        d.mkdir()
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([], dir_path=str(d), sdk_triple=triple, show_remote_only=False)
    +        assert exc_info.value.exit_code == 0
    +
    +    def test_empty_dir_no_paths_does_not_call_get_flow(self, tmp_path):
    +        """When no files found, get_flow is never invoked."""
    +        d = tmp_path / "empty"
    +        d.mkdir()
    +        client = _make_client_mock()
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit):
    +            _run_status([], dir_path=str(d), sdk_triple=triple, show_remote_only=False)
    +        client.get_flow.assert_not_called()
    +
    +    def test_no_files_with_show_remote_only_continues(self, tmp_path):
    +        """show_remote_only=True with no local files → proceeds to list_flows."""
    +        d = tmp_path / "empty"
    +        d.mkdir()
    +        remote = MagicMock()
    +        remote.id = _FLOW_ID
    +        remote.name = "RemoteFlow"
    +        client = _make_client_mock(list_flows_result=[remote])
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([], dir_path=str(d), sdk_triple=triple, show_remote_only=True)
    +        # remote-only entries are not synced → exits 1
    +        assert exc_info.value.exit_code == 1
    +        client.list_flows.assert_called_once()
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — remote-only
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandRemoteOnly:
    +    def test_show_remote_only_calls_list_flows(self, tmp_path):
    +        """show_remote_only=True → client.list_flows(get_all=True) is called."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote_local = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote_local, list_flows_result=[])
    +        triple = _make_sdk_triple(client)
    +
    +        _run_status([str(p)], sdk_triple=triple, show_remote_only=True)
    +        client.list_flows.assert_called_once_with(get_all=True)
    +
    +    def test_without_show_remote_only_does_not_call_list_flows(self, tmp_path):
    +        """show_remote_only=False → list_flows is never called."""
    +        p = _write_flow(tmp_path, "flow.json")
    +        remote = _fake_remote_flow(flow_dict=_FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +
    +        _run_status([str(p)], sdk_triple=triple, show_remote_only=False)
    +        client.list_flows.assert_not_called()
    +
    +    def test_remote_only_flows_excluded_if_seen_locally(self, tmp_path):
    +        """Flows already tracked locally are not added as remote-only entries."""
    +        p = _write_flow(tmp_path, "flow.json", _FLOW_DICT)
    +        remote_local = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        # list_flows returns the same flow that's tracked locally
    +        list_entry = MagicMock()
    +        list_entry.id = _FLOW_ID
    +        list_entry.name = "My Test Flow"
    +        client = _make_client_mock(remote_flow=remote_local, list_flows_result=[list_entry])
    +        triple = _make_sdk_triple(client)
    +
    +        # All synced, no remote-only → exits 0 (no Exit raised)
    +        _run_status([str(p)], sdk_triple=triple, show_remote_only=True)
    +
    +    def test_remote_only_untracked_flow_appended(self, tmp_path):
    +        """Flows on remote that have no local file appear as remote-only → exits 1."""
    +        p = _write_flow(tmp_path, "flow.json", _FLOW_DICT)
    +        remote_local = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +
    +        # A second remote flow that has no local counterpart
    +        remote_extra = MagicMock()
    +        remote_extra.id = _FLOW_ID_2
    +        remote_extra.name = "Remote Only Flow"
    +
    +        local_entry = MagicMock()
    +        local_entry.id = _FLOW_ID
    +        local_entry.name = "My Test Flow"
    +
    +        client = _make_client_mock(remote_flow=remote_local, list_flows_result=[local_entry, remote_extra])
    +        triple = _make_sdk_triple(client)
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            _run_status([str(p)], sdk_triple=triple, show_remote_only=True)
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_list_flows_exception_prints_warning_and_continues(self, tmp_path):
    +        """If list_flows raises, a warning is printed but the command continues."""
    +        p = _write_flow(tmp_path, "flow.json", _FLOW_DICT)
    +        remote_local = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote_local)
    +        client.list_flows.side_effect = RuntimeError("list_flows failed")
    +        triple = _make_sdk_triple(client)
    +
    +        # Should not raise an unexpected exception — synced flow still exits 0
    +        _run_status([str(p)], sdk_triple=triple, show_remote_only=True)
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — config error
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandConfigError:
    +    def test_config_error_exits_1(self, tmp_path):
    +        """ConfigError from resolve_environment → exits 1."""
    +        from lfx.cli.status import status_command
    +        from lfx.config import ConfigError
    +
    +        p = _write_flow(tmp_path, "flow.json")
    +        triple = _make_sdk_triple()
    +
    +        with (
    +            patch("lfx.cli.status._load_sdk", return_value=triple),
    +            patch("lfx.config.resolve_environment", side_effect=ConfigError("bad config")),
    +            pytest.raises(typer.Exit) as exc_info,
    +        ):
    +            status_command(
    +                dir_path=None,
    +                flow_paths=[str(p)],
    +                env="nonexistent",
    +                environments_file=str(tmp_path / "missing.toml"),
    +                target=None,
    +                api_key=None,
    +                show_remote_only=False,
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_missing_environments_file_exits_1(self, tmp_path):
    +        """A non-existent --environments-file passed through config → exits 1."""
    +        from lfx.cli.status import status_command
    +
    +        p = _write_flow(tmp_path, "flow.json")
    +        triple = _make_sdk_triple()
    +
    +        with (
    +            patch("lfx.cli.status._load_sdk", return_value=triple),
    +            pytest.raises(typer.Exit) as exc_info,
    +        ):
    +            status_command(
    +                dir_path=None,
    +                flow_paths=[str(p)],
    +                env="ci",
    +                environments_file=str(tmp_path / "does_not_exist.toml"),
    +                target=None,
    +                api_key=None,
    +                show_remote_only=False,
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# status_command — all synced → exits 0
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStatusCommandAllSynced:
    +    def test_all_synced_no_exit_raised(self, tmp_path):
    +        """When every local flow matches remote, status_command returns normally (exit 0)."""
    +        p = _write_flow(tmp_path, "flow.json", _FLOW_DICT)
    +        remote = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +
    +        # Must NOT raise typer.Exit
    +        _run_status([str(p)], sdk_triple=triple)
    +
    +    def test_all_synced_does_not_raise_exit_1(self, tmp_path):
    +        """Confirm that Exit(1) is specifically not raised for all-synced scenario."""
    +        p = _write_flow(tmp_path, "flow.json", _FLOW_DICT)
    +        remote = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +
    +        try:
    +            _run_status([str(p)], sdk_triple=triple)
    +        except typer.Exit as exc:
    +            pytest.fail(f"Expected no Exit for all-synced but got Exit({exc.exit_code})")
    +
    +    def test_dir_with_all_synced_files_exits_0(self, tmp_path):
    +        """--dir with all synced flows → no Exit raised."""
    +        d = tmp_path / "flows"
    +        d.mkdir()
    +        (d / "flow1.json").write_text(json.dumps(_FLOW_DICT), encoding="utf-8")
    +
    +        remote = _fake_remote_flow(_FLOW_ID, _FLOW_DICT)
    +        client = _make_client_mock(remote_flow=remote)
    +        triple = _make_sdk_triple(client)
    +
    +        _run_status([], dir_path=str(d), sdk_triple=triple)
    
  • src/lfx/tests/unit/cli/test_validate_command.py +900 −0 added
    @@ -0,0 +1,900 @@
    +"""Unit tests for lfx validate — structural, extended checks, directory scanning, strict mode.
    +
    +All tests run entirely in-process (no running Langflow instance or component
    +registry required).  Level-2 component checks are skipped via skip_components=True
    +so the registry never needs to be loaded.
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +from pathlib import Path
    +from unittest.mock import patch
    +
    +import pytest
    +from lfx.cli.validate import (
    +    ValidationResult,
    +    _check_missing_credentials,
    +    _check_orphaned_nodes,
    +    _check_unused_nodes,
    +    _check_version_mismatch,
    +    _expand_paths,
    +    validate_command,
    +    validate_flow_file,
    +)
    +
    +# ---------------------------------------------------------------------------
    +# Fixtures / helpers
    +# ---------------------------------------------------------------------------
    +
    +_MINIMAL_VALID = {
    +    "id": "11111111-1111-1111-1111-111111111111",
    +    "name": "Test Flow",
    +    "data": {"nodes": [], "edges": []},
    +}
    +
    +_NODE = {
    +    "id": "node-a",
    +    "data": {
    +        "id": "node-a",
    +        "type": "ChatInput",
    +        "node": {
    +            "display_name": "Chat Input",
    +            "template": {},
    +        },
    +    },
    +    "position": {"x": 0, "y": 0},
    +}
    +
    +
    +def _write_flow(tmp_path: Path, name: str, flow: dict) -> Path:
    +    p = tmp_path / name
    +    p.write_text(json.dumps(flow), encoding="utf-8")
    +    return p
    +
    +
    +def _make_result(issues=None) -> ValidationResult:
    +    result = ValidationResult(path=Path("test.json"))
    +    if issues:
    +        result.issues = issues
    +    return result
    +
    +
    +# ---------------------------------------------------------------------------
    +# validate_flow_file — Level 1: structural
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStructural:
    +    def test_valid_minimal_flow(self, tmp_path):
    +        p = _write_flow(tmp_path, "flow.json", _MINIMAL_VALID)
    +        result = validate_flow_file(p, level=1)
    +        assert result.ok
    +        assert not result.errors
    +
    +    def test_invalid_json(self, tmp_path):
    +        p = tmp_path / "bad.json"
    +        p.write_text("{not valid json", encoding="utf-8")
    +        result = validate_flow_file(p, level=1)
    +        assert not result.ok
    +        assert any("Invalid JSON" in i.message for i in result.errors)
    +
    +    def test_missing_top_level_id(self, tmp_path):
    +        flow = {k: v for k, v in _MINIMAL_VALID.items() if k != "id"}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=1)
    +        assert not result.ok
    +        assert any("id" in i.message for i in result.errors)
    +
    +    def test_missing_top_level_name(self, tmp_path):
    +        flow = {k: v for k, v in _MINIMAL_VALID.items() if k != "name"}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=1)
    +        assert not result.ok
    +
    +    def test_missing_data_nodes(self, tmp_path):
    +        flow = {**_MINIMAL_VALID, "data": {"edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=1)
    +        assert not result.ok
    +        assert any("data.nodes" in i.message for i in result.errors)
    +
    +    def test_missing_data_edges(self, tmp_path):
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=1)
    +        assert not result.ok
    +
    +    def test_data_not_object(self, tmp_path):
    +        flow = {**_MINIMAL_VALID, "data": "not-an-object"}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=1)
    +        assert not result.ok
    +
    +    def test_file_not_found(self, tmp_path):
    +        result = validate_flow_file(tmp_path / "missing.json", level=1)
    +        assert not result.ok
    +        assert any("Cannot read" in i.message for i in result.errors)
    +
    +
    +# ---------------------------------------------------------------------------
    +# _check_orphaned_nodes
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestOrphanedNodes:
    +    def _result(self):
    +        return ValidationResult(path=Path("x.json"))
    +
    +    def test_single_node_not_orphaned(self):
    +        """Single-node flows are exempt from orphan detection."""
    +        flow = {
    +            "data": {
    +                "nodes": [_NODE],
    +                "edges": [],
    +            }
    +        }
    +        result = self._result()
    +        _check_orphaned_nodes(flow, result)
    +        assert not result.warnings
    +
    +    def test_empty_flow_not_orphaned(self):
    +        flow = {"data": {"nodes": [], "edges": []}}
    +        result = self._result()
    +        _check_orphaned_nodes(flow, result)
    +        assert not result.warnings
    +
    +    def test_connected_two_nodes_not_orphaned(self):
    +        node_b = {**_NODE, "id": "node-b", "data": {**_NODE["data"], "id": "node-b"}}
    +        flow = {
    +            "data": {
    +                "nodes": [_NODE, node_b],
    +                "edges": [{"source": "node-a", "target": "node-b"}],
    +            }
    +        }
    +        result = self._result()
    +        _check_orphaned_nodes(flow, result)
    +        assert not result.warnings
    +
    +    def test_disconnected_node_is_orphaned(self):
    +        node_b = {**_NODE, "id": "node-b", "data": {**_NODE["data"], "id": "node-b"}}
    +        node_c = {**_NODE, "id": "node-c", "data": {**_NODE["data"], "id": "node-c"}}
    +        flow = {
    +            "data": {
    +                # node-c has no edges
    +                "nodes": [_NODE, node_b, node_c],
    +                "edges": [{"source": "node-a", "target": "node-b"}],
    +            }
    +        }
    +        result = self._result()
    +        _check_orphaned_nodes(flow, result)
    +        orphan_warnings = [w for w in result.warnings if "Orphaned" in w.message]
    +        assert len(orphan_warnings) == 1
    +        assert orphan_warnings[0].node_id == "node-c"
    +
    +
    +# ---------------------------------------------------------------------------
    +# _check_unused_nodes
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestUnusedNodes:
    +    def _result(self):
    +        return ValidationResult(path=Path("x.json"))
    +
    +    def _output_node(self, node_id: str) -> dict:
    +        return {
    +            "id": node_id,
    +            "data": {
    +                "id": node_id,
    +                "type": "ChatOutput",
    +                "node": {"display_name": "Chat Output", "template": {}},
    +            },
    +        }
    +
    +    def _middle_node(self, node_id: str) -> dict:
    +        return {
    +            "id": node_id,
    +            "data": {
    +                "id": node_id,
    +                "type": "TextSplitter",
    +                "node": {"display_name": "Splitter", "template": {}},
    +            },
    +        }
    +
    +    def test_single_node_exempt(self):
    +        flow = {"data": {"nodes": [_NODE], "edges": []}}
    +        result = self._result()
    +        _check_unused_nodes(flow, result)
    +        assert not result.warnings
    +
    +    def test_no_output_nodes_skipped(self):
    +        """If there are no output nodes, unused check is skipped entirely."""
    +        node_b = self._middle_node("node-b")
    +        flow = {
    +            "data": {
    +                "nodes": [_NODE, node_b],
    +                "edges": [],
    +            }
    +        }
    +        result = self._result()
    +        _check_unused_nodes(flow, result)
    +        assert not result.warnings
    +
    +    def test_node_feeding_output_is_used(self):
    +        out = self._output_node("out")
    +        flow = {
    +            "data": {
    +                "nodes": [_NODE, out],
    +                "edges": [{"source": "node-a", "target": "out"}],
    +            }
    +        }
    +        result = self._result()
    +        _check_unused_nodes(flow, result)
    +        assert not result.warnings
    +
    +    def test_dangling_node_not_reaching_output_is_unused(self):
    +        out = self._output_node("out")
    +        mid = self._middle_node("mid")
    +        dangling = self._middle_node("dangling")
    +        flow = {
    +            "data": {
    +                "nodes": [_NODE, mid, out, dangling],
    +                "edges": [
    +                    {"source": "node-a", "target": "mid"},
    +                    {"source": "mid", "target": "out"},
    +                    # dangling has no path to out
    +                ],
    +            }
    +        }
    +        result = self._result()
    +        _check_unused_nodes(flow, result)
    +        unused = [w for w in result.warnings if "Unused" in w.message]
    +        assert len(unused) == 1
    +        assert unused[0].node_id == "dangling"
    +
    +
    +# ---------------------------------------------------------------------------
    +# validate_flow_file — levels 3 & 4 (no component registry needed)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestEdgeTypeCheck:
    +    def test_edge_to_missing_node_is_error(self, tmp_path):
    +        flow = {
    +            **_MINIMAL_VALID,
    +            "data": {
    +                "nodes": [_NODE],
    +                "edges": [{"source": "node-a", "target": "DOES-NOT-EXIST"}],
    +            },
    +        }
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=3, skip_components=True)
    +        assert any("non-existent" in i.message for i in result.errors)
    +
    +    def test_compatible_types_no_warning(self, tmp_path):
    +        node_b = {**_NODE, "id": "node-b", "data": {**_NODE["data"], "id": "node-b"}}
    +        edge = {
    +            "source": "node-a",
    +            "target": "node-b",
    +            "data": {
    +                "sourceHandle": {"output_types": ["Message"]},
    +                "targetHandle": {"type": "Message", "fieldName": "input_value"},
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE, node_b], "edges": [edge]}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=3, skip_components=True)
    +        type_warnings = [i for i in result.warnings if "type mismatch" in i.message.lower()]
    +        assert not type_warnings
    +
    +    def test_incompatible_types_gives_warning(self, tmp_path):
    +        node_b = {**_NODE, "id": "node-b", "data": {**_NODE["data"], "id": "node-b"}}
    +        edge = {
    +            "source": "node-a",
    +            "target": "node-b",
    +            "data": {
    +                "sourceHandle": {"output_types": ["Message"]},
    +                "targetHandle": {"type": "DataFrame", "fieldName": "input_value"},
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE, node_b], "edges": [edge]}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=3, skip_components=True)
    +        type_warnings = [i for i in result.warnings if "type mismatch" in i.message.lower()]
    +        assert type_warnings
    +
    +
    +class TestRequiredInputs:
    +    def _node_with_required_field(self, has_value: bool) -> dict:  # noqa: FBT001
    +        return {
    +            "id": "node-req",
    +            "data": {
    +                "id": "node-req",
    +                "type": "SomeComponent",
    +                "node": {
    +                    "display_name": "Some",
    +                    "template": {
    +                        "my_field": {
    +                            "required": True,
    +                            "show": True,
    +                            "value": "filled" if has_value else None,
    +                        }
    +                    },
    +                },
    +            },
    +        }
    +
    +    def test_required_field_with_value_passes(self, tmp_path):
    +        node = self._node_with_required_field(has_value=True)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=4, skip_components=True, skip_edge_types=True)
    +        req_errors = [e for e in result.errors if "Required input" in e.message]
    +        assert not req_errors
    +
    +    def test_required_field_without_value_or_edge_fails(self, tmp_path):
    +        node = self._node_with_required_field(has_value=False)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=4, skip_components=True, skip_edge_types=True)
    +        req_errors = [e for e in result.errors if "Required input" in e.message]
    +        assert req_errors
    +
    +    def test_required_field_with_incoming_edge_passes(self, tmp_path):
    +        node = self._node_with_required_field(has_value=False)
    +        edge = {
    +            "source": "other",
    +            "target": "node-req",
    +            "data": {"targetHandle": {"fieldName": "my_field"}},
    +        }
    +        src = {**_NODE, "id": "other"}
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node, src], "edges": [edge]}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=4, skip_components=True, skip_edge_types=True)
    +        req_errors = [e for e in result.errors if "Required input" in e.message]
    +        assert not req_errors
    +
    +
    +# ---------------------------------------------------------------------------
    +# Directory expansion (_expand_paths)
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestExpandPaths:
    +    def test_single_file(self, tmp_path):
    +        p = _write_flow(tmp_path, "a.json", _MINIMAL_VALID)
    +        result = _expand_paths([str(p)])
    +        assert result == [p]
    +
    +    def test_directory_finds_json_files(self, tmp_path):
    +        _write_flow(tmp_path, "a.json", _MINIMAL_VALID)
    +        _write_flow(tmp_path, "b.json", _MINIMAL_VALID)
    +        (tmp_path / "readme.txt").write_text("ignore me")
    +        result = _expand_paths([str(tmp_path)])
    +        assert len(result) == 2
    +        assert all(p.suffix == ".json" for p in result)
    +
    +    def test_directory_recurses_into_subdirs(self, tmp_path):
    +        sub = tmp_path / "sub"
    +        sub.mkdir()
    +        _write_flow(tmp_path, "root.json", _MINIMAL_VALID)
    +        _write_flow(sub, "nested.json", _MINIMAL_VALID)
    +        result = _expand_paths([str(tmp_path)])
    +        assert len(result) == 2
    +
    +    def test_empty_directory_returns_no_paths(self, tmp_path):
    +        # Should not raise; validate_command handles empty list separately
    +        result = _expand_paths([str(tmp_path)])
    +        assert result == []
    +
    +    def test_missing_path_raises_exit(self, tmp_path):
    +        import typer
    +
    +        with pytest.raises(typer.Exit):
    +            _expand_paths([str(tmp_path / "nonexistent.json")])
    +
    +
    +# ---------------------------------------------------------------------------
    +# --strict mode
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestStrictMode:
    +    def test_strict_warning_becomes_failure(self, tmp_path):
    +        """A flow with only warnings passes normally but fails under --strict."""
    +        # Two-node flow with one orphaned node → produces a warning
    +        node_b = {**_NODE, "id": "node-b", "data": {**_NODE["data"], "id": "node-b"}}
    +        flow = {
    +            **_MINIMAL_VALID,
    +            "data": {
    +                "nodes": [_NODE, node_b],
    +                "edges": [],  # both nodes are orphaned → warnings
    +            },
    +        }
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +
    +        # Without strict — should pass (only warnings)
    +        result = validate_flow_file(p, level=1, skip_components=True)
    +        assert result.ok  # no errors, so ok=True
    +        assert result.warnings  # but there are warnings
    +
    +        # validate_command --strict should exit 1
    +        import typer
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            validate_command(
    +                flow_paths=[str(p)],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=True,
    +                strict=True,
    +                verbose=False,
    +                output_format="text",
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_no_issues_strict_passes(self, tmp_path):
    +        """A fully clean flow passes even under --strict."""
    +        p = _write_flow(tmp_path, "flow.json", _MINIMAL_VALID)
    +        # Should not raise
    +        validate_command(
    +            flow_paths=[str(p)],
    +            level=1,
    +            skip_components=True,
    +            skip_edge_types=True,
    +            skip_required_inputs=True,
    +            skip_version_check=True,
    +            skip_credentials=True,
    +            strict=True,
    +            verbose=False,
    +            output_format="text",
    +        )
    +
    +    def test_strict_json_output_marks_ok_false(self, tmp_path, capsys):
    +        """Under --strict, JSON output sets ok=false for flows with warnings."""
    +        node_b = {**_NODE, "id": "node-b", "data": {**_NODE["data"], "id": "node-b"}}
    +        flow = {
    +            **_MINIMAL_VALID,
    +            "data": {"nodes": [_NODE, node_b], "edges": []},
    +        }
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +
    +        import typer
    +
    +        with pytest.raises(typer.Exit):
    +            validate_command(
    +                flow_paths=[str(p)],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=True,
    +                strict=True,
    +                verbose=False,
    +                output_format="json",
    +            )
    +
    +        captured = capsys.readouterr()
    +        out = json.loads(captured.out)
    +        assert len(out) == 1
    +        assert out[0]["ok"] is False
    +
    +
    +# ---------------------------------------------------------------------------
    +# --format json output shape
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestJsonOutput:
    +    def test_json_output_shape(self, tmp_path, capsys):
    +        p = _write_flow(tmp_path, "flow.json", _MINIMAL_VALID)
    +        validate_command(
    +            flow_paths=[str(p)],
    +            level=1,
    +            skip_components=True,
    +            skip_edge_types=True,
    +            skip_required_inputs=True,
    +            skip_version_check=True,
    +            skip_credentials=True,
    +            strict=False,
    +            verbose=False,
    +            output_format="json",
    +        )
    +        captured = capsys.readouterr()
    +        out = json.loads(captured.out)
    +        assert isinstance(out, list)
    +        assert out[0]["ok"] is True
    +        assert "issues" in out[0]
    +        assert "path" in out[0]
    +
    +    def test_json_output_includes_errors(self, tmp_path, capsys):
    +        bad_flow = {k: v for k, v in _MINIMAL_VALID.items() if k != "id"}
    +        p = _write_flow(tmp_path, "flow.json", bad_flow)
    +        import typer
    +
    +        with pytest.raises(typer.Exit):
    +            validate_command(
    +                flow_paths=[str(p)],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=True,
    +                strict=False,
    +                verbose=False,
    +                output_format="json",
    +            )
    +        captured = capsys.readouterr()
    +        out = json.loads(captured.out)
    +        assert out[0]["ok"] is False
    +        errors = [i for i in out[0]["issues"] if i["severity"] == "error"]
    +        assert errors
    +
    +
    +# ---------------------------------------------------------------------------
    +# Exit codes
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestExitCodes:
    +    def test_clean_flow_exits_0(self, tmp_path):
    +        p = _write_flow(tmp_path, "flow.json", _MINIMAL_VALID)
    +        # No exception raised means exit 0
    +        validate_command(
    +            flow_paths=[str(p)],
    +            level=1,
    +            skip_components=True,
    +            skip_edge_types=True,
    +            skip_required_inputs=True,
    +            skip_version_check=True,
    +            skip_credentials=True,
    +            strict=False,
    +            verbose=False,
    +            output_format="text",
    +        )
    +
    +    def test_invalid_flow_exits_1(self, tmp_path):
    +        import typer
    +
    +        bad = {k: v for k, v in _MINIMAL_VALID.items() if k != "name"}
    +        p = _write_flow(tmp_path, "flow.json", bad)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            validate_command(
    +                flow_paths=[str(p)],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=True,
    +                strict=False,
    +                verbose=False,
    +                output_format="text",
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +    def test_missing_file_exits_2(self, tmp_path):
    +        import typer
    +
    +        with pytest.raises(typer.Exit) as exc_info:
    +            validate_command(
    +                flow_paths=[str(tmp_path / "ghost.json")],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=True,
    +                strict=False,
    +                verbose=False,
    +                output_format="text",
    +            )
    +        assert exc_info.value.exit_code == 2
    +
    +    def test_directory_with_invalid_flow_exits_1(self, tmp_path):
    +        import typer
    +
    +        bad = {k: v for k, v in _MINIMAL_VALID.items() if k != "name"}
    +        _write_flow(tmp_path, "bad.json", bad)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            validate_command(
    +                flow_paths=[str(tmp_path)],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=True,
    +                strict=False,
    +                verbose=False,
    +                output_format="text",
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# Extended check: version mismatch / outdated components
    +# ---------------------------------------------------------------------------
    +
# Fixture node whose metadata pins the Langflow version it was built with
# (lf_version "1.8.0") — used to exercise the version-mismatch check below.
_NODE_WITH_VERSION = {
    "id": "node-v",
    "data": {
        "id": "node-v",
        "type": "ChatInput",
        "node": {
            "display_name": "Chat Input",
            "lf_version": "1.8.0",
            "template": {},
        },
    },
}
    +
    +
    +class TestVersionMismatch:
    +    def _make_result(self) -> ValidationResult:
    +        return ValidationResult(path=Path("test.json"))
    +
    +    def test_no_lf_version_field_produces_no_warning(self):
    +        """Nodes without lf_version metadata are ignored."""
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE], "edges": []}}
    +        result = self._make_result()
    +        with patch("lfx.cli.validation.core._get_lf_version", return_value="1.9.0"):
    +            _check_version_mismatch(flow, result)
    +        assert not result.warnings
    +
    +    def test_matching_version_produces_no_warning(self):
    +        """lf_version equal to installed version → no warning."""
    +        node = {
    +            **_NODE_WITH_VERSION,
    +            "data": {
    +                **_NODE_WITH_VERSION["data"],
    +                "node": {**_NODE_WITH_VERSION["data"]["node"], "lf_version": "1.9.0"},
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        with patch("lfx.cli.validation.core._get_lf_version", return_value="1.9.0"):
    +            _check_version_mismatch(flow, result)
    +        assert not result.warnings
    +
    +    def test_mismatched_version_produces_warning(self):
    +        """lf_version != installed → one warning mentioning both versions."""
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE_WITH_VERSION], "edges": []}}
    +        result = self._make_result()
    +        with patch("lfx.cli.validation.core._get_lf_version", return_value="1.9.0"):
    +            _check_version_mismatch(flow, result)
    +        assert len(result.warnings) == 1
    +        msg = result.warnings[0].message
    +        assert "1.8.0" in msg
    +        assert "1.9.0" in msg
    +
    +    def test_multiple_different_versions_produce_one_warning_each(self):
    +        """Two distinct old versions → two separate warnings."""
    +        node_b = {
    +            "id": "node-b",
    +            "data": {
    +                "id": "node-b",
    +                "type": "ChatOutput",
    +                "node": {"display_name": "Chat Output", "lf_version": "1.7.0", "template": {}},
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE_WITH_VERSION, node_b], "edges": []}}
    +        result = self._make_result()
    +        with patch("lfx.cli.validation.core._get_lf_version", return_value="1.9.0"):
    +            _check_version_mismatch(flow, result)
    +        assert len(result.warnings) == 2
    +
    +    def test_langflow_not_installed_skips_check(self):
    +        """If Langflow is not installed, version check is skipped silently."""
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE_WITH_VERSION], "edges": []}}
    +        result = self._make_result()
    +        with patch("lfx.cli.validation.core._get_lf_version", return_value=None):
    +            _check_version_mismatch(flow, result)
    +        assert not result.warnings
    +
    +    def test_skip_version_check_flag_suppresses_warning(self, tmp_path):
    +        """--skip-version-check prevents version warnings from appearing."""
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE_WITH_VERSION], "edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        with patch("lfx.cli.validation.core._get_lf_version", return_value="1.9.0"):
    +            result = validate_flow_file(p, level=1, skip_components=True, skip_version_check=True)
    +        assert not result.warnings
    +
    +    def test_version_warning_strict_mode_causes_exit_1(self, tmp_path):
    +        """Version mismatch warning + --strict → exit 1."""
    +        import typer
    +
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [_NODE_WITH_VERSION], "edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        with (
    +            patch("lfx.cli.validation.core._get_lf_version", return_value="1.9.0"),
    +            pytest.raises(typer.Exit) as exc_info,
    +        ):
    +            validate_command(
    +                flow_paths=[str(p)],
    +                level=1,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=False,
    +                skip_credentials=True,
    +                strict=True,
    +                verbose=False,
    +                output_format="text",
    +            )
    +        assert exc_info.value.exit_code == 1
    +
    +
    +# ---------------------------------------------------------------------------
    +# Extended check: missing credentials
    +# ---------------------------------------------------------------------------
    +
    +
    +def _node_with_password_field(value: str | None = None, *, show: bool = True) -> dict:
    +    return {
    +        "id": "node-cred",
    +        "data": {
    +            "id": "node-cred",
    +            "type": "OpenAIModel",
    +            "node": {
    +                "display_name": "OpenAI",
    +                "template": {
    +                    "openai_api_key": {
    +                        "password": True,
    +                        "show": show,
    +                        "required": False,
    +                        "value": value,
    +                    }
    +                },
    +            },
    +        },
    +    }
    +
    +
    +class TestMissingCredentials:
    +    def _make_result(self) -> ValidationResult:
    +        return ValidationResult(path=Path("test.json"))
    +
    +    def test_password_field_with_value_no_warning(self):
    +        """Password field that already has a value → no warning."""
    +        node = _node_with_password_field(value="sk-test")
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert not result.warnings
    +
    +    def test_password_field_no_value_no_env_warns(self, monkeypatch):
    +        """Password field with no value and no env var → warning."""
    +        monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    +        node = _node_with_password_field(value=None)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert len(result.warnings) == 1
    +        assert "openai_api_key" in result.warnings[0].message
    +        assert "OPENAI_API_KEY" in result.warnings[0].message
    +
    +    def test_password_field_env_var_set_no_warning(self, monkeypatch):
    +        """Env var matching field name → no warning."""
    +        monkeypatch.setenv("OPENAI_API_KEY", "sk-from-env")
    +        node = _node_with_password_field(value=None)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert not result.warnings
    +
    +    def test_hidden_password_field_skipped(self, monkeypatch):
    +        """Password fields with show=False are not surfaced."""
    +        monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    +        node = _node_with_password_field(value=None, show=False)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert not result.warnings
    +
    +    def test_password_field_with_incoming_edge_no_warning(self):
    +        """Password field covered by an incoming edge → no warning."""
    +        node = _node_with_password_field(value=None)
    +        edge = {
    +            "source": "other-node",
    +            "target": "node-cred",
    +            "data": {
    +                "targetHandle": {"fieldName": "openai_api_key", "type": "str"},
    +                "sourceHandle": {"output_types": ["str"]},
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": [edge]}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert not result.warnings
    +
    +    def test_non_password_field_not_checked(self, monkeypatch):
    +        """Regular (non-password) fields are ignored even when empty."""
    +        monkeypatch.delenv("MY_PARAM", raising=False)
    +        node = {
    +            "id": "node-text",
    +            "data": {
    +                "id": "node-text",
    +                "type": "TextInput",
    +                "node": {
    +                    "display_name": "Text",
    +                    "template": {
    +                        "my_param": {
    +                            "password": False,
    +                            "show": True,
    +                            "required": False,
    +                            "value": None,
    +                        }
    +                    },
    +                },
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert not result.warnings
    +
    +    def test_display_password_field_also_triggers_warning(self, monkeypatch):
    +        """Fields with display_password=True are also treated as credentials."""
    +        monkeypatch.delenv("SECRET_TOKEN", raising=False)
    +        node = {
    +            "id": "node-dp",
    +            "data": {
    +                "id": "node-dp",
    +                "type": "CustomComp",
    +                "node": {
    +                    "display_name": "Custom",
    +                    "template": {
    +                        "secret_token": {
    +                            "display_password": True,
    +                            "show": True,
    +                            "required": False,
    +                            "value": None,
    +                        }
    +                    },
    +                },
    +            },
    +        }
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        result = self._make_result()
    +        _check_missing_credentials(flow, result)
    +        assert len(result.warnings) == 1
    +
    +    def test_skip_credentials_flag_suppresses_warning(self, tmp_path, monkeypatch):
    +        """--skip-credentials prevents missing-credential warnings."""
    +        monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    +        node = _node_with_password_field(value=None)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        result = validate_flow_file(p, level=4, skip_components=True, skip_edge_types=True, skip_credentials=True)
    +        cred_warnings = [w for w in result.warnings if "openai_api_key" in w.message]
    +        assert not cred_warnings
    +
    +    def test_credential_warning_strict_causes_exit_1(self, tmp_path, monkeypatch):
    +        """Missing credential warning + --strict → exit 1."""
    +        import typer
    +
    +        monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    +        node = _node_with_password_field(value=None)
    +        flow = {**_MINIMAL_VALID, "data": {"nodes": [node], "edges": []}}
    +        p = _write_flow(tmp_path, "flow.json", flow)
    +        with pytest.raises(typer.Exit) as exc_info:
    +            validate_command(
    +                flow_paths=[str(p)],
    +                level=4,
    +                skip_components=True,
    +                skip_edge_types=True,
    +                skip_required_inputs=True,
    +                skip_version_check=True,
    +                skip_credentials=False,
    +                strict=True,
    +                verbose=False,
    +                output_format="text",
    +            )
    +        assert exc_info.value.exit_code == 1
    
  • src/lfx/tests/unit/config/__init__.py+0 0 added
  • src/lfx/tests/unit/config/test_environments.py+463 0 added
    @@ -0,0 +1,463 @@
    +"""Unit tests for lfx.config.environments — environment resolution.
    +
    +All tests run entirely in-process; no real Langflow instance or SDK required.
    +"""
    +
    +from __future__ import annotations
    +
    +from typing import TYPE_CHECKING
    +
    +import pytest
    +
    +if TYPE_CHECKING:
    +    from pathlib import Path
    +
    +from lfx.config.environments import (
    +    ConfigError,
    +    LangflowEnvironment,
    +    _find_config_file,
    +    _load_config,
    +    _parse_env_block,
    +    _parse_toml,
    +    _parse_yaml,
    +    resolve_environment,
    +)
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
    +_MINIMAL_YAML = """\
    +environments:
    +  local:
    +    url: http://localhost:7860
    +    api_key_env: MY_LOCAL_KEY
    +
    +  staging:
    +    url: https://staging.example.com
    +    api_key_env: MY_STAGING_KEY
    +
    +defaults:
    +  environment: local
    +"""
    +
    +_MINIMAL_TOML = """\
    +[environments.local]
    +url = "http://localhost:7860"
    +api_key_env = "MY_LOCAL_KEY"  # pragma: allowlist secret
    +
    +[environments.staging]
    +url = "https://staging.example.com"
    +api_key_env = "MY_STAGING_KEY"  # pragma: allowlist secret
    +
    +[defaults]
    +environment = "local"
    +"""
    +
    +_NO_DEFAULT_YAML = """\
    +environments:
    +  staging:
    +    url: https://staging.example.com
    +    api_key_env: MY_STAGING_KEY
    +"""
    +
    +
    +def _write(tmp_path: Path, name: str, content: str) -> Path:
    +    p = tmp_path / name
    +    p.parent.mkdir(parents=True, exist_ok=True)
    +    p.write_text(content, encoding="utf-8")
    +    return p
    +
    +
    +# ---------------------------------------------------------------------------
    +# LangflowEnvironment
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLangflowEnvironment:
    +    def test_fields_stored(self):
    +        e = LangflowEnvironment(name="staging", url="https://x.com", api_key="key123")  # pragma: allowlist secret
    +        assert e.name == "staging"
    +        assert e.url == "https://x.com"
    +        assert e.api_key == "key123"  # pragma: allowlist secret
    +
    +    def test_api_key_may_be_none(self):
    +        e = LangflowEnvironment(name="local", url="http://localhost:7860", api_key=None)
    +        assert e.api_key is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# _parse_yaml
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestParseYaml:
    +    def test_parses_valid_yaml(self, tmp_path):
    +        path = _write(tmp_path, "e.yaml", _MINIMAL_YAML)
    +        result = _parse_yaml(_MINIMAL_YAML, path)
    +        assert "environments" in result
    +        assert "local" in result["environments"]
    +
    +    def test_raises_on_invalid_yaml(self, tmp_path):
    +        path = _write(tmp_path, "e.yaml", ":\n  - invalid: [")
    +        with pytest.raises(ConfigError, match="Invalid YAML"):
    +            _parse_yaml(":\n  - invalid: [", path)
    +
    +    def test_raises_when_top_level_not_mapping(self, tmp_path):
    +        path = _write(tmp_path, "e.yaml", "- item1\n- item2\n")
    +        with pytest.raises(ConfigError, match="mapping"):
    +            _parse_yaml("- item1\n- item2\n", path)
    +
    +    def test_returns_dict(self, tmp_path):
    +        path = _write(tmp_path, "e.yaml", "key: value\n")
    +        result = _parse_yaml("key: value\n", path)
    +        assert isinstance(result, dict)
    +
    +
    +# ---------------------------------------------------------------------------
    +# _parse_toml
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestParseToml:
    +    def test_parses_valid_toml(self, tmp_path):
    +        path = _write(tmp_path, "e.toml", _MINIMAL_TOML)
    +        result = _parse_toml(path)
    +        assert "environments" in result
    +        assert "local" in result["environments"]
    +
    +    def test_raises_on_invalid_toml(self, tmp_path):
    +        path = _write(tmp_path, "e.toml", "not = valid = toml\n")
    +        with pytest.raises(ConfigError):
    +            _parse_toml(path)
    +
    +    def test_raises_on_missing_file(self, tmp_path):
    +        path = tmp_path / "missing.toml"
    +        with pytest.raises(ConfigError, match="Cannot read"):
    +            _parse_toml(path)
    +
    +
    +# ---------------------------------------------------------------------------
    +# _parse_env_block
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestParseEnvBlock:
    +    def test_minimal_block_with_url(self, tmp_path, monkeypatch):
    +        monkeypatch.setenv("MY_KEY", "secret123")  # pragma: allowlist secret
    +        block = {"url": "http://localhost:7860", "api_key_env": "MY_KEY"}  # pragma: allowlist secret
    +        path = tmp_path / "e.yaml"
    +        result = _parse_env_block("local", block, path)
    +        assert result.url == "http://localhost:7860"
    +        assert result.api_key == "secret123"  # pragma: allowlist secret
    +        assert result.name == "local"
    +
    +    def test_missing_url_raises(self, tmp_path):
    +        path = tmp_path / "e.yaml"
    +        with pytest.raises(ConfigError, match=r"missing.*'url'"):
    +            _parse_env_block("local", {"api_key_env": "FOO"}, path)  # pragma: allowlist secret
    +
    +    def test_not_a_dict_raises(self, tmp_path):
    +        path = tmp_path / "e.yaml"
    +        with pytest.raises(ConfigError, match="mapping"):
    +            _parse_env_block("local", "not a dict", path)
    +
    +    def test_missing_env_var_returns_none(self, tmp_path, monkeypatch):
    +        monkeypatch.delenv("MISSING_KEY", raising=False)
    +        path = tmp_path / "e.yaml"
    +        result = _parse_env_block("local", {"url": "http://x", "api_key_env": "MISSING_KEY"}, path)
    +        assert result.api_key is None
    +
    +    def test_literal_api_key_in_block(self, tmp_path):
    +        path = tmp_path / "e.yaml"
    +        block = {"url": "http://x", "api_key": "direct-key"}  # pragma: allowlist secret
    +        result = _parse_env_block("local", block, path)
    +        assert result.api_key == "direct-key"  # pragma: allowlist secret
    +
    +    def test_no_key_field_api_key_is_none(self, tmp_path):
    +        path = tmp_path / "e.yaml"
    +        result = _parse_env_block("local", {"url": "http://x"}, path)
    +        assert result.api_key is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# _find_config_file
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFindConfigFile:
    +    def test_override_file_returned_when_it_exists(self, tmp_path):
    +        p = _write(tmp_path, "my.yaml", _MINIMAL_YAML)
    +        assert _find_config_file(p) == p
    +
    +    def test_override_missing_raises(self, tmp_path):
    +        missing = tmp_path / "missing.yaml"
    +        with pytest.raises(ConfigError, match="not found"):
    +            _find_config_file(missing)
    +
    +    def test_finds_lfx_yaml_in_cwd(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        p = _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        assert _find_config_file(None) == p
    +
    +    def test_finds_lfx_yml_in_cwd(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        p = _write(tmp_path, ".lfx/environments.yml", _MINIMAL_YAML)
    +        assert _find_config_file(None) == p
    +
    +    def test_walks_up_to_parent(self, tmp_path, monkeypatch):
    +        parent_yaml = _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        child = tmp_path / "subdir"
    +        child.mkdir()
    +        monkeypatch.chdir(child)
    +        # No .git boundary, so should walk up to tmp_path
    +        result = _find_config_file(None)
    +        assert result == parent_yaml
    +
    +    def test_stops_at_git_boundary(self, tmp_path, monkeypatch):
    +        # Create .git in cwd so the walk stops there
    +        cwd = tmp_path / "project"
    +        cwd.mkdir()
    +        (cwd / ".git").mkdir()
    +        # Parent has a YAML config — should NOT be found (stopped by .git boundary)
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        monkeypatch.chdir(cwd)
    +        # Also check no TOML fallback
    +        result = _find_config_file(None)
    +        assert result is None
    +
    +    def test_toml_fallback_when_no_yaml(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        p = _write(tmp_path, "langflow-environments.toml", _MINIMAL_TOML)
    +        assert _find_config_file(None) == p
    +
    +    def test_returns_none_when_nothing_found(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        # Create a git boundary so it doesn't walk up further
    +        (tmp_path / ".git").mkdir()
    +        assert _find_config_file(None) is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# _load_config
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestLoadConfig:
    +    def test_loads_yaml_environments(self, tmp_path, monkeypatch):
    +        monkeypatch.setenv("MY_LOCAL_KEY", "local-key")  # pragma: allowlist secret
    +        monkeypatch.setenv("MY_STAGING_KEY", "staging-key")  # pragma: allowlist secret
    +        p = _write(tmp_path, "e.yaml", _MINIMAL_YAML)
    +        envs, default = _load_config(p)
    +        assert "local" in envs
    +        assert "staging" in envs
    +        assert default == "local"
    +        assert envs["local"].url == "http://localhost:7860"
    +        assert envs["local"].api_key == "local-key"  # pragma: allowlist secret
    +
    +    def test_loads_toml_environments(self, tmp_path, monkeypatch):
    +        monkeypatch.setenv("MY_LOCAL_KEY", "local-key")  # pragma: allowlist secret
    +        p = _write(tmp_path, "e.toml", _MINIMAL_TOML)
    +        envs, default = _load_config(p)
    +        assert "local" in envs
    +        assert default == "local"
    +
    +    def test_no_defaults_returns_none(self, tmp_path):
    +        p = _write(tmp_path, "e.yaml", _NO_DEFAULT_YAML)
    +        _, default = _load_config(p)
    +        assert default is None
    +
    +    def test_missing_env_var_does_not_raise_in_load(self, tmp_path, monkeypatch):
    +        monkeypatch.delenv("MY_LOCAL_KEY", raising=False)
    +        p = _write(tmp_path, "e.yaml", _MINIMAL_YAML)
    +        # _load_config does NOT raise for missing env vars — just returns None for api_key
    +        envs, _ = _load_config(p)
    +        assert envs["local"].api_key is None
    +
    +    def test_malformed_environments_key_raises(self, tmp_path):
    +        bad_yaml = "environments: not_a_mapping_but_a_string\n"
    +        p = _write(tmp_path, "e.yaml", bad_yaml)
    +        with pytest.raises(ConfigError, match="mapping"):
    +            _load_config(p)
    +
    +
    +# ---------------------------------------------------------------------------
    +# resolve_environment — inline mode
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestResolveInlineMode:
    +    def test_target_returns_inline_env(self):
    +        result = resolve_environment(None, target="http://localhost:7860")
    +        assert result.url == "http://localhost:7860"
    +        assert result.name == "__inline__"
    +        assert result.api_key is None
    +
    +    def test_target_with_api_key(self):
    +        result = resolve_environment(None, target="http://localhost:7860", api_key="mykey")  # pragma: allowlist secret
    +        assert result.api_key == "mykey"  # pragma: allowlist secret
    +
    +    def test_target_with_env_name_uses_env_name_as_label(self):
    +        result = resolve_environment("staging", target="http://localhost:7860", api_key="k")  # pragma: allowlist secret
    +        assert result.name == "staging"
    +        assert result.url == "http://localhost:7860"
    +
    +    def test_target_ignores_environments_file(self, tmp_path):
    +        # Even if environments_file is given, --target bypasses it entirely
    +        result = resolve_environment(
    +            None,
    +            target="http://localhost:7860",
    +            environments_file=str(tmp_path / "missing.yaml"),
    +        )
    +        assert result.url == "http://localhost:7860"
    +
    +
    +# ---------------------------------------------------------------------------
    +# resolve_environment — config file mode
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestResolveConfigMode:
    +    def test_named_env_resolved(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        monkeypatch.setenv("MY_STAGING_KEY", "staging-secret")  # pragma: allowlist secret
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        result = resolve_environment("staging")
    +        assert result.url == "https://staging.example.com"
    +        assert result.api_key == "staging-secret"  # pragma: allowlist secret
    +        assert result.name == "staging"
    +
    +    def test_default_env_used_when_no_env_given(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        monkeypatch.setenv("MY_LOCAL_KEY", "local-secret")  # pragma: allowlist secret
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        result = resolve_environment(None)
    +        assert result.url == "http://localhost:7860"
    +        assert result.api_key == "local-secret"  # pragma: allowlist secret
    +
    +    def test_explicit_environments_file_used(self, tmp_path, monkeypatch):
    +        monkeypatch.setenv("MY_LOCAL_KEY", "local-key")  # pragma: allowlist secret
    +        p = _write(tmp_path, "custom/env.yaml", _MINIMAL_YAML)
    +        result = resolve_environment("local", environments_file=str(p))
    +        assert result.url == "http://localhost:7860"
    +
    +    def test_api_key_override_applied(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        monkeypatch.setenv("MY_LOCAL_KEY", "config-key")  # pragma: allowlist secret
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        result = resolve_environment("local", api_key="override-key")  # pragma: allowlist secret
    +        assert result.api_key == "override-key"  # pragma: allowlist secret
    +
    +    def test_missing_env_var_returns_none_api_key(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        monkeypatch.delenv("MY_LOCAL_KEY", raising=False)
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        result = resolve_environment("local")
    +        # Missing env var yields None — caller decides whether to treat this as error
    +        assert result.api_key is None
    +
    +    def test_unknown_env_name_raises(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        with pytest.raises(ConfigError, match=r"'production'.*not found"):
    +            resolve_environment("production")
    +
    +    def test_no_default_and_no_env_name_raises(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        _write(tmp_path, ".lfx/environments.yaml", _NO_DEFAULT_YAML)
    +        with pytest.raises(ConfigError, match="No --env given"):
    +            resolve_environment(None)
    +
    +    def test_toml_file_also_works(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        monkeypatch.setenv("MY_LOCAL_KEY", "local-secret")  # pragma: allowlist secret
    +        _write(tmp_path, "langflow-environments.toml", _MINIMAL_TOML)
    +        result = resolve_environment("local")
    +        assert result.url == "http://localhost:7860"
    +        assert result.api_key == "local-secret"  # pragma: allowlist secret
    +
    +
    +# ---------------------------------------------------------------------------
    +# resolve_environment — no config file fallbacks
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestResolveNoConfigFallbacks:
    +    def test_langflow_url_env_var_used_as_fallback(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        (tmp_path / ".git").mkdir()  # stop file walk here
    +        monkeypatch.setenv("LANGFLOW_URL", "http://fallback:7860")
    +        monkeypatch.setenv("LANGFLOW_API_KEY", "fallback-key")  # pragma: allowlist secret
    +        result = resolve_environment(None)
    +        assert result.url == "http://fallback:7860"
    +        assert result.api_key == "fallback-key"  # pragma: allowlist secret
    +        assert result.name == "__env__"
    +
    +    def test_lfx_url_env_var_used_as_fallback(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        (tmp_path / ".git").mkdir()
    +        monkeypatch.delenv("LANGFLOW_URL", raising=False)
    +        monkeypatch.setenv("LFX_URL", "http://lfx-fallback:7860")
    +        monkeypatch.setenv("LFX_API_KEY", "lfx-key")  # pragma: allowlist secret
    +        result = resolve_environment(None)
    +        assert result.url == "http://lfx-fallback:7860"
    +        assert result.api_key == "lfx-key"  # pragma: allowlist secret
    +
    +    def test_named_env_without_config_raises_clear_error(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        (tmp_path / ".git").mkdir()
    +        monkeypatch.delenv("LANGFLOW_URL", raising=False)
    +        monkeypatch.delenv("LFX_URL", raising=False)
    +        with pytest.raises(ConfigError, match=r"'staging'.*no config file"):
    +            resolve_environment("staging")
    +
    +    def test_no_env_no_config_no_env_vars_raises(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        (tmp_path / ".git").mkdir()
    +        monkeypatch.delenv("LANGFLOW_URL", raising=False)
    +        monkeypatch.delenv("LFX_URL", raising=False)
    +        with pytest.raises(ConfigError, match="No --env"):
    +            resolve_environment(None)
    +
    +    def test_missing_explicit_environments_file_raises(self, tmp_path):
    +        missing = tmp_path / "missing.yaml"
    +        with pytest.raises(ConfigError, match="not found"):
    +            resolve_environment("staging", environments_file=str(missing))
    +
    +    def test_inline_api_key_override_with_env_var_url(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        (tmp_path / ".git").mkdir()
    +        monkeypatch.setenv("LANGFLOW_URL", "http://fallback:7860")
    +        monkeypatch.delenv("LANGFLOW_API_KEY", raising=False)
    +        # api_key arg should override the env-var based key
    +        result = resolve_environment(None, api_key="override-key")  # pragma: allowlist secret
    +        # In fallback mode, api_key_inline takes precedence
    +        assert result.api_key == "override-key"  # pragma: allowlist secret
    +
    +
    +# ---------------------------------------------------------------------------
    +# Error message quality
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestErrorMessages:
    +    def test_unknown_env_message_lists_available(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        _write(tmp_path, ".lfx/environments.yaml", _MINIMAL_YAML)
    +        with pytest.raises(ConfigError, match="local") as exc_info:
    +            resolve_environment("typo-env")
    +        assert "staging" in str(exc_info.value)
    +
    +    def test_no_config_message_suggests_init(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        (tmp_path / ".git").mkdir()
    +        monkeypatch.delenv("LANGFLOW_URL", raising=False)
    +        monkeypatch.delenv("LFX_URL", raising=False)
    +        with pytest.raises(ConfigError, match="lfx init"):
    +            resolve_environment("staging")
    +
    +    def test_no_default_message_shows_available(self, tmp_path, monkeypatch):
    +        monkeypatch.chdir(tmp_path)
    +        _write(tmp_path, ".lfx/environments.yaml", _NO_DEFAULT_YAML)
    +        with pytest.raises(ConfigError, match="staging"):
    +            resolve_environment(None)
    
  • src/lfx/tests/unit/test_testing_plugin.py+643 0 added
    @@ -0,0 +1,643 @@
    +"""Unit tests for lfx.testing — the flow_runner pytest plugin.
    +
    +All tests mock ``_run_sync`` / ``_run_async`` so no real Langflow instance is needed.
    +"""
    +
    +from __future__ import annotations
    +
    +import json
    +from pathlib import Path
    +from unittest.mock import AsyncMock, MagicMock, patch
    +
    +from lfx.testing import (
    +    AsyncLocalFlowRunner,
    +    FlowResult,
    +    LocalFlowRunner,
    +    _apply_tweaks,
    +    _build_result,
    +    _get_marker_arg,
    +    _resolve_flow_args,
    +    _resolve_runner_config,
    +)
    +
    +# ---------------------------------------------------------------------------
    +# Helpers
    +# ---------------------------------------------------------------------------
    +
# Canonical raw payloads shaped like what the mocked _run_sync/_run_async
# helpers return (see module docstring); shared by the runner tests and the
# _build_result tests below.
_SUCCESS_RAW: dict = {
    "success": True,
    "result": "Hello, world!",
    "messages": [{"role": "assistant", "content": "Hello, world!"}],
    "outputs": {"answer": "Hello, world!"},
    "logs": "",
    "timing": None,
}

# Failed-run payload; tests expect "exception_message" to surface as the
# FlowResult error text.
_ERROR_RAW: dict = {
    "success": False,
    "type": "error",
    "exception_message": "Something went wrong",
    "messages": [],
    "outputs": {},
    "logs": "",
}
    +
    +
    +def _make_flow_dict(
    +    *,
    +    node_id: str = "node-a",
    +    node_type: str = "OpenAI",
    +    display_name: str = "OpenAI",
    +    fields: dict | None = None,
    +) -> dict:
    +    """Return a minimal flow dict with a single node containing *fields* in its template."""
    +    template: dict = {}
    +    for fname, fvalue in (fields or {}).items():
    +        template[fname] = {"value": fvalue, "type": "str"}
    +    return {
    +        "id": "flow-1",
    +        "name": "Test Flow",
    +        "data": {
    +            "nodes": [
    +                {
    +                    "id": node_id,
    +                    "data": {
    +                        "id": node_id,
    +                        "type": node_type,
    +                        "node": {
    +                            "display_name": display_name,
    +                            "template": template,
    +                        },
    +                    },
    +                }
    +            ],
    +            "edges": [],
    +        },
    +    }
    +
    +
    +# ---------------------------------------------------------------------------
    +# FlowResult
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFlowResult:
    +    def test_ok_when_success(self):
    +        r = FlowResult(
    +            status="success",
    +            text="hi",
    +            messages=[],
    +            outputs={},
    +            logs="",
    +            error=None,
    +            timing=None,
    +            raw={},
    +        )
    +        assert r.ok is True
    +
    +    def test_not_ok_when_error(self):
    +        r = FlowResult(
    +            status="error",
    +            text=None,
    +            messages=[],
    +            outputs={},
    +            logs="",
    +            error="boom",
    +            timing=None,
    +            raw={},
    +        )
    +        assert r.ok is False
    +
    +    def test_repr_truncates_long_text(self):
    +        long_text = "x" * 100
    +        r = FlowResult(
    +            status="success",
    +            text=long_text,
    +            messages=[],
    +            outputs={},
    +            logs="",
    +            error=None,
    +            timing=None,
    +            raw={},
    +        )
    +        assert "…" in repr(r)
    +
    +    def test_repr_short_text(self):
    +        r = FlowResult(
    +            status="success",
    +            text="hi",
    +            messages=[],
    +            outputs={},
    +            logs="",
    +            error=None,
    +            timing=None,
    +            raw={},
    +        )
    +        assert "…" not in repr(r)
    +
    +    def test_repr_none_text(self):
    +        r = FlowResult(
    +            status="error",
    +            text=None,
    +            messages=[],
    +            outputs={},
    +            logs="",
    +            error="boom",
    +            timing=None,
    +            raw={},
    +        )
    +        assert "None" in repr(r)
    +
    +
    +# ---------------------------------------------------------------------------
    +# _build_result
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestBuildResult:
    +    def test_success_from_success_true(self):
    +        r = _build_result(_SUCCESS_RAW)
    +        assert r.status == "success"
    +        assert r.ok is True
    +
    +    def test_error_from_success_false(self):
    +        r = _build_result(_ERROR_RAW)
    +        assert r.status == "error"
    +        assert r.ok is False
    +        assert r.error == "Something went wrong"
    +
    +    def test_error_from_type_error_key(self):
    +        raw = {"type": "error", "exception_message": "oops"}
    +        r = _build_result(raw)
    +        assert r.status == "error"
    +
    +    def test_text_from_result_key(self):
    +        r = _build_result({"result": "answer"})
    +        assert r.text == "answer"
    +
    +    def test_text_from_text_key(self):
    +        r = _build_result({"text": "answer"})
    +        assert r.text == "answer"
    +
    +    def test_text_from_output_key(self):
    +        r = _build_result({"output": "answer"})
    +        assert r.text == "answer"
    +
    +    def test_text_priority_result_over_text(self):
    +        r = _build_result({"result": "primary", "text": "secondary"})
    +        assert r.text == "primary"
    +
    +    def test_text_non_string_serialised(self):
    +        r = _build_result({"result": {"key": "val"}})
    +        assert r.text == '{"key": "val"}'
    +
    +    def test_messages_extracted(self):
    +        r = _build_result({"messages": [{"role": "user"}]})
    +        assert r.messages == [{"role": "user"}]
    +
    +    def test_messages_defaults_to_empty_list(self):
    +        r = _build_result({})
    +        assert r.messages == []
    +
    +    def test_messages_non_list_defaults_to_empty(self):
    +        r = _build_result({"messages": "bad"})
    +        assert r.messages == []
    +
    +    def test_outputs_from_outputs_key(self):
    +        r = _build_result({"outputs": {"k": "v"}})
    +        assert r.outputs == {"k": "v"}
    +
    +    def test_outputs_from_result_dict_fallback(self):
    +        r = _build_result({"result_dict": {"k": "v"}})
    +        assert r.outputs == {"k": "v"}
    +
    +    def test_timing_propagated(self):
    +        r = _build_result({"timing": {"node1": 0.5}})
    +        assert r.timing == {"node1": 0.5}
    +
    +    def test_timing_absent(self):
    +        r = _build_result({})
    +        assert r.timing is None
    +
    +    def test_logs_propagated(self):
    +        r = _build_result({"logs": "some log"})
    +        assert r.logs == "some log"
    +
    +    def test_error_fallback_unknown(self):
    +        r = _build_result({"success": False})
    +        assert r.error == "Unknown error"
    +
    +    def test_raw_preserved(self):
    +        raw = {"success": True, "result": "x"}
    +        r = _build_result(raw)
    +        assert r.raw is raw
    +
    +
    +# ---------------------------------------------------------------------------
    +# _apply_tweaks
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestApplyTweaks:
    +    def test_tweak_by_node_type(self):
    +        flow = _make_flow_dict(node_type="OpenAI", fields={"model_name": "gpt-4"})
    +        patched = _apply_tweaks(flow, {"OpenAI": {"model_name": "gpt-4o-mini"}})
    +        template = patched["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["model_name"]["value"] == "gpt-4o-mini"
    +
    +    def test_tweak_by_node_id(self):
    +        flow = _make_flow_dict(node_id="abc-123", fields={"temperature": 0.7})
    +        patched = _apply_tweaks(flow, {"abc-123": {"temperature": 0.0}})
    +        template = patched["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["temperature"]["value"] == 0.0
    +
    +    def test_tweak_by_display_name(self):
    +        flow = _make_flow_dict(display_name="My OpenAI", fields={"max_tokens": 100})
    +        patched = _apply_tweaks(flow, {"My OpenAI": {"max_tokens": 200}})
    +        template = patched["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["max_tokens"]["value"] == 200
    +
    +    def test_unknown_tweak_key_ignored(self):
    +        flow = _make_flow_dict(fields={"temperature": 0.7})
    +        patched = _apply_tweaks(flow, {"NonExistentNode": {"temperature": 0.0}})
    +        template = patched["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["temperature"]["value"] == 0.7
    +
    +    def test_unknown_field_ignored(self):
    +        flow = _make_flow_dict(node_type="OpenAI", fields={"model_name": "gpt-4"})
    +        patched = _apply_tweaks(flow, {"OpenAI": {"nonexistent_field": "value"}})
    +        template = patched["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert "nonexistent_field" not in template
    +
    +    def test_does_not_modify_original(self):
    +        flow = _make_flow_dict(node_type="OpenAI", fields={"model_name": "gpt-4"})
    +        _apply_tweaks(flow, {"OpenAI": {"model_name": "gpt-4o-mini"}})
    +        # Original should be unchanged
    +        template = flow["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["model_name"]["value"] == "gpt-4"
    +
    +    def test_no_tweaks_returns_identical_structure(self):
    +        flow = _make_flow_dict(fields={"k": "v"})
    +        patched = _apply_tweaks(flow, {})
    +        assert patched["data"]["nodes"][0]["data"]["node"]["template"]["k"]["value"] == "v"
    +
    +
    +# ---------------------------------------------------------------------------
    +# _resolve_flow_args
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestResolveFlowArgs:
    +    def test_dict_flow_no_tweaks(self):
    +        flow = {"data": {"nodes": [], "edges": []}}
    +        script_path, flow_json = _resolve_flow_args(flow, None, Path("/base"))
    +        assert script_path is None
    +        assert json.loads(flow_json) == flow  # type: ignore[arg-type]
    +
    +    def test_dict_flow_with_tweaks(self):
    +        flow = _make_flow_dict(node_type="OpenAI", fields={"model_name": "gpt-4"})
    +        _, flow_json = _resolve_flow_args(flow, {"OpenAI": {"model_name": "gpt-4o-mini"}}, Path("/base"))
    +        parsed = json.loads(flow_json)  # type: ignore[arg-type]
    +        template = parsed["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["model_name"]["value"] == "gpt-4o-mini"
    +
    +    def test_json_file_without_tweaks(self, tmp_path):
    +        flow = {"data": {"nodes": [], "edges": []}}
    +        p = tmp_path / "flow.json"
    +        p.write_text(json.dumps(flow))
    +        script_path, flow_json = _resolve_flow_args(p, None, tmp_path)
    +        assert script_path == p
    +        assert flow_json is None
    +
    +    def test_json_file_with_tweaks_inlines(self, tmp_path):
    +        flow = _make_flow_dict(node_type="OpenAI", fields={"model_name": "gpt-4"})
    +        p = tmp_path / "flow.json"
    +        p.write_text(json.dumps(flow))
    +        script_path, flow_json = _resolve_flow_args(p, {"OpenAI": {"model_name": "gpt-4o-mini"}}, tmp_path)
    +        assert script_path is None
    +        parsed = json.loads(flow_json)  # type: ignore[arg-type]
    +        template = parsed["data"]["nodes"][0]["data"]["node"]["template"]
    +        assert template["model_name"]["value"] == "gpt-4o-mini"
    +
    +    def test_py_file_with_tweaks_uses_file_path(self, tmp_path):
    +        p = tmp_path / "flow.py"
    +        p.write_text("# python flow")
    +        script_path, flow_json = _resolve_flow_args(p, {"SomeNode": {"field": "value"}}, tmp_path)
    +        # .py files are never inlined; tweaks ignored for Python flows
    +        assert script_path == p
    +        assert flow_json is None
    +
    +    def test_relative_path_resolved_against_base_dir(self, tmp_path):
    +        flow = {"data": {"nodes": [], "edges": []}}
    +        p = tmp_path / "flow.json"
    +        p.write_text(json.dumps(flow))
    +        script_path, _ = _resolve_flow_args("flow.json", None, tmp_path)
    +        assert script_path == tmp_path / "flow.json"
    +
    +
    +# ---------------------------------------------------------------------------
    +# LocalFlowRunner
    +# ---------------------------------------------------------------------------
    +
    +
class TestLocalFlowRunner:
    """LocalFlowRunner behaviour with ``lfx.testing.runners._run_sync`` mocked.

    No real Langflow instance runs; each test inspects either the FlowResult
    built from the mocked raw payload or the kwargs forwarded to _run_sync.
    """

    def test_success_result(self):
        # A successful raw payload becomes an ok FlowResult with text filled in.
        with patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW) as mock_run:
            runner = LocalFlowRunner()
            result = runner({"data": {"nodes": [], "edges": []}}, input_value="hi")
        assert result.ok
        assert result.text == "Hello, world!"
        mock_run.assert_called_once()

    def test_error_result(self):
        with patch("lfx.testing.runners._run_sync", return_value=_ERROR_RAW):
            runner = LocalFlowRunner()
            result = runner({"data": {"nodes": [], "edges": []}})
        assert not result.ok
        assert result.error == "Something went wrong"

    def test_default_timeout_used_when_no_per_call_timeout(self):
        with patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW) as mock_run:
            runner = LocalFlowRunner(default_timeout=30.0)
            runner({"data": {"nodes": [], "edges": []}})
        assert mock_run.call_args.kwargs["timeout"] == 30.0

    def test_per_call_timeout_overrides_default(self):
        with patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW) as mock_run:
            runner = LocalFlowRunner(default_timeout=30.0)
            runner({"data": {"nodes": [], "edges": []}}, timeout=5.0)
        assert mock_run.call_args.kwargs["timeout"] == 5.0

    def test_env_file_loaded(self, tmp_path):
        # The runner-level default env file is handed to _load_dotenv exactly once.
        env_file = tmp_path / ".env"
        env_file.write_text("MY_VAR=1\n")
        with (
            patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW),
            patch("lfx.testing.runners._load_dotenv") as mock_load,
        ):
            runner = LocalFlowRunner(default_env_file=str(env_file))
            runner({"data": {"nodes": [], "edges": []}})
        mock_load.assert_called_once_with(str(env_file))

    def test_per_call_env_file_overrides_default(self, tmp_path):
        default_env = tmp_path / "default.env"
        per_call_env = tmp_path / "per_call.env"
        default_env.write_text("A=1\n")
        per_call_env.write_text("B=2\n")
        with (
            patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW),
            patch("lfx.testing.runners._load_dotenv") as mock_load,
        ):
            runner = LocalFlowRunner(default_env_file=str(default_env))
            runner({"data": {"nodes": [], "edges": []}}, env_file=str(per_call_env))
        # Only the per-call file is loaded; the default is skipped entirely.
        mock_load.assert_called_once_with(str(per_call_env))

    def test_no_env_file_no_load(self):
        with (
            patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW),
            patch("lfx.testing.runners._load_dotenv") as mock_load,
        ):
            runner = LocalFlowRunner()
            runner({"data": {"nodes": [], "edges": []}})
        mock_load.assert_not_called()

    def test_tweaks_applied(self):
        flow = _make_flow_dict(node_type="OpenAI", fields={"model_name": "gpt-4"})
        with patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW) as mock_run:
            runner = LocalFlowRunner()
            runner(flow, tweaks={"OpenAI": {"model_name": "gpt-4o-mini"}})
        # When tweaks are provided for a dict flow, flow_json is passed (not script_path)
        kwargs = mock_run.call_args.kwargs
        assert kwargs["flow_json"] is not None
        parsed = json.loads(kwargs["flow_json"])
        template = parsed["data"]["nodes"][0]["data"]["node"]["template"]
        assert template["model_name"]["value"] == "gpt-4o-mini"

    def test_base_dir_resolved(self, tmp_path):
        # A relative flow path is resolved against the runner's base_dir.
        flow_file = tmp_path / "my_flow.json"
        flow_file.write_text(json.dumps({"data": {"nodes": [], "edges": []}}))
        with patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW) as mock_run:
            runner = LocalFlowRunner(base_dir=tmp_path)
            runner("my_flow.json")
        kwargs = mock_run.call_args.kwargs
        assert kwargs["script_path"] == tmp_path / "my_flow.json"
    +
    +
    +# ---------------------------------------------------------------------------
    +# AsyncLocalFlowRunner
    +# ---------------------------------------------------------------------------
    +
    +
class TestAsyncLocalFlowRunner:
    """AsyncLocalFlowRunner behaviour with ``_run_async`` mocked via AsyncMock.

    Mirrors the sync-runner tests above for the awaitable call path.
    """

    async def test_success_result(self):
        with patch("lfx.testing.runners._run_async", new_callable=AsyncMock, return_value=_SUCCESS_RAW):
            runner = AsyncLocalFlowRunner()
            result = await runner({"data": {"nodes": [], "edges": []}}, input_value="hi")
        assert result.ok
        assert result.text == "Hello, world!"

    async def test_error_result(self):
        with patch("lfx.testing.runners._run_async", new_callable=AsyncMock, return_value=_ERROR_RAW):
            runner = AsyncLocalFlowRunner()
            result = await runner({"data": {"nodes": [], "edges": []}})
        assert not result.ok
        assert result.error == "Something went wrong"

    async def test_default_timeout_used(self):
        with patch("lfx.testing.runners._run_async", new_callable=AsyncMock, return_value=_SUCCESS_RAW) as mock_run:
            runner = AsyncLocalFlowRunner(default_timeout=45.0)
            await runner({"data": {"nodes": [], "edges": []}})
        assert mock_run.call_args.kwargs["timeout"] == 45.0

    async def test_per_call_timeout_overrides_default(self):
        # The per-call timeout must win over the constructor default.
        with patch("lfx.testing.runners._run_async", new_callable=AsyncMock, return_value=_SUCCESS_RAW) as mock_run:
            runner = AsyncLocalFlowRunner(default_timeout=45.0)
            await runner({"data": {"nodes": [], "edges": []}}, timeout=10.0)
        assert mock_run.call_args.kwargs["timeout"] == 10.0

    async def test_env_file_loaded(self, tmp_path):
        env_file = tmp_path / ".env"
        env_file.write_text("MY_VAR=1\n")
        with (
            patch("lfx.testing.runners._run_async", new_callable=AsyncMock, return_value=_SUCCESS_RAW),
            patch("lfx.testing.runners._load_dotenv") as mock_load,
        ):
            runner = AsyncLocalFlowRunner(default_env_file=str(env_file))
            await runner({"data": {"nodes": [], "edges": []}})
        mock_load.assert_called_once_with(str(env_file))
    +
    +
    +# ---------------------------------------------------------------------------
    +# _get_marker_arg
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestGetMarkerArg:
    +    def test_returns_arg_when_marker_present(self):
    +        """Simulate a request node with a marker that has args."""
    +        marker = MagicMock()
    +        marker.args = ["/path/to/.env"]
    +        node = MagicMock()
    +        node.get_closest_marker.return_value = marker
    +        request = MagicMock()
    +        request.node = node
    +
    +        result = _get_marker_arg(request, "lfx_env_file")
    +        assert result == "/path/to/.env"
    +
    +    def test_returns_none_when_marker_absent(self):
    +        node = MagicMock()
    +        node.get_closest_marker.return_value = None
    +        request = MagicMock()
    +        request.node = node
    +
    +        result = _get_marker_arg(request, "lfx_env_file")
    +        assert result is None
    +
    +    def test_returns_none_when_marker_has_no_args(self):
    +        marker = MagicMock()
    +        marker.args = []
    +        node = MagicMock()
    +        node.get_closest_marker.return_value = marker
    +        request = MagicMock()
    +        request.node = node
    +
    +        result = _get_marker_arg(request, "lfx_env_file")
    +        assert result is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# _resolve_runner_config
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestResolveRunnerConfig:
    +    def _make_request(
    +        self,
    +        *,
    +        env_file_marker: str | None = None,
    +        timeout_marker: float | None = None,
    +        cli_env_file: str | None = None,
    +        cli_timeout: float | None = None,
    +        cli_flow_dir: str | None = None,
    +    ) -> MagicMock:
    +        """Build a minimal mock pytest.FixtureRequest."""
    +
    +        def get_closest_marker(name: str):
    +            if name == "lfx_env_file" and env_file_marker is not None:
    +                m = MagicMock()
    +                m.args = [env_file_marker]
    +                return m
    +            if name == "lfx_timeout" and timeout_marker is not None:
    +                m = MagicMock()
    +                m.args = [timeout_marker]
    +                return m
    +            return None
    +
    +        node = MagicMock()
    +        node.get_closest_marker.side_effect = get_closest_marker
    +
    +        def getoption(name, default=None):
    +            mapping = {
    +                "lfx_env_file": cli_env_file,
    +                "lfx_timeout": cli_timeout,
    +                "lfx_flow_dir": cli_flow_dir,
    +            }
    +            return mapping.get(name, default)
    +
    +        config = MagicMock()
    +        config.getoption.side_effect = getoption
    +
    +        request = MagicMock()
    +        request.node = node
    +        request.config = config
    +        return request
    +
    +    def test_marker_takes_precedence_over_cli(self, monkeypatch):
    +        monkeypatch.delenv("LFX_ENV_FILE", raising=False)
    +        monkeypatch.delenv("LFX_TIMEOUT", raising=False)
    +        monkeypatch.delenv("LFX_FLOW_DIR", raising=False)
    +        req = self._make_request(
    +            env_file_marker=".env.marker",
    +            cli_env_file=".env.cli",
    +            timeout_marker=99.0,
    +            cli_timeout=30.0,
    +        )
    +        env_file, timeout, _ = _resolve_runner_config(req)
    +        assert env_file == ".env.marker"
    +        assert timeout == 99.0
    +
    +    def test_cli_option_used_when_no_marker(self, monkeypatch):
    +        monkeypatch.delenv("LFX_ENV_FILE", raising=False)
    +        monkeypatch.delenv("LFX_TIMEOUT", raising=False)
    +        monkeypatch.delenv("LFX_FLOW_DIR", raising=False)
    +        req = self._make_request(cli_env_file=".env.cli", cli_timeout=15.0)
    +        env_file, timeout, _ = _resolve_runner_config(req)
    +        assert env_file == ".env.cli"
    +        assert timeout == 15.0
    +
    +    def test_env_var_used_as_fallback(self, monkeypatch):
    +        monkeypatch.setenv("LFX_ENV_FILE", ".env.envvar")
    +        monkeypatch.setenv("LFX_TIMEOUT", "42")
    +        monkeypatch.delenv("LFX_FLOW_DIR", raising=False)
    +        req = self._make_request()
    +        env_file, timeout, _ = _resolve_runner_config(req)
    +        assert env_file == ".env.envvar"
    +        assert timeout == 42.0
    +
    +    def test_all_none_when_nothing_configured(self, monkeypatch):
    +        monkeypatch.delenv("LFX_ENV_FILE", raising=False)
    +        monkeypatch.delenv("LFX_TIMEOUT", raising=False)
    +        monkeypatch.delenv("LFX_FLOW_DIR", raising=False)
    +        req = self._make_request()
    +        env_file, timeout, base_dir = _resolve_runner_config(req)
    +        assert env_file is None
    +        assert timeout is None
    +        assert base_dir is None
    +
    def test_flow_dir_cli_option_resolved(self, monkeypatch, tmp_path):
        """A flow directory passed on the CLI is resolved to a Path."""
        for var in ("LFX_ENV_FILE", "LFX_TIMEOUT", "LFX_FLOW_DIR"):
            monkeypatch.delenv(var, raising=False)
        request = self._make_request(cli_flow_dir=str(tmp_path))
        assert _resolve_runner_config(request)[2] == tmp_path
    +
    def test_flow_dir_env_var_fallback(self, monkeypatch, tmp_path):
        """LFX_FLOW_DIR is used when no CLI flow directory is given."""
        monkeypatch.delenv("LFX_ENV_FILE", raising=False)
        monkeypatch.delenv("LFX_TIMEOUT", raising=False)
        monkeypatch.setenv("LFX_FLOW_DIR", str(tmp_path))
        assert _resolve_runner_config(self._make_request())[2] == tmp_path
    +
    def test_invalid_timeout_env_var_ignored(self, monkeypatch):
        """A non-numeric LFX_TIMEOUT env var resolves to None instead of raising."""
        monkeypatch.delenv("LFX_ENV_FILE", raising=False)
        monkeypatch.delenv("LFX_FLOW_DIR", raising=False)
        monkeypatch.setenv("LFX_TIMEOUT", "not-a-number")
        assert _resolve_runner_config(self._make_request())[1] is None
    +
    +
    +# ---------------------------------------------------------------------------
    +# pytest fixture integration — smoke test using the real fixtures
    +# ---------------------------------------------------------------------------
    +
    +
    +class TestFixtures:
    +    def test_flow_runner_fixture_returns_local_runner(self, flow_runner):
    +        assert isinstance(flow_runner, LocalFlowRunner)
    +
    +    def test_async_flow_runner_fixture_returns_async_runner(self, async_flow_runner):
    +        assert isinstance(async_flow_runner, AsyncLocalFlowRunner)
    +
    +    def test_flow_runner_fixture_runs_flow(self, flow_runner):
    +        with patch("lfx.testing.runners._run_sync", return_value=_SUCCESS_RAW):
    +            result = flow_runner({"data": {"nodes": [], "edges": []}}, input_value="hi")
    +        assert result.ok
    +
    +    async def test_async_flow_runner_fixture_runs_flow(self, async_flow_runner):
    +        with patch("lfx.testing.runners._run_async", new_callable=AsyncMock, return_value=_SUCCESS_RAW):
    +            result = await async_flow_runner({"data": {"nodes": [], "edges": []}})
    +        assert result.ok
    
  • src/sdk/langflow-environments.toml.example+29 0 added
    @@ -0,0 +1,29 @@
    +# langflow-environments.toml
    +#
    +# Copy this file to langflow-environments.toml (in your project root or
    +# ~/.config/langflow/environments.toml) and fill in your instance details.
    +#
    +# The api_key_env field names an *environment variable* that holds the API key.
    +# This keeps secrets out of the config file and out of version control.
    +#
    +# Lookup order for this file:
    +#   1. Explicit path passed to get_client() / load_environments()
    +#   2. LANGFLOW_ENVIRONMENTS_FILE environment variable
    +#   3. langflow-environments.toml in the current working directory
    +#   4. ~/.config/langflow/environments.toml
    +
    +[environments.local]
    +url = "http://localhost:7860"
    +# No api_key_env means unauthenticated (AUTO_LOGIN must be enabled)
    +
    +[environments.staging]
    +url = "https://staging.langflow.example.com"
    +api_key_env = "LANGFLOW_STAGING_API_KEY" # pragma: allowlist secret
    +
    +[environments.production]
    +url = "https://langflow.example.com"
    +api_key_env = "LANGFLOW_PROD_API_KEY" # pragma: allowlist secret
    +
    +# Which environment is used when no name is specified
    +[defaults]
    +environment = "staging"
    
  • src/sdk/pyproject.toml+53 0 added
    @@ -0,0 +1,53 @@
    +[project]
    +name = "langflow-sdk"
    +version = "0.1.0"
    +description = "Python SDK for the Langflow REST API"
    +readme = "README.md"
    +requires-python = ">=3.10,<3.14"
    +license = "MIT"
    +keywords = ["langflow", "sdk", "ai", "workflow"]
    +
    +dependencies = [
    +    "httpx[http2]>=0.24.0,<1.0.0",
    +    "pydantic>=2.0.0,<3.0.0",
    +    "pydantic-settings>=2.10.1,<3.0.0",
    +    "tomli>=2.2.1,<3.0.0",
    +    "typing-extensions>=4.14.0,<5.0.0",
    +]
    +
    +[project.optional-dependencies]
    +testing = [
    +    "pytest>=8.0",
    +    "pytest-asyncio>=0.26.0",
    +]
    +
    +[project.entry-points."pytest11"]
    +langflow = "langflow_sdk.testing"
    +
    +[build-system]
    +requires = ["hatchling"]
    +build-backend = "hatchling.build"
    +
    +[tool.hatch.build.targets.wheel]
    +packages = ["src/langflow_sdk"]
    +
    +[tool.pytest.ini_options]
    +asyncio_mode = "auto"
    +testpaths = ["tests"]
    +python_files = "test_*.py"
    +python_classes = "Test*"
    +python_functions = "test_*"
    +addopts = "-v --tb=short --strict-markers --disable-warnings --color=yes"
    +markers = [
    +    "unit: Unit tests",
    +    "integration: Integration tests (require a live Langflow instance)",
    +]
    +
    +[dependency-groups]
    +dev = [
    +    "pytest>=8.4.1",
    +    "pytest-asyncio>=0.26.0",
    +    "pytest-cov>=7.0.0",
    +    "respx>=0.21.0",
    +    "ruff>=0.9.10",
    +]
    
  • src/sdk/README.md+0 0 added
  • src/sdk/src/langflow_sdk/_async_client.py+423 0 added
    @@ -0,0 +1,423 @@
    +"""Async HTTP client for the Langflow REST API.
    +
    +Preferred usage via the short alias::
    +
    +    from langflow_sdk import AsyncClient
    +
    +    async with AsyncClient("https://langflow.example.com", api_key="...") as client:
    +        flows = await client.list_flows()
    +"""
    +
    +from __future__ import annotations
    +
    +import asyncio
    +from typing import TYPE_CHECKING, Any
    +
    +import httpx
    +
    +from langflow_sdk._client_common import _ClientCommon
    +from langflow_sdk._http import (
    +    _DEFAULT_TIMEOUT,
    +    _build_headers,
    +    _connection_error,
    +    _logger,
    +    _raise_for_status,
    +    _raise_for_status_code,
    +)
    +from langflow_sdk.background_job import BackgroundJob
    +from langflow_sdk.models import (
    +    Flow,
    +    FlowCreate,
    +    FlowUpdate,
    +    Project,
    +    ProjectCreate,
    +    ProjectUpdate,
    +    ProjectWithFlows,
    +    RunRequest,
    +    RunResponse,
    +    StreamChunk,
    +)
    +
    +if TYPE_CHECKING:
    +    from collections.abc import AsyncIterator
    +    from pathlib import Path
    +    from uuid import UUID
    +
    +    from typing_extensions import Self
    +
    +
class AsyncLangflowClient(_ClientCommon):
    """Async client for the Langflow REST API.

    Prefer the short alias :data:`AsyncClient` for new code::

        from langflow_sdk import AsyncClient

        async with AsyncClient("https://langflow.example.com", api_key="...") as client:
            flows = await client.list_flows()
    """

    def __init__(
        self,
        base_url: str,
        api_key: str | None = None,
        timeout: float = _DEFAULT_TIMEOUT,
        httpx_client: httpx.AsyncClient | None = None,
    ) -> None:
        """Create a client for the Langflow instance at *base_url*.

        Args:
            base_url: Root URL of the instance; a trailing slash is stripped.
            api_key: Optional API key, folded into default headers via ``_build_headers``.
            timeout: Per-request timeout (seconds) for the internally-created transport.
            httpx_client: Pre-configured ``httpx.AsyncClient`` to reuse.  When
                supplied, the caller keeps ownership and :meth:`aclose` will
                not close it.
        """
        self._base_url = base_url.rstrip("/")
        self._api_key = api_key
        # Only close the transport in aclose() when we created it ourselves.
        self._owns_client = httpx_client is None
        self._http = httpx_client or httpx.AsyncClient(
            base_url=self._base_url,
            headers=_build_headers(api_key),
            timeout=timeout,
        )

    async def aclose(self) -> None:
        """Close the underlying transport (no-op for caller-supplied clients)."""
        if self._owns_client:
            await self._http.aclose()

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(self, *_: object) -> None:
        # Context-manager exit always closes; ownership check lives in aclose().
        await self.aclose()

    # ------------------------------------------------------------------
    # Internals
    # ------------------------------------------------------------------

    async def _request(
        self,
        method: str,
        path: str,
        *,
        json: Any = None,
        params: dict[str, Any] | None = None,
        content: bytes | None = None,
        headers: dict[str, str] | None = None,
    ) -> httpx.Response:
        """Issue one HTTP request, mapping transport and HTTP errors to SDK exceptions.

        ``httpx.ConnectError`` becomes the SDK connection error; non-success
        status codes are raised by ``_raise_for_status``.
        """
        try:
            response = await self._http.request(
                method,
                path,
                json=json,
                params=params,
                content=content,
                headers=headers,
            )
        except httpx.ConnectError as exc:
            _logger.debug("Connection error to %s", self._base_url, exc_info=True)
            raise _connection_error(self._base_url, exc) from exc
        _logger.debug("HTTP %s %s -> %s", method, path, response.status_code)
        _raise_for_status(response)
        return response

    # ------------------------------------------------------------------
    # Flows
    # ------------------------------------------------------------------

    async def list_flows(
        self,
        *,
        folder_id: UUID | str | None = None,
        remove_example_flows: bool = False,
        components_only: bool = False,
        get_all: bool = False,
        header_flows: bool = False,
        page: int = 1,
        size: int = 50,
    ) -> list[Flow]:
        """List flows visible to the caller (paginated)."""
        resp = await self._request(
            "GET",
            "/api/v1/flows/",
            params=self._build_flow_list_params(
                folder_id=folder_id,
                remove_example_flows=remove_example_flows,
                components_only=components_only,
                get_all=get_all,
                header_flows=header_flows,
                page=page,
                size=size,
            ),
        )
        return self._validate_model_list(Flow, resp.json())

    async def get_flow(self, flow_id: UUID | str) -> Flow:
        """Fetch a single flow by ID."""
        resp = await self._request("GET", f"/api/v1/flows/{flow_id}")
        return self._validate_model(Flow, resp.json())

    async def create_flow(self, flow: FlowCreate) -> Flow:
        """Create a new flow and return the server's representation."""
        resp = await self._request("POST", "/api/v1/flows/", json=self._model_payload(flow))
        return self._validate_model(Flow, resp.json())

    async def update_flow(self, flow_id: UUID | str, update: FlowUpdate) -> Flow:
        """Partially update an existing flow (PATCH semantics)."""
        resp = await self._request(
            "PATCH",
            f"/api/v1/flows/{flow_id}",
            json=self._model_payload(update),
        )
        return self._validate_model(Flow, resp.json())

    async def upsert_flow(self, flow_id: UUID | str, flow: FlowCreate) -> tuple[Flow, bool]:
        """Create-or-update by stable ID. Returns ``(flow, created)``."""
        resp = await self._request(
            "PUT",
            f"/api/v1/flows/{flow_id}",
            json=self._model_payload(flow),
        )
        return self._upsert_result(Flow, resp)

    async def delete_flow(self, flow_id: UUID | str) -> None:
        """Delete a flow by ID."""
        await self._request("DELETE", f"/api/v1/flows/{flow_id}")

    async def run_flow(
        self,
        flow_id_or_endpoint: UUID | str,
        request: RunRequest,
    ) -> RunResponse:
        """Execute a flow with an explicit :class:`RunRequest` and wait for the result."""
        resp = await self._request(
            "POST",
            f"/api/v1/run/{flow_id_or_endpoint}",
            json=self._model_payload(request),
        )
        return self._validate_model(RunResponse, resp.json())

    async def run(
        self,
        flow_id_or_endpoint: UUID | str,
        input_value: str = "",
        *,
        input_type: str = "chat",
        output_type: str = "chat",
        tweaks: dict[str, Any] | None = None,
    ) -> RunResponse:
        """Run a flow and return the full response.

        Convenience wrapper around :meth:`run_flow` that accepts plain keyword
        arguments instead of a :class:`RunRequest`::

            result = await client.run("my-flow", input_value="Hello")
            print(result.first_text_output())
        """
        return await self.run_flow(
            flow_id_or_endpoint,
            self._build_run_request(
                input_value=input_value,
                input_type=input_type,
                output_type=output_type,
                tweaks=tweaks,
            ),
        )

    async def run_background(
        self,
        flow_id_or_endpoint: UUID | str,
        input_value: str = "",
        *,
        input_type: str = "chat",
        output_type: str = "chat",
        tweaks: dict[str, Any] | None = None,
    ) -> BackgroundJob:
        """Start a flow run as a background asyncio task and return immediately.

        The returned :class:`BackgroundJob` lets you poll status or await
        completion without blocking the event loop::

            job = await client.run_background("my-flow", input_value="Hello!")

            # ...do other work...

            response = await job.wait_for_completion(timeout=60.0)
            print(response.get_chat_output())

        Args:
            flow_id_or_endpoint: Flow UUID or named endpoint.
            input_value: Text input passed to the flow.
            input_type: Langflow input type (default ``"chat"``).
            output_type: Langflow output type (default ``"chat"``).
            tweaks: Optional component tweaks dict.

        Returns:
            A :class:`BackgroundJob` wrapping the in-flight asyncio task.

        Adapted from ``BackgroundJob`` in langflow-ai/sdk PR #1
        (Janardan Singh Kavia, IBM Corp., Apache 2.0).
        """
        # The task starts on the running loop; callers must keep the client
        # open until the job completes, since it shares self._http.
        task: asyncio.Task[RunResponse] = asyncio.create_task(
            self.run(
                flow_id_or_endpoint,
                input_value,
                input_type=input_type,
                output_type=output_type,
                tweaks=tweaks,
            )
        )
        return BackgroundJob(task)

    def stream(
        self,
        flow_id_or_endpoint: UUID | str,
        input_value: str = "",
        *,
        input_type: str = "chat",
        output_type: str = "chat",
        tweaks: dict[str, Any] | None = None,
    ) -> AsyncIterator[StreamChunk]:
        """Stream a flow run, yielding :class:`StreamChunk` objects as they arrive.

        Uses server-sent events (SSE) to receive incremental output::

            async for chunk in client.stream("my-flow", input_value="Hello"):
                if chunk.is_token:
                    print(chunk.text, end="", flush=True)
                elif chunk.is_end:
                    response = chunk.final_response()
        """
        # Not async itself: returns the async generator, which opens the
        # connection lazily on first iteration.
        return self._aiter_stream(
            f"/api/v1/run/{flow_id_or_endpoint}",
            self._build_stream_payload(
                input_value=input_value,
                input_type=input_type,
                output_type=output_type,
                tweaks=tweaks,
            ),
        )

    async def _aiter_stream(self, path: str, payload: dict[str, Any]) -> AsyncIterator[StreamChunk]:
        """Open a streaming POST request and async-yield parsed event chunks."""
        try:
            async with self._http.stream("POST", path, json=payload) as response:
                if not response.is_success:
                    # Error bodies must be read explicitly in streaming mode.
                    body = await response.aread()
                    _raise_for_status_code(response.status_code, self._extract_error_detail(body))
                async for line in response.aiter_lines():
                    raw = line.strip()
                    if not raw:
                        continue
                    # Malformed chunks are skipped (_parse_stream_chunk returns None).
                    chunk = self._parse_stream_chunk(raw)
                    if chunk is not None:
                        yield chunk
        except httpx.ConnectError as exc:
            raise _connection_error(self._base_url, exc) from exc

    # ------------------------------------------------------------------
    # Projects
    # ------------------------------------------------------------------

    async def list_projects(self) -> list[Project]:
        """List all projects visible to the caller."""
        resp = await self._request("GET", "/api/v1/projects/")
        return self._validate_model_list(Project, resp.json())

    async def get_project(self, project_id: UUID | str) -> ProjectWithFlows:
        """Fetch a project, including its flows."""
        resp = await self._request("GET", f"/api/v1/projects/{project_id}")
        return self._validate_model(ProjectWithFlows, resp.json())

    async def create_project(self, project: ProjectCreate) -> Project:
        """Create a new project."""
        resp = await self._request("POST", "/api/v1/projects/", json=self._model_payload(project))
        return self._validate_model(Project, resp.json())

    async def update_project(self, project_id: UUID | str, update: ProjectUpdate) -> Project:
        """Partially update an existing project (PATCH semantics)."""
        resp = await self._request(
            "PATCH",
            f"/api/v1/projects/{project_id}",
            json=self._model_payload(update),
        )
        return self._validate_model(Project, resp.json())

    async def delete_project(self, project_id: UUID | str) -> None:
        """Delete a project by ID."""
        await self._request("DELETE", f"/api/v1/projects/{project_id}")

    async def download_project(self, project_id: UUID | str) -> dict[str, bytes]:
        """Download all flows in a project.

        Raises :class:`ValueError` if the archive contains more than 500
        entries or any single entry exceeds 50 MB (zip-bomb protection).
        """
        resp = await self._request("GET", f"/api/v1/projects/download/{project_id}")
        return self._extract_project_archive(resp.content)

    async def upload_project(self, zip_bytes: bytes) -> list[Flow]:
        """Upload a project ZIP archive; returns the imported flows."""
        resp = await self._request(
            "POST",
            "/api/v1/projects/upload/",
            content=zip_bytes,
            headers={"Content-Type": "application/octet-stream"},
        )
        return self._validate_model_list(Flow, resp.json())

    # ------------------------------------------------------------------
    # File I/O helpers
    # ------------------------------------------------------------------

    async def push(self, path: str | Path) -> tuple[Flow, bool]:
        """Upload or update a flow from a local JSON file.

        The ``id`` field embedded in the file is used for upsert.
        Returns ``(flow, created)``::

            flow, created = await client.push("flows/my-flow.json")
        """
        flow_id, flow_create = self._load_flow_file(path)
        return await self.upsert_flow(flow_id, flow_create)

    async def pull(
        self,
        flow_id: UUID | str,
        *,
        output: str | Path | None = None,
    ) -> dict[str, Any]:
        """Download a flow and return it as a normalized dict.

        Strips volatile fields, clears secrets, and sorts keys.
        When *output* is given the JSON is also written to that path::

            data = await client.pull("my-flow-id")
            await client.pull("my-flow-id", output="flows/my-flow.json")
        """
        flow = await self.get_flow(flow_id)
        return self._normalize_and_write_flow(flow.model_dump(mode="json"), output=output)

    async def push_project(self, directory: str | Path) -> list[tuple[Flow, bool]]:
        """Push all ``*.json`` flow files in *directory* to the server concurrently.

        Returns a list of ``(flow, created)`` pairs in sorted filename order::

            results = await client.push_project("flows/my-project/")
        """
        # gather() preserves argument order; result ordering therefore follows
        # _project_json_paths() — presumed sorted, confirm in _client_common.
        return list(await asyncio.gather(*[self.push(path) for path in self._project_json_paths(directory)]))

    async def pull_project(
        self,
        project_id: UUID | str,
        *,
        output_dir: str | Path,
    ) -> dict[str, Path]:
        """Download all flows in a project and write them to *output_dir*.

        Each flow is normalized before being written as ``<flow-name>.json``.
        Returns ``{flow_name: file_path}``.

        .. note::
            Flows with duplicate names overwrite each other.  See
            :meth:`LangflowClient.pull_project` for details.

        ::

            written = await client.pull_project("project-id", output_dir="flows/")
        """
        return self._write_project_flows(await self.download_project(project_id), output_dir=output_dir)
    +
    +
# ---------------------------------------------------------------------------
# Short alias  (preferred for new code)
# ---------------------------------------------------------------------------

#: Short alias for :class:`AsyncLangflowClient`.
#:
#: Example::
#:
#:     from langflow_sdk import AsyncClient
#:     async with AsyncClient("https://langflow.example.com", api_key="...") as c:
#:         flows = await c.list_flows()
#:
#: NOTE: both names are public; keep ``AsyncLangflowClient`` so existing
#: imports remain valid.
AsyncClient = AsyncLangflowClient
    
  • src/sdk/src/langflow_sdk/background_job.py+110 0 added
    @@ -0,0 +1,110 @@
    +"""BackgroundJob — non-blocking flow execution via asyncio.
    +
    +Wraps an :class:`asyncio.Task` so callers can start a flow run and poll or
    +await it without blocking the event loop.  Mirrors the ``BackgroundJob`` API
    +from langflow-ai/sdk PR #1 (Janardan Singh Kavia, IBM Corp., Apache 2.0)
    +adapted for the Langflow V1 ``/api/v1/run/{id}`` endpoint.
    +
    +Typical usage::
    +
    +    async with AsyncClient("https://langflow.example.com", api_key="...") as client:
    +        job = await client.run_background("my-flow", input_value="Hello!")
    +
    +        # Option 1 — poll status without blocking
    +        if job.is_running():
    +            print("still going…")
    +
    +        # Option 2 — await completion with a timeout
    +        response = await job.wait_for_completion(timeout=60.0)
    +        print(response.get_chat_output())
    +"""
    +
    +from __future__ import annotations
    +
    +import asyncio
    +import contextlib
    +from typing import TYPE_CHECKING
    +
    +from langflow_sdk.exceptions import LangflowTimeoutError
    +
    +if TYPE_CHECKING:
    +    from langflow_sdk.models import RunResponse
    +
    +
    +class BackgroundJob:
    +    """Non-blocking handle for an in-flight :meth:`AsyncLangflowClient.run` call.
    +
    +    Returned by :meth:`AsyncLangflowClient.run_background`.  The underlying
    +    network request runs in an :class:`asyncio.Task` so the caller's event
    +    loop remains free.
    +
    +    Adapted from ``BackgroundJob`` in langflow-ai/sdk PR #1
    +    (Janardan Singh Kavia, IBM Corp., Apache 2.0).
    +    """
    +
    +    def __init__(self, task: asyncio.Task[RunResponse]) -> None:
    +        self._task = task
    +
    +    # ------------------------------------------------------------------
    +    # Status helpers
    +    # ------------------------------------------------------------------
    +
    +    def is_running(self) -> bool:
    +        """Return ``True`` while the flow run is still in flight."""
    +        return not self._task.done()
    +
    +    def is_completed(self) -> bool:
    +        """Return ``True`` when the run finished successfully (no exception, not cancelled)."""
    +        return self._task.done() and not self._task.cancelled() and self._task.exception() is None
    +
    +    def is_failed(self) -> bool:
    +        """Return ``True`` when the run raised an exception or was cancelled."""
    +        return self._task.done() and (self._task.cancelled() or self._task.exception() is not None)
    +
    +    # ------------------------------------------------------------------
    +    # Awaiting / cancellation
    +    # ------------------------------------------------------------------
    +
    +    async def wait_for_completion(
    +        self,
    +        *,
    +        timeout: float | None = None,
    +    ) -> RunResponse:
    +        """Await the background run and return the :class:`RunResponse`.
    +
    +        Args:
    +            timeout: Maximum seconds to wait.  ``None`` (default) means wait
    +                     indefinitely.  Raises :exc:`LangflowTimeoutError` on
    +                     expiry.
    +
    +        Returns:
    +            The :class:`RunResponse` from the completed flow run.
    +
    +        Raises:
    +            LangflowTimeoutError: If *timeout* elapses before the run finishes.
    +            Exception: Any exception raised by the underlying flow run is
    +                re-raised as-is.
    +        """
    +        try:
    +            return await asyncio.wait_for(asyncio.shield(self._task), timeout=timeout)
    +        except asyncio.TimeoutError as exc:
    +            msg = (
    +                f"Background job did not complete within {timeout}s. "
    +                "The run is still in flight — call wait_for_completion() again "
    +                "or cancel() to abort."
    +            )
    +            raise LangflowTimeoutError(msg) from exc
    +
    +    async def cancel(self) -> bool:
    +        """Request cancellation of the background task.
    +
    +        Returns:
    +            ``True`` if the cancellation was successfully delivered,
    +            ``False`` if the task had already finished.
    +        """
    +        if self._task.done():
    +            return False
    +        self._task.cancel()
    +        with contextlib.suppress(asyncio.CancelledError):
    +            await self._task
    +        return True
    
  • src/sdk/src/langflow_sdk/_client_common.py+211 0 added
    @@ -0,0 +1,211 @@
    +"""Shared helpers used by both sync and async Langflow SDK clients."""
    +
    +from __future__ import annotations
    +
    +import io
    +import json
    +import zipfile
    +from pathlib import Path
    +from typing import Any, TypeVar
    +
    +from langflow_sdk._http import _HTTP_201_CREATED, _logger
    +from langflow_sdk.models import FlowCreate, RunRequest, StreamChunk
    +from langflow_sdk.serialization import flow_to_json, normalize_flow
    +
    +_ModelT = TypeVar("_ModelT")
    +
    +
    +class _ClientCommon:
    +    """Shared client helpers that are independent of sync vs async transport."""
    +
    +    _MAX_ZIP_ENTRIES = 500
    +    _MAX_ENTRY_BYTES = 50 * 1024 * 1024  # 50 MB per file
    +
    +    @staticmethod
    +    def _model_payload(model: Any) -> dict[str, Any]:
    +        """Return a JSON-safe payload for request models."""
    +        return model.model_dump(mode="json", exclude_none=True)
    +
    +    @staticmethod
    +    def _validate_model(model_type: type[_ModelT], payload: Any) -> _ModelT:
    +        """Validate one SDK model from decoded JSON data."""
    +        return model_type.model_validate(payload)
    +
    +    @classmethod
    +    def _validate_model_list(cls, model_type: type[_ModelT], payload: list[Any]) -> list[_ModelT]:
    +        """Validate a homogeneous SDK model list from decoded JSON data."""
    +        return [cls._validate_model(model_type, item) for item in payload]
    +
    +    @staticmethod
    +    def _build_flow_list_params(
    +        *,
    +        folder_id: Any = None,
    +        remove_example_flows: bool,
    +        components_only: bool,
    +        get_all: bool,
    +        header_flows: bool,
    +        page: int,
    +        size: int,
    +    ) -> dict[str, Any]:
    +        """Build query parameters for the flows listing endpoints."""
    +        params: dict[str, Any] = {
    +            "remove_example_flows": remove_example_flows,
    +            "components_only": components_only,
    +            "get_all": get_all,
    +            "header_flows": header_flows,
    +            "page": page,
    +            "size": size,
    +        }
    +        if folder_id is not None:
    +            params["folder_id"] = str(folder_id)
    +        return params
    +
    @classmethod
    def _build_run_request(
        cls,
        *,
        input_value: str,
        input_type: str,
        output_type: str,
        tweaks: dict[str, Any] | None,
        stream: bool = False,
    ) -> RunRequest:
        """Construct the ``RunRequest`` body shared by sync and async clients.

        ``stream`` is only included in the request when True, matching the
        non-streaming default shape.
        """
        extra: dict[str, Any] = {"stream": True} if stream else {}
        return RunRequest(
            input_value=input_value,
            input_type=input_type,
            output_type=output_type,
            tweaks=tweaks,
            **extra,
        )
    +
    @classmethod
    def _build_stream_payload(
        cls,
        *,
        input_value: str,
        input_type: str,
        output_type: str,
        tweaks: dict[str, Any] | None,
    ) -> dict[str, Any]:
        """Serialize a streaming run request (``stream=True``) into a POST body."""
        request = cls._build_run_request(
            input_value=input_value,
            input_type=input_type,
            output_type=output_type,
            tweaks=tweaks,
            stream=True,
        )
        return cls._model_payload(request)
    +
    +    @staticmethod
    +    def _extract_error_detail(body: bytes) -> str:
    +        """Extract a user-facing error detail from an HTTP error response body."""
    +        try:
    +            parsed = json.loads(body)
    +            if isinstance(parsed, dict):
    +                return parsed.get("detail", body.decode(errors="replace"))
    +        except Exception:  # noqa: BLE001
    +            _logger.debug("Failed to parse error response body as JSON", exc_info=True)
    +        return body.decode(errors="replace")
    +
    @staticmethod
    def _parse_stream_chunk(raw: str) -> StreamChunk | None:
        """Parse one NDJSON/SSE chunk, skipping malformed lines.

        Returns ``None`` (logging at debug level) for any line that is not a
        JSON *object* carrying an ``event`` key, so a single bad chunk cannot
        abort an in-flight stream.

        Fix: a line that is valid JSON but not an object (e.g. ``"123"`` or
        ``"[]"``) previously raised an uncaught ``TypeError`` on
        ``obj["event"]``; ``TypeError`` is now handled like the other
        malformed-chunk cases.
        """
        try:
            obj = json.loads(raw)
            return StreamChunk(event=obj["event"], data=obj.get("data", {}))
        except (json.JSONDecodeError, KeyError, TypeError):
            _logger.debug("Skipping malformed SSE chunk", exc_info=True)
            return None
    +
    @classmethod
    def _extract_project_archive(cls, content: bytes) -> dict[str, bytes]:
        """Read and validate the project ZIP payload returned by the API."""
        extracted: dict[str, bytes] = {}
        with zipfile.ZipFile(io.BytesIO(content)) as archive:
            members = archive.infolist()
            if len(members) > cls._MAX_ZIP_ENTRIES:
                msg = f"ZIP contains {len(members)} entries, exceeding the limit of {cls._MAX_ZIP_ENTRIES}"
                raise ValueError(msg)
            for member in members:
                # Zip-bomb guard 1: reject entries whose *declared* size is
                # already over the limit, before reading anything.
                if member.file_size > cls._MAX_ENTRY_BYTES:
                    _logger.warning(
                        "Skipping ZIP entry %r: declared size %d exceeds limit",
                        member.filename,
                        member.file_size,
                    )
                    continue
                payload = archive.read(member.filename)
                # Zip-bomb guard 2: the declared size can lie, so re-check
                # the size actually decompressed.
                if len(payload) > cls._MAX_ENTRY_BYTES:
                    _logger.warning("Skipping ZIP entry %r: actual size %d exceeds limit", member.filename, len(payload))
                    continue
                extracted[member.filename] = payload
        return extracted
    +
    @staticmethod
    def _load_flow_file(path: str | Path) -> tuple[Any, FlowCreate]:
        """Load a flow file from disk and prepare it for upsert."""
        source = Path(path)
        data: dict[str, Any] = json.loads(source.read_text(encoding="utf-8"))
        flow_id = data.get("id")
        if not flow_id:
            msg = f"Flow file {str(source)!r} does not contain an 'id' field; cannot upsert"
            raise ValueError(msg)
        # The id travels in the URL for upserts, so strip it from the body.
        body = {key: value for key, value in data.items() if key != "id"}
        return flow_id, FlowCreate.model_validate(body)
    +
    @staticmethod
    def _normalize_flow_payload(payload: dict[str, Any]) -> dict[str, Any]:
        """Normalize flow data for stable local serialization.

        Thin wrapper over the shared ``normalize_flow`` helper; the single
        indirection point lets callers in this class share one hook.
        """
        return normalize_flow(payload)
    +
    @classmethod
    def _normalize_and_write_flow(
        cls,
        payload: dict[str, Any],
        *,
        output: str | Path | None = None,
    ) -> dict[str, Any]:
        """Normalize flow data and optionally persist it to disk."""
        normalized = cls._normalize_flow_payload(payload)
        if output is None:
            return normalized
        target = Path(output)
        # Create intermediate directories so callers can point at fresh paths.
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(flow_to_json(normalized), encoding="utf-8")
        return normalized
    +
    @classmethod
    def _write_project_flows(
        cls,
        raw_flows: dict[str, bytes],
        *,
        output_dir: str | Path,
    ) -> dict[str, Path]:
        """Normalize and write a downloaded project archive to disk."""
        target_dir = Path(output_dir)
        target_dir.mkdir(parents=True, exist_ok=True)
        results: dict[str, Path] = {}
        for filename, blob in raw_flows.items():
            payload: dict[str, Any] = json.loads(blob.decode("utf-8"))
            normalized = cls._normalize_flow_payload(payload)
            # Prefer the flow's own name; fall back to the archive file stem.
            flow_name = str(normalized.get("name") or Path(filename).stem)
            destination = target_dir / f"{flow_name}.json"
            destination.write_text(flow_to_json(normalized), encoding="utf-8")
            results[flow_name] = destination
        return results
    +
    +    @staticmethod
    +    def _project_json_paths(directory: str | Path) -> list[Path]:
    +        """Return sorted ``*.json`` paths within a project directory."""
    +        return sorted(Path(directory).glob("*.json"))
    +
    @classmethod
    def _upsert_result(cls, model_type: type[_ModelT], response: Any) -> tuple[_ModelT, bool]:
        """Return ``(model, created)`` for an upsert response."""
        model = cls._validate_model(model_type, response.json())
        # 201 means the PUT inserted a new row; anything else was an update.
        created = response.status_code == _HTTP_201_CREATED
        return model, created
    
  • src/sdk/src/langflow_sdk/client.py+411 0 added
    @@ -0,0 +1,411 @@
    +"""Sync HTTP client for the Langflow REST API.
    +
    +Preferred usage via the short alias::
    +
    +    from langflow_sdk import Client
    +
    +    client = Client("https://langflow.example.com", api_key="...")
    +    flows  = client.list_flows()
    +    result = client.run_flow("my-endpoint", RunRequest(input_value="Hello"))
    +
    +The async counterpart lives in :mod:`langflow_sdk._async_client`.
    +"""
    +
    +from __future__ import annotations
    +
    +from typing import TYPE_CHECKING, Any
    +
    +import httpx
    +
    +# Re-export async client so that existing ``from langflow_sdk.client import ...``
    +# statements continue to work without changes.
    +from langflow_sdk._async_client import AsyncClient, AsyncLangflowClient
    +from langflow_sdk._client_common import _ClientCommon
    +from langflow_sdk._http import (
    +    _DEFAULT_TIMEOUT,
    +    _build_headers,
    +    _connection_error,
    +    _logger,
    +    _raise_for_status,
    +    _raise_for_status_code,
    +)
    +from langflow_sdk.models import (
    +    Flow,
    +    FlowCreate,
    +    FlowUpdate,
    +    Project,
    +    ProjectCreate,
    +    ProjectUpdate,
    +    ProjectWithFlows,
    +    RunRequest,
    +    RunResponse,
    +    StreamChunk,
    +)
    +
    +if TYPE_CHECKING:
    +    from collections.abc import Iterator
    +    from pathlib import Path
    +    from uuid import UUID
    +
    +    from typing_extensions import Self
    +
    +
    +# ---------------------------------------------------------------------------
    +# Synchronous client
    +# ---------------------------------------------------------------------------
    +
    +
class LangflowClient(_ClientCommon):
    """Synchronous client for the Langflow REST API.

    Prefer the short alias :data:`Client` for new code::

        from langflow_sdk import Client

        client = Client("https://langflow.example.com", api_key="...")
        flows  = client.list_flows()
        result = client.run_flow("my-endpoint", RunRequest(input_value="Hello"))
    """

    def __init__(
        self,
        base_url: str,
        api_key: str | None = None,
        timeout: float = _DEFAULT_TIMEOUT,
        httpx_client: httpx.Client | None = None,
    ) -> None:
        # Strip trailing slashes so path joins below never produce "//".
        self._base_url = base_url.rstrip("/")
        self._api_key = api_key
        # Only close the transport in close() when we created it ourselves;
        # a caller-supplied httpx client stays under the caller's control.
        self._owns_client = httpx_client is None
        self._http = httpx_client or httpx.Client(
            base_url=self._base_url,
            headers=_build_headers(api_key),
            timeout=timeout,
        )

    def close(self) -> None:
        # No-op when the httpx client was injected by the caller.
        if self._owns_client:
            self._http.close()

    def __enter__(self) -> Self:
        return self

    def __exit__(self, *_: object) -> None:
        self.close()

    # ------------------------------------------------------------------
    # Internals
    # ------------------------------------------------------------------

    def _request(
        self,
        method: str,
        path: str,
        *,
        json: Any = None,
        params: dict[str, Any] | None = None,
        content: bytes | None = None,
        headers: dict[str, str] | None = None,
    ) -> httpx.Response:
        """Issue one HTTP request, translating errors into SDK exceptions.

        Connection failures become ``LangflowConnectionError``; non-2xx
        responses are raised as typed exceptions by ``_raise_for_status``.
        """
        try:
            response = self._http.request(
                method,
                path,
                json=json,
                params=params,
                content=content,
                headers=headers,
            )
        except httpx.ConnectError as exc:
            _logger.debug("Connection error to %s", self._base_url, exc_info=True)
            raise _connection_error(self._base_url, exc) from exc
        _logger.debug("HTTP %s %s -> %s", method, path, response.status_code)
        _raise_for_status(response)
        return response

    # ------------------------------------------------------------------
    # Flows
    # ------------------------------------------------------------------

    def list_flows(
        self,
        *,
        folder_id: UUID | str | None = None,
        remove_example_flows: bool = False,
        components_only: bool = False,
        get_all: bool = False,
        header_flows: bool = False,
        page: int = 1,
        size: int = 50,
    ) -> list[Flow]:
        """List flows visible to the authenticated user (paginated)."""
        resp = self._request(
            "GET",
            "/api/v1/flows/",
            params=self._build_flow_list_params(
                folder_id=folder_id,
                remove_example_flows=remove_example_flows,
                components_only=components_only,
                get_all=get_all,
                header_flows=header_flows,
                page=page,
                size=size,
            ),
        )
        return self._validate_model_list(Flow, resp.json())

    def get_flow(self, flow_id: UUID | str) -> Flow:
        """Fetch a single flow by ID."""
        resp = self._request("GET", f"/api/v1/flows/{flow_id}")
        return self._validate_model(Flow, resp.json())

    def create_flow(self, flow: FlowCreate) -> Flow:
        """Create a new flow and return the server-side representation."""
        resp = self._request("POST", "/api/v1/flows/", json=self._model_payload(flow))
        return self._validate_model(Flow, resp.json())

    def update_flow(self, flow_id: UUID | str, update: FlowUpdate) -> Flow:
        """Partially update an existing flow (PATCH semantics)."""
        resp = self._request(
            "PATCH",
            f"/api/v1/flows/{flow_id}",
            json=self._model_payload(update),
        )
        return self._validate_model(Flow, resp.json())

    def upsert_flow(self, flow_id: UUID | str, flow: FlowCreate) -> tuple[Flow, bool]:
        """Create-or-update a flow by its stable ID.

        Returns ``(flow, created)`` where ``created`` is ``True`` when a new
        flow was inserted and ``False`` when an existing one was updated.
        """
        resp = self._request(
            "PUT",
            f"/api/v1/flows/{flow_id}",
            json=self._model_payload(flow),
        )
        return self._upsert_result(Flow, resp)

    def delete_flow(self, flow_id: UUID | str) -> None:
        """Delete a flow by ID."""
        self._request("DELETE", f"/api/v1/flows/{flow_id}")

    def run_flow(
        self,
        flow_id_or_endpoint: UUID | str,
        request: RunRequest,
    ) -> RunResponse:
        """Execute a flow synchronously and return the full run response."""
        resp = self._request(
            "POST",
            f"/api/v1/run/{flow_id_or_endpoint}",
            json=self._model_payload(request),
        )
        return self._validate_model(RunResponse, resp.json())

    def run(
        self,
        flow_id_or_endpoint: UUID | str,
        input_value: str = "",
        *,
        input_type: str = "chat",
        output_type: str = "chat",
        tweaks: dict[str, Any] | None = None,
    ) -> RunResponse:
        """Run a flow and return the full response.

        Convenience wrapper around :meth:`run_flow` that accepts plain keyword
        arguments instead of a :class:`RunRequest`::

            result = client.run("my-flow", input_value="Hello")
            print(result.first_text_output())
        """
        return self.run_flow(
            flow_id_or_endpoint,
            self._build_run_request(
                input_value=input_value,
                input_type=input_type,
                output_type=output_type,
                tweaks=tweaks,
            ),
        )

    def stream(
        self,
        flow_id_or_endpoint: UUID | str,
        input_value: str = "",
        *,
        input_type: str = "chat",
        output_type: str = "chat",
        tweaks: dict[str, Any] | None = None,
    ) -> Iterator[StreamChunk]:
        """Stream a flow run, yielding :class:`StreamChunk` objects as they arrive.

        Uses server-sent events (SSE) to receive incremental output::

            for chunk in client.stream("my-flow", input_value="Hello"):
                if chunk.is_token:
                    print(chunk.text, end="", flush=True)
                elif chunk.is_end:
                    response = chunk.final_response()
        """
        # _iter_stream is a generator, so no request is sent until the
        # returned iterator is first advanced.
        return self._iter_stream(
            f"/api/v1/run/{flow_id_or_endpoint}",
            self._build_stream_payload(
                input_value=input_value,
                input_type=input_type,
                output_type=output_type,
                tweaks=tweaks,
            ),
        )

    def _iter_stream(self, path: str, payload: dict[str, Any]) -> Iterator[StreamChunk]:
        """Open a streaming POST request and yield parsed event chunks."""
        try:
            with self._http.stream("POST", path, json=payload) as response:
                if not response.is_success:
                    # The body must be read explicitly before the streamed
                    # response can be turned into an error message.
                    body = response.read()
                    _raise_for_status_code(response.status_code, self._extract_error_detail(body))
                for line in response.iter_lines():
                    raw = line.strip()
                    if not raw:
                        continue
                    chunk = self._parse_stream_chunk(raw)
                    if chunk is not None:
                        yield chunk
        except httpx.ConnectError as exc:
            raise _connection_error(self._base_url, exc) from exc

    # ------------------------------------------------------------------
    # Projects (Folders)
    # ------------------------------------------------------------------

    def list_projects(self) -> list[Project]:
        """List all projects (folders) visible to the user."""
        resp = self._request("GET", "/api/v1/projects/")
        return self._validate_model_list(Project, resp.json())

    def get_project(self, project_id: UUID | str) -> ProjectWithFlows:
        """Fetch a project including the flows it contains."""
        resp = self._request("GET", f"/api/v1/projects/{project_id}")
        return self._validate_model(ProjectWithFlows, resp.json())

    def create_project(self, project: ProjectCreate) -> Project:
        """Create a new project."""
        resp = self._request("POST", "/api/v1/projects/", json=self._model_payload(project))
        return self._validate_model(Project, resp.json())

    def update_project(self, project_id: UUID | str, update: ProjectUpdate) -> Project:
        """Partially update an existing project."""
        resp = self._request(
            "PATCH",
            f"/api/v1/projects/{project_id}",
            json=self._model_payload(update),
        )
        return self._validate_model(Project, resp.json())

    def delete_project(self, project_id: UUID | str) -> None:
        """Delete a project by ID."""
        self._request("DELETE", f"/api/v1/projects/{project_id}")

    def download_project(self, project_id: UUID | str) -> dict[str, bytes]:
        """Download all flows in a project.

        Returns a mapping of ``{flow_name: raw_json_bytes}`` extracted from
        the ZIP archive returned by the server.

        Raises :class:`ValueError` if the archive contains more than 500
        entries or any single entry exceeds 50 MB (zip-bomb protection).
        """
        resp = self._request("GET", f"/api/v1/projects/download/{project_id}")
        return self._extract_project_archive(resp.content)

    def upload_project(self, zip_bytes: bytes) -> list[Flow]:
        """Upload a project ZIP archive and return the created flows."""
        resp = self._request(
            "POST",
            "/api/v1/projects/upload/",
            content=zip_bytes,
            # Raw bytes body, so override the default JSON content type.
            headers={"Content-Type": "application/octet-stream"},
        )
        return self._validate_model_list(Flow, resp.json())

    # ------------------------------------------------------------------
    # File I/O helpers
    # ------------------------------------------------------------------

    def push(self, path: str | Path) -> tuple[Flow, bool]:
        """Upload or update a flow from a local JSON file.

        The ``id`` field embedded in the file is used for upsert
        (create-or-update via ``PUT /api/v1/flows/{id}``).
        Returns ``(flow, created)`` where ``created`` is ``True`` when the
        flow was newly created and ``False`` when it was updated::

            flow, created = client.push("flows/my-flow.json")
        """
        flow_id, flow_create = self._load_flow_file(path)
        return self.upsert_flow(flow_id, flow_create)

    def pull(
        self,
        flow_id: UUID | str,
        *,
        output: str | Path | None = None,
    ) -> dict[str, Any]:
        """Download a flow and return it as a normalized dict.

        Strips volatile fields (``updated_at``, ``user_id``, ...), clears
        secrets, and sorts keys for stable diffs.  When *output* is given the
        normalized JSON is also written to that file path::

            data = client.pull("my-flow-id")
            client.pull("my-flow-id", output="flows/my-flow.json")
        """
        flow = self.get_flow(flow_id)
        return self._normalize_and_write_flow(flow.model_dump(mode="json"), output=output)

    def push_project(self, directory: str | Path) -> list[tuple[Flow, bool]]:
        """Push all ``*.json`` flow files in *directory* to the server.

        Each file is upserted using the ``id`` field it contains.
        Returns a list of ``(flow, created)`` pairs in the order files were
        processed::

            results = client.push_project("flows/my-project/")
            for flow, created in results:
                print("created" if created else "updated", flow.name)
        """
        return [self.push(path) for path in self._project_json_paths(directory)]

    def pull_project(
        self,
        project_id: UUID | str,
        *,
        output_dir: str | Path,
    ) -> dict[str, Path]:
        """Download all flows in a project and write them to *output_dir*.

        Each flow is normalized (volatile fields stripped, keys sorted) before
        being written as ``<flow-name>.json``.  *output_dir* is created if it
        does not exist.  Returns a mapping of ``{flow_name: file_path}``.

        .. note::
            If two flows in the project share the same name the second one
            overwrites the first on disk and in the returned mapping.  Flow
            names within a project should be unique; this situation indicates
            a data problem on the server.

        ::

            written = client.pull_project("project-id", output_dir="flows/")
            for name, path in written.items():
                print(name, "->", path)
        """
        return self._write_project_flows(self.download_project(project_id), output_dir=output_dir)
    +
    +
# ---------------------------------------------------------------------------
# Short alias  (preferred for new code)
# ---------------------------------------------------------------------------

#: Short alias for :class:`LangflowClient`.
#:
#: Example::
#:
#:     from langflow_sdk import Client
#:     client = Client("https://langflow.example.com", api_key="...")
#:     flows  = client.list_flows()
#:     result = client.run_flow("my-endpoint", RunRequest(input_value="Hello"))
Client = LangflowClient

# The async names are re-exported so legacy
# ``from langflow_sdk.client import AsyncClient`` imports keep working.
__all__ = ["AsyncClient", "AsyncLangflowClient", "Client", "LangflowClient"]
    
  • src/sdk/src/langflow_sdk/environments.py+255 0 added
    @@ -0,0 +1,255 @@
    +"""Environment configuration for langflow-sdk.
    +
    +Loads named environment definitions from a TOML file so teams can switch
    +between Langflow instances (dev / staging / production) without code changes.
    +
    +Config file lookup order
    +------------------------
    +1. Path given explicitly to ``load_environments()`` or ``get_client()``.
    +2. The ``LANGFLOW_ENVIRONMENTS_FILE`` environment variable.
    +3. ``langflow-environments.toml`` in the current working directory.
    +4. ``~/.config/langflow/environments.toml``
    +
    +File format
    +-----------
    +.. code-block:: toml
    +
    +    [environments.staging]
    +    url = "https://staging.langflow.example.com"
    +    api_key_env = "LANGFLOW_STAGING_API_KEY"   # env-var that holds the key  # pragma: allowlist secret
    +
    +    [environments.production]
    +    url = "https://langflow.example.com"
    +    api_key_env = "LANGFLOW_PROD_API_KEY"  # pragma: allowlist secret
    +
    +    # Optional: set a default so callers don't have to name an environment
    +    [defaults]
    +    environment = "staging"
    +"""
    +
    +from __future__ import annotations
    +
    +import os
    +from pathlib import Path
    +from typing import Any
    +
    +from langflow_sdk.exceptions import EnvironmentConfigError, EnvironmentNotFoundError
    +
    +try:
    +    import tomllib  # Python 3.11+
    +except ImportError:  # pragma: no cover
    +    import tomli as tomllib  # type: ignore[no-redef,assignment]
    +
# Environment variable that may point at an explicit environments file.
_ENV_VAR = "LANGFLOW_ENVIRONMENTS_FILE"
# File name probed in the current working directory.
_LOCAL_NAME = "langflow-environments.toml"
# Per-user fallback location, checked last.
_USER_PATH = Path.home() / ".config" / "langflow" / "environments.toml"

# Shown verbatim in the "no config file found" error to help users bootstrap.
_EXAMPLE_CONFIG = """\
# langflow-environments.toml
#
# Define named Langflow environments.  The api_key_env field is the *name*
# of an environment variable that holds the API key for that instance.
#
# [environments.staging]
# url = "https://staging.langflow.example.com"
# api_key_env = "LANGFLOW_STAGING_API_KEY"  # pragma: allowlist secret
#
# [environments.production]
# url = "https://langflow.example.com"
# api_key_env = "LANGFLOW_PROD_API_KEY"  # pragma: allowlist secret
#
# [defaults]
# environment = "staging"
"""
    +
    +
    +class EnvironmentConfig:
    +    """A single named environment definition."""
    +
    +    def __init__(self, name: str, url: str, api_key: str | None) -> None:
    +        self.name = name
    +        self.url = url
    +        self.api_key = api_key
    +
    +    def __repr__(self) -> str:
    +        masked = f"{self.api_key[:4]}..." if self.api_key else None
    +        return f"EnvironmentConfig(name={self.name!r}, url={self.url!r}, api_key={masked!r})"
    +
    +
    +def _candidate_paths(explicit: Path | str | None) -> list[Path]:
    +    candidates: list[Path] = []
    +    if explicit:
    +        candidates.append(Path(explicit))
    +    env_path = os.environ.get(_ENV_VAR)
    +    if env_path:
    +        candidates.append(Path(env_path))
    +    candidates.append(Path.cwd() / _LOCAL_NAME)
    +    candidates.append(_USER_PATH)
    +    return candidates
    +
    +
    +def _load_toml(path: Path) -> dict[str, Any]:
    +    try:
    +        with path.open("rb") as fh:
    +            return tomllib.load(fh)
    +    except OSError as exc:
    +        raise EnvironmentConfigError(f"Cannot read environments file {path}: {exc}") from exc
    +    except Exception as exc:
    +        raise EnvironmentConfigError(f"Invalid TOML in {path}: {exc}") from exc
    +
    +
    +def _parse_env(raw: dict[str, Any], file_path: Path, name: str) -> EnvironmentConfig:
    +    if "url" not in raw:
    +        raise EnvironmentConfigError(f"Environment {name!r} in {file_path} is missing the required 'url' field.")
    +    url: str = raw["url"]
    +    api_key: str | None = None
    +    if "api_key_env" in raw:
    +        api_key_env_name: str = raw["api_key_env"]
    +        api_key = os.environ.get(api_key_env_name)
    +    elif "api_key" in raw:
    +        import warnings
    +
    +        warnings.warn(
    +            f"Environment {name!r}: literal api_key in config file is not recommended. "
    +            "Use api_key_env to reference an environment variable instead.",
    +            UserWarning,
    +            stacklevel=2,
    +        )
    +        api_key = raw["api_key"]
    +    return EnvironmentConfig(name=name, url=url, api_key=api_key)
    +
    +
    +def load_environments(
    +    config_file: Path | str | None = None,
    +) -> dict[str, EnvironmentConfig]:
    +    """Load all environments from the config file.
    +
    +    Parameters
    +    ----------
    +    config_file:
    +        Explicit path to a ``langflow-environments.toml`` file. If omitted,
    +        the lookup order described in the module docstring is used.
    +
    +    Returns:
    +    -------
    +    dict[str, EnvironmentConfig]
    +        Mapping of environment name → ``EnvironmentConfig``.
    +
    +    Raises:
    +    ------
    +    EnvironmentConfigError
    +        If no config file is found or the file is malformed.
    +    """
    +    file_path: Path | None = None
    +    for candidate in _candidate_paths(config_file):
    +        if candidate.exists():
    +            file_path = candidate
    +            break
    +
    +    if file_path is None:
    +        raise EnvironmentConfigError(
    +            "No langflow-environments.toml found. "
    +            f"Set {_ENV_VAR} or create one in the current directory.\n\n" + _EXAMPLE_CONFIG
    +        )
    +
    +    raw = _load_toml(file_path)
    +    raw_envs = raw.get("environments", {})
    +    if not isinstance(raw_envs, dict):
    +        raise EnvironmentConfigError(f"Expected [environments] to be a TOML table in {file_path}")
    +
    +    result: dict[str, EnvironmentConfig] = {}
    +    for name, env_data in raw_envs.items():
    +        if not isinstance(env_data, dict):
    +            raise EnvironmentConfigError(f"Environment {name!r} in {file_path} must be a TOML table.")
    +        result[name] = _parse_env(env_data, file_path, name)
    +    return result
    +
    +
    +def get_environment(
    +    name: str | None = None,
    +    *,
    +    config_file: Path | str | None = None,
    +) -> EnvironmentConfig:
    +    """Look up a named environment from the config file.
    +
    +    If *name* is ``None``, the ``[defaults] environment`` key is used.
    +
    +    Raises:
    +    ------
    +    EnvironmentNotFoundError
    +        If *name* is not defined in the config.
    +    EnvironmentConfigError
    +        If no default is set and *name* is ``None``.
    +    """
    +    # Find the config file once and reuse for both default lookup and environment loading.
    +    file_path: Path | None = None
    +    for candidate in _candidate_paths(config_file):
    +        if candidate.exists():
    +            file_path = candidate
    +            break
    +
    +    if name is None:
    +        if file_path:
    +            raw = _load_toml(file_path)
    +            name = raw.get("defaults", {}).get("environment")
    +        if name is None:
    +            msg = "No environment name given and no [defaults] environment set in the config file."
    +            raise EnvironmentConfigError(msg)
    +
    +    environments = load_environments(file_path or config_file)
    +
    +    if name not in environments:
    +        raise EnvironmentNotFoundError(name)
    +    return environments[name]
    +
    +
    +def get_client(
    +    environment: str | None = None,
    +    *,
    +    config_file: Path | str | None = None,
    +    timeout: float = 60.0,
    +) -> Client:  # noqa: F821  (resolved at runtime)
    +    """Convenience factory: load config and return a ready :class:`Client`.
    +
    +    Parameters
    +    ----------
    +    environment:
    +        Name of the environment to use (e.g. ``"staging"``). If ``None``,
    +        the ``[defaults] environment`` key in the config file is used.
    +    config_file:
    +        Optional explicit path to the environments TOML file.
    +    timeout:
    +        HTTP request timeout in seconds.
    +
    +    Example::
    +
    +        from langflow_sdk import get_client
    +
    +        client = get_client("staging")
    +        flows  = client.list_flows()
    +    """
    +    from langflow_sdk.client import Client
    +
    +    env = get_environment(environment, config_file=config_file)
    +    return Client(base_url=env.url, api_key=env.api_key, timeout=timeout)
    +
    +
    +def get_async_client(
    +    environment: str | None = None,
    +    *,
    +    config_file: Path | str | None = None,
    +    timeout: float = 60.0,
    +) -> AsyncClient:  # noqa: F821  (resolved at runtime)
    +    """Convenience factory: return a ready :class:`AsyncClient`.
    +
    +    Example::
    +
    +        from langflow_sdk import get_async_client
    +
    +        async with get_async_client("staging") as client:
    +            flows = await client.list_flows()
    +    """
    +    from langflow_sdk.client import AsyncClient
    +
    +    env = get_environment(environment, config_file=config_file)
    +    return AsyncClient(base_url=env.url, api_key=env.api_key, timeout=timeout)
    
  • src/sdk/src/langflow_sdk/exceptions.py+54 0 added
    @@ -0,0 +1,54 @@
    +"""Exceptions raised by the Langflow SDK."""
    +
    +from __future__ import annotations
    +
    +
class LangflowError(Exception):
    """Base class for all Langflow SDK errors.

    Catch this type to handle any SDK-raised failure in one place.
    """
    +
    +
    +class LangflowHTTPError(LangflowError):
    +    """An HTTP error was returned by the Langflow API."""
    +
    +    def __init__(self, status_code: int, detail: str) -> None:
    +        self.status_code = status_code
    +        self.detail = detail
    +        super().__init__(f"HTTP {status_code}: {detail}")
    +
    +
class LangflowNotFoundError(LangflowHTTPError):
    """The requested resource was not found (404).

    Inherits ``status_code`` and ``detail`` from :class:`LangflowHTTPError`.
    """
    +
    +
class LangflowAuthError(LangflowHTTPError):
    """Authentication failed (401/403).

    Inherits ``status_code`` and ``detail`` from :class:`LangflowHTTPError`.
    """
    +
    +
class LangflowValidationError(LangflowHTTPError):
    """The request payload was rejected by the server (422).

    Inherits ``status_code`` and ``detail`` from :class:`LangflowHTTPError`.
    """
    +
    +
class LangflowConnectionError(LangflowError):
    """Could not connect to the Langflow instance.

    Raised in place of the underlying transport's connect error.
    """
    +
    +
class LangflowTimeoutError(LangflowError):
    """A background job or polling operation exceeded its timeout.

    Adapted from ``LangflowV2TimeoutError`` in langflow-ai/sdk PR #1
    (Janardan Singh Kavia, IBM Corp., Apache 2.0).
    """
    +
    +
    +class EnvironmentNotFoundError(LangflowError):
    +    """The named environment is not defined in the environments config."""
    +
    +    def __init__(self, name: str) -> None:
    +        self.name = name
    +        super().__init__(
    +            f"Environment {name!r} not found. Check your langflow-environments.toml (or LANGFLOW_ENV variable)."
    +        )
    +
    +
class EnvironmentConfigError(LangflowError):
    """The environments config file is malformed or missing required fields.

    Also raised when no config file can be found at all.
    """
    
  • src/sdk/src/langflow_sdk/_http.py+55 0 added
    @@ -0,0 +1,55 @@
    +"""Shared HTTP helpers and constants used by both sync and async clients."""
    +
    +from __future__ import annotations
    +
    +import logging
    +from http import HTTPStatus
    +
    +import httpx
    +
    +from langflow_sdk.exceptions import (
    +    LangflowAuthError,
    +    LangflowConnectionError,
    +    LangflowHTTPError,
    +    LangflowNotFoundError,
    +    LangflowValidationError,
    +)
    +
    +_logger = logging.getLogger("langflow_sdk.client")
    +
    +_DEFAULT_TIMEOUT = 60.0
    +_HTTP_201_CREATED = HTTPStatus.CREATED.value
    +
    +
    +def _raise_for_status_code(status: int, detail: str) -> None:
    +    """Raise a typed SDK exception for the given HTTP status code and detail."""
    +    if status in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):
    +        raise LangflowAuthError(status, detail)
    +    if status == HTTPStatus.NOT_FOUND:
    +        raise LangflowNotFoundError(status, detail)
    +    if status == HTTPStatus.UNPROCESSABLE_ENTITY:
    +        raise LangflowValidationError(status, detail)
    +    raise LangflowHTTPError(status, detail)
    +
    +
    +def _raise_for_status(response: httpx.Response) -> None:
    +    """Convert httpx HTTP errors into typed SDK exceptions."""
    +    if response.is_success:
    +        return
    +    try:
    +        detail = response.json().get("detail", response.text)
    +    except Exception:  # noqa: BLE001
    +        detail = response.text
    +    _raise_for_status_code(response.status_code, detail)
    +
    +
    +def _build_headers(api_key: str | None) -> dict[str, str]:
    +    headers: dict[str, str] = {"Content-Type": "application/json"}
    +    if api_key:
    +        headers["x-api-key"] = api_key
    +    return headers
    +
    +
    +def _connection_error(base_url: str, exc: Exception) -> LangflowConnectionError:
    +    msg = f"Could not connect to Langflow at {base_url}: {exc}"
    +    return LangflowConnectionError(msg)
    
  • src/sdk/src/langflow_sdk/__init__.py+73 0 added
    @@ -0,0 +1,73 @@
    +"""langflow-sdk -- Python SDK for the Langflow REST API."""
    +
    +from langflow_sdk._async_client import AsyncClient, AsyncLangflowClient
    +from langflow_sdk.background_job import BackgroundJob
    +from langflow_sdk.client import Client, LangflowClient
    +from langflow_sdk.environments import (
    +    EnvironmentConfig,
    +    get_async_client,
    +    get_client,
    +    get_environment,
    +    load_environments,
    +)
    +from langflow_sdk.exceptions import (
    +    EnvironmentConfigError,
    +    EnvironmentNotFoundError,
    +    LangflowAuthError,
    +    LangflowConnectionError,
    +    LangflowError,
    +    LangflowHTTPError,
    +    LangflowNotFoundError,
    +    LangflowTimeoutError,
    +    LangflowValidationError,
    +)
    +from langflow_sdk.models import (
    +    Flow,
    +    FlowCreate,
    +    FlowUpdate,
    +    Project,
    +    ProjectCreate,
    +    ProjectUpdate,
    +    ProjectWithFlows,
    +    RunOutput,
    +    RunRequest,
    +    RunResponse,
    +    StreamChunk,
    +)
    +from langflow_sdk.serialization import flow_to_json, normalize_flow, normalize_flow_file
    +
# Public API surface for ``from langflow_sdk import *`` and doc tooling.
# Entries are kept in ASCII-sorted order; "Client"/"AsyncClient" are the
# preferred short aliases for "LangflowClient"/"AsyncLangflowClient".
__all__ = [
    "AsyncClient",  # short alias for AsyncLangflowClient (preferred)
    "AsyncLangflowClient",
    "BackgroundJob",
    "Client",  # short alias for LangflowClient (preferred)
    "EnvironmentConfig",
    "EnvironmentConfigError",
    "EnvironmentNotFoundError",
    "Flow",
    "FlowCreate",
    "FlowUpdate",
    "LangflowAuthError",
    "LangflowClient",
    "LangflowConnectionError",
    "LangflowError",
    "LangflowHTTPError",
    "LangflowNotFoundError",
    "LangflowTimeoutError",
    "LangflowValidationError",
    "Project",
    "ProjectCreate",
    "ProjectUpdate",
    "ProjectWithFlows",
    "RunOutput",
    "RunRequest",
    "RunResponse",
    "StreamChunk",
    "flow_to_json",
    "get_async_client",
    "get_client",
    "get_environment",
    "load_environments",
    "normalize_flow",
    "normalize_flow_file",
]
    
  • src/sdk/src/langflow_sdk/models.py+0 0 added
  • src/sdk/src/langflow_sdk/serialization.py+224 0 added
  • src/sdk/src/langflow_sdk/testing.py+307 0 added
  • src/sdk/src/langflow_sdk/_version.py+1 0 added
    @@ -0,0 +1 @@
    +__version__ = "0.1.0"
    
  • src/sdk/tests/__init__.py+0 0 added
  • src/sdk/tests/test_background_job.py+316 0 added
  • src/sdk/tests/test_client_aliases.py+66 0 added
  • src/sdk/tests/test_file_io.py+451 0 added
  • src/sdk/tests/test_models.py+269 0 added
  • src/sdk/tests/test_push.py+239 0 added
  • src/sdk/tests/test_serialization.py+264 0 added
  • src/sdk/tests/test_streaming.py+465 0 added
  • src/sdk/tests/test_testing.py+309 0 added
  • uv.lock+53 0 modified

Vulnerability mechanics

Generated automatically on May 9, 2026. Inputs: CWE entries and the fix-commit diffs from this CVE's patches; citations were validated against the source bundle.

References

7

News mentions

0

No linked articles in our index yet.