diff --git a/.env.example b/.env.example deleted file mode 100644 index 1328b838..00000000 --- a/.env.example +++ /dev/null @@ -1,6 +0,0 @@ -# LLM Providers (set the one you use) -OPENAI_API_KEY= -GOOGLE_API_KEY= -ANTHROPIC_API_KEY= -XAI_API_KEY= -OPENROUTER_API_KEY= diff --git a/.gitignore b/.gitignore index 9a2904a9..170c741d 100644 --- a/.gitignore +++ b/.gitignore @@ -217,3 +217,17 @@ __marimo__/ # Cache **/data_cache/ + +# Generated outputs / reports +reports/ +results/ +output/ +outputs/ +artifacts/ +runs/ +logs/ + +# JetBrains IDE +.idea/ + + diff --git a/README.md b/README.md index 34310010..1673664b 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,17 @@ An interface will appear showing results as they load, letting you track the age

def run_analysis_programmatic(
    selections: dict,
    log_callback: Optional[Callable[[str], None]] = None,
) -> tuple[Optional[dict], Optional[Path], Optional[str]]:
    """Run the same analysis pipeline as the CLI without interactive prompts.

    Used by the Streamlit UI (and any other programmatic caller). No business
    logic is duplicated: this reuses the same config, graph, and
    save_report_to_disk as the interactive CLI.

    Args:
        selections: Dict with keys:
            - ticker: symbol to analyze (default "SPY").
            - analysis_date: "YYYY-MM-DD" string (default: today).
            - analysts: list of analyst keys (e.g. ["market", "news"]) or
              AnalystType enums; unknown keys are dropped.
            - research_depth: debate/risk-discussion rounds (default 1).
            - llm_provider, backend_url, shallow_thinker, deep_thinker.
            - google_thinking_level / openai_reasoning_effort: optional; only
              applied when provided so DEFAULT_CONFIG values are preserved.
        log_callback: Optional callable(line: str) invoked for each log line
            (messages, tool calls, section updates) for live UI display.

    Returns:
        (final_state, report_file_path, error_message).
        On success: (final_state, Path to complete_report.md, None).
        On failure: (None, None, error_message string).
    """
    from cli.stats_handler import StatsCallbackHandler

    def log(line: str) -> None:
        if log_callback:
            log_callback(line)

    try:
        # Normalize analysts to plain string keys, preserving the canonical
        # CLI ordering (ANALYST_ORDER) and dropping anything unrecognized.
        raw_analysts = selections.get("analysts") or ["market", "news", "fundamentals"]
        selected_set = {a.value if hasattr(a, "value") else a for a in raw_analysts}
        selected_analyst_keys = [a for a in ANALYST_ORDER if a in selected_set]
        if not selected_analyst_keys:
            selected_analyst_keys = ["market", "news"]

        config = DEFAULT_CONFIG.copy()
        research_depth = selections.get("research_depth", 1)
        config["max_debate_rounds"] = research_depth
        config["max_risk_discuss_rounds"] = research_depth
        config["quick_think_llm"] = selections.get("shallow_thinker", config["quick_think_llm"])
        config["deep_think_llm"] = selections.get("deep_thinker", config["deep_think_llm"])
        config["backend_url"] = selections.get("backend_url", config["backend_url"])
        config["llm_provider"] = (selections.get("llm_provider") or "openai").lower()
        # Only override provider-specific knobs when explicitly supplied;
        # an unconditional assignment would clobber any DEFAULT_CONFIG value
        # with None whenever the caller omits the key.
        config["google_thinking_level"] = selections.get(
            "google_thinking_level", config.get("google_thinking_level")
        )
        config["openai_reasoning_effort"] = selections.get(
            "openai_reasoning_effort", config.get("openai_reasoning_effort")
        )

        stats_handler = StatsCallbackHandler()
        graph = TradingAgentsGraph(
            selected_analyst_keys,
            config=config,
            debug=True,
            callbacks=[stats_handler],
        )

        ticker = (selections.get("ticker") or "SPY").strip().upper()
        analysis_date = selections.get("analysis_date") or datetime.datetime.now().strftime("%Y-%m-%d")

        log(f"Starting analysis: {ticker} @ {analysis_date}")
        log(f"Analysts: {', '.join(selected_analyst_keys)}")

        init_agent_state = graph.propagator.create_initial_state(ticker, analysis_date)
        args = graph.propagator.get_graph_args(callbacks=[stats_handler])

        last_logged_message_id = None
        trace = []
        for chunk in graph.graph.stream(init_agent_state, **args):
            if chunk.get("messages"):
                last_message = chunk["messages"][-1]
                msg_id = getattr(last_message, "id", None)
                # Deduplicate: the same message can appear in several chunks.
                if msg_id != last_logged_message_id:
                    last_logged_message_id = msg_id
                    msg_type, content = classify_message_type(last_message)
                    if content and content.strip():
                        ts = datetime.datetime.now().strftime("%H:%M:%S")
                        preview = (content[:200] + "...") if len(content) > 200 else content
                        log(f"[{ts}] [{msg_type}] {preview}")
                    if getattr(last_message, "tool_calls", None):
                        for tc in last_message.tool_calls:
                            # Tool calls may be dicts (LangChain default) or
                            # objects; the original dict-only `.get` fallback
                            # raised AttributeError for objects.
                            if isinstance(tc, dict):
                                name = tc.get("name", "?")
                                targs = tc.get("args", {})
                            else:
                                name = getattr(tc, "name", "?")
                                targs = getattr(tc, "args", {})
                            ts = datetime.datetime.now().strftime("%H:%M:%S")
                            log(f"[{ts}] [Tool] {name}({format_tool_args(targs)})")
            if chunk.get("investment_debate_state") and chunk["investment_debate_state"].get("judge_decision"):
                log("[Section] Research Team decision ready")
            if chunk.get("trader_investment_plan"):
                log("[Section] Trading Team plan ready")
            if chunk.get("risk_debate_state") and chunk["risk_debate_state"].get("judge_decision"):
                log("[Section] Portfolio Manager decision ready")
            trace.append(chunk)

        if not trace:
            return None, None, "No output from pipeline"

        final_state = trace[-1]
        log("Analysis complete. Saving report...")

        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        save_path = Path.cwd() / "reports" / f"{ticker}_{timestamp}"
        report_file = save_report_to_disk(final_state, ticker, save_path)
        log(f"Report saved: {report_file}")

        return final_state, report_file, None
    except Exception as e:
        # Top-level boundary for programmatic callers: report the failure via
        # the return contract (and log the traceback) instead of raising.
        import traceback
        err_msg = f"{type(e).__name__}: {e}"
        log(f"Error: {err_msg}")
        if log_callback:
            log_callback(traceback.format_exc())
        return None, None, err_msg
provider_lower in ("openai", "ollama", "openrouter", "ark"): return OpenAIClient(model, base_url, provider=provider_lower, **kwargs) if provider_lower == "xai": diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py index 7011895f..83bfaf20 100644 --- a/tradingagents/llm_clients/openai_client.py +++ b/tradingagents/llm_clients/openai_client.py @@ -29,7 +29,15 @@ class UnifiedChatOpenAI(ChatOpenAI): class OpenAIClient(BaseLLMClient): - """Client for OpenAI, Ollama, OpenRouter, and xAI providers.""" + """Client for OpenAI-compatible providers. + + Supported providers: + - openai → OpenAI platform + - ollama → Local Ollama server (no auth) + - openrouter → OpenRouter API + - xai → xAI / Grok API + - ark → ByteDance Ark (OpenAI-compatible API) + """ def __init__( self, @@ -58,6 +66,16 @@ class OpenAIClient(BaseLLMClient): elif self.provider == "ollama": llm_kwargs["base_url"] = "http://localhost:11434/v1" llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth + elif self.provider == "ark": + # ByteDance Ark (OpenAI-compatible) – API key from ARK_API_KEY + # Default base_url matches official docs but can be overridden. + llm_kwargs["base_url"] = ( + self.base_url + or "https://ark.ap-southeast.bytepluses.com/api/v3" + ) + api_key = os.environ.get("ARK_API_KEY") + if api_key: + llm_kwargs["api_key"] = api_key elif self.base_url: llm_kwargs["base_url"] = self.base_url diff --git a/ui/__init__.py b/ui/__init__.py new file mode 100644 index 00000000..75af3f17 --- /dev/null +++ b/ui/__init__.py @@ -0,0 +1 @@ +# TradingAgents UI package (Streamlit app and CLI wrapper). diff --git a/ui/assets/.gitkeep b/ui/assets/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/ui/cli_wrapper.py b/ui/cli_wrapper.py new file mode 100644 index 00000000..8e5a7859 --- /dev/null +++ b/ui/cli_wrapper.py @@ -0,0 +1,106 @@ +""" +CLI wrapper for TradingAgents: programmatic interface used by the Streamlit UI. 
+ +This module does NOT duplicate business logic. It calls the same programmatic +runner exposed by the CLI (cli.main.run_analysis_programmatic), which in turn +uses the same graph, config, and save_report_to_disk as the interactive CLI. + +How CLI and UI share logic: +- Interactive CLI: cli.main.run_analysis() → get_user_selections() → run_analysis_programmatic + is NOT used by CLI; CLI uses its own loop with Rich. The shared core is + run_analysis_programmatic(), which uses TradingAgentsGraph and save_report_to_disk. +- UI: streamlit_app.py builds a selections dict from form inputs and calls + run_trading_agent() here, which calls run_analysis_programmatic(selections, log_callback). + +To add new agents in the future: +- Add the analyst type in tradingagents (and wire into the graph). +- Add the option in cli/models.AnalystType and cli.utils (for CLI prompts). +- Add the option in ui/streamlit_app.py sidebar (analyst checkboxes) and ensure + the selections["analysts"] list passed to run_trading_agent includes the new key. +""" + +from __future__ import annotations + +import threading +from pathlib import Path +from typing import Callable, List, Optional, Tuple + +# Ensure project root is on path when running as streamlit run ui/streamlit_app.py +import sys +_ui_dir = Path(__file__).resolve().parent +_project_root = _ui_dir.parent +if str(_project_root) not in sys.path: + sys.path.insert(0, str(_project_root)) + + +def run_trading_agent( + selections: dict, + log_callback: Optional[Callable[[str], None]] = None, +) -> Tuple[bool, Optional[Path], Optional[str], Optional[dict]]: + """ + Run the TradingAgents pipeline with the given selections (same as CLI options). + + Args: + selections: Dict with ticker, analysis_date, analysts, research_depth, + llm_provider, backend_url, shallow_thinker, deep_thinker, + google_thinking_level (optional), openai_reasoning_effort (optional). + log_callback: Optional callable(line) for live log streaming. 
+ + Returns: + (success, report_file_path, error_message, final_state). + - success: True if the run completed and report was saved. + - report_file_path: Path to complete_report.md (identical to CLI output). + - error_message: Non-empty only when success is False. + - final_state: Last chunk state for preview; None on failure. + """ + from cli.main import run_analysis_programmatic + + final_state, report_path, err = run_analysis_programmatic(selections, log_callback=log_callback) + if err: + return False, None, err, None + return True, report_path, None, final_state + + +def build_report_preview_markdown(final_state: dict, ticker: str) -> str: + """ + Build a single Markdown string for the full report from final_state. + + Matches the structure of complete_report.md produced by save_report_to_disk + so the UI preview is consistent with the downloaded file. + """ + if not final_state: + return "" + parts = [f"# Trading Analysis Report: {ticker}\n"] + # Analyst sections + for key, title in [ + ("market_report", "Market Analysis"), + ("sentiment_report", "Social Sentiment"), + ("news_report", "News Analysis"), + ("fundamentals_report", "Fundamentals Analysis"), + ]: + if final_state.get(key): + parts.append(f"## {title}\n\n{final_state[key]}") + if final_state.get("investment_debate_state"): + debate = final_state["investment_debate_state"] + parts.append("## Research Team Decision\n") + if debate.get("bull_history"): + parts.append(f"### Bull Researcher\n{debate['bull_history']}") + if debate.get("bear_history"): + parts.append(f"### Bear Researcher\n{debate['bear_history']}") + if debate.get("judge_decision"): + parts.append(f"### Research Manager\n{debate['judge_decision']}") + if final_state.get("trader_investment_plan"): + parts.append("## Trading Team Plan\n\n" + final_state["trader_investment_plan"]) + if final_state.get("risk_debate_state"): + risk = final_state["risk_debate_state"] + parts.append("## Risk Management Team Decision\n") + for key, label in [ + 
("aggressive_history", "Aggressive Analyst"), + ("conservative_history", "Conservative Analyst"), + ("neutral_history", "Neutral Analyst"), + ]: + if risk.get(key): + parts.append(f"### {label}\n{risk[key]}") + if risk.get("judge_decision"): + parts.append("## Portfolio Manager Decision\n\n" + risk["judge_decision"]) + return "\n\n".join(parts) diff --git a/ui/streamlit_app.py b/ui/streamlit_app.py new file mode 100644 index 00000000..4950c1ee --- /dev/null +++ b/ui/streamlit_app.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +""" +TradingAgents Streamlit UI + +Run from project root: + pip install streamlit + streamlit run ui/streamlit_app.py + +This UI wraps the same pipeline as the CLI (python -m cli.main analyze). +No business logic is duplicated: the UI builds a selections dict and calls +cli.main.run_analysis_programmatic via ui.cli_wrapper. + +How CLI and UI share logic: +- Both use tradingagents.graph.TradingAgentsGraph and cli.main.save_report_to_disk. +- CLI: interactive prompts → run_analysis() with Rich live display. +- UI: form inputs → run_trading_agent() → run_analysis_programmatic() with log_callback. + +Adding new agents: extend the graph and config, then add the analyst option +to the sidebar "Analyst / strategy selection" and to cli.models.AnalystType. 
+""" + +from __future__ import annotations + +import io +from pathlib import Path +from datetime import datetime, date +from typing import List, Optional + +import streamlit as st + +# Ensure project root is on path +_UI_DIR = Path(__file__).resolve().parent +_PROJECT_ROOT = _UI_DIR.parent +if str(_PROJECT_ROOT) not in __import__("sys").path: + __import__("sys").path.insert(0, str(_PROJECT_ROOT)) + +from ui import cli_wrapper + +# ----------------------------------------------------------------------------- +# Option constants (mirror CLI choices; no business logic) +# ----------------------------------------------------------------------------- +LLM_PROVIDERS = [ + ("OpenAI", "openai", "https://api.openai.com/v1"), + ("Ark (ByteDance)", "ark", "https://ark.ap-southeast.bytepluses.com/api/v3"), + ("Google", "google", "https://generativelanguage.googleapis.com/v1"), + ("Anthropic", "anthropic", "https://api.anthropic.com/"), + ("xAI", "xai", "https://api.x.ai/v1"), + ("Openrouter", "openrouter", "https://openrouter.ai/api/v1"), + ("Ollama", "ollama", "http://localhost:11434/v1"), +] + +ANALYST_OPTIONS = [ + ("Market", "market"), + ("Social Media", "social"), + ("News", "news"), + ("Fundamentals", "fundamentals"), +] + +RESEARCH_DEPTH_OPTIONS = [ + ("Shallow — quick research, few rounds", 1), + ("Medium — moderate debate rounds", 3), + ("Deep — comprehensive research", 5), +] + +# Per-provider model options (display, value) +SHALLOW_OPTIONS = { + "openai": [("GPT-5 Mini", "gpt-5-mini"), ("GPT-5 Nano", "gpt-5-nano"), ("GPT-5.2", "gpt-5.2"), ("GPT-4.1", "gpt-4.1")], + "anthropic": [("Claude Haiku 4.5", "claude-haiku-4-5"), ("Claude Sonnet 4.5", "claude-sonnet-4-5"), ("Claude Sonnet 4", "claude-sonnet-4-20250514")], + "google": [("Gemini 3 Flash", "gemini-3-flash-preview"), ("Gemini 2.5 Flash", "gemini-2.5-flash"), ("Gemini 2.5 Flash Lite", "gemini-2.5-flash-lite")], + "xai": [("Grok 4.1 Fast (Non-Reasoning)", "grok-4-1-fast-non-reasoning"), ("Grok 4 Fast (Reasoning)", 
"grok-4-fast-reasoning")], + "openrouter": [("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free")], + "ollama": [("Qwen3:latest", "qwen3:latest"), ("GPT-OSS:latest", "gpt-oss:latest"), ("GLM-4.7-Flash:latest", "glm-4.7-flash:latest")], + "ark": [("Ark seed-1-8-251228", "seed-1-8-251228")], +} +DEEP_OPTIONS = { + "openai": [("GPT-5.2", "gpt-5.2"), ("GPT-5.1", "gpt-5.1"), ("GPT-5", "gpt-5"), ("GPT-4.1", "gpt-4.1"), ("GPT-5 Mini", "gpt-5-mini")], + "anthropic": [("Claude Sonnet 4.5", "claude-sonnet-4-5"), ("Claude Opus 4.5", "claude-opus-4-5"), ("Claude Haiku 4.5", "claude-haiku-4-5")], + "google": [("Gemini 3 Pro", "gemini-3-pro-preview"), ("Gemini 3 Flash", "gemini-3-flash-preview"), ("Gemini 2.5 Flash", "gemini-2.5-flash")], + "xai": [("Grok 4.1 Fast (Reasoning)", "grok-4-1-fast-reasoning"), ("Grok 4 Fast (Reasoning)", "grok-4-fast-reasoning"), ("Grok 4", "grok-4-0709")], + "openrouter": [("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free")], + "ollama": [("GLM-4.7-Flash:latest", "glm-4.7-flash:latest"), ("GPT-OSS:latest", "gpt-oss:latest"), ("Qwen3:latest", "qwen3:latest")], + "ark": [("Ark seed-1-8-251228", "seed-1-8-251228")], +} + + +def _default_provider_options(provider_key: str): + shallow = SHALLOW_OPTIONS.get(provider_key, [("Default", "gpt-5-mini")]) + deep = DEEP_OPTIONS.get(provider_key, [("Default", "gpt-5.2")]) + return shallow, deep + + +def main() -> None: + st.set_page_config( + page_title="TradingAgents", + page_icon="📈", + layout="wide", + initial_sidebar_state="expanded", + ) + + # Minimal custom style for a clean, professional look + st.markdown(""" + + """, unsafe_allow_html=True) + + # ----- Sidebar ----- + with st.sidebar: + st.markdown("## 📊 TradingAgents") + st.markdown("---") + st.markdown("### Agent / strategy selection") + selected_analysts: List[str] = st.multiselect( + "Analyst 
team", + options=[v for _, v in ANALYST_OPTIONS], + default=["market", "news", "fundamentals"], + format_func=lambda x: next(d for d, v in ANALYST_OPTIONS if v == x), + ) + if not selected_analysts: + st.warning("Select at least one analyst.") + st.markdown("### Symbols") + ticker_input = st.text_input("Ticker symbol(s)", value="SPY", help="Primary symbol; multi-symbol support can be extended.") + ticker = (ticker_input or "SPY").strip().upper().split()[0] + st.markdown("### Date range") + today = date.today() + analysis_date = st.date_input("Analysis date", value=today, max_value=today) + analysis_date_str = analysis_date.strftime("%Y-%m-%d") + st.markdown("### Capital / risk (optional)") + capital = st.number_input("Capital (reserved)", min_value=0.0, value=100000.0, step=10000.0, format="%.0f") + risk_pct = st.slider("Risk % (reserved)", 0.0, 50.0, 2.0, 0.5) + st.markdown("### Optional CLI flags") + research_depth_label, research_depth = st.selectbox( + "Research depth", + options=RESEARCH_DEPTH_OPTIONS, + index=1, + format_func=lambda x: x[0], + ) + research_depth_value = research_depth + provider_display, provider_key, backend_url = st.selectbox( + "LLM provider", + options=LLM_PROVIDERS, + index=0, + format_func=lambda x: x[0], + ) + shallow_opts, deep_opts = _default_provider_options(provider_key) + shallow_thinker = st.selectbox("Quick-thinking model", options=[v for _, v in shallow_opts], format_func=lambda x: next(d for d, v in shallow_opts if v == x)) + deep_thinker = st.selectbox("Deep-thinking model", options=[v for _, v in deep_opts], format_func=lambda x: next(d for d, v in deep_opts if v == x)) + google_thinking = None + openai_effort = None + if provider_key == "google": + google_thinking = st.selectbox("Gemini thinking mode", ["high", "minimal"], index=0) + elif provider_key == "openai": + openai_effort = st.selectbox("OpenAI reasoning effort", ["medium", "high", "low"], index=0) + st.markdown("---") + + # ----- Main area ----- + 
st.title("TradingAgents") + st.caption("Multi-Agents LLM Financial Trading — same pipeline as CLI, no logic duplication.") + + run_clicked = st.button("Run Trading Agent", type="primary", use_container_width=True) + + log_placeholder = st.empty() + report_placeholder = st.empty() + download_placeholder = st.empty() + error_placeholder = st.empty() + + # Clear previous result when starting a new run + if run_clicked: + error_placeholder.empty() + download_placeholder.empty() + report_placeholder.empty() + log_lines: List[str] = [] + + def on_log(line: str) -> None: + log_lines.append(line) + + with st.spinner("Running pipeline…"): + selections = { + "ticker": ticker, + "analysis_date": analysis_date_str, + "analysts": selected_analysts if selected_analysts else ["market", "news", "fundamentals"], + "research_depth": research_depth_value, + "llm_provider": provider_key, + "backend_url": backend_url, + "shallow_thinker": shallow_thinker, + "deep_thinker": deep_thinker, + "google_thinking_level": google_thinking, + "openai_reasoning_effort": openai_effort, + } + success, report_path, err_msg, final_state = cli_wrapper.run_trading_agent(selections, log_callback=on_log) + + with log_placeholder: + st.markdown("#### Live execution log") + st.text_area("Log", value="\n".join(log_lines), height=280, key="run_log", label_visibility="collapsed") + + if not success: + error_placeholder.error(f"Run failed: {err_msg}") + else: + st.success("Run completed. 
Report saved.") + preview_md = cli_wrapper.build_report_preview_markdown(final_state, ticker) + with report_placeholder: + st.markdown("### Report preview") + if preview_md: + st.markdown(preview_md, unsafe_allow_html=False) + else: + st.info("No preview content.") + if report_path and report_path.exists(): + report_bytes = report_path.read_text(encoding="utf-8") + download_placeholder.download_button( + "Download report (complete_report.md)", + data=report_bytes, + file_name=report_path.name, + mime="text/markdown", + use_container_width=True, + ) + + with st.sidebar: + st.markdown("---") + st.markdown("**Docs**") + st.markdown("- CLI: `python -m cli.main analyze`") + st.markdown("- UI: `streamlit run ui/streamlit_app.py`") + + +if __name__ == "__main__": + main()