feat: add more frontend config options

This commit is contained in:
Kevin Bruton 2025-09-29 08:58:02 +02:00
parent 437b913c2c
commit 26ccc48066
3 changed files with 235 additions and 8 deletions

View File

@ -148,17 +148,46 @@ def update_execution_state(state: Dict[str, Any]):
if child.get("children"))
app_state["overall_progress"] = min(100, int((completed_agents / max(total_phases, 1)) * 100))
def run_trading_process(company_symbol: str):
def run_trading_process(company_symbol: str, config: Dict[str, Any]):
"""Runs the TradingAgentsGraph in a separate thread."""
with app_state_lock:
app_state["overall_status"] = "in_progress"
app_state["overall_progress"] = 0
try:
graph = TradingAgentsGraph()
current_date = time.strftime("%Y-%m-%d") # Use current date for analysis
# Import and create custom config
from tradingagents.default_config import DEFAULT_CONFIG
# Create custom configuration with user selections
custom_config = DEFAULT_CONFIG.copy()
custom_config["llm_provider"] = config["llm_provider"]
custom_config["max_debate_rounds"] = config["max_debate_rounds"]
custom_config["cost_per_trade"] = config["cost_per_trade"]
# Set the appropriate LLM models based on provider
if config["llm_provider"] == "google":
custom_config["gemini_quick_think_llm"] = config["quick_think_llm"]
custom_config["gemini_deep_think_llm"] = config["deep_think_llm"]
else:
custom_config["quick_think_llm"] = config["quick_think_llm"]
custom_config["deep_think_llm"] = config["deep_think_llm"]
# Set backend URL based on provider
if config["llm_provider"] == "openrouter":
custom_config["backend_url"] = "https://openrouter.ai/api/v1"
elif config["llm_provider"] == "google":
custom_config["backend_url"] = "https://generativelanguage.googleapis.com/v1"
elif config["llm_provider"] == "anthropic":
custom_config["backend_url"] = "https://api.anthropic.com/"
elif config["llm_provider"] == "ollama":
custom_config["backend_url"] = f"http://{os.getenv('OLLAMA_HOST', 'localhost')}:11434/v1"
else: # openai
custom_config["backend_url"] = "https://api.openai.com/v1"
graph = TradingAgentsGraph(config=custom_config)
analysis_date = config["analysis_date"] # Use user-selected date
# The propagate method now accepts the callback and trade_date
final_state = graph.propagate(company_symbol, trade_date=current_date, on_step_callback=update_execution_state)
final_state = graph.propagate(company_symbol, trade_date=analysis_date, on_step_callback=update_execution_state)
with app_state_lock:
app_state["overall_status"] = "completed"
@ -197,7 +226,16 @@ async def read_root():
return template.render(app_state=app_state)
@app.post("/start", response_class=HTMLResponse)
async def start_process(background_tasks: BackgroundTasks, company_symbol: str = Form(...)):
async def start_process(
background_tasks: BackgroundTasks,
company_symbol: str = Form(...),
llm_provider: str = Form(...),
quick_think_llm: str = Form(...),
deep_think_llm: str = Form(...),
max_debate_rounds: int = Form(...),
cost_per_trade: float = Form(...),
analysis_date: str = Form(...)
):
# Check if all required environment variables are set
missing_vars = [var for var in required_env_vars if not os.getenv(var)]
if missing_vars:
@ -224,8 +262,18 @@ async def start_process(background_tasks: BackgroundTasks, company_symbol: str =
app_state["execution_tree"] = [] # Clear for new run
app_state["overall_status"] = "in_progress"
app_state["overall_progress"] = 0
# Store all configuration parameters
app_state["config"] = {
"llm_provider": llm_provider,
"quick_think_llm": quick_think_llm,
"deep_think_llm": deep_think_llm,
"max_debate_rounds": max_debate_rounds,
"cost_per_trade": cost_per_trade,
"analysis_date": analysis_date
}
background_tasks.add_task(run_trading_process, company_symbol)
background_tasks.add_task(run_trading_process, company_symbol, app_state["config"])
template = jinja_env.get_template("_partials/left_panel.html")
return template.render(tree=app_state["execution_tree"], app_state=app_state)

View File

@ -103,7 +103,8 @@ body {
font-size: 0.9em;
}
#config-form input {
#config-form input,
#config-form select {
width: 100%;
padding: 10px;
margin-bottom: 15px;
@ -113,14 +114,35 @@ body {
color: var(--text-primary);
font-size: 1em;
transition: border-color 0.3s ease, box-shadow 0.3s ease;
box-sizing: border-box;
}
#config-form input:focus {
#config-form input:focus,
#config-form select:focus {
outline: none;
border-color: var(--accent-color);
box-shadow: 0 0 0 2px rgba(76, 175, 80, 0.2);
}
/* Specific styling for select dropdowns */
#config-form select {
cursor: pointer;
background-image: url("data:image/svg+xml;charset=UTF-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23a0a0a0' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6,9 12,15 18,9'%3E%3C/polyline%3E%3C/svg%3E");
background-repeat: no-repeat;
background-position: right 10px center;
background-size: 16px;
padding-right: 40px;
appearance: none;
-webkit-appearance: none;
-moz-appearance: none;
}
#config-form select option {
background-color: var(--input-bg);
color: var(--text-primary);
padding: 8px;
}
#config-form button {
width: 100%;
padding: 12px;

View File

@ -22,6 +22,41 @@
<form hx-post="/start" hx-target="#left-panel" hx-swap="innerHTML" hx-indicator="#loading">
<label for="company_symbol">Company Symbol:</label>
<input type="text" id="company_symbol" name="company_symbol" value="AAPL" required>
<label for="llm_provider">LLM Provider:</label>
<select id="llm_provider" name="llm_provider" onchange="updateModelOptions()" required>
<option value="openai">OpenAI</option>
<option value="openrouter" selected>OpenRouter</option>
<option value="google">Google (Gemini)</option>
<option value="anthropic">Anthropic</option>
<option value="ollama">Ollama</option>
</select>
<label for="quick_think_llm">Quick Think LLM:</label>
<select id="quick_think_llm" name="quick_think_llm" required>
<!-- Options will be populated by JavaScript -->
</select>
<label for="deep_think_llm">Deep Think LLM:</label>
<select id="deep_think_llm" name="deep_think_llm" required>
<!-- Options will be populated by JavaScript -->
</select>
<label for="max_debate_rounds">Max Debate Rounds:</label>
<select id="max_debate_rounds" name="max_debate_rounds" required>
<option value="1" selected>1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
<label for="cost_per_trade">Cost Per Trade ($):</label>
<input type="number" id="cost_per_trade" name="cost_per_trade" value="2.0" step="0.1" min="0" required>
<label for="analysis_date">Analysis Date:</label>
<input type="date" id="analysis_date" name="analysis_date" required>
<button type="submit">Start Process</button>
<div id="loading" class="htmx-indicator">Starting process...</div>
</form>
@ -32,5 +67,127 @@
<p>Enter a company symbol (e.g., AAPL, MSFT, GOOGL) and click "Start Process" to begin the trading analysis.</p>
</div>
</div>
<script>
// Model options for each provider.
// Keys must match the <option value> ids of the #llm_provider select (and the
// llm_provider value the server receives). Each provider maps to two curated
// lists: "quick" feeds the #quick_think_llm dropdown, "deep" feeds
// #deep_think_llm. In each entry, `value` is the model id submitted to the
// backend and `text` is the human-readable label shown to the user.
const modelOptions = {
"openai": {
"quick": [
{ value: "gpt-4o-mini", text: "GPT-4o-mini - Fast and efficient for quick tasks" },
{ value: "gpt-4.1-nano", text: "GPT-4.1-nano - Ultra-lightweight model for basic operations" },
{ value: "gpt-4.1-mini", text: "GPT-4.1-mini - Compact model with good performance" },
{ value: "gpt-4o", text: "GPT-4o - Standard model with solid capabilities" }
],
"deep": [
{ value: "gpt-4.1-nano", text: "GPT-4.1-nano - Ultra-lightweight model for basic operations" },
{ value: "gpt-4.1-mini", text: "GPT-4.1-mini - Compact model with good performance" },
{ value: "gpt-4o", text: "GPT-4o - Standard model with solid capabilities" },
{ value: "o4-mini", text: "o4-mini - Specialized reasoning model (compact)" },
{ value: "o3-mini", text: "o3-mini - Advanced reasoning model (lightweight)" },
{ value: "o3", text: "o3 - Full advanced reasoning model" },
{ value: "o1", text: "o1 - Premier reasoning and problem-solving model" }
]
},
"openrouter": {
"quick": [
{ value: "x-ai/grok-4-fast:free", text: "xAI: Grok 4 Fast (free)" },
{ value: "deepseek/deepseek-chat-v3.1:free", text: "DeepSeek: DeepSeek V3.1 (free)" },
{ value: "z-ai/glm-4-32b", text: "Z.AI: GLM 4 32B" },
{ value: "meta-llama/llama-4-scout:free", text: "Meta: Llama 4 Scout" },
{ value: "meta-llama/llama-3.3-8b-instruct:free", text: "Meta: Llama 3.3 8B Instruct" },
{ value: "google/gemini-2.0-flash-exp:free", text: "Google: Gemini 2.0 Flash (free)" }
],
"deep": [
{ value: "qwen/qwen3-235b-a22b:free", text: "Qwen: Qwen3 235B A22B (free)" },
{ value: "openai/gpt-oss-120b:free", text: "OpenAI: gpt-oss-120b (free)" },
{ value: "z-ai/glm-4-32b", text: "Z.AI: GLM 4 32B" },
{ value: "deepseek/deepseek-chat-v3-0324:free", text: "DeepSeek V3 - 685B-parameter model" }
]
},
"google": {
"quick": [
{ value: "gemini-2.0-flash-lite", text: "Gemini 2.0 Flash-Lite - Cost efficiency and low latency" },
{ value: "gemini-2.0-flash", text: "Gemini 2.0 Flash - Next generation features, speed, and thinking" },
{ value: "gemini-2.5-flash-preview-05-20", text: "Gemini 2.5 Flash - Adaptive thinking, cost efficiency" }
],
"deep": [
{ value: "gemini-2.0-flash-lite", text: "Gemini 2.0 Flash-Lite - Cost efficiency and low latency" },
{ value: "gemini-2.0-flash", text: "Gemini 2.0 Flash - Next generation features, speed, and thinking" },
{ value: "gemini-2.5-flash-preview-05-20", text: "Gemini 2.5 Flash - Adaptive thinking, cost efficiency" },
{ value: "gemini-2.5-pro-preview-06-05", text: "Gemini 2.5 Pro" }
]
},
"anthropic": {
"quick": [
{ value: "claude-3-5-haiku-latest", text: "Claude Haiku 3.5 - Fast inference and standard capabilities" },
{ value: "claude-3-5-sonnet-latest", text: "Claude Sonnet 3.5 - Highly capable standard model" },
{ value: "claude-3-7-sonnet-latest", text: "Claude Sonnet 3.7 - Exceptional hybrid reasoning" },
{ value: "claude-sonnet-4-0", text: "Claude Sonnet 4 - High performance and excellent reasoning" }
],
"deep": [
{ value: "claude-3-5-haiku-latest", text: "Claude Haiku 3.5 - Fast inference and standard capabilities" },
{ value: "claude-3-5-sonnet-latest", text: "Claude Sonnet 3.5 - Highly capable standard model" },
{ value: "claude-3-7-sonnet-latest", text: "Claude Sonnet 3.7 - Exceptional hybrid reasoning" },
{ value: "claude-sonnet-4-0", text: "Claude Sonnet 4 - High performance and excellent reasoning" },
{ value: "claude-opus-4-0", text: "Claude Opus 4 - Most powerful Anthropic model" }
]
},
"ollama": {
"quick": [
{ value: "granite3.3:2b", text: "Granite 3.3 2B" },
{ value: "llama3.1", text: "llama3.1 local" },
{ value: "llama3.2", text: "llama3.2 local" }
],
"deep": [
{ value: "granite3.3:2b", text: "Granite 3.3 2B" },
{ value: "llama3.1", text: "llama3.1 local" },
{ value: "qwen3", text: "qwen3" }
]
}
};
// Rebuild the quick-think and deep-think model dropdowns so they offer only
// the models available for the currently selected LLM provider. Invoked from
// the #llm_provider select's onchange handler and once at page load.
function updateModelOptions() {
    const selectedProvider = document.getElementById('llm_provider').value;
    const providerModels = modelOptions[selectedProvider] || {};
    // [select element id, model list for that role]
    const roleTargets = [
        ['quick_think_llm', providerModels.quick],
        ['deep_think_llm', providerModels.deep]
    ];
    for (const [selectId, models] of roleTargets) {
        const selectEl = document.getElementById(selectId);
        selectEl.innerHTML = ''; // drop options left over from the previous provider
        (models || []).forEach(({ value, text }) => {
            const opt = document.createElement('option');
            opt.value = value;
            opt.textContent = text;
            selectEl.appendChild(opt);
        });
    }
}
// Default the #analysis_date input to today's date (YYYY-MM-DD).
// Built from local date components instead of Date.prototype.toISOString(),
// which reports the UTC calendar date and is off by one day for users in
// non-UTC timezones near midnight.
function setCurrentDate() {
    const today = new Date();
    const year = today.getFullYear();
    const month = String(today.getMonth() + 1).padStart(2, '0');
    const day = String(today.getDate()).padStart(2, '0');
    document.getElementById('analysis_date').value = `${year}-${month}-${day}`;
}
// Initialize the page.
// The model <select>s are rendered empty server-side (see the template's
// "Options will be populated by JavaScript" placeholders), so seed them for
// the default provider and prefill today's date once the DOM is ready.
document.addEventListener('DOMContentLoaded', function() {
setCurrentDate();
updateModelOptions(); // Set initial model options for OpenRouter
});
</script>
</body>
</html>