feat: restructure to swing trading pipeline with auto stock screening

- Remove debate/risk management stages (bull/bear researchers, research manager, risk debate)
- Simplify flow: Screening → Analysts (market, news, fundamentals) → Trader
- Add screening pipeline: universe builder → technical screen → fundamental screen → LLM ranking
- Add Korean market support (KRX data via FDR/pykrx, DART API, Korean news)
- Trader now outputs SWING_ORDER (BUY/SELL/PASS with entry/stop/profit/size/hold_days)
- Add swing CLI command for full pipeline execution
- Add portfolio state management module
- Pin finance-datareader==0.9.101 with fallback universe for KRX API issues

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
hyejwon 2026-03-11 19:29:24 +09:00
parent f047f26df0
commit 57d82164d2
45 changed files with 4192 additions and 1196 deletions

7
.gitignore vendored
View File

@ -217,3 +217,10 @@ __marimo__/
# Cache
**/data_cache/
# OS
.DS_Store
# Project outputs
reports/
results/

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,7 @@
from enum import Enum
from typing import List, Optional, Dict
from pydantic import BaseModel
class AnalystType(str, Enum):
MARKET = "market"
SOCIAL = "social"
NEWS = "news"
FUNDAMENTALS = "fundamentals"

View File

@ -1,13 +1,12 @@
import questionary
from typing import List, Optional, Tuple, Dict
from typing import List, Tuple
from cli.models import AnalystType
ANALYST_ORDER = [
("Market Analyst", AnalystType.MARKET),
("Social Media Analyst", AnalystType.SOCIAL),
("News Analyst", AnalystType.NEWS),
("Fundamentals Analyst", AnalystType.FUNDAMENTALS),
("Market Analyst (기술적 분석)", AnalystType.MARKET),
("News Analyst (뉴스 분석)", AnalystType.NEWS),
("Fundamentals Analyst (기본적 분석)", AnalystType.FUNDAMENTALS),
]
@ -25,7 +24,7 @@ def get_ticker() -> str:
).ask()
if not ticker:
console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
print("\n[red]No ticker symbol provided. Exiting...[/red]")
exit(1)
return ticker.strip().upper()
@ -58,7 +57,7 @@ def get_analysis_date() -> str:
).ask()
if not date:
console.print("\n[red]No date provided. Exiting...[/red]")
print("\nNo date provided. Exiting...")
exit(1)
return date.strip()
@ -84,48 +83,15 @@ def select_analysts() -> List[AnalystType]:
).ask()
if not choices:
console.print("\n[red]No analysts selected. Exiting...[/red]")
print("\nNo analysts selected. Exiting...")
exit(1)
return choices
def select_research_depth() -> int:
"""Select research depth using an interactive selection."""
# Define research depth options with their corresponding values
DEPTH_OPTIONS = [
("Shallow - Quick research, few debate and strategy discussion rounds", 1),
("Medium - Middle ground, moderate debate rounds and strategy discussion", 3),
("Deep - Comprehensive research, in depth debate and strategy discussion", 5),
]
choice = questionary.select(
"Select Your [Research Depth]:",
choices=[
questionary.Choice(display, value=value) for display, value in DEPTH_OPTIONS
],
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style(
[
("selected", "fg:yellow noinherit"),
("highlighted", "fg:yellow noinherit"),
("pointer", "fg:yellow noinherit"),
]
),
).ask()
if choice is None:
console.print("\n[red]No research depth selected. Exiting...[/red]")
exit(1)
return choice
def select_shallow_thinking_agent(provider) -> str:
"""Select shallow thinking llm engine using an interactive selection."""
# Define shallow thinking llm engine options with their corresponding model names
SHALLOW_AGENT_OPTIONS = {
"openai": [
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
@ -179,9 +145,7 @@ def select_shallow_thinking_agent(provider) -> str:
).ask()
if choice is None:
console.print(
"\n[red]No shallow thinking llm engine selected. Exiting...[/red]"
)
print("\nNo quick-thinking LLM engine selected. Exiting...")
exit(1)
return choice
@ -190,7 +154,6 @@ def select_shallow_thinking_agent(provider) -> str:
def select_deep_thinking_agent(provider) -> str:
"""Select deep thinking llm engine using an interactive selection."""
# Define deep thinking llm engine options with their corresponding model names
DEEP_AGENT_OPTIONS = {
"openai": [
("GPT-5.2 - Latest flagship", "gpt-5.2"),
@ -247,14 +210,14 @@ def select_deep_thinking_agent(provider) -> str:
).ask()
if choice is None:
console.print("\n[red]No deep thinking llm engine selected. Exiting...[/red]")
print("\nNo deep-thinking LLM engine selected. Exiting...")
exit(1)
return choice
def select_llm_provider() -> tuple[str, str]:
"""Select the OpenAI api url using interactive selection."""
# Define OpenAI api options with their corresponding endpoints
def select_llm_provider() -> Tuple[str, str]:
"""Select the LLM provider using interactive selection."""
BASE_URLS = [
("OpenAI", "https://api.openai.com/v1"),
("Google", "https://generativelanguage.googleapis.com/v1"),
@ -263,7 +226,7 @@ def select_llm_provider() -> tuple[str, str]:
("Openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "http://localhost:11434/v1"),
]
choice = questionary.select(
"Select your LLM Provider:",
choices=[
@ -279,11 +242,11 @@ def select_llm_provider() -> tuple[str, str]:
]
),
).ask()
if choice is None:
console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
print("\nNo LLM provider selected. Exiting...")
exit(1)
display_name, url = choice
print(f"You selected: {display_name}\tURL: {url}")
@ -309,11 +272,7 @@ def ask_openai_reasoning_effort() -> str:
def ask_gemini_thinking_config() -> str | None:
"""Ask for Gemini thinking configuration.
Returns thinking_level: "high" or "minimal".
Client maps to appropriate API param based on model series.
"""
"""Ask for Gemini thinking configuration."""
return questionary.select(
"Select Thinking Mode:",
choices=[

View File

@ -31,6 +31,9 @@ dependencies = [
"tqdm>=4.67.1",
"typing-extensions>=4.14.0",
"yfinance>=0.2.63",
"finance-datareader==0.9.101",
"numpy>=2.2.6",
"ta>=0.11.0",
]
[project.scripts]

View File

@ -1,21 +1,10 @@
from .utils.agent_utils import create_msg_delete
from .utils.agent_states import AgentState, InvestDebateState, RiskDebateState
from .utils.agent_states import AgentState
from .utils.memory import FinancialSituationMemory
from .analysts.fundamentals_analyst import create_fundamentals_analyst
from .analysts.market_analyst import create_market_analyst
from .analysts.news_analyst import create_news_analyst
from .analysts.social_media_analyst import create_social_media_analyst
from .researchers.bear_researcher import create_bear_researcher
from .researchers.bull_researcher import create_bull_researcher
from .risk_mgmt.aggressive_debator import create_aggressive_debator
from .risk_mgmt.conservative_debator import create_conservative_debator
from .risk_mgmt.neutral_debator import create_neutral_debator
from .managers.research_manager import create_research_manager
from .managers.risk_manager import create_risk_manager
from .trader.trader import create_trader
@ -23,18 +12,8 @@ __all__ = [
"FinancialSituationMemory",
"AgentState",
"create_msg_delete",
"InvestDebateState",
"RiskDebateState",
"create_bear_researcher",
"create_bull_researcher",
"create_research_manager",
"create_fundamentals_analyst",
"create_market_analyst",
"create_neutral_debator",
"create_news_analyst",
"create_aggressive_debator",
"create_risk_manager",
"create_conservative_debator",
"create_social_media_analyst",
"create_trader",
]

View File

@ -2,6 +2,12 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement, get_insider_transactions
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_REPORT_FORMAT_GUIDE,
SWING_TRADING_CONTEXT,
SWING_PORTFOLIO_CONTEXT,
)
from tradingagents.dataflows.config import get_config
@ -21,9 +27,24 @@ def create_fundamentals_analyst(llm):
system_message = (
"You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
+ " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
+ " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements.",
+ " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements."
+ KOREAN_INVESTOR_GUIDE
+ KOREAN_REPORT_FORMAT_GUIDE
+ SWING_TRADING_CONTEXT
+ SWING_PORTFOLIO_CONTEXT
)
# Inject swing context if available
screening_ctx = state.get("screening_context", "")
portfolio_ctx = state.get("portfolio_context", "")
position_status = state.get("position_status", "NONE")
if screening_ctx or portfolio_ctx:
system_message += f"\n\n[현재 분석 컨텍스트]\n포지션 상태: {position_status}\n"
if screening_ctx:
system_message += f"스크리닝 선정 이유: {screening_ctx}\n"
if portfolio_ctx:
system_message += f"\n{portfolio_ctx}\n"
prompt = ChatPromptTemplate.from_messages(
[
(
@ -32,8 +53,8 @@ def create_fundamentals_analyst(llm):
" Use the provided tools to progress towards answering the question."
" If you are unable to fully answer, that's OK; another assistant with different tools"
" will help where you left off. Execute what you can to make progress."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/PASS** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/PASS** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
),

View File

@ -1,7 +1,21 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_stock_data, get_indicators
from tradingagents.agents.utils.agent_utils import (
get_stock_data,
get_indicators,
get_krx_stock_data,
get_krx_indicators,
get_exchange_rate,
get_korea_index,
get_investor_trading,
)
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_REPORT_FORMAT_GUIDE,
SWING_TRADING_CONTEXT,
SWING_PORTFOLIO_CONTEXT,
)
from tradingagents.dataflows.config import get_config
@ -44,8 +58,23 @@ Volume-Based Indicators:
- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
+ KOREAN_INVESTOR_GUIDE
+ KOREAN_REPORT_FORMAT_GUIDE
+ SWING_TRADING_CONTEXT
+ SWING_PORTFOLIO_CONTEXT
)
# Inject swing context if available
screening_ctx = state.get("screening_context", "")
portfolio_ctx = state.get("portfolio_context", "")
position_status = state.get("position_status", "NONE")
if screening_ctx or portfolio_ctx:
system_message += f"\n\n[현재 분석 컨텍스트]\n포지션 상태: {position_status}\n"
if screening_ctx:
system_message += f"스크리닝 선정 이유: {screening_ctx}\n"
if portfolio_ctx:
system_message += f"\n{portfolio_ctx}\n"
prompt = ChatPromptTemplate.from_messages(
[
(
@ -54,8 +83,8 @@ Volume-Based Indicators:
" Use the provided tools to progress towards answering the question."
" If you are unable to fully answer, that's OK; another assistant with different tools"
" will help where you left off. Execute what you can to make progress."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/PASS** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/PASS** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
),

View File

@ -2,6 +2,12 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_news, get_global_news
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_REPORT_FORMAT_GUIDE,
SWING_TRADING_CONTEXT,
SWING_PORTFOLIO_CONTEXT,
)
from tradingagents.dataflows.config import get_config
@ -18,8 +24,23 @@ def create_news_analyst(llm):
system_message = (
"You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
+ KOREAN_INVESTOR_GUIDE
+ KOREAN_REPORT_FORMAT_GUIDE
+ SWING_TRADING_CONTEXT
+ SWING_PORTFOLIO_CONTEXT
)
# Inject swing context if available
screening_ctx = state.get("screening_context", "")
portfolio_ctx = state.get("portfolio_context", "")
position_status = state.get("position_status", "NONE")
if screening_ctx or portfolio_ctx:
system_message += f"\n\n[현재 분석 컨텍스트]\n포지션 상태: {position_status}\n"
if screening_ctx:
system_message += f"스크리닝 선정 이유: {screening_ctx}\n"
if portfolio_ctx:
system_message += f"\n{portfolio_ctx}\n"
prompt = ChatPromptTemplate.from_messages(
[
(
@ -28,8 +49,8 @@ def create_news_analyst(llm):
" Use the provided tools to progress towards answering the question."
" If you are unable to fully answer, that's OK; another assistant with different tools"
" will help where you left off. Execute what you can to make progress."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/PASS** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/PASS** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. We are looking at the company {ticker}",
),

View File

@ -2,6 +2,10 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_news
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_REPORT_FORMAT_GUIDE,
)
from tradingagents.dataflows.config import get_config
@ -17,7 +21,9 @@ def create_social_media_analyst(llm):
system_message = (
"You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""",
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
+ KOREAN_INVESTOR_GUIDE
+ KOREAN_REPORT_FORMAT_GUIDE
)
prompt = ChatPromptTemplate.from_messages(
@ -28,8 +34,8 @@ def create_social_media_analyst(llm):
" Use the provided tools to progress towards answering the question."
" If you are unable to fully answer, that's OK; another assistant with different tools"
" will help where you left off. Execute what you can to make progress."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/PASS** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/PASS** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}",
),

View File

@ -1,5 +1,9 @@
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
)
def create_research_manager(llm, memory):
@ -19,23 +23,33 @@ def create_research_manager(llm, memory):
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.
prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision on whether to ENTER a NEW long position in this stock.
Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.
Context: There is currently NO existing position in this stock. The only question is: "Should we BUY to open a new position now, or PASS and not enter?"
Additionally, develop a detailed investment plan for the trader. This should include:
Your decision options are:
- **BUY**: Enter a new long position now — the bull case is compelling and the entry timing is right.
- **PASS**: Do not enter — the risks or poor timing outweigh the opportunity; wait for a better setup.
Your Recommendation: A decisive stance supported by the most convincing arguments.
Rationale: An explanation of why these arguments lead to your conclusion.
Strategic Actions: Concrete steps for implementing the recommendation.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.
Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning about entry timing and risk/reward. Avoid defaulting to PASS simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.
Additionally, develop a detailed entry plan for the trader if recommending BUY. This should include:
Your Recommendation: A decisive BUY or PASS stance supported by the most convincing arguments.
Rationale: An explanation of why these arguments lead to your conclusion about entering now.
Strategic Actions: If BUY — concrete entry parameters (price levels, position sizing approach, stop-loss levels). If PASS — what conditions would need to change before reconsidering entry.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.
Here are your past reflections on mistakes:
\"{past_memory_str}\"
Here is the debate:
Debate History:
{history}"""
{history}
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}
"""
response = llm.invoke(prompt)
new_investment_debate_state = {

View File

@ -1,5 +1,10 @@
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
KOREAN_FINAL_DECISION_GUIDE,
)
def create_risk_manager(llm, memory):
@ -22,16 +27,22 @@ def create_risk_manager(llm, memory):
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Aggressive, Neutral, and Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.
prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Aggressive, Neutral, and Conservative—and determine whether to ENTER a NEW long position in this stock.
Context: There is currently NO existing position. The trader is evaluating a fresh entry. The only valid decisions are:
- **BUY**: Enter a new long position now — the risk/reward is favorable for a new entry.
- **PASS**: Do not enter — the risks are too high or the timing is wrong; skip this trade.
SELL is NOT a valid option since there is no existing position.
Guidelines for Decision-Making:
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
1. **Summarize Key Arguments**: Extract the strongest points from each analyst about whether this is a good entry point, focusing on entry risk and reward.
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights about entry risk.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/PASS call.
Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold.
- A clear and actionable recommendation: Buy or Pass.
- Detailed reasoning anchored in the debate and past reflections.
---
@ -41,7 +52,11 @@ Deliverables:
---
Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes."""
Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes.
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}
{KOREAN_FINAL_DECISION_GUIDE}
"""
response = llm.invoke(prompt)

View File

@ -1,6 +1,10 @@
from langchain_core.messages import AIMessage
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
)
def create_bear_researcher(llm, memory):
@ -22,14 +26,17 @@ def create_bear_researcher(llm, memory):
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.
prompt = f"""You are a Bear Analyst making the case AGAINST entering a NEW long position in this stock. There is currently NO existing position — the question is purely: "Should we buy this stock now as a fresh entry?" Your goal is to argue NO — that entering a new position now is too risky or ill-timed.
Present a well-reasoned argument emphasizing why this is a bad entry point, highlighting risks, challenges, and negative indicators that make buying now inadvisable.
Key points to focus on:
- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance.
- Poor Entry Timing: Argue why the current price, valuation, or market conditions make this a bad moment to initiate a new long position.
- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could cause losses after entry.
- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors.
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position.
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions.
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support passing on this trade.
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions about entry timing.
- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts.
Resources available:
@ -41,7 +48,9 @@ Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bull argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
Use this information to deliver a compelling argument for passing on this trade, refute the bull's claims, and engage in a dynamic debate. You must also address reflections and learn from lessons and mistakes you made in the past.
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}
"""
response = llm.invoke(prompt)

View File

@ -1,6 +1,10 @@
from langchain_core.messages import AIMessage
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
)
def create_bull_researcher(llm, memory):
@ -22,13 +26,16 @@ def create_bull_researcher(llm, memory):
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.
prompt = f"""You are a Bull Analyst evaluating whether to ENTER a NEW long position in this stock. There is currently NO existing position — the question is purely: "Should we buy this stock now as a fresh entry?"
Your task is to build a strong, evidence-based case for entering this new position, emphasizing why NOW is a good entry point based on growth potential, competitive advantages, and positive market indicators.
Key points to focus on:
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability.
- Entry Timing: Argue why the current price and market conditions represent a good entry point for a new long position.
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability that justify buying now.
- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning.
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence.
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit.
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence for entering now.
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why entering a new position is still justified.
- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data.
Resources available:
@ -39,7 +46,9 @@ Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bear argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
Use this information to deliver a compelling argument for entering a new long position, refute the bear's concerns, and engage in a dynamic debate. You must also address reflections and learn from lessons and mistakes you made in the past.
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}
"""
response = llm.invoke(prompt)

View File

@ -1,5 +1,9 @@
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
)
def create_aggressive_debator(llm):
@ -30,7 +34,9 @@ Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting.
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}"""
response = llm.invoke(prompt)

View File

@ -1,6 +1,10 @@
from langchain_core.messages import AIMessage
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
)
def create_conservative_debator(llm):
@ -31,7 +35,9 @@ Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.
Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""
Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting.
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}"""
response = llm.invoke(prompt)

View File

@ -1,5 +1,9 @@
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_DEBATE_GUIDE,
)
def create_neutral_debator(llm):
@ -30,7 +34,9 @@ Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.
Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""
Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting.
{KOREAN_INVESTOR_GUIDE}
{KOREAN_DEBATE_GUIDE}"""
response = llm.invoke(prompt)

View File

@ -0,0 +1,11 @@
from tradingagents.agents.screener.universe_builder import build_universe
from tradingagents.agents.screener.technical_screener import technical_screen
from tradingagents.agents.screener.fundamental_screener import fundamental_screen
from tradingagents.agents.screener.candidate_ranker import create_candidate_ranker
# Public API of the screener package: universe construction, the two
# computational filters, and the LLM-based ranker factory.
__all__ = [
    "build_universe",
    "technical_screen",
    "fundamental_screen",
    "create_candidate_ranker",
]

View File

@ -0,0 +1,115 @@
"""LLM-based candidate ranking for swing trading.
After technical + fundamental screening narrows candidates to ~10-20 stocks,
this ranker uses an LLM to rank them by swing trade attractiveness.
"""
import json
import logging
from langchain_openai import ChatOpenAI
logger = logging.getLogger(__name__)
def create_candidate_ranker(llm: ChatOpenAI):
    """Create a candidate ranking function bound to an LLM.

    Args:
        llm: LLM instance for ranking evaluation

    Returns:
        Callable that ranks candidates
    """

    def _extract_json(text: str) -> str:
        """Isolate the outermost JSON object in an LLM reply.

        Models frequently wrap JSON in markdown fences (```json ... ```)
        despite the prompt's instruction; slicing from the first '{' to the
        last '}' recovers the payload in those cases.
        """
        start = text.find("{")
        end = text.rfind("}")
        if start != -1 and end > start:
            return text[start : end + 1]
        return text

    def rank_candidates(
        candidates: list[dict],
        portfolio_context: str = "",
        max_candidates: int = 5,
    ) -> list[dict]:
        """Rank and select top swing trading candidates using LLM.

        Args:
            candidates: Filtered candidates from screeners
            portfolio_context: Current portfolio summary
            max_candidates: Maximum candidates to return

        Returns:
            Ranked list of top candidates with LLM reasoning
        """
        if not candidates:
            return []
        if len(candidates) <= max_candidates:
            # Nothing to cut - skip the LLM call entirely.
            return candidates

        # Prepare candidate summaries for LLM
        candidate_summaries = []
        for i, c in enumerate(candidates):
            summary = (
                f"[{i + 1}] {c['ticker']} ({c['name']})\n"
                f" 시장: {c['market']}\n"
                f" 기술적 신호: {', '.join(c['signals'])}\n"
                f" 펀더멘탈: {c.get('fundamental_check', 'N/A')}\n"
            )
            # Add key indicators if available
            ind = c.get("indicators", {})
            if ind:
                price = ind.get("current_price", "N/A")
                rsi = ind.get("rsi", "N/A")
                vol_ratio = ind.get("volume_ratio", "N/A")
                if isinstance(rsi, float):
                    rsi = f"{rsi:.1f}"
                if isinstance(vol_ratio, float):
                    vol_ratio = f"{vol_ratio:.1f}x"
                summary += (
                    f" 현재가: {price} / RSI: {rsi} / 거래량 비율: {vol_ratio}\n"
                )
            candidate_summaries.append(summary)
        candidates_text = "\n".join(candidate_summaries)
        system_prompt = f"""너는 스윙 트레이딩 종목 선정 전문가다.
아래 스크리닝을 통과한 후보 종목들을 스윙 트레이딩 매력도 기준으로 상위 {max_candidates}개를 선정하라.
평가 기준:
1. 기술적 신호의 강도 복합성 (여러 신호가 겹칠수록 강함)
2. 스윙 트레이딩에 적합한 변동성과 유동성
3. 펀더멘탈 건전성 (안전장치)
4. 현재 포트폴리오와의 분산 효과
반드시 아래 JSON 형식만 출력하라. 다른 텍스트는 절대 출력하지 마라.
{{"selected": [번호1, 번호2, ...], "reasoning": "선정 이유 한 줄 요약"}}
"""
        messages = [
            ("system", system_prompt),
            ("human", f"포트폴리오 현황:\n{portfolio_context}\n\n후보 종목:\n{candidates_text}"),
        ]
        try:
            response = llm.invoke(messages).content
            # Parse only the JSON object - tolerates fenced or prefixed replies.
            parsed = json.loads(_extract_json(response))
            selected_indices = parsed.get("selected", [])
            reasoning = parsed.get("reasoning", "")
            ranked = []
            for idx in selected_indices:
                actual_idx = idx - 1  # 1-indexed in prompt
                if 0 <= actual_idx < len(candidates):
                    c = candidates[actual_idx]
                    c["ranking_reason"] = reasoning
                    ranked.append(c)
            if ranked:
                return ranked[:max_candidates]
        # TypeError covers non-integer entries in "selected" (idx - 1 would
        # otherwise crash past the designed fallback).
        except (json.JSONDecodeError, KeyError, IndexError, TypeError) as e:
            logger.warning(f"LLM ranking parse error: {e}, returning top by signal count")
        # Fallback: return top by signal count
        return candidates[:max_candidates]

    return rank_candidates

View File

@ -0,0 +1,178 @@
"""Fundamental screening for swing trading candidates.
Pure computational screening (no LLM) - filters stocks by fundamental health:
- Revenue growth (positive QoQ or YoY)
- Reasonable valuation (PER, PBR)
- Financial health (debt ratio, current ratio)
- Market cap threshold
"""
import logging
import pandas as pd
logger = logging.getLogger(__name__)
def fundamental_screen(
    technical_candidates: list[dict],
    trade_date: str,
    market: str = "KRX",
) -> list[dict]:
    """Filter technical candidates by fundamental criteria.

    Each candidate dict is annotated in place with ``fundamentals`` (raw
    data), ``fundamental_check`` (human-readable summary) and
    ``fundamental_pass``. Candidates with missing data or failed lookups are
    kept with a warning flag (benefit of the doubt); only explicit
    fundamental failures are dropped.

    Args:
        technical_candidates: Output from technical_screener
        trade_date: Current trading date (YYYY-MM-DD)
        market: "KRX" or "US"

    Returns:
        Filtered list with fundamental data added
    """
    if not technical_candidates:
        return []

    passed: list[dict] = []
    for cand in technical_candidates:
        code = cand["ticker"]
        try:
            fund = (
                _get_krx_fundamentals(code, trade_date)
                if market == "KRX"
                else _get_us_fundamentals(code)
            )
            if not fund:
                # No fundamental data - keep the candidate, flagged.
                cand["fundamental_check"] = "데이터 없음"
                cand["fundamental_pass"] = True  # benefit of the doubt
                passed.append(cand)
                continue

            ok, notes = _check_fundamentals(fund, market)
            cand["fundamentals"] = fund
            cand["fundamental_check"] = " / ".join(notes) if notes else "기본 통과"
            cand["fundamental_pass"] = ok
            if ok:
                passed.append(cand)
            else:
                logger.debug(
                    f"{code} failed fundamental screen: {notes}"
                )
        except Exception as e:
            # Lookup/check failure: keep the candidate rather than drop it.
            logger.warning(f"Fundamental screening error for {code}: {e}")
            cand["fundamental_check"] = f"오류: {e}"
            cand["fundamental_pass"] = True
            passed.append(cand)

    logger.info(
        f"Fundamental screening: {len(passed)}/{len(technical_candidates)} passed"
    )
    return passed
def _get_krx_fundamentals(ticker: str, trade_date: str) -> dict:
    """Fetch KRX valuation ratios and market cap for one trading day.

    Returns an empty dict when pykrx is missing or the lookup fails; a
    partial dict is possible if only one of the two queries succeeds.
    """
    out: dict = {}
    try:
        from pykrx import stock as krx_stock

        day = trade_date.replace("-", "")
        # Single-day valuation snapshot: PER, PBR, EPS, BPS, DIV
        ratios = krx_stock.get_market_fundamental_by_date(
            day, day, ticker
        )
        if ratios is not None and not ratios.empty:
            first = ratios.iloc[0]
            for out_key, col in (
                ("per", "PER"),
                ("pbr", "PBR"),
                ("eps", "EPS"),
                ("bps", "BPS"),
                ("div_yield", "DIV"),
            ):
                out[out_key] = first.get(col, None)
        # Market cap
        caps = krx_stock.get_market_cap_by_date(day, day, ticker)
        if caps is not None and not caps.empty:
            out["market_cap"] = caps.iloc[0].get("시가총액", None)
    except ImportError:
        logger.warning("pykrx not installed - limited fundamental screening")
    except Exception as e:
        logger.warning(f"Error getting KRX fundamentals for {ticker}: {e}")
    return out
def _get_us_fundamentals(ticker: str) -> dict:
    """Fetch US fundamental metrics via yfinance for screening.

    Returns an empty dict when the yfinance lookup fails.
    """
    import yfinance as yf

    # Output key -> yfinance info key, in the order the caller expects.
    field_map = (
        ("per", "trailingPE"),
        ("forward_pe", "forwardPE"),
        ("pbr", "priceToBook"),
        ("eps", "trailingEps"),
        ("div_yield", "dividendYield"),
        ("market_cap", "marketCap"),
        ("debt_to_equity", "debtToEquity"),
        ("current_ratio", "currentRatio"),
        ("profit_margin", "profitMargins"),
        ("revenue_growth", "revenueGrowth"),
        ("roe", "returnOnEquity"),
    )
    metrics: dict = {}
    try:
        info = yf.Ticker(ticker).info
        for out_key, yf_key in field_map:
            metrics[out_key] = info.get(yf_key)
    except Exception as e:
        logger.warning(f"Error getting US fundamentals for {ticker}: {e}")
    return metrics
def _check_fundamentals(data: dict, market: str) -> tuple[bool, list[str]]:
"""Check if fundamentals pass screening criteria.
Returns (passes: bool, reasons: list of fail/pass reasons).
"""
reasons = []
fail = False
# PER check: not excessively high (allow negative for turnaround plays)
per = data.get("per")
if per is not None and per > 0:
if per > 100:
reasons.append(f"PER 과다 ({per:.1f})")
fail = True
elif per < 5:
reasons.append(f"PER 매력적 ({per:.1f})")
# PBR check: not excessively high
pbr = data.get("pbr")
if pbr is not None and pbr > 0:
if pbr > 10:
reasons.append(f"PBR 과다 ({pbr:.1f})")
fail = True
# Debt check (US only - data available)
debt_to_equity = data.get("debt_to_equity")
if debt_to_equity is not None and debt_to_equity > 300:
reasons.append(f"부채비율 과다 ({debt_to_equity:.0f}%)")
fail = True
# Revenue growth (positive is good, not a hard filter)
rev_growth = data.get("revenue_growth")
if rev_growth is not None and rev_growth > 0:
reasons.append(f"매출 성장 (+{rev_growth * 100:.1f}%)")
# Profit margin (negative is a warning but not disqualifying for swing)
profit_margin = data.get("profit_margin")
if profit_margin is not None and profit_margin < -0.20:
reasons.append(f"적자 심화 (마진 {profit_margin * 100:.1f}%)")
return not fail, reasons

View File

@ -0,0 +1,145 @@
"""Technical screening for swing trading candidates.
Pure computational screening (no LLM) - filters stocks by technical signals:
- Volume spikes (>2x 20-day average)
- MA crossovers (10 EMA crossing above 20/50 SMA)
- RSI oversold bounce (RSI was below 30, now rising)
- Bollinger Band breakout (price crossing above lower band)
- Price momentum patterns
"""
import logging
from datetime import datetime, timedelta
import pandas as pd
from tradingagents.dataflows.screening_data import (
compute_screening_indicators,
get_bulk_ohlcv,
)
logger = logging.getLogger(__name__)
def technical_screen(
    universe: pd.DataFrame,
    trade_date: str,
    market: str = "KRX",
    existing_positions: list[str] | None = None,
) -> list[dict]:
    """Screen stocks by technical indicators.

    Args:
        universe: DataFrame with Code, Name, Market columns
        trade_date: Current trading date (YYYY-MM-DD)
        market: "KRX" or "US"
        existing_positions: Tickers already held (skip screening)

    Returns:
        List of dicts with ticker, name, market, signals (list of trigger
        reasons), and indicator values, strongest candidates first.
    """
    held = set(existing_positions or [])
    codes = [code for code in universe["Code"] if code not in held]
    if not codes:
        return []

    # Need roughly 60 trading days of history for the 50 SMA; pull 120
    # calendar days to cover weekends/holidays.
    window_start = (
        datetime.strptime(trade_date, "%Y-%m-%d") - timedelta(days=120)
    ).strftime("%Y-%m-%d")

    logger.info(f"Fetching OHLCV for {len(codes)} tickers...")
    price_frames = get_bulk_ohlcv(codes, window_start, trade_date, market=market)

    name_by_code = dict(zip(universe["Code"], universe["Name"]))
    picks: list[dict] = []
    for code, frame in price_frames.items():
        ind = compute_screening_indicators(frame)
        if not ind:
            continue
        triggered = _check_swing_signals(ind)
        if not triggered:
            continue
        picks.append(
            {
                "ticker": code,
                "name": name_by_code.get(code, code),
                "market": market,
                "signals": triggered,
                "indicators": ind,
                "signal_count": len(triggered),
            }
        )

    # More concurrent signals = stronger candidate.
    picks.sort(key=lambda p: p["signal_count"], reverse=True)
    logger.info(f"Technical screening found {len(picks)} candidates")
    return picks
def _check_swing_signals(ind: dict) -> list[str]:
"""Check for swing trading entry signals.
Returns list of triggered signal descriptions.
"""
signals = []
price = ind.get("current_price")
if price is None:
return signals
# 1. Volume spike: current volume > 2x 20-day average
vol_ratio = ind.get("volume_ratio", 0)
if vol_ratio >= 2.0:
signals.append(f"거래량 급증 (평균 대비 {vol_ratio:.1f}배)")
# 2. RSI oversold bounce: RSI was below 30 and is now rising
rsi = ind.get("rsi")
rsi_prev = ind.get("rsi_prev")
if rsi is not None and rsi_prev is not None:
if rsi_prev < 30 and rsi > rsi_prev:
signals.append(f"RSI 과매도 반등 ({rsi_prev:.0f}{rsi:.0f})")
elif 30 <= rsi <= 40 and rsi > rsi_prev:
signals.append(f"RSI 과매도권 탈출 중 ({rsi:.0f})")
# 3. MA crossover: 10 EMA crosses above 20 SMA (golden cross short-term)
ema10 = ind.get("ema_10")
sma20 = ind.get("sma_20")
if ema10 is not None and sma20 is not None:
if ema10 > sma20 and price > ema10:
signals.append(f"단기 골든크로스 (10 EMA > 20 SMA)")
# 4. Bollinger Band lower touch and bounce
boll_lower = ind.get("boll_lower")
prev_close = ind.get("prev_close")
if boll_lower is not None and prev_close is not None:
if prev_close <= boll_lower and price > boll_lower:
signals.append("볼린저 하단 반등")
# 5. Price above key moving averages (trend confirmation)
sma50 = ind.get("sma_50")
if sma50 is not None and price > sma50 and ema10 is not None and ema10 > sma50:
pct_above = (price / sma50 - 1) * 100
if 0 < pct_above < 10:
signals.append(f"50일 이평선 지지 (위 {pct_above:.1f}%)")
# 6. Recent pullback in uptrend (mean reversion opportunity)
pct_5d = ind.get("pct_change_5d")
pct_20d = ind.get("pct_change_20d")
if pct_5d is not None and pct_20d is not None:
if pct_20d > 5 and -8 < pct_5d < -2:
signals.append(f"상승 추세 내 조정 (20일 +{pct_20d:.1f}%, 5일 {pct_5d:.1f}%)")
# 7. Volume + price breakout combo
if vol_ratio >= 1.5:
pct_1d = ind.get("pct_change_1d", 0)
if pct_1d > 2:
signals.append(f"거래량 동반 상승 돌파 (+{pct_1d:.1f}%, 거래량 {vol_ratio:.1f}배)")
return signals

View File

@ -0,0 +1,65 @@
"""Universe builder for stock screening.
Builds the initial stock universe from KRX or US markets,
filtered by basic criteria (market cap, volume).
"""
import logging
import pandas as pd
from tradingagents.dataflows.screening_data import get_krx_universe, get_us_universe
logger = logging.getLogger(__name__)
def build_universe(config: dict) -> pd.DataFrame:
    """Build stock universe based on config settings.

    Reads ``market`` ("KRX", "US", or "ALL") plus the screening floors from
    the config and concatenates the per-market universes. A market whose
    fetch raises is logged and skipped rather than aborting the build.

    Args:
        config: Trading config with market, screening, and universe settings.

    Returns:
        DataFrame with columns: Code, Name, Market, Sector, MarketCap, Volume
        (empty DataFrame when no market produced any stocks).
    """
    target_market = config.get("market", "KRX")
    cap_floor = config.get("screening_min_market_cap", 500_000_000_000)
    volume_floor = config.get("screening_min_volume", 100_000)

    parts = []

    if target_market in ("KRX", "ALL"):
        logger.info("Building KRX universe...")
        try:
            krx = get_krx_universe(
                min_market_cap=cap_floor,
                min_volume=volume_floor,
            )
            if not krx.empty:
                krx["Market"] = "KRX"
                parts.append(krx)
                logger.info(f"KRX universe: {len(krx)} stocks")
        except Exception as e:
            logger.error(f"Failed to build KRX universe: {e}")

    if target_market in ("US", "ALL"):
        logger.info("Building US universe...")
        try:
            us = get_us_universe(
                universe_type=config.get("us_universe", "sp500"),
                custom_watchlist=config.get("custom_watchlist"),
            )
            if not us.empty:
                us["Market"] = "US"
                parts.append(us)
                logger.info(f"US universe: {len(us)} stocks")
        except Exception as e:
            logger.error(f"Failed to build US universe: {e}")

    if not parts:
        logger.warning("No stocks found in universe")
        return pd.DataFrame()

    combined = pd.concat(parts, ignore_index=True)
    logger.info(f"Total universe: {len(combined)} stocks")
    return combined

View File

@ -1,36 +1,94 @@
import functools
import time
import json
from tradingagents.agents.utils.korean_prompt import (
KOREAN_INVESTOR_GUIDE,
KOREAN_FINAL_DECISION_GUIDE,
SWING_TRADING_CONTEXT,
SWING_PORTFOLIO_CONTEXT,
)
def create_trader(llm, memory):
def trader_node(state, name):
company_name = state["company_of_interest"]
investment_plan = state["investment_plan"]
market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"]
market_report = state["market_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
screening_context = state.get("screening_context", "")
portfolio_context = state.get("portfolio_context", "")
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
# Build situation for memory lookup
curr_situation = f"{market_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
past_memory_str = ""
if past_memories:
for i, rec in enumerate(past_memories, 1):
for rec in past_memories:
past_memory_str += rec["recommendation"] + "\n\n"
else:
past_memory_str = "No past memories found."
past_memory_str = "과거 유사 사례 메모리가 없습니다."
# Build context message
reports_section = f"""## 기술적 분석 (Market Analyst)
{market_report}
## 뉴스 분석 (News Analyst)
{news_report}
## 기본적 분석 (Fundamentals Analyst)
{fundamentals_report}"""
screening_section = ""
if screening_context:
screening_section = f"\n## 스크리닝 선정 사유\n{screening_context}"
portfolio_section = ""
if portfolio_context:
portfolio_section = f"\n## 현재 포트폴리오 상태\n{portfolio_context}"
context = {
"role": "user",
"content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
"content": f"""{company_name}에 대한 애널리스트 분석 리포트입니다. 이를 기반으로 스윙 트레이딩 매매 결정을 내려주세요.
{reports_section}{screening_section}{portfolio_section}""",
}
messages = [
{
"role": "system",
"content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}""",
"content": f"""You are a swing trading agent. You receive analyst reports and make direct BUY/SELL/PASS decisions for swing trades (2-20 day holding period).
Your options:
- **BUY**: Enter a new long position now.
- **SELL**: Exit an existing position (only if portfolio context shows an open position).
- **PASS**: Skip this trade.
Decision framework:
1. Technical setup (Market Analyst) Is the chart showing a favorable entry/exit?
2. News check (News Analyst) Any catalysts or red flags?
3. Fundamentals sanity check Is the company fundamentally sound?
4. Risk management Define stop loss, take profit, position size.
You MUST end your response with a structured order block:
```
SWING_ORDER:
ACTION: BUY|SELL|PASS
ENTRY_PRICE: 현재가 또는 목표 진입가
STOP_LOSS: 손절가
TAKE_PROFIT: 익절가
POSITION_SIZE_PCT: 자본 대비 비중 (0.05~0.20)
MAX_HOLD_DAYS: 최대 보유일 (2~20)
RATIONALE: 요약
```
Also conclude with: FINAL TRANSACTION PROPOSAL: **BUY/SELL/PASS**
Lessons from past similar trades:
{past_memory_str}
{KOREAN_INVESTOR_GUIDE}
{SWING_TRADING_CONTEXT}
{SWING_PORTFOLIO_CONTEXT}
{KOREAN_FINAL_DECISION_GUIDE}""",
},
context,
]
@ -39,7 +97,7 @@ def create_trader(llm, memory):
return {
"messages": [result],
"trader_investment_plan": result.content,
"trader_decision": result.content,
"sender": name,
}

View File

@ -1,76 +1,30 @@
from typing import Annotated, Sequence
from datetime import date, timedelta, datetime
from typing_extensions import TypedDict, Optional
from langchain_openai import ChatOpenAI
from tradingagents.agents import *
from langgraph.prebuilt import ToolNode
from langgraph.graph import END, StateGraph, START, MessagesState
# Researcher team state
class InvestDebateState(TypedDict):
bull_history: Annotated[
str, "Bullish Conversation history"
] # Bullish Conversation history
bear_history: Annotated[
str, "Bearish Conversation history"
] # Bullish Conversation history
history: Annotated[str, "Conversation history"] # Conversation history
current_response: Annotated[str, "Latest response"] # Last response
judge_decision: Annotated[str, "Final judge decision"] # Last response
count: Annotated[int, "Length of the current conversation"] # Conversation length
# Risk management team state
class RiskDebateState(TypedDict):
aggressive_history: Annotated[
str, "Aggressive Agent's Conversation history"
] # Conversation history
conservative_history: Annotated[
str, "Conservative Agent's Conversation history"
] # Conversation history
neutral_history: Annotated[
str, "Neutral Agent's Conversation history"
] # Conversation history
history: Annotated[str, "Conversation history"] # Conversation history
latest_speaker: Annotated[str, "Analyst that spoke last"]
current_aggressive_response: Annotated[
str, "Latest response by the aggressive analyst"
] # Last response
current_conservative_response: Annotated[
str, "Latest response by the conservative analyst"
] # Last response
current_neutral_response: Annotated[
str, "Latest response by the neutral analyst"
] # Last response
judge_decision: Annotated[str, "Judge's decision"]
count: Annotated[int, "Length of the current conversation"] # Conversation length
from typing import Annotated
from typing_extensions import TypedDict
from langgraph.graph import MessagesState
class AgentState(MessagesState):
company_of_interest: Annotated[str, "Company that we are interested in trading"]
trade_date: Annotated[str, "What date we are trading at"]
"""Simplified state for swing trading pipeline.
Flow: Analysts Trader Done
No debate states needed.
"""
company_of_interest: Annotated[str, "Company/ticker we are analyzing"]
trade_date: Annotated[str, "Trading date"]
sender: Annotated[str, "Agent that sent this message"]
# research step
# Analyst reports
market_report: Annotated[str, "Report from the Market Analyst"]
sentiment_report: Annotated[str, "Report from the Social Media Analyst"]
news_report: Annotated[
str, "Report from the News Researcher of current world affairs"
]
fundamentals_report: Annotated[str, "Report from the Fundamentals Researcher"]
news_report: Annotated[str, "Report from the News Analyst"]
fundamentals_report: Annotated[str, "Report from the Fundamentals Analyst"]
# researcher team discussion step
investment_debate_state: Annotated[
InvestDebateState, "Current state of the debate on if to invest or not"
]
investment_plan: Annotated[str, "Plan generated by the Analyst"]
# Screening context (from screener pipeline)
screening_context: Annotated[str, "Why this stock was flagged by screener"]
portfolio_context: Annotated[str, "Current portfolio state summary"]
trader_investment_plan: Annotated[str, "Plan generated by the Trader"]
# risk management team discussion step
risk_debate_state: Annotated[
RiskDebateState, "Current state of the debate on evaluating risk"
# Trader output
trader_decision: Annotated[str, "Trader's final swing decision"]
swing_order: Annotated[
str, "Structured order: action, entry, stop_loss, take_profit, size, hold_days"
]
final_trade_decision: Annotated[str, "Final decision made by the Risk Analysts"]

View File

@ -18,6 +18,19 @@ from tradingagents.agents.utils.news_data_tools import (
get_insider_transactions,
get_global_news
)
from tradingagents.agents.utils.korean_market_tools import (
get_krx_stock_data,
get_krx_indicators,
get_exchange_rate,
get_korea_index,
get_investor_trading,
get_krx_fundamentals,
get_dart_financials,
get_dart_disclosures,
get_dart_shareholders,
get_korean_news,
get_korean_global_news,
)
def create_msg_delete():
def delete_messages(state):

View File

@ -0,0 +1,228 @@
"""LangChain tool definitions for Korean market data.
These tools wrap the Korean data sources (FinanceDataReader, Naver, DART, pykrx)
and are used by agents through the vendor routing system.
"""
from langchain_core.tools import tool
from typing import Annotated
from tradingagents.dataflows.interface import route_to_vendor
# ── Korean Stock Data Tools ──────────────────────────────────────────────────
@tool
def get_krx_stock_data(
    symbol: Annotated[str, "KRX ticker symbol (e.g., '005930' for Samsung, '000660' for SK Hynix)"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve KRX (Korea Exchange) stock OHLCV price data.

    For Korean stocks listed on KOSPI/KOSDAQ. Delegates to the configured
    data vendor via ``route_to_vendor``.

    Args:
        symbol (str): KRX ticker code (6-digit, e.g., '005930')
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format

    Returns:
        str: Formatted stock price data in KRW
    """
    return route_to_vendor("get_krx_stock_data", symbol, start_date, end_date)
@tool
def get_krx_indicators(
    symbol: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    indicator: Annotated[str, "Technical indicator (e.g., 'rsi', 'macd', 'close_50_sma')"],
    curr_date: Annotated[str, "Current trading date in YYYY-mm-dd format"],
    look_back_days: Annotated[int, "How many days to look back"] = 30,
) -> str:
    """
    Calculate technical indicators for KRX-listed stocks.

    Supported indicators: close_50_sma, close_200_sma, close_10_ema, macd, macds, macdh,
    rsi, boll, boll_ub, boll_lb, atr, vwma, mfi

    Delegates to the configured data vendor via ``route_to_vendor``.

    Args:
        symbol (str): KRX ticker code
        indicator (str): Technical indicator name
        curr_date (str): Current trading date
        look_back_days (int): Look-back period (default 30)

    Returns:
        str: Indicator values over the look-back period
    """
    return route_to_vendor("get_krx_indicators", symbol, indicator, curr_date, look_back_days)
# ── Korean Market Context Tools ──────────────────────────────────────────────
@tool
def get_exchange_rate(
    currency_pair: Annotated[str, "Currency pair (e.g., 'USD/KRW', 'JPY/KRW', 'EUR/KRW', 'CNY/KRW')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve exchange rate data. Essential for Korean market analysis
    as USD/KRW rate significantly impacts Korean stocks (especially export companies).

    Delegates to the configured data vendor via ``route_to_vendor``.

    Args:
        currency_pair (str): Currency pair (e.g., 'USD/KRW')
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format

    Returns:
        str: Exchange rate time series data
    """
    return route_to_vendor("get_exchange_rate", currency_pair, start_date, end_date)
@tool
def get_korea_index(
    index_code: Annotated[str, "Index code: 'KS11' (KOSPI), 'KQ11' (KOSDAQ), 'KS200' (KOSPI200)"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve Korean market index data (KOSPI, KOSDAQ, KOSPI200).

    Critical for understanding overall Korean market trend and sector rotation.
    Delegates to the configured data vendor via ``route_to_vendor``.

    Args:
        index_code (str): 'KS11' for KOSPI, 'KQ11' for KOSDAQ, 'KS200' for KOSPI200
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format

    Returns:
        str: Index OHLCV data
    """
    return route_to_vendor("get_korea_index", index_code, start_date, end_date)
@tool
def get_investor_trading(
    symbol: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve foreign and institutional investor trading data (수급 데이터).

    Shows net buying/selling by investor type: foreigners (외국인), institutions (기관),
    individuals (개인), pension funds (연기금), etc.
    This is one of the MOST IMPORTANT indicators for Korean stocks.

    Delegates to the configured data vendor via ``route_to_vendor``.

    Args:
        symbol (str): KRX ticker code
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format

    Returns:
        str: Investor flow data with net buy/sell amounts in KRW
    """
    return route_to_vendor("get_investor_trading", symbol, start_date, end_date)
# ── Korean Fundamentals (KRX + DART) ────────────────────────────────────────
@tool
def get_krx_fundamentals(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve fundamental data for a KRX-listed company.

    Includes PER, PBR, EPS, BPS, dividend yield, market cap from KRX data.
    Delegates to the configured data vendor via ``route_to_vendor``.

    Args:
        ticker (str): KRX ticker code
        curr_date (str): Current date in yyyy-mm-dd format

    Returns:
        str: Company fundamental ratios and market cap info
    """
    return route_to_vendor("get_krx_fundamentals", ticker, curr_date)
@tool
def get_dart_financials(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    year: Annotated[str, "Business year (e.g., '2024')"],
    report_code: Annotated[str, "Report code: '11013'=1Q, '11012'=반기, '11014'=3Q, '11011'=연간"] = "11011",
) -> str:
    """
    Retrieve detailed financial statements from DART (전자공시시스템).

    Includes balance sheet, income statement, and cash flow from official filings.
    Requires DART_API_KEY environment variable.
    Delegates to the configured data vendor via ``route_to_vendor``.

    Args:
        ticker (str): KRX ticker code
        year (str): Business year
        report_code (str): '11011' for annual, '11013' for Q1, '11012' for half-year, '11014' for Q3

    Returns:
        str: Detailed consolidated financial statements
    """
    return route_to_vendor("get_dart_financials", ticker, year, report_code)
@tool
def get_dart_disclosures(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Retrieve recent DART disclosures/filings (공시) for a Korean company.

    DART 공시 is the primary source of corporate events, regulatory filings,
    and material information for Korean stocks.

    Args:
        ticker (str): KRX ticker code.
        start_date (str): Start date (yyyy-mm-dd).
        end_date (str): End date (yyyy-mm-dd).

    Returns:
        str: List of recent disclosures with filing dates and types.
    """
    # Routed to the DART-backed vendor implementation.
    method = "get_dart_disclosures"
    return route_to_vendor(method, ticker, start_date, end_date)
@tool
def get_dart_shareholders(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
) -> str:
    """Retrieve major shareholder information (대주주 지분 현황) from DART.

    Ownership structure is critical for Korean corporate governance analysis.

    Args:
        ticker (str): KRX ticker code.

    Returns:
        str: Major shareholders with ownership percentages.
    """
    # Single-argument dispatch through the vendor router.
    return route_to_vendor("get_dart_shareholders", ticker)
# ── Korean News Tools ────────────────────────────────────────────────────────
@tool
def get_korean_news(
    ticker: Annotated[str, "KRX ticker symbol or company name (e.g., '005930' or '삼성전자')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Retrieve Korean financial news for a specific stock.

    Searches Korean news sources (Naver, Google News Korea) for
    company-specific news.

    Args:
        ticker (str): KRX ticker code or Korean company name.
        start_date (str): Start date (yyyy-mm-dd).
        end_date (str): End date (yyyy-mm-dd).

    Returns:
        str: Korean financial news articles.
    """
    # Routed to the Naver-backed vendor implementation.
    vendor_args = (ticker, start_date, end_date)
    return route_to_vendor("get_korean_news", *vendor_args)
@tool
def get_korean_global_news(
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "Number of days to look back"] = 7,
    limit: Annotated[int, "Maximum number of articles to return"] = 10,
) -> str:
    """Retrieve Korean macro/global economic news.

    Covers BOK base rate, KOSPI outlook, USD/KRW exchange rate, the Korean
    economy, and foreign investment trends.

    Args:
        curr_date (str): Current date (yyyy-mm-dd).
        look_back_days (int): Days to look back (default 7).
        limit (int): Maximum number of articles (default 10).

    Returns:
        str: Korean macro/economic news articles.
    """
    # Routed to the Naver-backed vendor implementation.
    method = "get_korean_global_news"
    return route_to_vendor(method, curr_date, look_back_days, limit)

View File

@ -0,0 +1,105 @@
"""Shared Korean localization prompt fragments for all trading agents.
Includes swing trading context prompts for multi-agent swing strategy system.
"""
# Baseline operating guide injected into agent prompts: Korean-language output,
# KST timestamps, KRW framing, and Korea-specific macro variables.
KOREAN_INVESTOR_GUIDE = """
[한국형 운영 가이드]
- 모든 응답은 자연스러운 한국어로 작성하고, 핵심 금융 용어는 필요 영어를 괄호로 병기한다.
- 날짜/시간 해석은 한국 시간(KST, UTC+9) 기준으로 명시한다.
- 가격/수익률/리스크 평가는 한국 투자자가 이해하기 쉽게 원화(KRW) 영향 관점으로 재해석하고, 데이터 통화도 함께 언급한다.
- 한국 투자자 관점의 핵심 변수(USD/KRW 환율, 한국은행 기준금리, 외국인/기관 수급, KOSPI/KOSDAQ 미국 지수 연동) 점검해 반영한다.
- "혼조세" 같은 모호한 결론으로 끝내지 말고, 관측 사실과 수치를 근거로 구체적으로 설명한다.
"""

# Report formatting contract: Korean body plus a mandatory Markdown summary table.
KOREAN_REPORT_FORMAT_GUIDE = """
[리포트 형식 가이드]
- 본문은 한국어로 작성한다.
- 마지막에는 핵심 포인트를 요약한 Markdown 표를 반드시 포함한다.
- 컬럼은 `항목 | 관측 내용 | 매매 시사점` 기본으로 사용한다.
"""

# Debate-style guidance: data-grounded rebuttals in Korean.
KOREAN_DEBATE_GUIDE = """
[토론 형식 가이드]
- 한국어로 논리적으로 토론하되, 상대 주장에 대한 반박을 데이터 기반으로 명확히 제시한다.
- 주장마다 근거(지표/뉴스/펀더멘털/수급) 연결해 실전 의사결정에 바로 있게 작성한다.
"""

# Final-decision contract: prose in Korean, but the machine-parsed keyword in
# the last "FINAL TRANSACTION PROPOSAL" line must be the English literal.
KOREAN_FINAL_DECISION_GUIDE = """
[최종 의사결정 출력 규칙]
- 전체 설명은 한국어로 작성한다.
- 시스템 파싱 호환을 위해 최종 결론 키워드는 반드시 영문 BUY 또는 PASS만 사용한다.
- 마지막 줄은 정확히 다음 형식 하나로 끝낸다.
- FINAL TRANSACTION PROPOSAL: **BUY**
- FINAL TRANSACTION PROPOSAL: **PASS**
"""

# ──────────────────────────────────────────────
# Swing Trading Prompt Fragments
# ──────────────────────────────────────────────

# Swing-horizon analysis guide: 2–20 trading-day holds, entry timing,
# stop/target levels, short-term momentum and flow.
SWING_TRADING_CONTEXT = """
[스윙 트레이딩 분석 가이드]
- 분석 기간은 2~20 거래일 보유를 전제로 한다.
- 단기 가격 변동, 지지/저항선, 추세 전환 신호에 집중한다.
- 진입 타이밍(entry timing) 손절/익절 수준을 명시적으로 제안한다.
- 현재 추세의 모멘텀과 단기 반전 가능성을 함께 평가한다.
- 거래량 변화, 수급 패턴, 기관/외국인 동향을 단기 관점에서 분석한다.
"""

# Portfolio awareness: OPEN positions are framed as HOLD-vs-SELL decisions,
# unheld tickers as BUY-vs-PASS.
SWING_PORTFOLIO_CONTEXT = """
[포트폴리오 인식 가이드]
- 아래 제공되는 포트폴리오 현황을 반드시 참고하여 분석한다.
- 이미 보유 중인 종목(position_status=OPEN) 경우, HOLD(유지) vs SELL(매도) 관점에서 분석한다.
- 미보유 종목(position_status=NONE) 경우, BUY(매수) vs PASS(관망) 관점에서 분석한다.
- 포트폴리오 전체의 리스크 분산도, 섹터 집중도를 고려한다.
"""

# Structured swing-order contract. The SWING_ORDER block and the final
# "FINAL TRANSACTION PROPOSAL: **...**" line are machine-parsed, so the
# English keywords must appear exactly as specified here.
SWING_DECISION_GUIDE = """
[스윙 트레이딩 의사결정 출력 규칙]
- 전체 설명은 한국어로 작성한다.
- 시스템 파싱 호환을 위해 최종 결론 키워드는 반드시 영문만 사용한다.
- 미보유 종목: BUY 또는 PASS
- 보유 종목: SELL 또는 HOLD
- 마지막에 반드시 아래 형식의 구조화된 주문 정보를 출력한다:
```
SWING_ORDER:
ACTION: BUY | SELL | HOLD | PASS
ENTRY_PRICE: <진입/현재 가격>
STOP_LOSS: <손절가>
TAKE_PROFIT: <익절가>
POSITION_SIZE_PCT: <자본 대비 비중 %, : 15>
MAX_HOLD_DAYS: <최대 보유일, : 15>
RATIONALE: < 근거>
```
- HOLD/PASS의 경우에도 SWING_ORDER 블록을 작성하되, 가격 정보는 현재 기준으로 기재한다.
- 마지막 줄은 정확히 다음 형식 하나로 끝낸다:
- FINAL TRANSACTION PROPOSAL: **BUY**
- FINAL TRANSACTION PROPOSAL: **SELL**
- FINAL TRANSACTION PROPOSAL: **HOLD**
- FINAL TRANSACTION PROPOSAL: **PASS**
"""

# Template filled with the screener's rationale for selecting the ticker.
SWING_SCREENING_CONTEXT_TEMPLATE = """
[스크리닝 결과]
종목이 스크리닝에서 선정된 이유:
{screening_reason}
"""

# Bull-side debate guide for swing setups (entry/upside case with price levels).
SWING_BULL_DEBATE_GUIDE = """
[스윙 매수 옹호 토론 가이드]
- position_status가 NONE이면: 지금 매수해야 하는지, 스윙 트레이딩 진입 근거를 제시한다.
- position_status가 OPEN이면: 계속 보유해야 하는지, 추가 상승 여력을 제시한다.
- 구체적 진입가, 손절가, 목표가를 수치로 제안한다.
- 단기 모멘텀, 수급, 기술적 지지선 스윙 관련 근거를 우선한다.
"""

# Bear-side debate guide (exit/stand-aside case with short-term risk factors).
SWING_BEAR_DEBATE_GUIDE = """
[스윙 매도/관망 옹호 토론 가이드]
- position_status가 NONE이면: 지금 매수하면 되는지, 관망 근거를 제시한다.
- position_status가 OPEN이면: 지금 매도해야 하는지, 하락 리스크를 제시한다.
- 단기 저항선, 과열 지표, 수급 악화 스윙 관련 리스크를 우선한다.
"""

View File

@ -0,0 +1,297 @@
"""DART (Korean Electronic Disclosure System) API integration.
Provides access to Korean company financial statements, disclosures,
and fundamental data from the DART OpenAPI (https://opendart.fss.or.kr).
Requires DART_API_KEY environment variable to be set.
"""
import os
import requests
from datetime import datetime
from typing import Annotated, Optional
# Root URL for DART OpenAPI endpoints (JSON APIs and the corpCode.xml download).
DART_BASE_URL = "https://opendart.fss.or.kr/api"
def _get_dart_api_key() -> str:
"""Get DART API key from environment."""
key = os.environ.get("DART_API_KEY", "")
if not key:
raise ValueError(
"DART_API_KEY 환경변수가 설정되지 않았습니다. "
"https://opendart.fss.or.kr 에서 API 키를 발급받으세요."
)
return key
def _dart_request(endpoint: str, params: dict) -> dict:
    """GET a DART endpoint and return the parsed JSON payload.

    Raises:
        ValueError: when DART reports a non-success status ("000" means OK).
        requests.HTTPError: on HTTP-level failures.
    """
    # DART authenticates via a query parameter, not a header.
    params["crtfc_key"] = _get_dart_api_key()
    response = requests.get(
        f"{DART_BASE_URL}/{endpoint}.json", params=params, timeout=15
    )
    response.raise_for_status()
    payload = response.json()
    if payload.get("status") == "000":
        return payload
    msg = payload.get("message", "Unknown error")
    raise ValueError(f"DART API error: {msg} (status: {payload.get('status')})")
def _get_corp_code(ticker: str) -> str:
    """Get DART corporation code from stock code.

    DART uses its own corp_code which differs from the 6-digit stock ticker.
    The full corp-code registry is downloaded once (a zipped XML from the
    corpCode endpoint) and cached on disk; subsequent calls only parse the
    cached file.

    Raises:
        ValueError: if the ticker has no matching corp_code (or the API key
            is missing when a download is needed).
    """
    import zipfile
    import io
    import xml.etree.ElementTree as ET
    from tradingagents.dataflows.config import get_config

    config = get_config()
    cache_dir = config.get("data_cache_dir", "./data_cache")
    os.makedirs(cache_dir, exist_ok=True)
    cache_file = os.path.join(cache_dir, "dart_corp_codes.xml")
    # Download corp code list if not cached.
    # NOTE(review): the cache is never refreshed — newly listed companies will
    # be missing until the cached file is deleted manually; confirm acceptable.
    if not os.path.exists(cache_file):
        api_key = _get_dart_api_key()
        url = f"{DART_BASE_URL}/corpCode.xml"
        response = requests.get(url, params={"crtfc_key": api_key}, timeout=30)
        response.raise_for_status()
        # The endpoint returns a ZIP containing a single XML document.
        with zipfile.ZipFile(io.BytesIO(response.content)) as zf:
            xml_filename = zf.namelist()[0]
            with zf.open(xml_filename) as f:
                with open(cache_file, "wb") as out:
                    out.write(f.read())
    # Parse XML and find corp_code
    tree = ET.parse(cache_file)
    root = tree.getroot()
    # Normalize to a canonical 6-digit code (strip then re-pad leading zeros).
    ticker = ticker.strip().lstrip("0")  # Handle leading zeros
    ticker_padded = ticker.zfill(6)
    for corp in root.findall("list"):
        stock_code = corp.findtext("stock_code", "").strip()
        if stock_code == ticker_padded:
            return corp.findtext("corp_code", "").strip()
    raise ValueError(f"DART corp_code not found for ticker: {ticker_padded}")
def get_dart_financial_statements(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    year: Annotated[str, "Business year (e.g., '2024')"],
    report_code: Annotated[str, "Report code: '11013'=1Q, '11012'=반기, '11014'=3Q, '11011'=연간"] = "11011",
) -> str:
    """Retrieve financial statements from DART for a Korean company.

    Fetches the consolidated statements (연결재무제표) via the
    fnlttSinglAcntAll endpoint and renders them as aligned Markdown tables,
    one per statement type. All failures are returned as message strings;
    this function never raises.

    Args:
        ticker: KRX stock code
        year: Business year
        report_code: '11013' (1Q), '11012' (반기), '11014' (3Q), '11011' (연간)
    """
    try:
        corp_code = _get_corp_code(ticker)
    except ValueError as e:
        # corp_code resolution failure is reported as plain text to the agent.
        return str(e)
    report_names = {
        "11013": "1분기보고서",
        "11012": "반기보고서",
        "11014": "3분기보고서",
        "11011": "사업보고서(연간)",
    }
    report_name = report_names.get(report_code, report_code)
    try:
        # Fetch single-company financial statements
        params = {
            "corp_code": corp_code,
            "bsns_year": year,
            "reprt_code": report_code,
            "fs_div": "CFS",  # Consolidated (연결재무제표)
        }
        data = _dart_request("fnlttSinglAcntAll", params)
        items = data.get("list", [])
        if not items:
            return f"No DART financial data for {ticker} ({year} {report_name})"
        # Organize by statement type (balance sheet, income statement, ...)
        statements = {}
        for item in items:
            sj_nm = item.get("sj_nm", "기타")  # Statement name
            if sj_nm not in statements:
                statements[sj_nm] = []
            statements[sj_nm].append(item)
        result = f"# DART 재무제표: {ticker} ({year} {report_name})\n"
        result += f"# 연결재무제표 (Consolidated)\n\n"
        for sj_name, items_list in statements.items():
            result += f"## {sj_name}\n"
            result += f"{'계정명':<30} | {'당기금액':>20} | {'전기금액':>20}\n"
            result += "-" * 75 + "\n"
            for item in items_list:
                account = item.get("account_nm", "")
                current = item.get("thstrm_amount", "")   # current-period amount
                previous = item.get("frmtrm_amount", "")  # prior-period amount
                # Format amounts: thousands separators when numeric, otherwise
                # right-align the raw string (DART returns amounts as strings).
                if current and current != "":
                    try:
                        current = f"{int(current.replace(',', '')):>20,}"
                    except (ValueError, AttributeError):
                        current = f"{current:>20}"
                if previous and previous != "":
                    try:
                        previous = f"{int(previous.replace(',', '')):>20,}"
                    except (ValueError, AttributeError):
                        previous = f"{previous:>20}"
                result += f"{account:<30} | {current:>20} | {previous:>20}\n"
            result += "\n"
        result += f"# 단위: 원 (KRW)\n"
        result += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        return result
    except ValueError as e:
        return f"DART API error for {ticker}: {str(e)}"
    except Exception as e:
        return f"Error fetching DART financial data for {ticker}: {str(e)}"
def get_dart_disclosures(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
    disclosure_type: Annotated[str, "Type: 'A'=정기, 'B'=주요사항, 'C'=발행공시, 'D'=지분공시, 'E'=기타, 'F'=외부감사, 'G'=펀드, 'H'=자산유동화, 'I'=거래소공시, 'J'=공정위, ''=전체"] = "",
) -> str:
    """Retrieve recent disclosures/filings from DART for a Korean company.

    This is crucial for Korean market analysis as DART disclosures (공시) are
    the primary source of corporate events and regulatory filings. Returns a
    Markdown list (title, receipt date, filer, DART link) capped at 20 items;
    failures are returned as message strings rather than raised.
    """
    try:
        corp_code = _get_corp_code(ticker)
    except ValueError as e:
        return str(e)
    try:
        params = {
            "corp_code": corp_code,
            # DART expects compact yyyymmdd dates.
            "bgn_de": start_date.replace("-", ""),
            "end_de": end_date.replace("-", ""),
            "page_count": 20,
        }
        # Empty disclosure_type means "all categories" (parameter omitted).
        if disclosure_type:
            params["pblntf_ty"] = disclosure_type
        data = _dart_request("list", params)
        items = data.get("list", [])
        if not items:
            return f"No DART disclosures for {ticker} between {start_date} and {end_date}"
        result = f"# DART 공시목록: {ticker} ({start_date} ~ {end_date})\n\n"
        for item in items:
            report_nm = item.get("report_nm", "")
            rcept_dt = item.get("rcept_dt", "")
            flr_nm = item.get("flr_nm", "")  # Filing entity
            rcept_no = item.get("rcept_no", "")
            # Re-format receipt date yyyymmdd -> yyyy-mm-dd
            if len(rcept_dt) == 8:
                rcept_dt = f"{rcept_dt[:4]}-{rcept_dt[4:6]}-{rcept_dt[6:]}"
            result += f"### {report_nm}\n"
            result += f"접수일: {rcept_dt} | 제출인: {flr_nm}\n"
            result += f"DART 링크: https://dart.fss.or.kr/dsaf001/main.do?rcpNo={rcept_no}\n\n"
        result += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        return result
    except ValueError as e:
        return f"DART API error for {ticker}: {str(e)}"
    except Exception as e:
        return f"Error fetching DART disclosures for {ticker}: {str(e)}"
def get_dart_major_shareholders(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
) -> str:
    """Retrieve major shareholder information from DART.

    Shows the largest shareholders and their ownership percentages, which is
    important for Korean market analysis (대주주 지분 현황). Data is pulled
    from the most recent annual report (current business year first, then
    the previous one, since annual filings lag the calendar year). Failures
    are returned as message strings rather than raised.
    """
    try:
        corp_code = _get_corp_code(ticker)
    except ValueError as e:
        return str(e)
    try:
        # Get the latest annual report year
        current_year = datetime.now().year
        data = None
        # Try current year first, then previous year
        for year in [str(current_year), str(current_year - 1)]:
            try:
                params = {
                    "corp_code": corp_code,
                    "bsns_year": year,
                    "reprt_code": "11011",  # Annual report
                }
                data = _dart_request("hyslrSttus", params)
                if data.get("list"):
                    break
            except ValueError:
                # "no data" status for that year — fall back to the older one.
                continue
        if not data or not data.get("list"):
            return f"No major shareholder data found for {ticker}"
        items = data["list"]
        result = f"# DART 대주주 현황: {ticker}\n\n"
        result += f"{'주주명':<20} | {'관계':<15} | {'보유주식수':>15} | {'지분율':>10}\n"
        result += "-" * 65 + "\n"
        for item in items:
            nm = item.get("nm", "")
            relate = item.get("relate", "")
            stock_cnt = item.get("trmend_posesn_stock_co", "")      # shares held at term end
            ratio = item.get("trmend_posesn_stock_qota_rt", "")      # ownership ratio (%)
            if stock_cnt:
                try:
                    # Right-align with thousands separators when numeric.
                    stock_cnt = f"{int(stock_cnt.replace(',', '')):>15,}"
                except (ValueError, AttributeError):
                    stock_cnt = f"{stock_cnt:>15}"
            result += f"{nm:<20} | {relate:<15} | {stock_cnt:>15} | {ratio:>10}%\n"
        result += f"\n# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        return result
    except ValueError as e:
        return f"DART API error for {ticker}: {str(e)}"
    except Exception as e:
        return f"Error fetching shareholder data for {ticker}: {str(e)}"

View File

@ -24,6 +24,25 @@ from .alpha_vantage import (
)
from .alpha_vantage_common import AlphaVantageRateLimitError
# Import Korean market data sources
from .korea_finance import (
get_krx_stock_data as get_krx_stock_data_impl,
get_krx_indicators as get_krx_indicators_impl,
get_exchange_rate as get_exchange_rate_impl,
get_korea_index_data as get_korea_index_impl,
get_investor_trading_data as get_investor_trading_impl,
get_krx_fundamentals as get_krx_fundamentals_impl,
)
from .korea_news import (
get_korean_news as get_korean_news_impl,
get_korean_global_news as get_korean_global_news_impl,
)
from .dart_api import (
get_dart_financial_statements as get_dart_financials_impl,
get_dart_disclosures as get_dart_disclosures_impl,
get_dart_major_shareholders as get_dart_shareholders_impl,
)
# Configuration and routing logic
from .config import get_config
@ -57,27 +76,62 @@ TOOLS_CATEGORIES = {
"get_global_news",
"get_insider_transactions",
]
}
},
# ── Korean Market Categories ─────────────────────────────────────────
"krx_stock_apis": {
"description": "KRX (Korea Exchange) stock price data",
"tools": [
"get_krx_stock_data",
"get_krx_indicators",
]
},
"korean_market_data": {
"description": "Korean market context data (exchange rates, indices, investor flows)",
"tools": [
"get_exchange_rate",
"get_korea_index",
"get_investor_trading",
]
},
"korean_fundamental_data": {
"description": "Korean company fundamentals (KRX + DART)",
"tools": [
"get_krx_fundamentals",
"get_dart_financials",
"get_dart_disclosures",
"get_dart_shareholders",
]
},
"korean_news_data": {
"description": "Korean financial news",
"tools": [
"get_korean_news",
"get_korean_global_news",
]
},
}
# Known data-source vendor identifiers. "yfinance" and "alpha_vantage" back the
# existing US-market methods; "krx", "dart" and "naver" back the Korean-market
# methods registered in VENDOR_METHODS below.
VENDOR_LIST = [
    "yfinance",
    "alpha_vantage",
    "krx",
    "dart",
    "naver",
]
# Mapping of methods to their vendor-specific implementations
VENDOR_METHODS = {
# core_stock_apis
# ── Existing: core_stock_apis ─────────────────────────────────────────
"get_stock_data": {
"alpha_vantage": get_alpha_vantage_stock,
"yfinance": get_YFin_data_online,
},
# technical_indicators
# ── Existing: technical_indicators ────────────────────────────────────
"get_indicators": {
"alpha_vantage": get_alpha_vantage_indicator,
"yfinance": get_stock_stats_indicators_window,
},
# fundamental_data
# ── Existing: fundamental_data ───────────────────────────────────────
"get_fundamentals": {
"alpha_vantage": get_alpha_vantage_fundamentals,
"yfinance": get_yfinance_fundamentals,
@ -94,7 +148,7 @@ VENDOR_METHODS = {
"alpha_vantage": get_alpha_vantage_income_statement,
"yfinance": get_yfinance_income_statement,
},
# news_data
# ── Existing: news_data ──────────────────────────────────────────────
"get_news": {
"alpha_vantage": get_alpha_vantage_news,
"yfinance": get_news_yfinance,
@ -107,6 +161,43 @@ VENDOR_METHODS = {
"alpha_vantage": get_alpha_vantage_insider_transactions,
"yfinance": get_yfinance_insider_transactions,
},
# ── Korean: krx_stock_apis ───────────────────────────────────────────
"get_krx_stock_data": {
"krx": get_krx_stock_data_impl,
},
"get_krx_indicators": {
"krx": get_krx_indicators_impl,
},
# ── Korean: korean_market_data ───────────────────────────────────────
"get_exchange_rate": {
"krx": get_exchange_rate_impl,
},
"get_korea_index": {
"krx": get_korea_index_impl,
},
"get_investor_trading": {
"krx": get_investor_trading_impl,
},
# ── Korean: korean_fundamental_data ──────────────────────────────────
"get_krx_fundamentals": {
"krx": get_krx_fundamentals_impl,
},
"get_dart_financials": {
"dart": get_dart_financials_impl,
},
"get_dart_disclosures": {
"dart": get_dart_disclosures_impl,
},
"get_dart_shareholders": {
"dart": get_dart_shareholders_impl,
},
# ── Korean: korean_news_data ─────────────────────────────────────────
"get_korean_news": {
"naver": get_korean_news_impl,
},
"get_korean_global_news": {
"naver": get_korean_global_news_impl,
},
}
def get_category_for_method(method: str) -> str:
@ -159,4 +250,4 @@ def route_to_vendor(method: str, *args, **kwargs):
except AlphaVantageRateLimitError:
continue # Only rate limits trigger fallback
raise RuntimeError(f"No available vendor for '{method}'")
raise RuntimeError(f"No available vendor for '{method}'")

View File

@ -0,0 +1,397 @@
"""Korean market data source using FinanceDataReader and web scraping.
Provides KRX stock data (OHLCV), technical indicators, exchange rates,
KOSPI/KOSDAQ index data, and foreign/institutional investor flow data.
"""
from typing import Annotated
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import os
import pandas as pd
try:
import FinanceDataReader as fdr
except ImportError:
fdr = None
def _ensure_fdr():
    """Raise ImportError when the optional FinanceDataReader package is absent."""
    if fdr is not None:
        return
    raise ImportError(
        "FinanceDataReader is required for Korean market data. "
        "Install it with: pip install finance-datareader"
    )
def _normalize_krx_symbol(symbol: str) -> str:
"""Normalize KRX stock symbol (e.g., '005930' for Samsung Electronics).
Handles both pure numeric codes and codes with market suffix like '005930.KS'.
"""
symbol = symbol.strip().upper()
# Remove market suffixes
for suffix in [".KS", ".KQ", ".KRX"]:
if symbol.endswith(suffix):
symbol = symbol[: -len(suffix)]
return symbol
def get_krx_stock_data(
    symbol: Annotated[str, "KRX ticker symbol (e.g., '005930' for Samsung Electronics)"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Retrieve KRX stock OHLCV data using FinanceDataReader.

    Returns a commented CSV string (prices in KRW). Failures are returned
    as message strings rather than raised (except a missing FDR install,
    which raises ImportError via _ensure_fdr).
    """
    _ensure_fdr()
    symbol = _normalize_krx_symbol(symbol)
    try:
        data = fdr.DataReader(symbol, start_date, end_date)
        if data is None or data.empty:
            return f"No data found for KRX symbol '{symbol}' between {start_date} and {end_date}"
        # Standardize column names.
        # NOTE(review): as written this mapping is an identity (each name maps
        # to itself), so the rename is a no-op — presumably kept as a hook for
        # future source-specific column names; confirm.
        col_map = {
            "Open": "Open",
            "High": "High",
            "Low": "Low",
            "Close": "Close",
            "Volume": "Volume",
            "Change": "Change",
        }
        data = data.rename(columns={k: v for k, v in col_map.items() if k in data.columns})
        # Round numeric columns — KRX quotes are whole won.
        # NOTE(review): astype(int) raises on NaN rows (e.g. trading halts);
        # that error is swallowed into the text message below — confirm intended.
        for col in ["Open", "High", "Low", "Close"]:
            if col in data.columns:
                data[col] = data[col].round(0).astype(int)
        csv_string = data.to_csv()
        header = f"# KRX Stock data for {symbol} from {start_date} to {end_date}\n"
        header += f"# Total records: {len(data)}\n"
        header += f"# Currency: KRW (Korean Won)\n"
        header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
        return header + csv_string
    except Exception as e:
        return f"Error retrieving KRX stock data for {symbol}: {str(e)}"
def get_krx_indicators(
    symbol: Annotated[str, "KRX ticker symbol"],
    indicator: Annotated[str, "technical indicator name"],
    curr_date: Annotated[str, "Current trading date, YYYY-mm-dd"],
    look_back_days: Annotated[int, "how many days to look back"] = 30,
) -> str:
    """Calculate technical indicators for KRX stocks using FinanceDataReader + stockstats.

    Fetches one year of history ending at curr_date so slow indicators
    (e.g. close_200_sma) have enough warmup, then reports one line per
    calendar day of the look-back window (newest first); non-trading days
    are labeled explicitly. Failures are returned as message strings.
    """
    _ensure_fdr()
    from stockstats import wrap
    symbol = _normalize_krx_symbol(symbol)
    # Supported indicators, each mapped to the Korean usage note that is
    # appended to the output for the analyst LLM.
    best_ind_params = {
        "close_50_sma": "50 SMA: 중기 추세 지표. 추세 방향 및 동적 지지/저항 확인.",
        "close_200_sma": "200 SMA: 장기 추세 기준선. 골든크로스/데드크로스 확인.",
        "close_10_ema": "10 EMA: 단기 반응형 이동평균. 빠른 모멘텀 변화 포착.",
        "macd": "MACD: EMA 차이 기반 모멘텀 지표. 크로스오버/다이버전스 확인.",
        "macds": "MACD Signal: MACD 스무딩 라인. 매매 시그널 트리거.",
        "macdh": "MACD Histogram: MACD와 시그널 차이. 모멘텀 강도/다이버전스.",
        "rsi": "RSI: 과매수/과매도 판단. 70/30 기준선, 다이버전스 확인.",
        "boll": "Bollinger Middle: 20 SMA 기반. 가격 움직임 기준선.",
        "boll_ub": "Bollinger Upper: +2σ. 과매수/돌파 구간.",
        "boll_lb": "Bollinger Lower: -2σ. 과매도/반등 구간.",
        "atr": "ATR: 변동성 측정. 손절가/포지션 사이즈 결정 기준.",
        "vwma": "VWMA: 거래량 가중 이동평균. 거래량과 가격 통합 추세 확인.",
        "mfi": "MFI: 자금흐름지수. 가격+거래량 기반 과매수(>80)/과매도(<20) 판단.",
    }
    if indicator not in best_ind_params:
        return (
            f"Indicator '{indicator}' not supported. "
            f"Available: {list(best_ind_params.keys())}"
        )
    try:
        curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d")
        # Fetch extra data for indicator calculation warmup
        fetch_start = (curr_date_dt - relativedelta(years=1)).strftime("%Y-%m-%d")
        fetch_end = curr_date
        data = fdr.DataReader(symbol, fetch_start, fetch_end)
        if data is None or data.empty:
            return f"No data for KRX symbol '{symbol}'"
        data = data.reset_index()
        # Ensure Date column exists (the reset index column name can vary)
        if "Date" not in data.columns:
            data = data.rename(columns={data.columns[0]: "Date"})
        df = wrap(data)
        df["Date"] = pd.to_datetime(df["Date"]).dt.strftime("%Y-%m-%d")
        # stockstats computes columns lazily; this bare access triggers the
        # calculation of the requested indicator column.
        df[indicator]
        # Build result for look_back period
        before = curr_date_dt - relativedelta(days=look_back_days)
        result_dict = {}
        for _, row in df.iterrows():
            date_str = row["Date"]
            val = row[indicator]
            result_dict[date_str] = "N/A" if pd.isna(val) else str(round(float(val), 4))
        ind_string = ""
        current_dt = curr_date_dt
        # Walk backwards one calendar day at a time so weekends/holidays show
        # an explicit "non-trading day" marker instead of being skipped.
        while current_dt >= before:
            date_str = current_dt.strftime("%Y-%m-%d")
            value = result_dict.get(date_str, "N/A: 비거래일 (주말/공휴일)")
            ind_string += f"{date_str}: {value}\n"
            current_dt -= timedelta(days=1)
        return (
            f"## {indicator} values for KRX:{symbol} "
            f"from {before.strftime('%Y-%m-%d')} to {curr_date}:\n\n"
            + ind_string
            + f"\n\n{best_ind_params[indicator]}"
        )
    except Exception as e:
        return f"Error calculating indicator for KRX:{symbol}: {str(e)}"
def get_exchange_rate(
    currency_pair: Annotated[str, "Currency pair (e.g., 'USD/KRW', 'JPY/KRW', 'EUR/KRW')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Fetch FX rate history via FinanceDataReader and return it as CSV text.

    Failures are returned as message strings rather than raised (except a
    missing FDR install, which raises via _ensure_fdr).
    """
    _ensure_fdr()
    try:
        rates = fdr.DataReader(currency_pair, start_date, end_date)
        if rates is None or rates.empty:
            return f"No exchange rate data for '{currency_pair}' between {start_date} and {end_date}"
        header_lines = [
            f"# Exchange Rate: {currency_pair} from {start_date} to {end_date}\n",
            f"# Total records: {len(rates)}\n",
            f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n",
        ]
        return "".join(header_lines) + rates.to_csv()
    except Exception as e:
        return f"Error retrieving exchange rate for {currency_pair}: {str(e)}"
def get_korea_index_data(
    index_code: Annotated[str, "Index code: 'KS11' (KOSPI), 'KQ11' (KOSDAQ), 'KS200' (KOSPI200)"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Retrieve Korean market index history (KOSPI, KOSDAQ, KOSPI200) as CSV text.

    Unknown index codes are passed through to FinanceDataReader verbatim and
    labeled with the raw code. Failures are returned as message strings.
    """
    _ensure_fdr()
    index_names = {
        "KS11": "KOSPI",
        "KQ11": "KOSDAQ",
        "KS200": "KOSPI 200",
        "KS50": "KOSPI 50",
    }
    index_name = index_names.get(index_code, index_code)
    try:
        frame = fdr.DataReader(index_code, start_date, end_date)
        if frame is None or frame.empty:
            return f"No index data found for '{index_name}' between {start_date} and {end_date}"
        header = (
            f"# {index_name} Index Data from {start_date} to {end_date}\n"
            f"# Total records: {len(frame)}\n"
            f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
        )
        return header + frame.to_csv()
    except Exception as e:
        return f"Error retrieving {index_name} index data: {str(e)}"
def get_investor_trading_data(
    symbol: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Retrieve foreign and institutional investor trading (buy/sell) data for a KRX stock.

    Uses pykrx for detailed investor flow data. When pykrx is not installed,
    a Korean help message is returned instead; other failures are returned as
    error strings rather than raised.
    """
    symbol = _normalize_krx_symbol(symbol)
    try:
        from pykrx import stock as krx_stock
        # Get investor trading data by investor type
        df = krx_stock.get_market_trading_value_by_investor(
            start_date.replace("-", ""),  # pykrx expects compact yyyymmdd dates
            end_date.replace("-", ""),
            symbol,
        )
        if df is None or df.empty:
            return f"No investor trading data found for '{symbol}' between {start_date} and {end_date}"
        csv_string = df.to_csv()
        header = f"# 투자자별 매매동향: {symbol} ({start_date} ~ {end_date})\n"
        header += f"# 단위: KRW (원)\n"
        header += f"# 양수 = 순매수 (Net Buy), 음수 = 순매도 (Net Sell)\n"
        header += f"# 컬럼: 금융투자, 보험, 투신, 사모, 은행, 기타금융, 연기금, 기관합계, 기타법인, 개인, 외국인, 기타외국인, 전체\n"
        header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
        return header + csv_string
    except ImportError:
        # NOTE(review): despite the original "try FinanceDataReader" wording,
        # the fallback only returns a static install-pykrx help message.
        return _get_investor_data_fallback(symbol, start_date, end_date)
    except Exception as e:
        return f"Error retrieving investor trading data for {symbol}: {str(e)}"
def _get_investor_data_fallback(symbol: str, start_date: str, end_date: str) -> str:
"""Fallback for investor data when pykrx is not available."""
return (
f"투자자별 매매동향 데이터를 가져올 수 없습니다 (symbol: {symbol}).\n"
f"pykrx 패키지가 필요합니다: pip install pykrx\n"
f"pykrx 설치 후 외국인/기관 수급 데이터를 확인할 수 있습니다."
)
def get_krx_market_cap(
    symbol: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
) -> str:
    """Retrieve market capitalization and trading info for a KRX stock.

    Returns a Korean-language summary (market cap, volume, trading value,
    listed shares) for curr_date, or a message string when pykrx is missing
    or the lookup fails; this function never raises (beyond _ensure_fdr).
    """
    _ensure_fdr()
    symbol = _normalize_krx_symbol(symbol)

    def _fmt(value):
        """Thousands-separated rendering; fall back to str() for non-numerics."""
        try:
            return f"{value:,}"
        except (ValueError, TypeError):
            return str(value)

    try:
        from pykrx import stock as krx_stock

        date_str = curr_date.replace("-", "")  # pykrx expects compact yyyymmdd
        # Get market cap for the specific date (single-day window)
        df = krx_stock.get_market_cap_by_date(date_str, date_str, symbol)
        if df is None or df.empty:
            return f"No market cap data for '{symbol}' on {curr_date}"
        result = f"# KRX 시가총액 정보: {symbol} ({curr_date})\n\n"
        for _, row in df.iterrows():
            # BUG FIX: the previous f"{row.get(...):,}" raised ValueError when a
            # column was missing and the 'N/A' string hit the ',' format spec,
            # so the whole report degraded to an "Error retrieving..." message.
            result += f"시가총액: {_fmt(row.get('시가총액', 'N/A'))}\n"
            result += f"거래량: {_fmt(row.get('거래량', 'N/A'))}\n"
            result += f"거래대금: {_fmt(row.get('거래대금', 'N/A'))}\n"
            result += f"상장주식수: {_fmt(row.get('상장주식수', 'N/A'))}\n"
        return result
    except ImportError:
        return f"시가총액 데이터를 가져오려면 pykrx 패키지가 필요합니다: pip install pykrx"
    except Exception as e:
        return f"Error retrieving market cap for {symbol}: {str(e)}"
def get_krx_fundamentals(
    ticker: Annotated[str, "KRX ticker symbol (e.g., '005930')"],
    curr_date: Annotated[str, "current date in yyyy-mm-dd format"] = None,
) -> str:
    """Get fundamental data for a KRX-listed company.

    Combines FinanceDataReader stock info (name/market/sector) with pykrx
    fundamental ratios (PER/PBR/EPS/BPS/DIV/DPS) and market cap. Each data
    source is best-effort: individual failures degrade the report instead of
    raising. When curr_date is None, today's date is used for pykrx lookups.
    """
    _ensure_fdr()
    ticker = _normalize_krx_symbol(ticker)
    result_lines = []
    result_lines.append(f"# KRX 기업 기본정보: {ticker}\n")
    # Try to get basic info from FinanceDataReader
    try:
        listing = fdr.StockListing("KRX")
        if listing is not None and not listing.empty:
            # Search for the ticker; the code column name ("Code" vs "Symbol")
            # varies between FinanceDataReader versions.
            match = listing[listing["Code"] == ticker]
            if match.empty:
                match = listing[listing["Symbol"] == ticker]
            if not match.empty:
                row = match.iloc[0]
                name = row.get("Name", row.get("ISU_ABBRV", "N/A"))
                market = row.get("Market", "N/A")
                sector = row.get("Sector", row.get("업종명", "N/A"))
                industry = row.get("Industry", "N/A")
                result_lines.append(f"종목명: {name}")
                result_lines.append(f"시장: {market}")
                result_lines.append(f"업종: {sector}")
                if industry != "N/A":
                    result_lines.append(f"산업: {industry}")
    except Exception:
        # Listing lookup is optional enrichment; ignore failures silently.
        pass
    # Try to get fundamental ratios from pykrx
    try:
        from pykrx import stock as krx_stock
        if curr_date:
            date_str = curr_date.replace("-", "")  # pykrx expects yyyymmdd
        else:
            date_str = datetime.now().strftime("%Y%m%d")
        # Get PER, PBR, DIV from pykrx (single-day window)
        fund_df = krx_stock.get_market_fundamental_by_date(date_str, date_str, ticker)
        if fund_df is not None and not fund_df.empty:
            row = fund_df.iloc[0]
            result_lines.append(f"\n## 투자지표 ({curr_date or 'latest'})")
            # Each metric is emitted only when the column exists in this
            # pykrx version's response.
            if "BPS" in fund_df.columns:
                result_lines.append(f"BPS (주당순자산): {row['BPS']:,.0f}")
            if "PER" in fund_df.columns:
                result_lines.append(f"PER (주가수익비율): {row['PER']:.2f}")
            if "PBR" in fund_df.columns:
                result_lines.append(f"PBR (주가순자산비율): {row['PBR']:.2f}")
            if "EPS" in fund_df.columns:
                result_lines.append(f"EPS (주당순이익): {row['EPS']:,.0f}")
            if "DIV" in fund_df.columns:
                result_lines.append(f"배당수익률: {row['DIV']:.2f}%")
            if "DPS" in fund_df.columns:
                result_lines.append(f"DPS (주당배당금): {row['DPS']:,.0f}")
        # Get market cap
        cap_df = krx_stock.get_market_cap_by_date(date_str, date_str, ticker)
        if cap_df is not None and not cap_df.empty:
            cap_row = cap_df.iloc[0]
            result_lines.append(f"\n## 시가총액 정보")
            if "시가총액" in cap_df.columns:
                market_cap = cap_row["시가총액"]
                # Show raw KRW plus 억 원 (hundreds of millions of won)
                result_lines.append(f"시가총액: {market_cap:,.0f} 원 ({market_cap / 100_000_000:,.0f} 억원)")
            if "상장주식수" in cap_df.columns:
                result_lines.append(f"상장주식수: {cap_row['상장주식수']:,.0f}")
    except ImportError:
        result_lines.append("\n(pykrx 패키지 미설치 - 투자지표 데이터 제한)")
    except Exception as e:
        result_lines.append(f"\n투자지표 조회 오류: {str(e)}")
    # Only the title line present means every lookup came back empty.
    if len(result_lines) <= 1:
        return f"No fundamental data found for KRX symbol '{ticker}'"
    result_lines.append(f"\n# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    return "\n".join(result_lines)

View File

@ -0,0 +1,290 @@
"""Korean news data source using Naver Finance API and web scraping.
Provides Korean financial news, company-specific news, and macro news
relevant to the Korean market.
"""
import requests
from datetime import datetime, timedelta
from typing import Annotated
from dateutil.relativedelta import relativedelta
# Naver Search API headers (user should set NAVER_CLIENT_ID and NAVER_CLIENT_SECRET env vars)
_NAVER_HEADERS = None
def _get_naver_headers():
"""Get Naver API headers, lazy-loaded."""
global _NAVER_HEADERS
if _NAVER_HEADERS is None:
import os
client_id = os.environ.get("NAVER_CLIENT_ID", "")
client_secret = os.environ.get("NAVER_CLIENT_SECRET", "")
if client_id and client_secret:
_NAVER_HEADERS = {
"X-Naver-Client-Id": client_id,
"X-Naver-Client-Secret": client_secret,
}
else:
_NAVER_HEADERS = {}
return _NAVER_HEADERS
def _get_stock_name_from_code(code: str) -> str:
"""Try to resolve stock name from code for better news search."""
try:
import FinanceDataReader as fdr
listing = fdr.StockListing("KRX")
if listing is not None and not listing.empty:
match = listing[listing["Code"] == code]
if match.empty:
match = listing[listing["Symbol"] == code]
if not match.empty:
return match.iloc[0].get("Name", code)
except Exception:
pass
return code
def get_korean_news(
    ticker: Annotated[str, "KRX ticker symbol or company name"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """Retrieve Korean financial news for a specific stock.

    Prefers the Naver Search API when credentials are configured; otherwise
    falls back to RSS-based news fetching.
    """
    # Numeric tickers search better once resolved to a company name
    # (e.g. '005930' -> 삼성전자); non-numeric input is used verbatim.
    company_name = _get_stock_name_from_code(ticker) if ticker.isdigit() else ticker
    headers = _get_naver_headers()
    if not headers:
        return _fetch_rss_news(company_name, ticker, start_date, end_date)
    return _fetch_naver_api_news(company_name, ticker, start_date, end_date, headers)
def _fetch_naver_api_news(
    company_name: str,
    ticker: str,
    start_date: str,
    end_date: str,
    headers: dict,
) -> str:
    """Fetch stock news via the Naver Search API and format it as markdown.

    Args:
        company_name: Human-readable company name used in the search query.
        ticker: Original ticker, used only in the report headings.
        start_date / end_date: Inclusive yyyy-mm-dd window for filtering.
        headers: Naver API auth headers from _get_naver_headers().

    Returns:
        Markdown digest of matching articles, or a "no news"/error string.
        Never raises: any failure is folded into the returned message.
    """
    try:
        query = f"{company_name} 주가"
        url = "https://openapi.naver.com/v1/search/news.json"
        params = {
            "query": query,
            "display": 20,
            "sort": "date",
        }
        response = requests.get(url, headers=headers, params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        items = data.get("items", [])
        if not items:
            return f"No Korean news found for {company_name} ({ticker})"
        # Parse date range; end is pushed one day forward so articles
        # published any time on end_date fall inside the window.
        start_dt = datetime.strptime(start_date, "%Y-%m-%d")
        end_dt = datetime.strptime(end_date, "%Y-%m-%d") + timedelta(days=1)
        news_str = ""
        count = 0
        for item in items:
            # Parse pubDate (RFC-2822 style, e.g. "Mon, 01 Jan 2024 09:00:00 +0900").
            pub_date_str = item.get("pubDate", "")
            try:
                pub_date = datetime.strptime(pub_date_str, "%a, %d %b %Y %H:%M:%S %z")
                # Compare naive: the window bounds are naive datetimes.
                pub_date_naive = pub_date.replace(tzinfo=None)
                if not (start_dt <= pub_date_naive <= end_dt):
                    continue
            except (ValueError, TypeError):
                # Best effort: items with unparseable dates are kept
                # rather than dropped.
                pass
            title = _clean_html(item.get("title", ""))
            description = _clean_html(item.get("description", ""))
            # Prefer the original article URL over Naver's redirect link.
            link = item.get("originallink", item.get("link", ""))
            news_str += f"### {title}\n"
            if description:
                news_str += f"{description}\n"
            if link:
                news_str += f"Link: {link}\n"
            news_str += "\n"
            count += 1
        if count == 0:
            return f"No Korean news found for {company_name} ({ticker}) between {start_date} and {end_date}"
        return f"## {company_name} ({ticker}) 한국 뉴스 ({start_date} ~ {end_date}):\n\n{news_str}"
    except Exception as e:
        return f"Error fetching Korean news for {company_name}: {str(e)}"
def _fetch_rss_news(
    company_name: str,
    ticker: str,
    start_date: str,
    end_date: str,
) -> str:
    """Fallback news fetcher using the Google News RSS feed (Korean locale).

    Used when Naver API credentials are not configured. The date window
    appears only in the report heading; RSS items are not date-filtered.
    """
    try:
        import urllib.parse
        import xml.etree.ElementTree as ET

        encoded = urllib.parse.quote(f"{company_name} 주식")
        feed_url = f"https://news.google.com/rss/search?q={encoded}&hl=ko&gl=KR&ceid=KR:ko"
        resp = requests.get(feed_url, timeout=10)
        resp.raise_for_status()

        entries = ET.fromstring(resp.text).findall(".//item")
        if not entries:
            return f"No Korean news found for {company_name} ({ticker})"

        parts = []
        for entry in entries[:15]:
            headline = entry.findtext("title", "")
            url = entry.findtext("link", "")
            published = entry.findtext("pubDate", "")
            outlet = entry.findtext("source", "")

            header = f"### {headline}"
            if outlet:
                header += f" (source: {outlet})"
            parts.append(header + "\n")
            if published:
                parts.append(f"Published: {published}\n")
            if url:
                parts.append(f"Link: {url}\n")
            parts.append("\n")

        if not parts:
            return f"No Korean news found for {company_name} ({ticker})"
        news_str = "".join(parts)
        return f"## {company_name} ({ticker}) 한국 뉴스 ({start_date} ~ {end_date}):\n\n{news_str}"
    except Exception as e:
        return f"Error fetching Korean news via RSS: {str(e)}"
def get_korean_global_news(
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "Number of days to look back"] = 7,
    limit: Annotated[int, "Maximum number of articles to return"] = 10,
) -> str:
    """Retrieve Korean macro/global economic news as a markdown digest.

    Searches for key Korean market topics: BOK base rate, KOSPI outlook,
    USD/KRW exchange rate, Korean economy, foreign investment.

    Uses the Naver Search API when credentials exist, otherwise Google
    News RSS. Per-topic failures are swallowed so one bad feed does not
    abort the scan. Results are deduplicated by headline and capped at
    ``limit`` articles.
    """
    # Fixed macro topics queried in priority order.
    search_queries = [
        "한국은행 기준금리",
        "코스피 전망",
        "원달러 환율",
        "한국 경제 전망",
        "외국인 투자 한국",
    ]
    headers = _get_naver_headers()
    # Look-back window is informational (shown in the heading); fetched
    # articles are not actually filtered by date here.
    curr_dt = datetime.strptime(curr_date, "%Y-%m-%d")
    start_dt = curr_dt - timedelta(days=look_back_days)
    start_date = start_dt.strftime("%Y-%m-%d")
    all_news = []
    seen_titles = set()  # dedupe identical headlines across topics
    for query in search_queries:
        try:
            if headers:
                # Naver Search API: 5 newest items per topic.
                url = "https://openapi.naver.com/v1/search/news.json"
                params = {"query": query, "display": 5, "sort": "date"}
                resp = requests.get(url, headers=headers, params=params, timeout=10)
                resp.raise_for_status()
                items = resp.json().get("items", [])
                for item in items:
                    title = _clean_html(item.get("title", ""))
                    if title and title not in seen_titles:
                        seen_titles.add(title)
                        all_news.append({
                            "title": title,
                            "description": _clean_html(item.get("description", "")),
                            # Prefer the original article URL over Naver's redirect.
                            "link": item.get("originallink", item.get("link", "")),
                            "pubDate": item.get("pubDate", ""),
                        })
            else:
                # Fallback to Google News RSS (Korean locale), 3 items per topic.
                import urllib.parse
                encoded_query = urllib.parse.quote(query)
                rss_url = f"https://news.google.com/rss/search?q={encoded_query}&hl=ko&gl=KR&ceid=KR:ko"
                resp = requests.get(rss_url, timeout=10)
                if resp.status_code == 200:
                    import xml.etree.ElementTree as ET
                    root = ET.fromstring(resp.text)
                    for item in root.findall(".//item")[:3]:
                        title = item.findtext("title", "")
                        if title and title not in seen_titles:
                            seen_titles.add(title)
                            all_news.append({
                                "title": title,
                                "description": "",
                                "link": item.findtext("link", ""),
                                "pubDate": item.findtext("pubDate", ""),
                            })
        except Exception:
            # Best effort: skip topics whose fetch/parse fails.
            continue
        # Stop querying further topics once we have enough articles.
        if len(all_news) >= limit:
            break
    if not all_news:
        return f"No Korean global/macro news found for {curr_date}"
    news_str = ""
    for article in all_news[:limit]:
        news_str += f"### {article['title']}\n"
        if article["description"]:
            news_str += f"{article['description']}\n"
        if article["pubDate"]:
            news_str += f"Published: {article['pubDate']}\n"
        if article["link"]:
            news_str += f"Link: {article['link']}\n"
        news_str += "\n"
    return f"## 한국 시장/거시경제 뉴스 ({start_date} ~ {curr_date}):\n\n{news_str}"
def _clean_html(text: str) -> str:
"""Remove HTML tags from text."""
import re
clean = re.sub(r"<[^>]+>", "", text)
clean = clean.replace("&quot;", '"').replace("&amp;", "&").replace("&lt;", "<").replace("&gt;", ">")
return clean.strip()

View File

@ -0,0 +1,349 @@
"""Bulk data retrieval for stock screening.
Provides efficient batch operations for scanning entire markets,
bypassing LangChain tool wrappers for performance.
"""
import logging
from datetime import datetime, timedelta
from typing import Optional
import pandas as pd
logger = logging.getLogger(__name__)
def get_krx_universe(
    min_market_cap: float = 500_000_000_000,
    min_volume: int = 100_000,
) -> pd.DataFrame:
    """Get all KRX-listed stocks filtered by market cap and volume.

    Args:
        min_market_cap: Minimum market cap in KRW (default 5000억원).
        min_volume: Minimum daily share volume; applied only when pykrx
            volume data can be fetched.

    Returns:
        DataFrame with columns: Code, Name, Market, Sector, MarketCap, Volume.
        Falls back to a curated hardcoded universe when the live KRX
        listing cannot be fetched (cap/volume filters are skipped then).

    Raises:
        ImportError: If FinanceDataReader is not installed.
    """
    try:
        import FinanceDataReader as fdr
    except ImportError:
        raise ImportError("FinanceDataReader required: pip install finance-datareader")
    listing = None
    is_fallback = False
    try:
        listing = fdr.StockListing("KRX")
    except Exception as e:
        logger.warning(f"FDR KRX listing failed: {e}")
    if listing is None or listing.empty:
        logger.info("Using fallback KRX universe (top stocks by market cap)")
        listing = _get_krx_fallback_universe()
        is_fallback = True
    if listing.empty:
        return pd.DataFrame()
    # Normalize columns (FDR column names vary across versions and may be
    # Korean or English)
    col_renames = {}
    for col in listing.columns:
        lower = col.lower()
        if lower in ("code", "symbol", "종목코드"):
            col_renames[col] = "Code"
        elif lower in ("name", "isu_abbrv", "종목명"):
            col_renames[col] = "Name"
        elif lower in ("market", "시장구분"):
            col_renames[col] = "Market"
        elif lower in ("sector", "업종명"):
            col_renames[col] = "Sector"
        elif lower in ("marketcap", "시가총액"):
            col_renames[col] = "MarketCap"
    listing = listing.rename(columns=col_renames)
    # Skip market cap / volume filters for fallback (already curated)
    if is_fallback:
        if "Volume" not in listing.columns:
            listing["Volume"] = 0
        return listing.reset_index(drop=True)
    # Filter by market cap if available
    if "MarketCap" in listing.columns:
        listing["MarketCap"] = pd.to_numeric(listing["MarketCap"], errors="coerce")
        listing = listing[listing["MarketCap"] >= min_market_cap]
    # Try to get volume data from pykrx for additional filtering
    try:
        from pykrx import stock as krx_stock
        vol_df = None
        # Walk back up to 10 calendar days to find the most recent trading
        # session with OHLCV data (weekends/holidays return empty frames).
        for days_back in range(0, 10):
            try:
                target = (datetime.now() - timedelta(days=days_back)).strftime("%Y%m%d")
                vol_df = krx_stock.get_market_ohlcv(target, market="ALL")
                if vol_df is not None and not vol_df.empty:
                    break
            except Exception:
                continue
        if vol_df is not None and not vol_df.empty:
            vol_df = vol_df.reset_index()
            # pykrx columns are usually Korean; fall back to English name
            # or the first column for the ticker index.
            vol_col = "거래량" if "거래량" in vol_df.columns else "Volume"
            ticker_col = "티커" if "티커" in vol_df.columns else vol_df.columns[0]
            vol_map = dict(zip(vol_df[ticker_col].astype(str), vol_df[vol_col]))
            # Unknown tickers get volume 0 and are filtered out below.
            listing["Volume"] = listing["Code"].map(vol_map).fillna(0)
            listing = listing[listing["Volume"] >= min_volume]
        else:
            logger.warning("Could not get volume data from pykrx")
            listing["Volume"] = 0
    except ImportError:
        logger.warning("pykrx not installed - skipping volume filter")
        listing["Volume"] = 0
    except Exception as e:
        logger.warning(f"Could not filter by volume: {e}")
        listing["Volume"] = 0
    return listing.reset_index(drop=True)
def get_us_universe(
    universe_type: str = "sp500",
    custom_watchlist: Optional[list[str]] = None,
) -> pd.DataFrame:
    """Build the US stock universe.

    Args:
        universe_type: "sp500", "nasdaq100", or "custom".
        custom_watchlist: Ticker list used when universe_type is "custom"
            (also the fallback for unrecognized universe_type values).

    Returns:
        DataFrame with Code/Name/Market/Sector/MarketCap/Volume columns;
        empty DataFrame when no tickers are resolved.
    """
    import yfinance as yf

    if universe_type == "custom" and custom_watchlist:
        symbols = custom_watchlist
    elif universe_type == "sp500":
        symbols = _get_sp500_tickers()
    elif universe_type == "nasdaq100":
        symbols = _get_nasdaq100_tickers()
    else:
        symbols = custom_watchlist or []

    if not symbols:
        return pd.DataFrame()

    records = []
    for symbol in symbols:
        # Best effort per ticker: emit a stub row when the yfinance lookup
        # fails so a single bad symbol doesn't abort the whole scan.
        try:
            meta = yf.Ticker(symbol).info
            record = {
                "Code": symbol,
                "Name": meta.get("longName", symbol),
                "Market": "US",
                "Sector": meta.get("sector", ""),
                "MarketCap": meta.get("marketCap", 0),
                "Volume": meta.get("averageVolume", 0),
            }
        except Exception:
            record = {
                "Code": symbol,
                "Name": symbol,
                "Market": "US",
                "Sector": "",
                "MarketCap": 0,
                "Volume": 0,
            }
        records.append(record)
    return pd.DataFrame(records)
def get_bulk_ohlcv(
    tickers: list[str],
    start_date: str,
    end_date: str,
    market: str = "KRX",
) -> dict[str, pd.DataFrame]:
    """Fetch OHLCV history for many tickers in one call.

    Tickers whose download fails (or returns nothing) are logged and
    omitted from the result.

    Returns:
        Mapping of ticker -> OHLCV DataFrame for successful fetches only.

    Raises:
        ImportError: If market is "KRX" and FinanceDataReader is missing.
    """
    frames: dict[str, pd.DataFrame] = {}
    if market == "KRX":
        try:
            import FinanceDataReader as fdr
        except ImportError:
            raise ImportError("FinanceDataReader required for KRX data")
        for symbol in tickers:
            try:
                frame = fdr.DataReader(symbol, start_date, end_date)
            except Exception as e:
                logger.warning(f"Failed to get OHLCV for {symbol}: {e}")
                continue
            if frame is not None and not frame.empty:
                frames[symbol] = frame
    elif market == "US":
        import yfinance as yf

        for symbol in tickers:
            try:
                frame = yf.download(
                    symbol,
                    start=start_date,
                    end=end_date,
                    progress=False,
                    multi_level_index=False,
                    auto_adjust=True,
                )
            except Exception as e:
                logger.warning(f"Failed to get OHLCV for {symbol}: {e}")
                continue
            if frame is not None and not frame.empty:
                frames[symbol] = frame
    return frames
def compute_screening_indicators(df: pd.DataFrame) -> dict:
    """Compute swing-screening indicators from an OHLCV DataFrame.

    Args:
        df: DataFrame with at least a "Close" column (optionally "Volume"),
            oldest row first.

    Returns:
        Dict of indicator values (moving averages, RSI, volume ratio,
        Bollinger bands, percent changes). Empty dict when fewer than
        20 rows are available.
    """
    if df is None or len(df) < 20:
        return {}
    close = df["Close"]
    volume = df["Volume"] if "Volume" in df.columns else None
    indicators = {}
    # Moving averages (50-day variants only when enough history exists)
    if len(close) >= 50:
        indicators["sma_10"] = close.rolling(10).mean().iloc[-1]
        indicators["sma_20"] = close.rolling(20).mean().iloc[-1]
        indicators["sma_50"] = close.rolling(50).mean().iloc[-1]
        indicators["ema_10"] = close.ewm(span=10).mean().iloc[-1]
        indicators["ema_20"] = close.ewm(span=20).mean().iloc[-1]
    else:
        indicators["sma_10"] = close.rolling(10).mean().iloc[-1]
        indicators["sma_20"] = close.rolling(20).mean().iloc[-1]
        indicators["ema_10"] = close.ewm(span=10).mean().iloc[-1]
    indicators["current_price"] = close.iloc[-1]
    indicators["prev_close"] = close.iloc[-2] if len(close) >= 2 else close.iloc[-1]
    # RSI (14-day, simple rolling averages of gains/losses)
    if len(close) >= 15:
        delta = close.diff()
        avg_gain = delta.where(delta > 0, 0).rolling(14).mean()
        avg_loss = (-delta.where(delta < 0, 0)).rolling(14).mean()
        rs = avg_gain / avg_loss
        rsi = 100 - (100 / (1 + rs))
        # When avg loss is 0 (no down days in the window) RSI is 100 by
        # definition. The previous `loss.replace(0, inf)` made rs=0 and
        # RSI=0 in that case — the inverted signal for strong uptrends.
        rsi = rsi.where(avg_loss != 0, 100.0)
        indicators["rsi"] = rsi.iloc[-1]
        indicators["rsi_prev"] = rsi.iloc[-2] if len(rsi) >= 2 else rsi.iloc[-1]
    # Volume analysis
    if volume is not None and len(volume) >= 20:
        vol_avg_20 = volume.rolling(20).mean().iloc[-1]
        indicators["volume_current"] = volume.iloc[-1]
        indicators["volume_avg_20"] = vol_avg_20
        indicators["volume_ratio"] = volume.iloc[-1] / vol_avg_20
    # Bollinger Bands (20-day, 2 std)
    if len(close) >= 20:
        sma20 = close.rolling(20).mean()
        std20 = close.rolling(20).std()
        indicators["boll_upper"] = (sma20 + 2 * std20).iloc[-1]
        indicators["boll_lower"] = (sma20 - 2 * std20).iloc[-1]
        indicators["boll_middle"] = sma20.iloc[-1]
    # Price change (note: iloc[-5] is 4 trading days back)
    if len(close) >= 5:
        indicators["pct_change_1d"] = (close.iloc[-1] / close.iloc[-2] - 1) * 100
        indicators["pct_change_5d"] = (close.iloc[-1] / close.iloc[-5] - 1) * 100
    if len(close) >= 20:
        indicators["pct_change_20d"] = (close.iloc[-1] / close.iloc[-20] - 1) * 100
    return indicators
def _get_sp500_tickers() -> list[str]:
"""Get S&P 500 ticker list from Wikipedia."""
try:
table = pd.read_html("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies")
return table[0]["Symbol"].tolist()
except Exception:
# Fallback: top 50 by market cap
return [
"AAPL", "MSFT", "AMZN", "NVDA", "GOOGL", "META", "BRK-B", "TSLA",
"UNH", "LLY", "JPM", "XOM", "V", "JNJ", "PG", "MA", "AVGO",
"HD", "MRK", "COST", "ABBV", "CVX", "PEP", "KO", "ADBE",
"WMT", "CRM", "MCD", "CSCO", "ACN", "BAC", "NFLX", "TMO",
"AMD", "LIN", "ABT", "ORCL", "DHR", "CMCSA", "PFE", "DIS",
"WFC", "PM", "INTC", "VZ", "INTU", "COP", "AMGN", "IBM", "GE",
]
def _get_nasdaq100_tickers() -> list[str]:
"""Get NASDAQ 100 ticker list."""
try:
table = pd.read_html("https://en.wikipedia.org/wiki/Nasdaq-100")
for t in table:
if "Ticker" in t.columns:
return t["Ticker"].tolist()
if "Symbol" in t.columns:
return t["Symbol"].tolist()
except Exception:
pass
# Fallback: major NASDAQ 100 components
return [
"AAPL", "MSFT", "AMZN", "NVDA", "GOOGL", "META", "TSLA", "AVGO",
"COST", "ADBE", "NFLX", "AMD", "PEP", "CSCO", "INTC", "INTU",
"CMCSA", "AMGN", "TMUS", "TXN", "QCOM", "ISRG", "BKNG", "HON",
"AMAT", "VRTX", "ADP", "GILD", "SBUX", "MDLZ", "ADI", "LRCX",
"PANW", "MU", "REGN", "SNPS", "KLAC", "CDNS", "PYPL", "MAR",
]
def _get_krx_fallback_universe() -> pd.DataFrame:
"""Fallback KRX universe when API listing is unavailable.
Returns top ~100 KRX stocks by market cap (hardcoded).
"""
# Top KRX stocks (KOSPI large-cap + select KOSDAQ)
stocks = [
("005930", "삼성전자"), ("000660", "SK하이닉스"), ("373220", "LG에너지솔루션"),
("207940", "삼성바이오로직스"), ("005380", "현대차"), ("000270", "기아"),
("006400", "삼성SDI"), ("051910", "LG화학"), ("035420", "NAVER"),
("035720", "카카오"), ("068270", "셀트리온"), ("028260", "삼성물산"),
("105560", "KB금융"), ("055550", "신한지주"), ("012330", "현대모비스"),
("066570", "LG전자"), ("003670", "포스코퓨처엠"), ("096770", "SK이노베이션"),
("034730", "SK"), ("015760", "한국전력"), ("003550", "LG"),
("032830", "삼성생명"), ("086790", "하나금융지주"), ("316140", "우리금융지주"),
("010130", "고려아연"), ("009150", "삼성전기"), ("033780", "KT&G"),
("030200", "KT"), ("017670", "SK텔레콤"), ("000810", "삼성화재"),
("018260", "삼성에스디에스"), ("036570", "엔씨소프트"), ("034020", "두산에너빌리티"),
("003490", "대한항공"), ("011200", "HMM"), ("010950", "S-Oil"),
("326030", "SK바이오팜"), ("259960", "크래프톤"), ("352820", "하이브"),
("011170", "롯데케미칼"), ("090430", "아모레퍼시픽"), ("051900", "LG생활건강"),
("000720", "현대건설"), ("034220", "LG디스플레이"), ("010140", "삼성중공업"),
("009540", "HD한국조선해양"), ("329180", "HD현대중공업"), ("042670", "HD현대인프라코어"),
("267260", "HD현대"), ("402340", "SK스퀘어"), ("361610", "SK아이이테크놀로지"),
("377300", "카카오페이"), ("035900", "JYP Ent."), ("041510", "에스엠"),
("263750", "펄어비스"), ("112040", "위메이드"), ("293490", "카카오게임즈"),
("047050", "포스코인터내셔널"), ("005490", "POSCO홀딩스"), ("138040", "메리츠금융지주"),
("006800", "미래에셋증권"), ("003410", "쌍용C&E"), ("069500", "KODEX 200"),
("161390", "한국타이어앤테크놀로지"), ("024110", "기업은행"), ("078930", "GS"),
("036460", "한국가스공사"), ("004020", "현대제철"), ("011790", "SKC"),
("180640", "한진칼"), ("097950", "CJ제일제당"), ("028050", "삼성엔지니어링"),
("000100", "유한양행"), ("128940", "한미약품"), ("004990", "롯데지주"),
("032640", "LG유플러스"), ("009830", "한화솔루션"), ("272210", "한화시스템"),
("016360", "삼성증권"), ("088350", "한화생명"), ("001570", "금양"),
("053800", "안랩"), ("122870", "와이지엔터테인먼트"), ("145020", "휴젤"),
("247540", "에코프로비엠"), ("086520", "에코프로"),
]
return pd.DataFrame([
{"Code": code, "Name": name, "Market": "KRX", "Sector": "", "MarketCap": 0, "Volume": 0}
for code, name in stocks
])
if __name__ == "__main__":
    # Manual smoke test: run the universe builder standalone; the result
    # is discarded (success means no exception was raised).
    get_krx_universe()

View File

@ -15,20 +15,41 @@ DEFAULT_CONFIG = {
# Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,
# Graph settings
"max_recur_limit": 100,
# Swing trading settings
"market": "KRX", # "KRX" or "US"
"swing_hold_days_min": 2,
"swing_hold_days_max": 20,
# Portfolio settings
"portfolio_id": "default",
"total_capital": 100_000_000, # 1억원 (KRX) or $100,000 (US)
"max_positions": 5,
"max_position_pct": 0.20, # 20% of total capital per position
"default_stop_loss_pct": 0.05, # 5%
"default_take_profit_pct": 0.15, # 15%
# Screening settings
"screening_min_market_cap": 500_000_000_000, # 5000억원
"screening_min_volume": 100_000,
"screening_max_candidates": 5,
"us_universe": "sp500", # "sp500", "nasdaq100", or "custom"
"custom_watchlist": [], # custom ticker list for manual universe
# Data vendor configuration
# Category-level configuration (default for all tools in category)
"data_vendors": {
"core_stock_apis": "yfinance", # Options: alpha_vantage, yfinance
"technical_indicators": "yfinance", # Options: alpha_vantage, yfinance
"fundamental_data": "yfinance", # Options: alpha_vantage, yfinance
"news_data": "yfinance", # Options: alpha_vantage, yfinance
"core_stock_apis": "yfinance",
"technical_indicators": "yfinance",
"fundamental_data": "yfinance",
"news_data": "yfinance",
# Korean market data vendors
"krx_stock_apis": "krx",
"korean_market_data": "krx",
"korean_fundamental_data": "krx",
"korean_news_data": "naver",
},
# Tool-level configuration (takes precedence over category-level)
"tool_vendors": {
# Example: "get_stock_data": "alpha_vantage", # Override category default
"get_dart_financials": "dart",
"get_dart_disclosures": "dart",
"get_dart_shareholders": "dart",
},
}

View File

@ -4,31 +4,16 @@ from tradingagents.agents.utils.agent_states import AgentState
class ConditionalLogic:
"""Handles conditional logic for determining graph flow."""
def __init__(self, max_debate_rounds=1, max_risk_discuss_rounds=1):
"""Initialize with configuration parameters."""
self.max_debate_rounds = max_debate_rounds
self.max_risk_discuss_rounds = max_risk_discuss_rounds
"""Handles conditional logic for analyst tool-call routing."""
def should_continue_market(self, state: AgentState):
"""Determine if market analysis should continue."""
messages = state["messages"]
last_message = messages[-1]
if last_message.tool_calls:
return "tools_market"
return "Msg Clear Market"
def should_continue_social(self, state: AgentState):
"""Determine if social media analysis should continue."""
messages = state["messages"]
last_message = messages[-1]
if last_message.tool_calls:
return "tools_social"
return "Msg Clear Social"
def should_continue_news(self, state: AgentState):
"""Determine if news analysis should continue."""
messages = state["messages"]
last_message = messages[-1]
if last_message.tool_calls:
@ -36,32 +21,8 @@ class ConditionalLogic:
return "Msg Clear News"
def should_continue_fundamentals(self, state: AgentState):
"""Determine if fundamentals analysis should continue."""
messages = state["messages"]
last_message = messages[-1]
if last_message.tool_calls:
return "tools_fundamentals"
return "Msg Clear Fundamentals"
def should_continue_debate(self, state: AgentState) -> str:
"""Determine if debate should continue."""
if (
state["investment_debate_state"]["count"] >= 2 * self.max_debate_rounds
): # 3 rounds of back-and-forth between 2 agents
return "Research Manager"
if state["investment_debate_state"]["current_response"].startswith("Bull"):
return "Bear Researcher"
return "Bull Researcher"
def should_continue_risk_analysis(self, state: AgentState) -> str:
"""Determine if risk analysis should continue."""
if (
state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds
): # 3 rounds of back-and-forth between 3 agents
return "Risk Judge"
if state["risk_debate_state"]["latest_speaker"].startswith("Aggressive"):
return "Conservative Analyst"
if state["risk_debate_state"]["latest_speaker"].startswith("Conservative"):
return "Neutral Analyst"
return "Aggressive Analyst"

View File

@ -1,53 +1,37 @@
# TradingAgents/graph/propagation.py
from typing import Dict, Any, List, Optional
from tradingagents.agents.utils.agent_states import (
AgentState,
InvestDebateState,
RiskDebateState,
)
class Propagator:
"""Handles state initialization and propagation through the graph."""
def __init__(self, max_recur_limit=100):
"""Initialize with configuration parameters."""
self.max_recur_limit = max_recur_limit
def create_initial_state(
self, company_name: str, trade_date: str
self,
company_name: str,
trade_date: str,
screening_context: str = "",
portfolio_context: str = "",
) -> Dict[str, Any]:
"""Create the initial state for the agent graph."""
"""Create the initial state for the swing trading graph."""
return {
"messages": [("human", company_name)],
"company_of_interest": company_name,
"trade_date": str(trade_date),
"investment_debate_state": InvestDebateState(
{"history": "", "current_response": "", "count": 0}
),
"risk_debate_state": RiskDebateState(
{
"history": "",
"current_aggressive_response": "",
"current_conservative_response": "",
"current_neutral_response": "",
"count": 0,
}
),
"market_report": "",
"fundamentals_report": "",
"sentiment_report": "",
"news_report": "",
"screening_context": screening_context,
"portfolio_context": portfolio_context,
"trader_decision": "",
"swing_order": "",
}
def get_graph_args(self, callbacks: Optional[List] = None) -> Dict[str, Any]:
"""Get arguments for the graph invocation.
Args:
callbacks: Optional list of callback handlers for tool execution tracking.
Note: LLM callbacks are handled separately via LLM constructor.
"""
"""Get arguments for the graph invocation."""
config = {"recursion_limit": self.max_recur_limit}
if callbacks:
config["callbacks"] = callbacks

View File

@ -2,120 +2,48 @@
from typing import Dict, Any
from langchain_openai import ChatOpenAI
from tradingagents.agents.utils.korean_prompt import KOREAN_INVESTOR_GUIDE
class Reflector:
"""Handles reflection on decisions and updating memory."""
"""Handles reflection on trading decisions and updating memory."""
def __init__(self, quick_thinking_llm: ChatOpenAI):
"""Initialize the reflector with an LLM."""
self.quick_thinking_llm = quick_thinking_llm
self.reflection_system_prompt = self._get_reflection_prompt()
self.reflection_system_prompt = f"""You are an expert swing trading analyst reviewing past trading decisions.
def _get_reflection_prompt(self) -> str:
"""Get the system prompt for reflection."""
return """
You are an expert financial analyst tasked with reviewing trading decisions/analysis and providing a comprehensive, step-by-step analysis.
Your goal is to deliver detailed insights into investment decisions and highlight opportunities for improvement, adhering strictly to the following guidelines:
Analyze the decision and provide:
1. **Reasoning**: Was the decision correct? What factors contributed?
2. **Improvement**: For incorrect decisions, propose specific corrections.
3. **Summary**: Key lessons learned for future swing trades.
4. **Query**: Condensed insights (max 1000 tokens) for memory storage.
1. Reasoning:
- For each trading decision, determine whether it was correct or incorrect. A correct decision results in an increase in returns, while an incorrect decision does the opposite.
- Analyze the contributing factors to each success or mistake. Consider:
- Market intelligence.
- Technical indicators.
- Technical signals.
- Price movement analysis.
- Overall market data analysis
- News analysis.
- Social media and sentiment analysis.
- Fundamental data analysis.
- Weight the importance of each factor in the decision-making process.
Consider: technical indicators, price action, news catalysts, fundamental health, entry/exit timing, position sizing.
{KOREAN_INVESTOR_GUIDE}
2. Improvement:
- For any incorrect decisions, propose revisions to maximize returns.
- Provide a detailed list of corrective actions or improvements, including specific recommendations (e.g., changing a decision from HOLD to BUY on a particular date).
3. Summary:
- Summarize the lessons learned from the successes and mistakes.
- Highlight how these lessons can be adapted for future trading scenarios and draw connections between similar situations to apply the knowledge gained.
4. Query:
- Extract key insights from the summary into a concise sentence of no more than 1000 tokens.
- Ensure the condensed sentence captures the essence of the lessons and reasoning for easy reference.
Adhere strictly to these instructions, and ensure your output is detailed, accurate, and actionable. You will also be given objective descriptions of the market from a price movements, technical indicator, news, and sentiment perspective to provide more context for your analysis.
[반성 출력 가이드]
- 최종 출력은 한국어로 작성한다.
- 잘한 /실수/재발 방지 액션 아이템을 명확히 분리해 제시한다.
"""
def _extract_current_situation(self, current_state: Dict[str, Any]) -> str:
"""Extract the current market situation from the state."""
curr_market_report = current_state["market_report"]
curr_sentiment_report = current_state["sentiment_report"]
curr_news_report = current_state["news_report"]
curr_fundamentals_report = current_state["fundamentals_report"]
def _extract_situation(self, state: Dict[str, Any]) -> str:
market = state.get("market_report", "")
news = state.get("news_report", "")
fundamentals = state.get("fundamentals_report", "")
return f"{market}\n\n{news}\n\n{fundamentals}"
return f"{curr_market_report}\n\n{curr_sentiment_report}\n\n{curr_news_report}\n\n{curr_fundamentals_report}"
def reflect_trader(self, current_state, returns_losses, trader_memory):
"""Reflect on trader's decision and update memory."""
situation = self._extract_situation(current_state)
trader_decision = current_state.get("trader_decision", "")
def _reflect_on_component(
self, component_type: str, report: str, situation: str, returns_losses
) -> str:
"""Generate reflection for a component."""
messages = [
("system", self.reflection_system_prompt),
(
"human",
f"Returns: {returns_losses}\n\nAnalysis/Decision: {report}\n\nObjective Market Reports for Reference: {situation}",
f"Returns: {returns_losses}\n\nTrading Decision: {trader_decision}\n\nMarket Reports: {situation}",
),
]
result = self.quick_thinking_llm.invoke(messages).content
return result
def reflect_bull_researcher(self, current_state, returns_losses, bull_memory):
"""Reflect on bull researcher's analysis and update memory."""
situation = self._extract_current_situation(current_state)
bull_debate_history = current_state["investment_debate_state"]["bull_history"]
result = self._reflect_on_component(
"BULL", bull_debate_history, situation, returns_losses
)
bull_memory.add_situations([(situation, result)])
def reflect_bear_researcher(self, current_state, returns_losses, bear_memory):
"""Reflect on bear researcher's analysis and update memory."""
situation = self._extract_current_situation(current_state)
bear_debate_history = current_state["investment_debate_state"]["bear_history"]
result = self._reflect_on_component(
"BEAR", bear_debate_history, situation, returns_losses
)
bear_memory.add_situations([(situation, result)])
def reflect_trader(self, current_state, returns_losses, trader_memory):
"""Reflect on trader's decision and update memory."""
situation = self._extract_current_situation(current_state)
trader_decision = current_state["trader_investment_plan"]
result = self._reflect_on_component(
"TRADER", trader_decision, situation, returns_losses
)
trader_memory.add_situations([(situation, result)])
def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory):
"""Reflect on investment judge's decision and update memory."""
situation = self._extract_current_situation(current_state)
judge_decision = current_state["investment_debate_state"]["judge_decision"]
result = self._reflect_on_component(
"INVEST JUDGE", judge_decision, situation, returns_losses
)
invest_judge_memory.add_situations([(situation, result)])
def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory):
"""Reflect on risk manager's decision and update memory."""
situation = self._extract_current_situation(current_state)
judge_decision = current_state["risk_debate_state"]["judge_decision"]
result = self._reflect_on_component(
"RISK JUDGE", judge_decision, situation, returns_losses
)
risk_manager_memory.add_situations([(situation, result)])

View File

@ -0,0 +1,187 @@
"""Screening pipeline graph for swing trading stock discovery.
A simpler LangGraph StateGraph that scans the market universe
and produces ranked candidate stocks for deep analysis.
"""
import logging
from typing import Annotated
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from typing_extensions import TypedDict
from tradingagents.agents.screener.candidate_ranker import create_candidate_ranker
from tradingagents.agents.screener.fundamental_screener import fundamental_screen
from tradingagents.agents.screener.technical_screener import technical_screen
from tradingagents.agents.screener.universe_builder import build_universe
logger = logging.getLogger(__name__)
class ScreeningState(TypedDict):
    """State flowing through the screening pipeline graph.

    Input fields are seeded once by ``ScreeningGraph.run``; output fields
    are filled in incrementally by the pipeline nodes.
    """

    market: Annotated[str, "Target market: KRX or US"]
    trade_date: Annotated[str, "Current trading date YYYY-MM-DD"]
    existing_positions: Annotated[list[str], "Tickers already held"]
    portfolio_context: Annotated[str, "Portfolio summary for ranking context"]
    max_candidates: Annotated[int, "Maximum candidates to output"]
    # Outputs
    universe_size: Annotated[int, "Number of stocks in universe"]
    technical_candidates: Annotated[list[dict], "Stocks passing technical screen"]
    fundamental_candidates: Annotated[list[dict], "Stocks passing fundamental screen"]
    final_candidates: Annotated[list[dict], "Ranked final candidates"]
    screening_report: Annotated[str, "Human-readable screening report"]
class ScreeningGraph:
"""Screening pipeline for swing trading stock discovery."""
def __init__(self, config: dict):
    """Store the app config and compile the screening pipeline graph.

    Args:
        config: TradingAgents config dict (market, LLM, screening settings).
    """
    self.config = config
    self._build_graph()
def _build_graph(self):
    """Assemble and compile the linear screening StateGraph."""
    stages = [
        ("build_universe", self._build_universe_node),
        ("technical_screen", self._technical_screen_node),
        ("fundamental_screen", self._fundamental_screen_node),
        ("rank_candidates", self._rank_candidates_node),
        ("generate_report", self._generate_report_node),
    ]
    builder = StateGraph(ScreeningState)
    for node_name, handler in stages:
        builder.add_node(node_name, handler)
    # Chain the stages strictly linearly: START -> ... -> END.
    names = [name for name, _ in stages]
    builder.add_edge(START, names[0])
    for src, dst in zip(names, names[1:]):
        builder.add_edge(src, dst)
    builder.add_edge(names[-1], END)
    self.graph = builder.compile()
def run(
    self,
    trade_date: str,
    market: str = "KRX",
    existing_positions: list[str] | None = None,
    portfolio_context: str = "",
    max_candidates: int = 5,
) -> dict:
    """Run the screening pipeline end to end.

    Args:
        trade_date: Trading date in YYYY-MM-DD format.
        market: Target market, "KRX" or "US".
        existing_positions: Tickers already held by the portfolio.
        portfolio_context: Free-text portfolio summary for the LLM ranker.
        max_candidates: Cap on the number of final candidates.

    Returns:
        Dict with "candidates" (ranked picks), "report" (human-readable
        summary), and "stats" (funnel counts per stage).
    """
    seed: ScreeningState = {
        "market": market,
        "trade_date": trade_date,
        "existing_positions": existing_positions or [],
        "portfolio_context": portfolio_context,
        "max_candidates": max_candidates,
        "universe_size": 0,
        "technical_candidates": [],
        "fundamental_candidates": [],
        "final_candidates": [],
        "screening_report": "",
    }
    final = self.graph.invoke(seed)
    funnel = {
        "universe_size": final["universe_size"],
        "technical_passed": len(final["technical_candidates"]),
        "fundamental_passed": len(final["fundamental_candidates"]),
        "final_selected": len(final["final_candidates"]),
    }
    return {
        "candidates": final["final_candidates"],
        "report": final["screening_report"],
        "stats": funnel,
    }
def _build_universe_node(self, state: ScreeningState) -> dict:
    """Build the stock universe and record its size in state.

    Only ``universe_size`` is written back: the DataFrame itself is not a
    declared ScreeningState channel, and returning the undeclared
    "_universe_df" key causes LangGraph to reject the node update
    (unknown state keys are invalid). The value was never consumed
    anyway — downstream nodes rebuild the universe themselves.
    """
    universe = build_universe(self.config)
    return {"universe_size": len(universe)}
def _technical_screen_node(self, state: ScreeningState) -> dict:
"""Run technical screening."""
# Rebuild universe (state doesn't carry DataFrames well)
universe = build_universe(self.config)
candidates = technical_screen(
universe=universe,
trade_date=state["trade_date"],
market=state["market"],
existing_positions=state["existing_positions"],
)
return {"technical_candidates": candidates}
def _fundamental_screen_node(self, state: ScreeningState) -> dict:
"""Run fundamental screening on technical candidates."""
candidates = fundamental_screen(
technical_candidates=state["technical_candidates"],
trade_date=state["trade_date"],
market=state["market"],
)
return {"fundamental_candidates": candidates}
def _rank_candidates_node(self, state: ScreeningState) -> dict:
"""Rank candidates using LLM."""
from tradingagents.llm_clients import create_llm_client
client = create_llm_client(
provider=self.config.get("llm_provider", "openai"),
model=self.config.get("quick_think_llm", "gpt-5-mini"),
base_url=self.config.get("backend_url"),
)
llm = client.get_llm()
ranker = create_candidate_ranker(llm)
ranked = ranker(
candidates=state["fundamental_candidates"],
portfolio_context=state["portfolio_context"],
max_candidates=state["max_candidates"],
)
return {"final_candidates": ranked}
def _generate_report_node(self, state: ScreeningState) -> dict:
"""Generate human-readable screening report."""
lines = [
"=" * 60,
f"스윙 트레이딩 스크리닝 리포트",
f"날짜: {state['trade_date']} / 시장: {state['market']}",
"=" * 60,
"",
f"유니버스 크기: {state['universe_size']}",
f"기술적 통과: {len(state['technical_candidates'])}",
f"펀더멘탈 통과: {len(state['fundamental_candidates'])}",
f"최종 선정: {len(state['final_candidates'])}",
"",
"-" * 60,
"최종 후보 종목",
"-" * 60,
]
for i, c in enumerate(state["final_candidates"], 1):
lines.append(f"\n[{i}] {c['ticker']} - {c['name']}")
lines.append(f" 기술적 신호: {', '.join(c['signals'])}")
lines.append(f" 펀더멘탈: {c.get('fundamental_check', 'N/A')}")
ind = c.get("indicators", {})
if ind:
price = ind.get("current_price", "N/A")
rsi = ind.get("rsi")
vol_ratio = ind.get("volume_ratio")
rsi_str = f"{rsi:.1f}" if isinstance(rsi, float) else "N/A"
vol_str = f"{vol_ratio:.1f}x" if isinstance(vol_ratio, float) else "N/A"
lines.append(f" 현재가: {price} / RSI: {rsi_str} / 거래량: {vol_str}")
if "ranking_reason" in c:
lines.append(f" 선정 이유: {c['ranking_reason']}")
if not state["final_candidates"]:
lines.append("\n 스크리닝 조건을 충족하는 종목이 없습니다.")
lines.append("\n" + "=" * 60)
report = "\n".join(lines)
return {"screening_report": report}

View File

@ -1,114 +1,82 @@
# TradingAgents/graph/setup.py
from typing import Dict, Any
from typing import Dict
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, START
from langgraph.prebuilt import ToolNode
from tradingagents.agents import *
from tradingagents.agents import (
create_market_analyst,
create_news_analyst,
create_fundamentals_analyst,
create_msg_delete,
create_trader,
)
from tradingagents.agents.utils.agent_states import AgentState
from .conditional_logic import ConditionalLogic
class GraphSetup:
"""Handles the setup and configuration of the agent graph."""
"""Handles the setup and configuration of the swing trading graph.
Simplified flow: Analysts Trader END
No debate or risk management nodes.
"""
def __init__(
self,
quick_thinking_llm: ChatOpenAI,
deep_thinking_llm: ChatOpenAI,
tool_nodes: Dict[str, ToolNode],
bull_memory,
bear_memory,
trader_memory,
invest_judge_memory,
risk_manager_memory,
conditional_logic: ConditionalLogic,
):
"""Initialize with required components."""
self.quick_thinking_llm = quick_thinking_llm
self.deep_thinking_llm = deep_thinking_llm
self.tool_nodes = tool_nodes
self.bull_memory = bull_memory
self.bear_memory = bear_memory
self.trader_memory = trader_memory
self.invest_judge_memory = invest_judge_memory
self.risk_manager_memory = risk_manager_memory
self.conditional_logic = conditional_logic
def setup_graph(
self, selected_analysts=["market", "social", "news", "fundamentals"]
self, selected_analysts=["market", "news", "fundamentals"]
):
"""Set up and compile the agent workflow graph.
"""Set up the swing trading workflow graph.
Args:
selected_analysts (list): List of analyst types to include. Options are:
- "market": Market analyst
- "social": Social media analyst
- "news": News analyst
- "fundamentals": Fundamentals analyst
selected_analysts: List of analyst types to include.
Options: "market", "news", "fundamentals"
"""
if len(selected_analysts) == 0:
raise ValueError("Trading Agents Graph Setup Error: no analysts selected!")
# Create analyst nodes
analyst_creators = {
"market": create_market_analyst,
"news": create_news_analyst,
"fundamentals": create_fundamentals_analyst,
}
analyst_nodes = {}
delete_nodes = {}
tool_nodes = {}
if "market" in selected_analysts:
analyst_nodes["market"] = create_market_analyst(
for analyst_type in selected_analysts:
if analyst_type not in analyst_creators:
continue
analyst_nodes[analyst_type] = analyst_creators[analyst_type](
self.quick_thinking_llm
)
delete_nodes["market"] = create_msg_delete()
tool_nodes["market"] = self.tool_nodes["market"]
delete_nodes[analyst_type] = create_msg_delete()
tool_nodes[analyst_type] = self.tool_nodes[analyst_type]
if "social" in selected_analysts:
analyst_nodes["social"] = create_social_media_analyst(
self.quick_thinking_llm
)
delete_nodes["social"] = create_msg_delete()
tool_nodes["social"] = self.tool_nodes["social"]
# Create trader node
trader_node = create_trader(self.deep_thinking_llm, self.trader_memory)
if "news" in selected_analysts:
analyst_nodes["news"] = create_news_analyst(
self.quick_thinking_llm
)
delete_nodes["news"] = create_msg_delete()
tool_nodes["news"] = self.tool_nodes["news"]
if "fundamentals" in selected_analysts:
analyst_nodes["fundamentals"] = create_fundamentals_analyst(
self.quick_thinking_llm
)
delete_nodes["fundamentals"] = create_msg_delete()
tool_nodes["fundamentals"] = self.tool_nodes["fundamentals"]
# Create researcher and manager nodes
bull_researcher_node = create_bull_researcher(
self.quick_thinking_llm, self.bull_memory
)
bear_researcher_node = create_bear_researcher(
self.quick_thinking_llm, self.bear_memory
)
research_manager_node = create_research_manager(
self.deep_thinking_llm, self.invest_judge_memory
)
trader_node = create_trader(self.quick_thinking_llm, self.trader_memory)
# Create risk analysis nodes
aggressive_analyst = create_aggressive_debator(self.quick_thinking_llm)
neutral_analyst = create_neutral_debator(self.quick_thinking_llm)
conservative_analyst = create_conservative_debator(self.quick_thinking_llm)
risk_manager_node = create_risk_manager(
self.deep_thinking_llm, self.risk_manager_memory
)
# Create workflow
# Build workflow
workflow = StateGraph(AgentState)
# Add analyst nodes to the graph
# Add analyst nodes
for analyst_type, node in analyst_nodes.items():
workflow.add_node(f"{analyst_type.capitalize()} Analyst", node)
workflow.add_node(
@ -116,28 +84,21 @@ class GraphSetup:
)
workflow.add_node(f"tools_{analyst_type}", tool_nodes[analyst_type])
# Add other nodes
workflow.add_node("Bull Researcher", bull_researcher_node)
workflow.add_node("Bear Researcher", bear_researcher_node)
workflow.add_node("Research Manager", research_manager_node)
# Add trader node
workflow.add_node("Trader", trader_node)
workflow.add_node("Aggressive Analyst", aggressive_analyst)
workflow.add_node("Neutral Analyst", neutral_analyst)
workflow.add_node("Conservative Analyst", conservative_analyst)
workflow.add_node("Risk Judge", risk_manager_node)
# Define edges
# Start with the first analyst
first_analyst = selected_analysts[0]
# Wire edges: START → first analyst
active_analysts = [a for a in selected_analysts if a in analyst_nodes]
first_analyst = active_analysts[0]
workflow.add_edge(START, f"{first_analyst.capitalize()} Analyst")
# Connect analysts in sequence
for i, analyst_type in enumerate(selected_analysts):
for i, analyst_type in enumerate(active_analysts):
current_analyst = f"{analyst_type.capitalize()} Analyst"
current_tools = f"tools_{analyst_type}"
current_clear = f"Msg Clear {analyst_type.capitalize()}"
# Add conditional edges for current analyst
# Tool call loop
workflow.add_conditional_edges(
current_analyst,
getattr(self.conditional_logic, f"should_continue_{analyst_type}"),
@ -145,58 +106,14 @@ class GraphSetup:
)
workflow.add_edge(current_tools, current_analyst)
# Connect to next analyst or to Bull Researcher if this is the last analyst
if i < len(selected_analysts) - 1:
next_analyst = f"{selected_analysts[i+1].capitalize()} Analyst"
# Connect to next analyst or to Trader
if i < len(active_analysts) - 1:
next_analyst = f"{active_analysts[i + 1].capitalize()} Analyst"
workflow.add_edge(current_clear, next_analyst)
else:
workflow.add_edge(current_clear, "Bull Researcher")
workflow.add_edge(current_clear, "Trader")
# Add remaining edges
workflow.add_conditional_edges(
"Bull Researcher",
self.conditional_logic.should_continue_debate,
{
"Bear Researcher": "Bear Researcher",
"Research Manager": "Research Manager",
},
)
workflow.add_conditional_edges(
"Bear Researcher",
self.conditional_logic.should_continue_debate,
{
"Bull Researcher": "Bull Researcher",
"Research Manager": "Research Manager",
},
)
workflow.add_edge("Research Manager", "Trader")
workflow.add_edge("Trader", "Aggressive Analyst")
workflow.add_conditional_edges(
"Aggressive Analyst",
self.conditional_logic.should_continue_risk_analysis,
{
"Conservative Analyst": "Conservative Analyst",
"Risk Judge": "Risk Judge",
},
)
workflow.add_conditional_edges(
"Conservative Analyst",
self.conditional_logic.should_continue_risk_analysis,
{
"Neutral Analyst": "Neutral Analyst",
"Risk Judge": "Risk Judge",
},
)
workflow.add_conditional_edges(
"Neutral Analyst",
self.conditional_logic.should_continue_risk_analysis,
{
"Aggressive Analyst": "Aggressive Analyst",
"Risk Judge": "Risk Judge",
},
)
# Trader → END
workflow.add_edge("Trader", END)
workflow.add_edge("Risk Judge", END)
# Compile and return
return workflow.compile()

View File

@ -1,5 +1,7 @@
# TradingAgents/graph/signal_processing.py
import re
from langchain_openai import ChatOpenAI
@ -18,14 +20,126 @@ class SignalProcessor:
full_signal: Complete trading signal text
Returns:
Extracted decision (BUY, SELL, or HOLD)
Extracted decision (BUY or PASS) - legacy mode
"""
messages = [
(
"system",
"You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.",
"너는 애널리스트 리포트에서 최종 투자 결론만 추출하는 파서다. 한국어/영어 혼합 문서도 처리한다. 우선순위는 (1) 마지막 `FINAL TRANSACTION PROPOSAL: **BUY/PASS**` 표기, (2) 명시적 최종 권고 문장이다. 출력은 반드시 BUY 또는 PASS 한 단어만 반환하고, 다른 문구는 절대 출력하지 마라.",
),
("human", full_signal),
]
return self.quick_thinking_llm.invoke(messages).content
def process_swing_signal(self, full_signal: str) -> dict:
"""
Process a swing trading signal to extract decision + order parameters.
Args:
full_signal: Complete trading signal text with SWING_ORDER block
Returns:
Dict with action, entry_price, stop_loss, take_profit,
position_size_pct, max_hold_days, rationale
"""
result = {
"action": "PASS",
"entry_price": None,
"stop_loss": None,
"take_profit": None,
"position_size_pct": None,
"max_hold_days": None,
"rationale": "",
}
# Try to extract SWING_ORDER block first
order_block = self._extract_swing_order(full_signal)
if order_block:
result.update(order_block)
else:
# Fallback: use LLM to extract
result = self._llm_extract_swing_signal(full_signal)
# Validate action
if result["action"] not in ("BUY", "SELL", "HOLD", "PASS"):
result["action"] = "PASS"
return result
def _extract_swing_order(self, text: str) -> dict | None:
"""Extract structured SWING_ORDER block from text using regex."""
pattern = r"SWING_ORDER:\s*\n(.*?)(?:\n```|\Z)"
match = re.search(pattern, text, re.DOTALL)
if not match:
# Try without code block
pattern = r"SWING_ORDER:\s*\n((?:\s+\w+:.*\n?)+)"
match = re.search(pattern, text, re.DOTALL)
if not match:
return None
block = match.group(1)
result = {}
field_map = {
"ACTION": ("action", str),
"ENTRY_PRICE": ("entry_price", float),
"STOP_LOSS": ("stop_loss", float),
"TAKE_PROFIT": ("take_profit", float),
"POSITION_SIZE_PCT": ("position_size_pct", float),
"MAX_HOLD_DAYS": ("max_hold_days", int),
"RATIONALE": ("rationale", str),
}
for key, (field_name, converter) in field_map.items():
field_pattern = rf"{key}:\s*(.+?)(?:\n|$)"
field_match = re.search(field_pattern, block)
if field_match:
raw_value = field_match.group(1).strip()
try:
if converter in (float, int):
# Remove commas and currency symbols
cleaned = re.sub(r"[^\d.\-]", "", raw_value)
if cleaned:
result[field_name] = converter(cleaned)
else:
result[field_name] = raw_value
except (ValueError, TypeError):
pass
return result if "action" in result else None
def _llm_extract_swing_signal(self, full_signal: str) -> dict:
"""Fallback: use LLM to extract swing trading signal."""
messages = [
(
"system",
"""너는 스윙 트레이딩 리포트에서 최종 투자 결론과 주문 정보를 추출하는 파서다.
반드시 아래 JSON 형식만 출력하라. 다른 텍스트는 절대 출력하지 마라.
{"action": "BUY|SELL|HOLD|PASS", "entry_price": 숫자|null, "stop_loss": 숫자|null, "take_profit": 숫자|null, "position_size_pct": 숫자|null, "max_hold_days": 숫자|null, "rationale": "한 줄 요약"}""",
),
("human", full_signal),
]
import json
response = self.quick_thinking_llm.invoke(messages).content
try:
return json.loads(response)
except json.JSONDecodeError:
# Last resort: extract action only
action = "PASS"
for keyword in ("BUY", "SELL", "HOLD", "PASS"):
if keyword in response.upper():
action = keyword
break
return {
"action": action,
"entry_price": None,
"stop_loss": None,
"take_profit": None,
"position_size_pct": None,
"max_hold_days": None,
"rationale": "",
}

View File

@ -3,24 +3,15 @@
import os
from pathlib import Path
import json
from datetime import date
from typing import Dict, Any, Tuple, List, Optional
from typing import Dict, Any, List, Optional
from langgraph.prebuilt import ToolNode
from tradingagents.llm_clients import create_llm_client
from tradingagents.agents import *
from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.agents.utils.memory import FinancialSituationMemory
from tradingagents.agents.utils.agent_states import (
AgentState,
InvestDebateState,
RiskDebateState,
)
from tradingagents.dataflows.config import set_config
# Import the new abstract tool methods from agent_utils
from tradingagents.agents.utils.agent_utils import (
get_stock_data,
get_indicators,
@ -30,7 +21,19 @@ from tradingagents.agents.utils.agent_utils import (
get_income_statement,
get_news,
get_insider_transactions,
get_global_news
get_global_news,
# Korean market tools
get_krx_stock_data,
get_krx_indicators,
get_exchange_rate,
get_korea_index,
get_investor_trading,
get_krx_fundamentals,
get_dart_financials,
get_dart_disclosures,
get_dart_shareholders,
get_korean_news,
get_korean_global_news,
)
from .conditional_logic import ConditionalLogic
@ -41,40 +44,31 @@ from .signal_processing import SignalProcessor
class TradingAgentsGraph:
"""Main class that orchestrates the trading agents framework."""
"""Swing trading graph: Analysts → Trader → Decision.
Simplified pipeline without debate or risk management stages.
"""
def __init__(
self,
selected_analysts=["market", "social", "news", "fundamentals"],
selected_analysts=["market", "news", "fundamentals"],
debug=False,
config: Dict[str, Any] = None,
callbacks: Optional[List] = None,
):
"""Initialize the trading agents graph and components.
Args:
selected_analysts: List of analyst types to include
debug: Whether to run in debug mode
config: Configuration dictionary. If None, uses default config
callbacks: Optional list of callback handlers (e.g., for tracking LLM/tool stats)
"""
self.debug = debug
self.config = config or DEFAULT_CONFIG
self.callbacks = callbacks or []
# Update the interface's config
set_config(self.config)
# Create necessary directories
os.makedirs(
os.path.join(self.config["project_dir"], "dataflows/data_cache"),
exist_ok=True,
)
# Initialize LLMs with provider-specific thinking configuration
# Initialize LLMs
llm_kwargs = self._get_provider_kwargs()
# Add callbacks to kwargs if provided (passed to LLM constructor)
if self.callbacks:
llm_kwargs["callbacks"] = self.callbacks
@ -93,13 +87,9 @@ class TradingAgentsGraph:
self.deep_thinking_llm = deep_client.get_llm()
self.quick_thinking_llm = quick_client.get_llm()
# Initialize memories
self.bull_memory = FinancialSituationMemory("bull_memory", self.config)
self.bear_memory = FinancialSituationMemory("bear_memory", self.config)
# Only trader memory needed
self.trader_memory = FinancialSituationMemory("trader_memory", self.config)
self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config)
self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config)
# Create tool nodes
self.tool_nodes = self._create_tool_nodes()
@ -110,174 +100,241 @@ class TradingAgentsGraph:
self.quick_thinking_llm,
self.deep_thinking_llm,
self.tool_nodes,
self.bull_memory,
self.bear_memory,
self.trader_memory,
self.invest_judge_memory,
self.risk_manager_memory,
self.conditional_logic,
)
self.propagator = Propagator()
self.propagator = Propagator(self.config.get("max_recur_limit", 100))
self.reflector = Reflector(self.quick_thinking_llm)
self.signal_processor = SignalProcessor(self.quick_thinking_llm)
# State tracking
self.curr_state = None
self.ticker = None
self.log_states_dict = {} # date to full state dict
# Set up the graph
self.graph = self.graph_setup.setup_graph(selected_analysts)
def _get_provider_kwargs(self) -> Dict[str, Any]:
"""Get provider-specific kwargs for LLM client creation."""
kwargs = {}
provider = self.config.get("llm_provider", "").lower()
if provider == "google":
thinking_level = self.config.get("google_thinking_level")
if thinking_level:
kwargs["thinking_level"] = thinking_level
elif provider == "openai":
reasoning_effort = self.config.get("openai_reasoning_effort")
if reasoning_effort:
kwargs["reasoning_effort"] = reasoning_effort
return kwargs
def _create_tool_nodes(self) -> Dict[str, ToolNode]:
"""Create tool nodes for different data sources using abstract methods."""
return {
"market": ToolNode(
[
# Core stock data tools
get_stock_data,
# Technical indicators
get_indicators,
]
),
"social": ToolNode(
[
# News tools for social media analysis
get_news,
get_krx_stock_data,
get_krx_indicators,
get_exchange_rate,
get_korea_index,
get_investor_trading,
]
),
"news": ToolNode(
[
# News and insider information
get_news,
get_global_news,
get_insider_transactions,
get_korean_news,
get_korean_global_news,
get_dart_disclosures,
]
),
"fundamentals": ToolNode(
[
# Fundamental analysis tools
get_fundamentals,
get_balance_sheet,
get_cashflow,
get_income_statement,
get_krx_fundamentals,
get_dart_financials,
get_dart_shareholders,
]
),
}
def propagate(self, company_name, trade_date):
"""Run the trading agents graph for a company on a specific date."""
def propagate(
self,
company_name: str,
trade_date: str,
screening_context: str = "",
portfolio_context: str = "",
):
"""Run the swing trading graph for a company.
Args:
company_name: Ticker symbol
trade_date: Trading date
screening_context: Why screener flagged this stock
portfolio_context: Current portfolio state summary
Returns:
(final_state, swing_signal_dict)
"""
self.ticker = company_name
# Initialize state
init_agent_state = self.propagator.create_initial_state(
company_name, trade_date
init_state = self.propagator.create_initial_state(
company_name, trade_date, screening_context, portfolio_context
)
args = self.propagator.get_graph_args()
if self.debug:
# Debug mode with tracing
trace = []
for chunk in self.graph.stream(init_agent_state, **args):
if len(chunk["messages"]) == 0:
pass
else:
for chunk in self.graph.stream(init_state, **args):
if chunk.get("messages"):
chunk["messages"][-1].pretty_print()
trace.append(chunk)
trace.append(chunk)
final_state = trace[-1]
else:
# Standard mode without tracing
final_state = self.graph.invoke(init_agent_state, **args)
final_state = self.graph.invoke(init_state, **args)
# Store current state for reflection
self.curr_state = final_state
# Log state
self._log_state(trade_date, final_state)
# Return decision and processed signal
return final_state, self.process_signal(final_state["final_trade_decision"])
# Process swing signal
swing_signal = self.signal_processor.process_swing_signal(
final_state["trader_decision"]
)
return final_state, swing_signal
def _log_state(self, trade_date, final_state):
"""Log the final state to a JSON file."""
self.log_states_dict[str(trade_date)] = {
"company_of_interest": final_state["company_of_interest"],
"trade_date": final_state["trade_date"],
"market_report": final_state["market_report"],
"sentiment_report": final_state["sentiment_report"],
"news_report": final_state["news_report"],
"fundamentals_report": final_state["fundamentals_report"],
"investment_debate_state": {
"bull_history": final_state["investment_debate_state"]["bull_history"],
"bear_history": final_state["investment_debate_state"]["bear_history"],
"history": final_state["investment_debate_state"]["history"],
"current_response": final_state["investment_debate_state"][
"current_response"
],
"judge_decision": final_state["investment_debate_state"][
"judge_decision"
],
},
"trader_investment_decision": final_state["trader_investment_plan"],
"risk_debate_state": {
"aggressive_history": final_state["risk_debate_state"]["aggressive_history"],
"conservative_history": final_state["risk_debate_state"]["conservative_history"],
"neutral_history": final_state["risk_debate_state"]["neutral_history"],
"history": final_state["risk_debate_state"]["history"],
"judge_decision": final_state["risk_debate_state"]["judge_decision"],
},
"investment_plan": final_state["investment_plan"],
"final_trade_decision": final_state["final_trade_decision"],
log_data = {
str(trade_date): {
"company_of_interest": final_state["company_of_interest"],
"trade_date": final_state["trade_date"],
"market_report": final_state["market_report"],
"news_report": final_state["news_report"],
"fundamentals_report": final_state["fundamentals_report"],
"trader_decision": final_state["trader_decision"],
"swing_order": final_state.get("swing_order", ""),
}
}
# Save to file
directory = Path(f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/")
directory = Path(
self.config.get("results_dir", "./results")
) / self.ticker / "logs"
directory.mkdir(parents=True, exist_ok=True)
with open(
f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log_{trade_date}.json",
"w",
) as f:
json.dump(self.log_states_dict, f, indent=4)
with open(directory / f"state_{trade_date}.json", "w") as f:
json.dump(log_data, f, indent=4, ensure_ascii=False)
def reflect_and_remember(self, returns_losses):
"""Reflect on decisions and update memory based on returns."""
self.reflector.reflect_bull_researcher(
self.curr_state, returns_losses, self.bull_memory
)
self.reflector.reflect_bear_researcher(
self.curr_state, returns_losses, self.bear_memory
)
"""Reflect on trader's decision and update memory."""
self.reflector.reflect_trader(
self.curr_state, returns_losses, self.trader_memory
)
self.reflector.reflect_invest_judge(
self.curr_state, returns_losses, self.invest_judge_memory
)
self.reflector.reflect_risk_manager(
self.curr_state, returns_losses, self.risk_manager_memory
    def process_signal(self, full_signal: str) -> dict:
        """Process a signal to extract swing order parameters.

        Thin wrapper around ``SignalProcessor.process_swing_signal``; see
        that method for the keys of the returned dict.
        """
        return self.signal_processor.process_swing_signal(full_signal)
def screen(
self,
trade_date: str,
existing_positions: list[str] | None = None,
portfolio_context: str = "",
) -> dict:
"""Run stock screening to discover swing trading candidates.
Returns:
Dict with candidates, report, stats
"""
from tradingagents.graph.screening_graph import ScreeningGraph
screener = ScreeningGraph(self.config)
return screener.run(
trade_date=trade_date,
market=self.config.get("market", "KRX"),
existing_positions=existing_positions,
portfolio_context=portfolio_context,
max_candidates=self.config.get("screening_max_candidates", 5),
)
def process_signal(self, full_signal):
"""Process a signal to extract the core decision."""
return self.signal_processor.process_signal(full_signal)
def run_swing_pipeline(
self,
trade_date: str,
existing_positions: list[str] | None = None,
portfolio_context: str = "",
on_screening_done=None,
on_candidate_start=None,
on_candidate_done=None,
) -> list[dict]:
"""Full swing trading pipeline: Screen → Analyze each candidate.
Args:
trade_date: Trading date
existing_positions: Tickers already held
portfolio_context: Portfolio summary
on_screening_done: Callback(screening_result) after screening
on_candidate_start: Callback(ticker, screening_context) before analysis
on_candidate_done: Callback(ticker, final_state, swing_signal) after analysis
Returns:
List of dicts: [{ticker, final_state, swing_signal}, ...]
"""
# Step 1: Screen
screening_result = self.screen(
trade_date=trade_date,
existing_positions=existing_positions,
portfolio_context=portfolio_context,
)
if on_screening_done:
on_screening_done(screening_result)
candidates = screening_result.get("candidates", [])
if not candidates:
return []
# Step 2: Analyze each candidate
results = []
for candidate in candidates:
ticker = candidate["ticker"]
screening_context = (
f"종목: {candidate['name']} ({ticker})\n"
f"기술적 신호: {', '.join(candidate.get('signals', []))}\n"
f"펀더멘탈: {candidate.get('fundamental_check', 'N/A')}"
)
if on_candidate_start:
on_candidate_start(ticker, screening_context)
try:
final_state, swing_signal = self.propagate(
company_name=ticker,
trade_date=trade_date,
screening_context=screening_context,
portfolio_context=portfolio_context,
)
result = {
"ticker": ticker,
"name": candidate.get("name", ticker),
"final_state": final_state,
"swing_signal": swing_signal,
"screening_context": screening_context,
}
results.append(result)
if on_candidate_done:
on_candidate_done(ticker, final_state, swing_signal)
except Exception as e:
import logging
logging.getLogger(__name__).error(
f"Analysis failed for {ticker}: {e}"
)
return results

View File

@ -0,0 +1,16 @@
from tradingagents.portfolio.state import (
PortfolioState,
Position,
ClosedTrade,
Order,
)
from tradingagents.portfolio.persistence import load_portfolio, save_portfolio
__all__ = [
"PortfolioState",
"Position",
"ClosedTrade",
"Order",
"load_portfolio",
"save_portfolio",
]

View File

@ -0,0 +1,72 @@
"""JSON-based portfolio persistence."""
import json
import os
from dataclasses import asdict
from typing import Optional
from tradingagents.portfolio.state import (
ClosedTrade,
Order,
PortfolioState,
Position,
)
def _get_portfolio_path(portfolio_id: str, results_dir: str = "./results") -> str:
portfolio_dir = os.path.join(results_dir, "portfolios")
os.makedirs(portfolio_dir, exist_ok=True)
return os.path.join(portfolio_dir, f"{portfolio_id}.json")
def save_portfolio(portfolio: PortfolioState, results_dir: str = "./results") -> str:
    """Serialize *portfolio* to its JSON file and return the path written."""
    target = _get_portfolio_path(portfolio.portfolio_id, results_dir)
    payload = asdict(portfolio)
    with open(target, "w", encoding="utf-8") as fh:
        json.dump(payload, fh, ensure_ascii=False, indent=2)
    return target
def load_portfolio(
    portfolio_id: str = "default",
    results_dir: str = "./results",
    defaults: Optional[dict] = None,
) -> PortfolioState:
    """Load a portfolio from JSON, or build a fresh one when no file exists.

    Args:
        portfolio_id: Portfolio file stem under ``<results_dir>/portfolios``.
        results_dir: Root results directory.
        defaults: Extra constructor kwargs applied only when creating a new
            portfolio (ignored when the file already exists).
    """
    path = _get_portfolio_path(portfolio_id, results_dir)
    if not os.path.exists(path):
        init_kwargs = {"portfolio_id": portfolio_id, **(defaults or {})}
        return PortfolioState(**init_kwargs)

    with open(path, "r", encoding="utf-8") as fh:
        raw = json.load(fh)

    # Rebuild nested dataclasses from their dict/list representations.
    positions = {
        ticker: Position(**pos_fields)
        for ticker, pos_fields in raw.get("positions", {}).items()
    }
    closed_trades = [ClosedTrade(**t) for t in raw.get("closed_trades", [])]
    orders_history = [Order(**o) for o in raw.get("orders_history", [])]

    return PortfolioState(
        portfolio_id=raw.get("portfolio_id", portfolio_id),
        total_capital=raw.get("total_capital", 100_000_000),
        available_capital=raw.get("available_capital", 100_000_000),
        max_positions=raw.get("max_positions", 5),
        max_position_pct=raw.get("max_position_pct", 0.20),
        positions=positions,
        closed_trades=closed_trades,
        orders_history=orders_history,
        created_at=raw.get("created_at", ""),
        updated_at=raw.get("updated_at", ""),
    )

View File

@ -0,0 +1,199 @@
"""Portfolio state models for swing trading."""
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional
@dataclass
class Position:
    """An open position in the portfolio."""

    ticker: str
    market: str  # "KRX" or "US"
    entry_date: str  # "YYYY-MM-DD"
    entry_price: float
    quantity: int
    stop_loss: float
    take_profit: float
    max_hold_days: int = 20
    current_price: float = 0.0  # 0.0 means "price not yet refreshed"
    screening_reason: str = ""

    @property
    def days_held(self) -> int:
        """Calendar days since entry, relative to the wall clock."""
        entry = datetime.strptime(self.entry_date, "%Y-%m-%d")
        return (datetime.now() - entry).days

    @property
    def unrealized_pnl(self) -> float:
        """Unrealized profit/loss in currency units."""
        return (self.current_price - self.entry_price) * self.quantity

    @property
    def unrealized_pnl_pct(self) -> float:
        """Unrealized P&L as a percentage of entry price (0.0 if entry is 0)."""
        if self.entry_price == 0:
            return 0.0
        return (self.current_price - self.entry_price) / self.entry_price * 100

    @property
    def cost_basis(self) -> float:
        """Total capital committed at entry."""
        return self.entry_price * self.quantity

    def should_check_exit(self, current_date: str) -> bool:
        """Return True when an exit evaluation is due.

        Triggers on stop-loss, take-profit, or the maximum holding period.

        Args:
            current_date: Evaluation date ("YYYY-MM-DD"); used for the
                hold-period check. Falls back to wall-clock ``days_held``
                when unparseable.
        """
        # Guard: a current_price of 0.0 (never refreshed) would otherwise
        # always satisfy `current_price <= stop_loss` and spuriously trigger.
        if self.current_price > 0:
            if self.current_price <= self.stop_loss:
                return True
            if self.current_price >= self.take_profit:
                return True
        # Honor the caller-supplied date (the parameter was previously
        # accepted but ignored in favor of datetime.now()).
        try:
            as_of = datetime.strptime(current_date, "%Y-%m-%d")
            entry = datetime.strptime(self.entry_date, "%Y-%m-%d")
            held = (as_of - entry).days
        except (TypeError, ValueError):
            held = self.days_held
        return held >= self.max_hold_days
@dataclass
class ClosedTrade:
    """A completed round-trip trade with realized P&L."""

    ticker: str
    market: str
    entry_date: str
    exit_date: str
    entry_price: float
    exit_price: float
    quantity: int
    exit_reason: str  # "stop_loss", "take_profit", "max_hold", "agent_decision"

    @property
    def pnl(self) -> float:
        """Realized profit/loss in currency units."""
        price_delta = self.exit_price - self.entry_price
        return price_delta * self.quantity

    @property
    def pnl_pct(self) -> float:
        """Realized P&L as a percentage of entry price (0.0 if entry is 0)."""
        if not self.entry_price:
            return 0.0
        return (self.exit_price - self.entry_price) / self.entry_price * 100
@dataclass
class Order:
    """A trading order generated by the system.

    A BUY order is turned into an open ``Position`` by
    ``PortfolioState.add_position`` (``price`` becomes the entry price and
    ``rationale`` the screening reason).
    """

    action: str  # "BUY", "SELL"
    ticker: str
    market: str
    price: float  # order price; used as the entry price on BUY
    stop_loss: float  # exit price below which the position should be cut
    take_profit: float  # exit price at which profits should be taken
    quantity: int  # number of shares
    position_size_pct: float  # % of total capital
    max_hold_days: int = 20  # maximum holding period in days
    rationale: str = ""  # one-line reasoning behind the order
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())  # creation time (ISO 8601)
@dataclass
class PortfolioState:
    """Complete portfolio state for swing trading.

    Tracks capital, open positions, closed trades and order history, and
    provides the mutation helpers used by the trading pipeline.
    """

    portfolio_id: str = "default"
    total_capital: float = 100_000_000  # 1억원 default
    available_capital: float = 100_000_000  # cash not tied up in positions
    max_positions: int = 5
    max_position_pct: float = 0.20  # 20% of total capital per position
    positions: dict[str, Position] = field(default_factory=dict)
    closed_trades: list[ClosedTrade] = field(default_factory=list)
    orders_history: list[Order] = field(default_factory=list)
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())
    updated_at: str = field(default_factory=lambda: datetime.now().isoformat())

    @property
    def invested_capital(self) -> float:
        """Sum of cost bases of all open positions."""
        return sum(p.cost_basis for p in self.positions.values())

    @property
    def total_unrealized_pnl(self) -> float:
        """Aggregate unrealized P&L across open positions."""
        return sum(p.unrealized_pnl for p in self.positions.values())

    @property
    def total_realized_pnl(self) -> float:
        """Aggregate realized P&L across closed trades."""
        return sum(t.pnl for t in self.closed_trades)

    @property
    def position_count(self) -> int:
        """Number of currently open positions."""
        return len(self.positions)

    def can_add_position(self) -> bool:
        """Return True when there is room for another position."""
        return self.position_count < self.max_positions

    def available_slots(self) -> int:
        """Return the remaining number of positions that may be opened."""
        return self.max_positions - self.position_count

    def max_position_capital(self) -> float:
        """Return the maximum capital allowed for a single position."""
        return self.total_capital * self.max_position_pct

    def has_position(self, ticker: str) -> bool:
        """Return True when *ticker* is already held."""
        return ticker in self.positions

    def add_position(self, order: Order) -> None:
        """Open a new position from a BUY *order* and deduct its cost.

        Note: does not enforce ``can_add_position`` or available capital;
        callers are expected to validate before placing the order.
        """
        self.positions[order.ticker] = Position(
            ticker=order.ticker,
            market=order.market,
            entry_date=datetime.now().strftime("%Y-%m-%d"),
            entry_price=order.price,
            quantity=order.quantity,
            stop_loss=order.stop_loss,
            take_profit=order.take_profit,
            max_hold_days=order.max_hold_days,
            current_price=order.price,  # seed with entry until refreshed
            screening_reason=order.rationale,
        )
        self.available_capital -= order.price * order.quantity
        self.orders_history.append(order)
        self.updated_at = datetime.now().isoformat()

    def close_position(
        self, ticker: str, exit_price: float, exit_reason: str
    ) -> Optional[ClosedTrade]:
        """Close *ticker* at *exit_price*, record the trade and free capital.

        Returns:
            The recorded ClosedTrade, or None when the ticker is not held.
        """
        if ticker not in self.positions:
            return None
        pos = self.positions.pop(ticker)
        trade = ClosedTrade(
            ticker=pos.ticker,
            market=pos.market,
            entry_date=pos.entry_date,
            exit_date=datetime.now().strftime("%Y-%m-%d"),
            entry_price=pos.entry_price,
            exit_price=exit_price,
            quantity=pos.quantity,
            exit_reason=exit_reason,
        )
        self.closed_trades.append(trade)
        self.available_capital += exit_price * pos.quantity
        self.updated_at = datetime.now().isoformat()
        return trade

    def summary(self) -> str:
        """Render a Korean text summary of the portfolio for agent prompts."""
        lines = [
            "=== 포트폴리오 현황 ===",  # plain literal (was a needless f-string)
            f"총 자본: {self.total_capital:,.0f}",
            f"가용 자본: {self.available_capital:,.0f}",
            f"투자 중: {self.invested_capital:,.0f}",
            f"포지션: {self.position_count}/{self.max_positions}",
            f"미실현 손익: {self.total_unrealized_pnl:,.0f}",
            f"실현 손익: {self.total_realized_pnl:,.0f}",
        ]
        if self.positions:
            lines.append("\n--- 보유 종목 ---")
            for ticker, pos in self.positions.items():
                lines.append(
                    f"  {ticker}: 진입가 {pos.entry_price:,.0f} / "
                    f"현재가 {pos.current_price:,.0f} / "
                    f"수익률 {pos.unrealized_pnl_pct:+.1f}% / "
                    f"보유일 {pos.days_held}일 / "
                    f"손절 {pos.stop_loss:,.0f} / 익절 {pos.take_profit:,.0f}"
                )
        return "\n".join(lines)

143
translate_report.py Normal file
View File

@ -0,0 +1,143 @@
"""기존 complete_report.md를 한국어로 번역하는 테스트 스크립트.
사용법:
python translate_report.py # 최근 리포트 자동 선택
python translate_report.py reports/SPY_.../complete_report.md
"""
import sys
import re
import datetime
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
def translate_report_to_korean(report_content: str, llm) -> str:
    """Same logic as ``translate_report_to_korean`` in cli/main.py.

    Splits the markdown report on top-level ``## `` headings, translates each
    section through *llm*, and rejoins the translated sections.
    """
    from langchain_core.messages import HumanMessage, SystemMessage

    system_prompt = (
        "당신은 금융 전문가이자 비전공자 교육 전문가입니다.\n"
        "영어 주식 트레이딩 분석 리포트를 한국어로 번역하고, "
        "금융·기술 전문 용어를 비전공자도 쉽게 이해할 수 있도록 설명을 추가해주세요.\n\n"
        "번역 지침:\n"
        "1. 자연스러운 한국어로 번역하세요.\n"
        "2. 처음 등장하는 전문 용어 뒤에 괄호로 쉬운 설명을 추가하세요.\n"
        " 예) 200 SMA(200일 단순이동평균: 200일간 종가 평균으로 장기 추세를 나타내는 기준선)\n"
        " 예) RSI(상대강도지수: 0~100 사이 값으로 과매수·과매도를 판단. 70 이상=과매수, 30 이하=과매도)\n"
        " 예) MACD(이동평균수렴확산: 단기·장기 이동평균의 차이로 추세 전환 시점을 포착하는 지표)\n"
        " 예) ATR(평균진폭: 주가의 하루 평균 변동 폭. 손절 위치 설정 등 리스크 관리에 활용)\n"
        "3. 복잡한 분석 개념은 일상적인 비유를 사용해 쉽게 설명하세요.\n"
        "4. 가격, 퍼센트 등 수치와 종목 코드는 그대로 유지하세요.\n"
        "5. 마크다운 형식(##, ###, -, * 등)을 그대로 유지하세요.\n"
        "6. 섹션 제목은 한국어로 번역하세요.\n"
        "7. 최종 투자 의견과 권고 사항을 명확히 전달하세요."
    )

    raw_parts = re.split(r"(?=^## )", report_content, flags=re.MULTILINE)
    sections = [p for p in raw_parts if p.strip()]
    total = len(sections)
    translated: list[str] = []
    for idx, section in enumerate(sections, start=1):
        # Preview of the section heading for progress output.
        heading = section.strip().splitlines()[0][:60]
        print(f" [{idx}/{total}] 번역 중: {heading} ...", flush=True)
        reply = llm.invoke([
            SystemMessage(content=system_prompt),
            HumanMessage(content=f"아래 내용을 한국어로 번역하고 전문 용어를 쉽게 설명해주세요:\n\n{section}"),
        ])
        translated.append(reply.content)
    return "\n\n".join(translated)
def pick_report() -> Path:
    """Resolve the report file to translate.

    An explicit path may be given as the first CLI argument; otherwise the
    most recently modified ``reports/*/complete_report.md`` is offered.
    Exits with status 1 when nothing usable is found.
    """
    cli_args = sys.argv[1:]
    if cli_args:
        chosen = Path(cli_args[0])
        if not chosen.exists():
            print(f"[오류] 파일을 찾을 수 없습니다: {chosen}")
            sys.exit(1)
        return chosen

    # No argument: pick from reports/, newest first by mtime.
    reports = sorted(
        Path("reports").glob("*/complete_report.md"),
        key=lambda f: f.stat().st_mtime,
        reverse=True,
    )
    if not reports:
        print("[오류] reports/ 폴더에 complete_report.md가 없습니다.")
        sys.exit(1)

    print("번역 가능한 리포트:")
    for number, report in enumerate(reports, start=1):
        print(f" {number}. {report}")
    if len(reports) == 1:
        return reports[0]

    answer = input(f"번역할 리포트 번호 선택 [1-{len(reports)}] (기본=1): ").strip()
    index = int(answer) - 1 if answer.isdigit() else 0
    index = max(0, min(index, len(reports) - 1))
    return reports[index]
def main():
    """CLI entry point: pick a report, build an LLM, translate and save."""
    report_path = pick_report()
    print(f"\n대상 리포트: {report_path}")

    # Warn when a translated version already exists.
    ko_path = report_path.parent / "complete_report_ko.md"
    if ko_path.exists():
        answer = input("번역본이 이미 존재합니다. 덮어쓰시겠습니까? [Y/n]: ").strip().upper()
        # NOTE(review): only an exact "n"/"N" cancels — "no" proceeds; confirm intended.
        if answer == "N":
            print("취소되었습니다.")
            sys.exit(0)

    # Build the LLM — pass the API key explicitly.
    import os

    print("\nLLM 초기화 중...")
    anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
    openai_key = os.environ.get("OPENAI_API_KEY")
    if anthropic_key:
        from langchain_anthropic import ChatAnthropic

        llm = ChatAnthropic(model="claude-sonnet-4-6", api_key=anthropic_key)
        print(" 사용 모델: claude-sonnet-4-6 (Anthropic)")
    elif openai_key:
        from langchain_openai import ChatOpenAI

        llm = ChatOpenAI(model="gpt-4o", api_key=openai_key)
        print(" 사용 모델: gpt-4o (OpenAI)")
    else:
        print("[오류] ANTHROPIC_API_KEY 또는 OPENAI_API_KEY가 없습니다.")
        sys.exit(1)

    # Run the translation.
    report_text = report_path.read_text(encoding="utf-8")
    ticker = report_path.parent.name.split("_")[0].upper()
    print(f"\n번역 시작 (총 {len(report_text):,}자)...\n")
    translated = translate_report_to_korean(report_text, llm)

    # Save with a short Korean header.
    header = (
        f"# 트레이딩 분석 리포트: {ticker} (한국어)\n\n"
        f"> **안내**: 이 리포트는 영문 분석 결과를 AI가 한국어로 번역하고, "
        f"금융 전문 용어를 비전공자도 쉽게 이해할 수 있도록 설명을 추가한 버전입니다.\n\n"
        f"생성: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
        f"---\n\n"
    )
    ko_path.write_text(header + translated, encoding="utf-8")
    print(f"\n✓ 번역 완료: {ko_path.resolve()}")


if __name__ == "__main__":
    main()

76
uv.lock
View File

@ -1,5 +1,5 @@
version = 1
revision = 3
revision = 2
requires-python = ">=3.10"
resolution-markers = [
"python_full_version >= '3.13'",
@ -558,6 +558,23 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" },
]
[[package]]
name = "finance-datareader"
version = "0.9.101"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "beautifulsoup4" },
{ name = "lxml" },
{ name = "pandas" },
{ name = "plotly" },
{ name = "requests" },
{ name = "requests-file" },
{ name = "tqdm" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/f2/e4/cfc7d63d8d93306537b5a9b256006acc5172373293bef15e84d6741ab839/finance_datareader-0.9.101-py3-none-any.whl", hash = "sha256:8ad71a63753a1ee8bd01b20ac5b63e451c63498c9233dd0805b873d5727762a5", size = 49974, upload-time = "2026-01-05T01:07:52.428Z" },
]
[[package]]
name = "frozendict"
version = "2.4.6"
@ -752,6 +769,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/92/db/b4c12cff13ebac2786f4f217f06588bccd8b53d260453404ef22b121fc3a/greenlet-3.2.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:1afd685acd5597349ee6d7a88a8bec83ce13c106ac78c196ee9dde7c04fe87be", size = 268977, upload-time = "2025-06-05T16:10:24.001Z" },
{ url = "https://files.pythonhosted.org/packages/52/61/75b4abd8147f13f70986df2801bf93735c1bd87ea780d70e3b3ecda8c165/greenlet-3.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:761917cac215c61e9dc7324b2606107b3b292a8349bdebb31503ab4de3f559ac", size = 627351, upload-time = "2025-06-05T16:38:50.685Z" },
{ url = "https://files.pythonhosted.org/packages/35/aa/6894ae299d059d26254779a5088632874b80ee8cf89a88bca00b0709d22f/greenlet-3.2.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a433dbc54e4a37e4fff90ef34f25a8c00aed99b06856f0119dcf09fbafa16392", size = 638599, upload-time = "2025-06-05T16:41:34.057Z" },
{ url = "https://files.pythonhosted.org/packages/30/64/e01a8261d13c47f3c082519a5e9dbf9e143cc0498ed20c911d04e54d526c/greenlet-3.2.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:72e77ed69312bab0434d7292316d5afd6896192ac4327d44f3d613ecb85b037c", size = 634482, upload-time = "2025-06-05T16:48:16.26Z" },
{ url = "https://files.pythonhosted.org/packages/47/48/ff9ca8ba9772d083a4f5221f7b4f0ebe8978131a9ae0909cf202f94cd879/greenlet-3.2.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:68671180e3849b963649254a882cd544a3c75bfcd2c527346ad8bb53494444db", size = 633284, upload-time = "2025-06-05T16:13:01.599Z" },
{ url = "https://files.pythonhosted.org/packages/e9/45/626e974948713bc15775b696adb3eb0bd708bec267d6d2d5c47bb47a6119/greenlet-3.2.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49c8cfb18fb419b3d08e011228ef8a25882397f3a859b9fe1436946140b6756b", size = 582206, upload-time = "2025-06-05T16:12:48.51Z" },
{ url = "https://files.pythonhosted.org/packages/b1/8e/8b6f42c67d5df7db35b8c55c9a850ea045219741bb14416255616808c690/greenlet-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:efc6dc8a792243c31f2f5674b670b3a95d46fa1c6a912b8e310d6f542e7b0712", size = 1111412, upload-time = "2025-06-05T16:36:45.479Z" },
@ -760,6 +778,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fc/2e/d4fcb2978f826358b673f779f78fa8a32ee37df11920dc2bb5589cbeecef/greenlet-3.2.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:784ae58bba89fa1fa5733d170d42486580cab9decda3484779f4759345b29822", size = 270219, upload-time = "2025-06-05T16:10:10.414Z" },
{ url = "https://files.pythonhosted.org/packages/16/24/929f853e0202130e4fe163bc1d05a671ce8dcd604f790e14896adac43a52/greenlet-3.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0921ac4ea42a5315d3446120ad48f90c3a6b9bb93dd9b3cf4e4d84a66e42de83", size = 630383, upload-time = "2025-06-05T16:38:51.785Z" },
{ url = "https://files.pythonhosted.org/packages/d1/b2/0320715eb61ae70c25ceca2f1d5ae620477d246692d9cc284c13242ec31c/greenlet-3.2.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d2971d93bb99e05f8c2c0c2f4aa9484a18d98c4c3bd3c62b65b7e6ae33dfcfaf", size = 642422, upload-time = "2025-06-05T16:41:35.259Z" },
{ url = "https://files.pythonhosted.org/packages/bd/49/445fd1a210f4747fedf77615d941444349c6a3a4a1135bba9701337cd966/greenlet-3.2.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c667c0bf9d406b77a15c924ef3285e1e05250948001220368e039b6aa5b5034b", size = 638375, upload-time = "2025-06-05T16:48:18.235Z" },
{ url = "https://files.pythonhosted.org/packages/7e/c8/ca19760cf6eae75fa8dc32b487e963d863b3ee04a7637da77b616703bc37/greenlet-3.2.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:592c12fb1165be74592f5de0d70f82bc5ba552ac44800d632214b76089945147", size = 637627, upload-time = "2025-06-05T16:13:02.858Z" },
{ url = "https://files.pythonhosted.org/packages/65/89/77acf9e3da38e9bcfca881e43b02ed467c1dedc387021fc4d9bd9928afb8/greenlet-3.2.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29e184536ba333003540790ba29829ac14bb645514fbd7e32af331e8202a62a5", size = 585502, upload-time = "2025-06-05T16:12:49.642Z" },
{ url = "https://files.pythonhosted.org/packages/97/c6/ae244d7c95b23b7130136e07a9cc5aadd60d59b5951180dc7dc7e8edaba7/greenlet-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:93c0bb79844a367782ec4f429d07589417052e621aa39a5ac1fb99c5aa308edc", size = 1114498, upload-time = "2025-06-05T16:36:46.598Z" },
@ -768,6 +787,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" },
{ url = "https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" },
{ url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = "2025-06-05T16:41:36.343Z" },
{ url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = "2025-06-05T16:48:19.604Z" },
{ url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" },
{ url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" },
{ url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" },
@ -776,6 +796,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" },
{ url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" },
{ url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time = "2025-06-05T16:41:37.89Z" },
{ url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" },
{ url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" },
{ url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" },
{ url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" },
@ -784,6 +805,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" },
{ url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" },
{ url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" },
{ url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" },
{ url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" },
{ url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" },
{ url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" },
@ -1618,6 +1640,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
]
[[package]]
name = "narwhals"
version = "2.17.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/75/59/81d0f4cad21484083466f278e6b392addd9f4205b48d45b5c8771670ebf8/narwhals-2.17.0.tar.gz", hash = "sha256:ebd5bc95bcfa2f8e89a8ac09e2765a63055162837208e67b42d6eeb6651d5e67", size = 620306, upload-time = "2026-02-23T09:44:34.142Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4b/27/20770bd6bf8fbe1e16f848ba21da9df061f38d2e6483952c29d2bb5d1d8b/narwhals-2.17.0-py3-none-any.whl", hash = "sha256:2ac5307b7c2b275a7d66eeda906b8605e3d7a760951e188dcfff86e8ebe083dd", size = 444897, upload-time = "2026-02-23T09:44:32.006Z" },
]
[[package]]
name = "nest-asyncio"
version = "1.6.0"
@ -2599,6 +2630,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" },
]
[[package]]
name = "plotly"
version = "6.5.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "narwhals" },
{ name = "packaging" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e3/4f/8a10a9b9f5192cb6fdef62f1d77fa7d834190b2c50c0cd256bd62879212b/plotly-6.5.2.tar.gz", hash = "sha256:7478555be0198562d1435dee4c308268187553cc15516a2f4dd034453699e393", size = 7015695, upload-time = "2026-01-14T21:26:51.222Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8a/67/f95b5460f127840310d2187f916cf0023b5875c0717fdf893f71e1325e87/plotly-6.5.2-py3-none-any.whl", hash = "sha256:91757653bd9c550eeea2fa2404dba6b85d1e366d54804c340b2c874e5a7eb4a4", size = 9895973, upload-time = "2026-01-14T21:26:47.135Z" },
]
[[package]]
name = "posthog"
version = "3.25.0"
@ -3137,6 +3181,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" },
]
[[package]]
name = "requests-file"
version = "3.0.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3c/f8/5dc70102e4d337063452c82e1f0d95e39abfe67aa222ed8a5ddeb9df8de8/requests_file-3.0.1.tar.gz", hash = "sha256:f14243d7796c588f3521bd423c5dea2ee4cc730e54a3cac9574d78aca1272576", size = 6967, upload-time = "2025-10-20T18:56:42.279Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e1/d5/de8f089119205a09da657ed4784c584ede8381a0ce6821212a6d4ca47054/requests_file-3.0.1-py2.py3-none-any.whl", hash = "sha256:d0f5eb94353986d998f80ac63c7f146a307728be051d4d1cd390dbdb59c10fa2", size = 4514, upload-time = "2025-10-20T18:56:41.184Z" },
]
[[package]]
name = "requests-toolbelt"
version = "1.0.0"
@ -3321,6 +3377,17 @@ version = "2.0.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8d/dd/d4dd75843692690d81f0a4b929212a1614b25d4896aa7c72f4c3546c7e3d/syncer-2.0.3.tar.gz", hash = "sha256:4340eb54b54368724a78c5c0763824470201804fe9180129daf3635cb500550f", size = 11512, upload-time = "2023-05-08T07:50:17.963Z" }
[[package]]
name = "ta"
version = "0.11.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
{ name = "numpy", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
{ name = "pandas" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e0/9a/37d92a6b470dc9088612c2399a68f1a9ac22872d4e1eff416818e22ab11b/ta-0.11.0.tar.gz", hash = "sha256:de86af43418420bd6b088a2ea9b95483071bf453c522a8441bc2f12bcf8493fd", size = 25308, upload-time = "2023-11-02T13:53:35.434Z" }
[[package]]
name = "tenacity"
version = "9.1.2"
@ -3504,12 +3571,15 @@ source = { editable = "." }
dependencies = [
{ name = "backtrader" },
{ name = "chainlit" },
{ name = "finance-datareader" },
{ name = "langchain-anthropic" },
{ name = "langchain-core" },
{ name = "langchain-experimental" },
{ name = "langchain-google-genai" },
{ name = "langchain-openai" },
{ name = "langgraph" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
{ name = "numpy", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
{ name = "pandas" },
{ name = "parsel" },
{ name = "pytz" },
@ -3520,6 +3590,7 @@ dependencies = [
{ name = "rich" },
{ name = "setuptools" },
{ name = "stockstats" },
{ name = "ta" },
{ name = "tqdm" },
{ name = "typer" },
{ name = "typing-extensions" },
@ -3530,12 +3601,14 @@ dependencies = [
requires-dist = [
{ name = "backtrader", specifier = ">=1.9.78.123" },
{ name = "chainlit", specifier = ">=2.5.5" },
{ name = "finance-datareader", specifier = "==0.9.101" },
{ name = "langchain-anthropic", specifier = ">=0.3.15" },
{ name = "langchain-core", specifier = ">=0.3.81" },
{ name = "langchain-experimental", specifier = ">=0.3.4" },
{ name = "langchain-google-genai", specifier = ">=2.1.5" },
{ name = "langchain-openai", specifier = ">=0.3.23" },
{ name = "langgraph", specifier = ">=0.4.8" },
{ name = "numpy", specifier = ">=2.2.6" },
{ name = "pandas", specifier = ">=2.3.0" },
{ name = "parsel", specifier = ">=1.10.0" },
{ name = "pytz", specifier = ">=2025.2" },
@ -3546,6 +3619,7 @@ requires-dist = [
{ name = "rich", specifier = ">=14.0.0" },
{ name = "setuptools", specifier = ">=80.9.0" },
{ name = "stockstats", specifier = ">=0.6.5" },
{ name = "ta", specifier = ">=0.11.0" },
{ name = "tqdm", specifier = ">=4.67.1" },
{ name = "typer", specifier = ">=0.21.0" },
{ name = "typing-extensions", specifier = ">=4.14.0" },